Repository: terrapower/armi Branch: main Commit: 65813317319d Files: 703 Total size: 6.8 MB Directory structure: gitextract_528z7ijz/ ├── .github/ │ ├── .codecov.yml │ ├── pull_request_template.md │ └── workflows/ │ ├── coverage.yaml │ ├── docs.yaml │ ├── find_test_crumbs.py │ ├── licensechecker.yaml │ ├── linting.yaml │ ├── mac_tests.yaml │ ├── stale.yaml │ ├── unittests.yaml │ ├── validatemanifest.py │ ├── validatemanifest.yaml │ ├── wheels.yaml │ └── wintests.yaml ├── .gitignore ├── .gitmodules ├── .licenserc.json ├── AUTHORS ├── CONTRIBUTING.md ├── LICENSE.md ├── README.rst ├── armi/ │ ├── __init__.py │ ├── __main__.py │ ├── _bootstrap.py │ ├── apps.py │ ├── bookkeeping/ │ │ ├── __init__.py │ │ ├── db/ │ │ │ ├── __init__.py │ │ │ ├── compareDB3.py │ │ │ ├── database.py │ │ │ ├── databaseInterface.py │ │ │ ├── factory.py │ │ │ ├── jaggedArray.py │ │ │ ├── layout.py │ │ │ ├── passiveDBLoadPlugin.py │ │ │ ├── permissions.py │ │ │ ├── tests/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_comparedb3.py │ │ │ │ ├── test_database.py │ │ │ │ ├── test_databaseInterface.py │ │ │ │ ├── test_jaggedArray.py │ │ │ │ ├── test_layout.py │ │ │ │ └── test_passiveDBLoadPlugin.py │ │ │ └── typedefs.py │ │ ├── historyTracker.py │ │ ├── mainInterface.py │ │ ├── memoryProfiler.py │ │ ├── report/ │ │ │ ├── __init__.py │ │ │ ├── data.py │ │ │ ├── reportInterface.py │ │ │ ├── reportingUtils.py │ │ │ └── tests/ │ │ │ ├── __init__.py │ │ │ └── test_report.py │ │ ├── snapshotInterface.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ ├── _constants.py │ │ │ ├── test_historyTracker.py │ │ │ ├── test_memoryProfiler.py │ │ │ └── test_snapshot.py │ │ └── visualization/ │ │ ├── __init__.py │ │ ├── dumper.py │ │ ├── entryPoint.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ ├── test_vis.py │ │ │ └── test_xdmf.py │ │ ├── utils.py │ │ ├── vtk.py │ │ └── xdmf.py │ ├── cases/ │ │ ├── __init__.py │ │ ├── case.py │ │ ├── inputModifiers/ │ │ │ ├── __init__.py │ │ │ ├── inputModifiers.py │ │ │ ├── 
neutronicsModifiers.py │ │ │ ├── pinTypeInputModifiers.py │ │ │ └── tests/ │ │ │ ├── __init__.py │ │ │ ├── test_inputModifiers.py │ │ │ └── test_pinTypeInputModifiers.py │ │ ├── suite.py │ │ ├── suiteBuilder.py │ │ └── tests/ │ │ ├── __init__.py │ │ ├── test_cases.py │ │ └── test_suiteBuilder.py │ ├── cli/ │ │ ├── __init__.py │ │ ├── checkInputs.py │ │ ├── cleanTemps.py │ │ ├── clone.py │ │ ├── compareCases.py │ │ ├── database.py │ │ ├── entryPoint.py │ │ ├── gridGui.py │ │ ├── migrateInputs.py │ │ ├── modify.py │ │ ├── reportsEntryPoint.py │ │ ├── run.py │ │ ├── runSuite.py │ │ └── tests/ │ │ ├── __init__.py │ │ ├── test_runEntryPoint.py │ │ └── test_runSuite.py │ ├── conftest.py │ ├── context.py │ ├── interfaces.py │ ├── matProps/ │ │ ├── __init__.py │ │ ├── constituent.py │ │ ├── function.py │ │ ├── interpolationFunctions.py │ │ ├── material.py │ │ ├── materialType.py │ │ ├── piecewiseFunction.py │ │ ├── point.py │ │ ├── prop.py │ │ ├── reference.py │ │ ├── symbolicFunction.py │ │ ├── tableFunction.py │ │ ├── tableFunction1D.py │ │ ├── tableFunction2D.py │ │ └── tests/ │ │ ├── __init__.py │ │ ├── invalidTestFiles/ │ │ │ ├── badFileFormat.YAML │ │ │ ├── badProperty.yaml │ │ │ └── duplicateComposition.yaml │ │ ├── testDir1/ │ │ │ ├── a.yaml │ │ │ └── b.yaml │ │ ├── testDir2/ │ │ │ ├── c.yml │ │ │ └── d.yaml │ │ ├── testDir3/ │ │ │ ├── a.yaml │ │ │ └── e.yaml │ │ ├── testDir4/ │ │ │ └── sampleProperty.yaml │ │ ├── testMaterialsData/ │ │ │ ├── materialA.yaml │ │ │ ├── materialB.yaml │ │ │ └── materialsSubDir/ │ │ │ ├── materialC.yaml │ │ │ └── materialD.yaml │ │ ├── test_1DSymbolicFunction.py │ │ ├── test_composition.py │ │ ├── test_constituent.py │ │ ├── test_functions.py │ │ ├── test_hashing.py │ │ ├── test_interpolationFunctions.py │ │ ├── test_material.py │ │ ├── test_materialType.py │ │ ├── test_parsing.py │ │ ├── test_performance.py │ │ ├── test_piecewiseFunction.py │ │ ├── test_point.py │ │ ├── test_property.py │ │ ├── test_references.py │ │ ├── 
test_symbolicFunction.py │ │ └── test_tableFunctions.py │ ├── materials/ │ │ ├── __init__.py │ │ ├── air.py │ │ ├── alloy200.py │ │ ├── b4c.py │ │ ├── be9.py │ │ ├── caH2.py │ │ ├── californium.py │ │ ├── concrete.py │ │ ├── copper.py │ │ ├── cs.py │ │ ├── custom.py │ │ ├── graphite.py │ │ ├── hafnium.py │ │ ├── hastelloyN.py │ │ ├── ht9.py │ │ ├── inconel.py │ │ ├── inconel600.py │ │ ├── inconel625.py │ │ ├── inconel800.py │ │ ├── inconelPE16.py │ │ ├── inconelX750.py │ │ ├── lead.py │ │ ├── leadBismuth.py │ │ ├── lithium.py │ │ ├── magnesium.py │ │ ├── material.py │ │ ├── mgO.py │ │ ├── mixture.py │ │ ├── molybdenum.py │ │ ├── mox.py │ │ ├── nZ.py │ │ ├── potassium.py │ │ ├── scandiumOxide.py │ │ ├── siC.py │ │ ├── sodium.py │ │ ├── sodiumChloride.py │ │ ├── sulfur.py │ │ ├── tZM.py │ │ ├── tantalum.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ ├── test__init__.py │ │ │ ├── test_air.py │ │ │ ├── test_b4c.py │ │ │ ├── test_be9.py │ │ │ ├── test_fluids.py │ │ │ ├── test_graphite.py │ │ │ ├── test_lithium.py │ │ │ ├── test_materials.py │ │ │ ├── test_sic.py │ │ │ ├── test_sulfur.py │ │ │ ├── test_thoriumOxide.py │ │ │ ├── test_uZr.py │ │ │ └── test_water.py │ │ ├── thU.py │ │ ├── thorium.py │ │ ├── thoriumOxide.py │ │ ├── uZr.py │ │ ├── uranium.py │ │ ├── uraniumOxide.py │ │ ├── void.py │ │ ├── water.py │ │ ├── yttriumOxide.py │ │ ├── zincOxide.py │ │ └── zr.py │ ├── meta.py │ ├── migration/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── m0_1_3.py │ │ ├── m0_1_6.py │ │ └── tests/ │ │ ├── __init__.py │ │ ├── test_m0_1_6.py │ │ └── test_migration_base.py │ ├── mpiActions.py │ ├── nucDirectory/ │ │ ├── __init__.py │ │ ├── elements.py │ │ ├── nucDir.py │ │ ├── nuclideBases.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ ├── test_elements.py │ │ │ ├── test_nucDirectory.py │ │ │ ├── test_nuclideBases.py │ │ │ ├── test_thermalScattering.py │ │ │ └── test_transmutations.py │ │ ├── thermalScattering.py │ │ └── transmutations.py │ ├── nuclearDataIO/ │ │ ├── __init__.py │ │ ├── 
cccc/ │ │ │ ├── __init__.py │ │ │ ├── cccc.py │ │ │ ├── compxs.py │ │ │ ├── dif3d.py │ │ │ ├── fixsrc.py │ │ │ ├── gamiso.py │ │ │ ├── geodst.py │ │ │ ├── isotxs.py │ │ │ ├── labels.py │ │ │ ├── nhflux.py │ │ │ ├── pmatrx.py │ │ │ ├── pwdint.py │ │ │ ├── rtflux.py │ │ │ ├── rzflux.py │ │ │ └── tests/ │ │ │ ├── __init__.py │ │ │ ├── fixtures/ │ │ │ │ ├── labels.binary │ │ │ │ ├── simple_cartesian.pwdint │ │ │ │ ├── simple_cartesian.rtflux │ │ │ │ ├── simple_cartesian.rzflux │ │ │ │ ├── simple_hexz.dif3d │ │ │ │ ├── simple_hexz.geodst │ │ │ │ ├── simple_hexz.nhflux │ │ │ │ └── simple_hexz.nhflux.variant │ │ │ ├── test_cccc.py │ │ │ ├── test_compxs.py │ │ │ ├── test_dif3d.py │ │ │ ├── test_fixsrc.py │ │ │ ├── test_gamiso.py │ │ │ ├── test_geodst.py │ │ │ ├── test_isotxs.py │ │ │ ├── test_labels.py │ │ │ ├── test_nhflux.py │ │ │ ├── test_pmatrx.py │ │ │ ├── test_pwdint.py │ │ │ ├── test_rtflux.py │ │ │ └── test_rzflux.py │ │ ├── nuclearFileMetadata.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ ├── fixtures/ │ │ │ │ ├── AA.gamiso │ │ │ │ ├── AA.pmatrx │ │ │ │ ├── AB.gamiso │ │ │ │ ├── AB.pmatrx │ │ │ │ ├── ISOAA │ │ │ │ ├── ISOAB │ │ │ │ ├── combined-AA-AB.gamiso │ │ │ │ ├── combined-AA-AB.isotxs │ │ │ │ ├── combined-AA-AB.pmatrx │ │ │ │ ├── combined-and-lumped-AA-AB.gamiso │ │ │ │ ├── combined-and-lumped-AA-AB.isotxs │ │ │ │ ├── combined-and-lumped-AA-AB.pmatrx │ │ │ │ ├── mc2v3-AA.gamiso │ │ │ │ ├── mc2v3-AA.isotxs │ │ │ │ ├── mc2v3-AA.pmatrx │ │ │ │ ├── mc2v3-AB.gamiso │ │ │ │ ├── mc2v3-AB.isotxs │ │ │ │ └── mc2v3-AB.pmatrx │ │ │ ├── library-file-generation/ │ │ │ │ ├── combine-AA-AB.inp │ │ │ │ ├── combine-and-lump-AA-AB.inp │ │ │ │ ├── mc2v3-AA.inp │ │ │ │ └── mc2v3-AB.inp │ │ │ ├── simple_hexz.inp │ │ │ ├── test_xsCollections.py │ │ │ ├── test_xsLibraries.py │ │ │ └── test_xsNuclides.py │ │ ├── xsCollections.py │ │ ├── xsLibraries.py │ │ └── xsNuclides.py │ ├── operators/ │ │ ├── __init__.py │ │ ├── operator.py │ │ ├── operatorMPI.py │ │ ├── runTypes.py │ │ ├── 
snapshots.py │ │ └── tests/ │ │ ├── __init__.py │ │ ├── test_operatorSnapshots.py │ │ └── test_operators.py │ ├── physics/ │ │ ├── __init__.py │ │ ├── constants.py │ │ ├── executers.py │ │ ├── fuelCycle/ │ │ │ ├── __init__.py │ │ │ ├── assemblyRotationAlgorithms.py │ │ │ ├── fuelHandlerFactory.py │ │ │ ├── fuelHandlerInterface.py │ │ │ ├── fuelHandlers.py │ │ │ ├── hexAssemblyFuelMgmtUtils.py │ │ │ ├── settings.py │ │ │ ├── tests/ │ │ │ │ ├── __init__.py │ │ │ │ ├── _customFuelHandlerModule.py │ │ │ │ ├── test_assemblyRotationAlgorithms.py │ │ │ │ ├── test_fuelHandlerFactory.py │ │ │ │ ├── test_fuelHandlers.py │ │ │ │ ├── test_hexAssemblyFuelMgmtUtils.py │ │ │ │ └── test_utils.py │ │ │ └── utils.py │ │ ├── fuelPerformance/ │ │ │ ├── __init__.py │ │ │ ├── executers.py │ │ │ ├── parameters.py │ │ │ ├── plugin.py │ │ │ ├── settings.py │ │ │ ├── tests/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_executers.py │ │ │ │ ├── test_fuelPerformancePlugin.py │ │ │ │ ├── test_fuelPerformanceSymmetry.py │ │ │ │ └── test_fuelPerformanceUtils.py │ │ │ └── utils.py │ │ ├── neutronics/ │ │ │ ├── __init__.py │ │ │ ├── const.py │ │ │ ├── crossSectionGroupManager.py │ │ │ ├── crossSectionSettings.py │ │ │ ├── diffIsotxs.py │ │ │ ├── energyGroups.py │ │ │ ├── fissionProductModel/ │ │ │ │ ├── __init__.py │ │ │ │ ├── fissionProductModel.py │ │ │ │ ├── fissionProductModelSettings.py │ │ │ │ ├── lumpedFissionProduct.py │ │ │ │ └── tests/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_fissionProductModel.py │ │ │ │ └── test_lumpedFissionProduct.py │ │ │ ├── globalFlux/ │ │ │ │ ├── __init__.py │ │ │ │ ├── globalFluxInterface.py │ │ │ │ └── tests/ │ │ │ │ ├── __init__.py │ │ │ │ └── test_globalFluxInterface.py │ │ │ ├── isotopicDepletion/ │ │ │ │ ├── __init__.py │ │ │ │ ├── crossSectionTable.py │ │ │ │ └── isotopicDepletionInterface.py │ │ │ ├── latticePhysics/ │ │ │ │ ├── __init__.py │ │ │ │ ├── latticePhysicsInterface.py │ │ │ │ ├── latticePhysicsWriter.py │ │ │ │ └── tests/ │ │ │ │ ├── __init__.py │ 
│ │ │ ├── test_latticeInterface.py │ │ │ │ └── test_latticeWriter.py │ │ │ ├── macroXSGenerationInterface.py │ │ │ ├── parameters.py │ │ │ ├── plugin.py │ │ │ ├── settings.py │ │ │ └── tests/ │ │ │ ├── ISOXA │ │ │ ├── __init__.py │ │ │ ├── rzmflxYA │ │ │ ├── test_crossSectionManager.py │ │ │ ├── test_crossSectionSettings.py │ │ │ ├── test_crossSectionTable.py │ │ │ ├── test_energyGroups.py │ │ │ ├── test_macroXSGenerationInterface.py │ │ │ ├── test_neutronicsPlugin.py │ │ │ └── test_neutronicsSymmetry.py │ │ ├── safety/ │ │ │ └── __init__.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ └── test_executers.py │ │ └── thermalHydraulics/ │ │ ├── __init__.py │ │ ├── const.py │ │ ├── parameters.py │ │ ├── plugin.py │ │ └── tests/ │ │ ├── __init__.py │ │ └── test_thermalHydraulicsSymmetry.py │ ├── pluginManager.py │ ├── plugins.py │ ├── reactor/ │ │ ├── __init__.py │ │ ├── assemblies.py │ │ ├── assemblyParameters.py │ │ ├── blockParameters.py │ │ ├── blocks/ │ │ │ ├── __init__.py │ │ │ ├── block.py │ │ │ ├── cartesianBlock.py │ │ │ ├── hexBlock.py │ │ │ └── thRZBlock.py │ │ ├── blueprints/ │ │ │ ├── __init__.py │ │ │ ├── assemblyBlueprint.py │ │ │ ├── blockBlueprint.py │ │ │ ├── componentBlueprint.py │ │ │ ├── gridBlueprint.py │ │ │ ├── isotopicOptions.py │ │ │ ├── reactorBlueprint.py │ │ │ └── tests/ │ │ │ ├── __init__.py │ │ │ ├── test_assemblyBlueprints.py │ │ │ ├── test_blockBlueprints.py │ │ │ ├── test_blueprints.py │ │ │ ├── test_componentBlueprint.py │ │ │ ├── test_customIsotopics.py │ │ │ ├── test_gridBlueprints.py │ │ │ ├── test_materialModifications.py │ │ │ └── test_reactorBlueprints.py │ │ ├── components/ │ │ │ ├── __init__.py │ │ │ ├── basicShapes.py │ │ │ ├── complexShapes.py │ │ │ ├── component.py │ │ │ ├── componentParameters.py │ │ │ ├── tests/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_basicShapes.py │ │ │ │ └── test_complexShapes.py │ │ │ └── volumetricShapes.py │ │ ├── composites.py │ │ ├── converters/ │ │ │ ├── __init__.py │ │ │ ├── axialExpansionChanger/ 
│ │ │ │ ├── __init__.py │ │ │ │ ├── assemblyAxialLinkage.py │ │ │ │ ├── axialExpansionChanger.py │ │ │ │ ├── expansionData.py │ │ │ │ └── redistributeMass.py │ │ │ ├── blockConverters.py │ │ │ ├── geometryConverters.py │ │ │ ├── meshConverters.py │ │ │ ├── parameterSweeps/ │ │ │ │ ├── __init__.py │ │ │ │ ├── generalParameterSweepConverters.py │ │ │ │ └── tests/ │ │ │ │ ├── __init__.py │ │ │ │ └── test_paramSweepConverters.py │ │ │ ├── pinTypeBlockConverters.py │ │ │ ├── tests/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_assemblyAxialLinkage.py │ │ │ │ ├── test_axialExpansionChanger.py │ │ │ │ ├── test_axialExpansionChanger_MultiPin.py │ │ │ │ ├── test_blockConverter.py │ │ │ │ ├── test_geometryConverters.py │ │ │ │ ├── test_meshConverters.py │ │ │ │ ├── test_pinTypeBlockConverters.py │ │ │ │ └── test_uniformMesh.py │ │ │ └── uniformMesh.py │ │ ├── cores.py │ │ ├── excoreStructure.py │ │ ├── flags.py │ │ ├── geometry.py │ │ ├── grids/ │ │ │ ├── __init__.py │ │ │ ├── axial.py │ │ │ ├── cartesian.py │ │ │ ├── constants.py │ │ │ ├── grid.py │ │ │ ├── hexagonal.py │ │ │ ├── locations.py │ │ │ ├── structuredGrid.py │ │ │ ├── tests/ │ │ │ │ ├── __init__.py │ │ │ │ └── test_grids.py │ │ │ └── thetarz.py │ │ ├── parameters/ │ │ │ ├── __init__.py │ │ │ ├── exceptions.py │ │ │ ├── parameterCollections.py │ │ │ ├── parameterDefinitions.py │ │ │ └── resolveCollections.py │ │ ├── reactorParameters.py │ │ ├── reactors.py │ │ ├── spentFuelPool.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ ├── test_assemblies.py │ │ │ ├── test_blocks.py │ │ │ ├── test_components.py │ │ │ ├── test_composites.py │ │ │ ├── test_cores.py │ │ │ ├── test_excoreStructures.py │ │ │ ├── test_flags.py │ │ │ ├── test_geometry.py │ │ │ ├── test_hexBlockRotate.py │ │ │ ├── test_parameters.py │ │ │ ├── test_reactors.py │ │ │ ├── test_rz_reactors.py │ │ │ ├── test_zones.py │ │ │ └── zonesFile.yaml │ │ └── zones.py │ ├── resources/ │ │ ├── burn-chain.yaml │ │ └── mcc-nuclides.yaml │ ├── runLog.py │ ├── settings/ │ │ 
├── __init__.py │ │ ├── caseSettings.py │ │ ├── fwSettings/ │ │ │ ├── __init__.py │ │ │ ├── databaseSettings.py │ │ │ ├── globalSettings.py │ │ │ ├── reportSettings.py │ │ │ ├── tests/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_fwSettings.py │ │ │ │ └── test_tightCouplingSettings.py │ │ │ └── tightCouplingSettings.py │ │ ├── setting.py │ │ ├── settingsIO.py │ │ ├── settingsValidation.py │ │ └── tests/ │ │ ├── __init__.py │ │ ├── test_inspectors.py │ │ ├── test_settings.py │ │ └── test_settingsIO.py │ ├── testing/ │ │ ├── __init__.py │ │ ├── reactors/ │ │ │ ├── anl-afci-177/ │ │ │ │ ├── anl-afci-177-blueprints.yaml │ │ │ │ ├── anl-afci-177-coreMap.yaml │ │ │ │ ├── anl-afci-177-fuelManagement.py │ │ │ │ └── anl-afci-177.yaml │ │ │ ├── c5g7/ │ │ │ │ ├── c5g7-blueprints.yaml │ │ │ │ └── c5g7-settings.yaml │ │ │ ├── godiva/ │ │ │ │ ├── godiva-blueprints.yaml │ │ │ │ └── godiva.armi.unittest.yaml │ │ │ ├── smallHexReactor/ │ │ │ │ ├── smallHexReactor-bp.yaml │ │ │ │ └── smallHexReactor.yaml │ │ │ └── thirdSmallHexReactor/ │ │ │ ├── thirdSmallHexReactor-bp.yaml │ │ │ └── thirdSmallHexReactor.yaml │ │ ├── resources/ │ │ │ └── armiRun-SHUFFLES.yaml │ │ ├── singleMixedAssembly.py │ │ ├── symmetryTesting.py │ │ └── tests/ │ │ ├── __init__.py │ │ └── test_symmetryTesting.py │ ├── tests/ │ │ ├── 1DslabXSByCompTest.yaml │ │ ├── ISOAA │ │ ├── __init__.py │ │ ├── armiRun.yaml │ │ ├── detailedAxialExpansion/ │ │ │ ├── armiRun.yaml │ │ │ ├── refSmallCoreGrid.yaml │ │ │ ├── refSmallReactor.yaml │ │ │ └── refSmallReactorBase.yaml │ │ ├── mockRunLogs.py │ │ ├── refSmallCartesian.yaml │ │ ├── refSmallCoreGrid.yaml │ │ ├── refSmallReactor.yaml │ │ ├── refSmallReactorBase.yaml │ │ ├── refSmallReactorShuffleLogic.py │ │ ├── refSmallSfpGrid.yaml │ │ ├── refTestCartesian.yaml │ │ ├── smallestTestReactor/ │ │ │ ├── armiRunSmallest.yaml │ │ │ ├── refOneBlockReactor.yaml │ │ │ └── refSmallestReactor.yaml │ │ ├── test_apps.py │ │ ├── test_armiTestHelper.py │ │ ├── test_cartesian.py │ │ ├── 
test_context.py │ │ ├── test_interfaces.py │ │ ├── test_lwrInputs.py │ │ ├── test_mpiActions.py │ │ ├── test_mpiFeatures.py │ │ ├── test_mpiParameters.py │ │ ├── test_plugins.py │ │ ├── test_runLog.py │ │ ├── test_symmetry.py │ │ ├── test_tests.py │ │ ├── test_user_plugins.py │ │ ├── tutorials/ │ │ │ ├── data_model.ipynb │ │ │ ├── param_sweep.ipynb │ │ │ └── pin-rotations.ipynb │ │ ├── zpprTest.yaml │ │ └── zpprTestGeom.yaml │ └── utils/ │ ├── __init__.py │ ├── asciimaps.py │ ├── codeTiming.py │ ├── customExceptions.py │ ├── densityTools.py │ ├── directoryChangers.py │ ├── directoryChangersMpi.py │ ├── dynamicImporter.py │ ├── flags.py │ ├── gridEditor.py │ ├── hexagon.py │ ├── iterables.py │ ├── mathematics.py │ ├── outputCache.py │ ├── parsing.py │ ├── pathTools.py │ ├── plotting.py │ ├── properties.py │ ├── reportPlotting.py │ ├── tabulate.py │ ├── tests/ │ │ ├── __init__.py │ │ ├── resources/ │ │ │ ├── lower/ │ │ │ │ ├── includeA.yaml │ │ │ │ └── includeB.yaml │ │ │ └── root.yaml │ │ ├── test_asciimaps.py │ │ ├── test_codeTiming.py │ │ ├── test_custom_exceptions.py │ │ ├── test_densityTools.py │ │ ├── test_directoryChangers.py │ │ ├── test_directoryChangersMpi.py │ │ ├── test_flags.py │ │ ├── test_hexagon.py │ │ ├── test_iterables.py │ │ ├── test_mathematics.py │ │ ├── test_outputCache.py │ │ ├── test_parsing.py │ │ ├── test_pathTools.py │ │ ├── test_plotting.py │ │ ├── test_properties.py │ │ ├── test_reportPlotting.py │ │ ├── test_tabulate.py │ │ ├── test_textProcessors.py │ │ ├── test_triangle.py │ │ ├── test_units.py │ │ └── test_utils.py │ ├── textProcessors.py │ ├── triangle.py │ └── units.py ├── doc/ │ ├── .static/ │ │ ├── __init__.py │ │ ├── automateScr.py │ │ ├── cleanup_test_results.py │ │ ├── css/ │ │ │ └── theme_fixes.css │ │ ├── dochelpers.py │ │ ├── looseCouplingIllustration.dot │ │ └── tightCouplingIllustration.dot │ ├── Makefile │ ├── __init__.py │ ├── conf.py │ ├── developer/ │ │ ├── documenting.rst │ │ ├── entrypoints.rst │ │ ├── 
first_time_contributors.rst │ │ ├── guide.rst │ │ ├── index.rst │ │ ├── making_armi_based_apps.rst │ │ ├── parallel_coding.rst │ │ ├── profiling.rst │ │ ├── standards_and_practices.rst │ │ ├── testing.rst │ │ └── tooling.rst │ ├── gallery-src/ │ │ ├── README.rst │ │ ├── analysis/ │ │ │ ├── README.rst │ │ │ ├── run_blockMcnpMaterialCard.py │ │ │ ├── run_hexBlockToRZConversion.py │ │ │ └── run_hexReactorToRZ.py │ │ └── framework/ │ │ ├── README.rst │ │ ├── run_blockVolumeFractions.py │ │ ├── run_chartOfNuclides.py │ │ ├── run_computeReactionRates.py │ │ ├── run_fuelManagement.py │ │ ├── run_grids1_hex.py │ │ ├── run_grids2_cartesian.py │ │ ├── run_grids3_rzt.py │ │ ├── run_isotxs.py │ │ ├── run_isotxs2_matrix.py │ │ ├── run_materials.py │ │ ├── run_programmaticReactorDefinition.py │ │ ├── run_reactorFacemap.py │ │ └── run_transmutationMatrix.py │ ├── getTestResults.py │ ├── glossary.rst │ ├── index.rst │ ├── installation.rst │ ├── make.bat │ ├── qa_docs/ │ │ ├── index.rst │ │ ├── scr/ │ │ │ ├── 0.1.rst │ │ │ ├── 0.2.rst │ │ │ ├── 0.3.rst │ │ │ ├── 0.4.rst │ │ │ ├── 0.5.rst │ │ │ ├── 0.6.rst │ │ │ ├── index.rst │ │ │ └── latest_scr.rst │ │ ├── sdid.rst │ │ ├── srsd/ │ │ │ ├── bookkeeping_reqs.rst │ │ │ ├── cases_reqs.rst │ │ │ ├── cli_reqs.rst │ │ │ ├── framework_reqs.rst │ │ │ ├── materials_reqs.rst │ │ │ ├── nucDirectory_reqs.rst │ │ │ ├── nuclearDataIO_reqs.rst │ │ │ ├── physics_reqs.rst │ │ │ ├── reactors_reqs.rst │ │ │ ├── runLog_reqs.rst │ │ │ ├── settings_reqs.rst │ │ │ └── utils_reqs.rst │ │ ├── srsd.rst │ │ └── str.rst │ ├── readme.rst │ ├── release/ │ │ └── index.rst │ ├── skip_str.py │ ├── tutorials/ │ │ ├── data_model.nblink │ │ ├── index.rst │ │ ├── making_your_first_app.rst │ │ ├── materials_demo.ipynb │ │ ├── nuclide_demo.ipynb │ │ ├── param_sweep.nblink │ │ ├── pin-rotations.nblink │ │ ├── walkthrough_inputs.rst │ │ └── walkthrough_lwr_inputs.rst │ └── user/ │ ├── _gallery/ │ │ └── index.rst │ ├── accessingEntryPoints.rst │ ├── index.rst │ ├── 
inputs.rst │ ├── manual_data_access.rst │ ├── outputs.rst │ ├── params_report.rst │ ├── physics_coupling.rst │ ├── radial_and_axial_expansion.rst │ ├── settings_report.rst │ ├── spatial_block_data.rst │ ├── symmetry_handling.rst │ └── user_install.rst └── pyproject.toml ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/.codecov.yml ================================================ coverage: status: project: default: target: 80% # the required coverage value threshold: 1% # allows a 1% drop from the previous base commit coverage ================================================ FILE: .github/pull_request_template.md ================================================ ## What is the change? Why is it being made? ## SCR Information One-Sentence Rationale: TBD One-line Impact on Requirements: NA --- ## Checklist - [ ] This PR has only [one purpose or idea](https://terrapower.github.io/armi/developer/tooling.html#one-idea-one-pr). - [ ] [Tests](https://terrapower.github.io/armi/developer/tooling.html#test-it) have been added/updated to verify any new/changed code. - [ ] The [documentation](https://terrapower.github.io/armi/developer/tooling.html#document-it) is still up-to-date in the `doc` folder. - [ ] The code style follows [good practices](https://terrapower.github.io/armi/developer/standards_and_practices.html). - [ ] The dependencies are still up-to-date in `pyproject.toml`. 
================================================ FILE: .github/workflows/coverage.yaml ================================================ name: Coverage permissions: contents: read on: push: branches: - main paths-ignore: - 'doc/**' pull_request: paths-ignore: - 'doc/**' concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: build: # Deploying coverage to codecov.io should not happen on forks if: github.repository == 'terrapower/armi' runs-on: ubuntu-24.04 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} steps: - uses: actions/checkout@v2 - name: Setup Python uses: actions/setup-python@v2 with: python-version: '3.13' - name: Install ARMI and MPI run: | sudo apt-get -y install libopenmpi-dev pip install -e .[memprof,mpi,test] pip install codecov - name: Run Coverage run: | set -x coverage run --rcfile=pyproject.toml -m pytest -n 4 --cov=armi --cov-config=pyproject.toml --cov-report=xml --ignore=venv armi mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/tests/test_mpiFeatures.py || true mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/tests/test_mpiParameters.py || true mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/tests/test_mpiDirectoryChangers.py || true coverage combine --rcfile=pyproject.toml --keep -a coverage report --rcfile=pyproject.toml -i --skip-empty --skip-covered --sort=cover --fail-under=90 - name: Publish to codecov.io continue-on-error: true if: github.ref == 'refs/heads/main' uses: codecov/codecov-action@v5 with: fail_ci_if_error: false token: ${{ secrets.CODECOV_TOKEN }} ================================================ 
FILE: .github/workflows/docs.yaml ================================================ name: Documentation on: push: branches: - main pull_request: concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: build: # Building and deploying docs is broken on forked repos if: github.repository == 'terrapower/armi' runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - name: Setup Python uses: actions/setup-python@v5 with: python-version: 3.13 - name: Update package index run: sudo apt-get update - name: Install apt-get libs run: sudo apt-get -y install texlive-xetex=2021.20220204-1 texlive-latex-base=2021.20220204-1 texlive-fonts-recommended=2021.20220204-1 texlive-latex-extra=2021.20220204-1 texlive-full=2021.20220204-1 pandoc libopenmpi-dev - name: Setup Graphviz uses: ts-graphviz/setup-graphviz@v2.0.2 - name: Make html/pdf Docs continue-on-error: true env: GH_TOKEN: ${{ github.token }} PR_NUMBER: ${{ github.ref == 'refs/heads/main' && -1 || github.event.number }} GIT_COMMIT: ${{ github.sha }} run: | echo "Installing ARMI..." set -x pip install -U pip pip install -e .[memprof,mpi,test,docs] echo "Run unit tests..." pytest --junit-xml=test_results.xml -v -n 4 armi > pytest_verbose.log mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi1.xml armi/tests/test_mpiFeatures.py > pytest_verbose_mpi1.log mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi2.xml armi/tests/test_mpiParameters.py > pytest_verbose_mpi2.log mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi3.xml armi/utils/tests/test_directoryChangersMpi.py > pytest_verbose_mpi3.log python doc/.static/cleanup_test_results.py test_results.xml echo "Git magic so the SCR will build on GitHub Actions..." git fetch --depth=2000 echo "Build HTML docs..." cd doc git submodule init git submodule update make html echo "Build PDF docs..." 
make latex cd _build/latex/ latexmk -pdf -f -interaction=nonstopmode ARMI.tex - name: Deploy if: github.ref == 'refs/heads/main' uses: JamesIves/github-pages-deploy-action@v4.6.1 with: token: ${{ secrets.ACCESS_TOKEN }} repository-name: ${{ github.repository_owner }}/terrapower.github.io branch: main folder: doc/_build/html target-folder: armi - name: Archive HTML Docs if: github.ref != 'refs/heads/main' uses: actions/upload-artifact@v4 with: name: html-docs path: doc/_build/html retention-days: 5 - name: Archive PDF Docs uses: actions/upload-artifact@v4 with: name: pdf-docs path: doc/_build/latex/ARMI.pdf retention-days: 5 ================================================ FILE: .github/workflows/find_test_crumbs.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This script exists so we can determine if new tests in CI are leaving crumbs.""" import subprocess # A list of objects we expect during a run, and don't mind (like pycache dirs). 
IGNORED_OBJECTS = [ ".pytest_cache", ".tox", "__pycache__", "armi.egg-info", "logs/", ] def main(): # use "git clean" to find all non-tracked files proc = subprocess.Popen(["git", "clean", "-xnd"], stdout=subprocess.PIPE) lines = proc.communicate()[0].decode("utf-8").split("\n") # clean up the whitespace lines = [ln.strip() for ln in lines if len(ln.strip())] # ignore certain untracked object, like __pycache__ dirs for ignore in IGNORED_OBJECTS: lines = [ln for ln in lines if ignore not in ln] # fail hard if there are still untracked files if len(lines): for line in lines: print(line) raise ValueError("The workspace is dirty; the tests are leaving crumbs!") if __name__ == "__main__": main() ================================================ FILE: .github/workflows/licensechecker.yaml ================================================ name: Check License Lines permissions: contents: read on: [push] jobs: check-license-lines: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@master - name: Check License Lines uses: kt3k/license_checker@v1.0.6 ================================================ FILE: .github/workflows/linting.yaml ================================================ name: Linting permissions: contents: read on: [push] jobs: build: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 - name: Setup Python uses: actions/setup-python@v5 with: python-version: '3.13' - name: Run Linter run: | set -x pip install -e .[test] ruff format --check . ruff check . 
================================================ FILE: .github/workflows/mac_tests.yaml ================================================ name: ARMI MacOS Tests permissions: contents: read on: push: branches: - main paths-ignore: - 'doc/**' pull_request: paths-ignore: - 'doc/**' concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: build: if: github.repository == 'terrapower/armi' runs-on: macos-14 steps: - uses: actions/checkout@v2 - name: Setup Python uses: actions/setup-python@v2 with: python-version: '3.11' - name: Upgrade PIP run: python -m pip install --upgrade pip - name: Run Unit Tests on MacOS run: | brew install openmpi pip install -e .[memprof,mpi,test] pytest -n 4 armi ================================================ FILE: .github/workflows/stale.yaml ================================================ # This workflow warns and then closes PRs that have had no activity for a specified amount of time. # # You can adjust the behavior by modifying this file. # For more information, see: https://github.com/actions/stale name: Mark Stale PRs on: schedule: # once a day at 3:14 AM - cron: '14 3 * * *' permissions: pull-requests: write jobs: stale: # This workflow is not designed to make sense on forks if: github.repository == 'terrapower/armi' runs-on: ubuntu-24.04 steps: - uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull request has been automatically marked as stale because it has not had any activity in the last 100 days. It will be closed in 7 days if no further activity occurs. Thank you for your contributions." 
stale-pr-label: "stale" days-before-pr-stale: 100 days-before-pr-close: 7 days-before-issue-stale: -1 operations-per-run: 100 ================================================ FILE: .github/workflows/unittests.yaml ================================================ name: ARMI unit tests permissions: contents: read on: push: paths-ignore: - 'doc/**' concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: build: runs-on: ubuntu-24.04 strategy: matrix: python: [3.9, '3.10', '3.11', '3.12', '3.13', '3.14'] steps: - uses: actions/checkout@v4 - name: Setup Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install mpi libs run: sudo apt-get -y install libopenmpi-dev - name: Run Tests run: | set -x pip install -e .[memprof,mpi,test] pytest -n 4 armi mpiexec -n 2 --use-hwthread-cpus pytest armi/tests/test_mpiFeatures.py mpiexec -n 2 --use-hwthread-cpus pytest armi/tests/test_mpiParameters.py mpiexec -n 2 --use-hwthread-cpus pytest armi/utils/tests/test_directoryChangersMpi.py ================================================ FILE: .github/workflows/validatemanifest.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Validating the package-data in the pyproject.toml. Validate that we aren't trying to include files that don't exist. 
""" import os from glob import glob import toml # CONSTANTS ARMI_DIR = "armi/" PRPROJECT = "pyproject.toml" def main(): # parse the data files out of the pyproject.toml txt = open(PRPROJECT, "r").read() data = toml.loads(txt) fileChunks = data["tool"]["setuptools"]["package-data"]["armi"] # loop through each line in the package-data and find all the file paths errors = [] for i, line in enumerate(fileChunks): # make sure the file exists path = ARMI_DIR + line.strip() if "*" in path: paths = [f for f in glob(path) if len(f) > 3] if not len(paths): errors.append((i, path)) else: if not os.path.exists(path): errors.append((i, path)) # If there were any missing files, raise an Error. if errors: for i, line in errors: print("Nonexistant file on line {}: {}".format(i, line)) raise ValueError("Package-data file is incorrect: includes non-existant files.") if __name__ == "__main__": main() ================================================ FILE: .github/workflows/validatemanifest.yaml ================================================ name: Validate Manifest permissions: contents: read on: [push] jobs: build: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v2 - name: Setup Python uses: actions/setup-python@v2 with: python-version: '3.11' - name: Validate Manifest run: | pip install toml python .github/workflows/validatemanifest.py ================================================ FILE: .github/workflows/wheels.yaml ================================================ name: Build Wheel permissions: contents: read on: push: branches: - main jobs: build: if: github.repository == 'terrapower/armi' runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 - name: Setup Python uses: actions/setup-python@v5 with: python-version: "3.13" - name: Install PIP Packages run: | pip install -U pip pip install -e . pip install -U wheel - name: Build Wheels run: | mkdir dist pip wheel . 
-w dist/ chmod 664 dist/armi*.whl - name: Archive PIP wheel artifacts uses: actions/upload-artifact@v4 with: name: armi-wheels path: | dist/armi*.whl retention-days: 7 ================================================ FILE: .github/workflows/wintests.yaml ================================================ name: ARMI Windows tests permissions: contents: read on: push: branches: - main paths-ignore: - 'doc/**' pull_request: paths-ignore: - 'doc/**' concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: build: runs-on: windows-2022 steps: - uses: actions/checkout@v2 - name: Setup Python uses: actions/setup-python@v2 with: python-version: '3.11' - name: Upgrade PIP run: python -m pip install --upgrade pip - name: Run Unit Tests on Windows run: | pip install mpi4py==3.1.6 pip install -e .[memprof,mpi,test] pytest -n 4 armi - name: Find Test Crumbs run: python .github/workflows/find_test_crumbs.py ================================================ FILE: .gitignore ================================================ # No non-source python resources *.pyc *.pyd *.pyo *.pyx # No build artifacts *.aux *.dll *.fdb_latexmk *.fls *.lib armi/tests/tutorials/case-suite bdist*/ bin build coverage.lcov coverage.xml coverage_results.* dist*/ doc/.apidocs doc/_build doc/anl-afci-177 doc/gallery doc/gallery-src/framework/*.yaml doc/tutorials/anl-afci-177* doc/tutorials/case-suite doc/user/tutorials htmlcov/ monkeytype.* test_results.* wheelhouse # No workspace crumbs **/.coverage* **/__pycache__ **/logs/* *.ascii *.egg-info/ *.sublime-project *.sublime-workspace *.temp *~ .*.swp .cache/ .coverage .DS_Store .externalToolBuilders/ .hypothesis/ .idea/ .ipynb_checkpoints .metadata .mypy_cache/ .project .pydevproject .pytest_cache/ .ruff_cache/ .settings .tox .vim-bookmarks .vscode armi-venv/* dump-temp-* dump-tests* phabricator-lint.txt pytest_verbose.log pytestdebug.log python_details.log reportsOutputFiles/ system_info.log tags temp-* venv*/ # Ignore certain 
data files *.avi *.diff *.flux_bg *.flux_ufg *.h5 *.html *.mp4 *.nucdata *.out *.ppm *.sum *.txt *.vtd *.vtu *.xdmf *dlayxs* ================================================ FILE: .gitmodules ================================================ [submodule "doc/tutorials/armi-example-app"] path = doc/tutorials/armi-example-app url = https://github.com/terrapower/armi-example-app.git ================================================ FILE: .licenserc.json ================================================ { "**/*.py": "# Copyright " } ================================================ FILE: AUTHORS ================================================ # This is the list of ARMI's contributors. # # This may not list everyone who has ever contributed code, important ideas, or discussions to ARMI. But this is a good # faith attempt to give credit where it is due. TerraPower, LLC Aaron Reynolds (aaronjamesreynolds) Aidan McDonald (AidanMcDonald) Alex James (alexhjames) Antoine Margeride (amargeride) Arrielle Opotowsky (opotowsky) Ashley Thompson (Ashlita6) Bharat Medasani (mbk-tp) Brandon LaFleur (bdlafleur) Brian Sculac (bsculac) Casey Stocking (clstocking) Chris Keckler (keckler) Chris Wong (crswong888) Christen McKenzie (chris10mckenz) David Pham (dpham-materials) Drew Johnson (drewejohnson, drewj-tp, drewj-usnctech) Dustin Langewisch (dlangewisch) Evan Albright Graham Malmgren Hunter Smith (HunterPSmith) Jacob Hader (jakehader) James Marshall Jason Meng (jasonbmeng) Jeff Baylor (jeffbaylor) Jinan Yang (jyang-TP) John Stilley (john-science) Jonathon Shimwell (shimwell) Joshua Chen (joshuavictorchen) Kayla Clements (clemekay) Lim Swee Kiat (greentfrapp) Mark Onufer (onufer) Michael Castillo (kasticrunch, mcastillo10) Michael Huang (LMikeH) Michael Jarrett (mgjarrett) Michael Johnson (mikepjohnson) Mitch Young (youngmit) Nick Touran (ntouran, partofthething) Nicole Powell (nipowell) Paul Romano (paulromano) Peter McNabb Samual Miller (sammiller11235) Scott Yak (scottyak) Tian Jing 
(TianJingwd) Tommy Cisneros (sombrereau) Tony Alberti (albeanth) Virinder Sandhu (Nebbychadnezzar) Wyatt Scherer (wcscherer) Zachary Prince (zachmprince) ================================================ FILE: CONTRIBUTING.md ================================================ # Contribution License Agreement For information on how to contribute to ARMI, see [our official documentation](https://terrapower.github.io/armi/developer/first_time_contributors.html). This Contribution License Agreement (**"Agreement"**) is agreed to by the party signing below (**"You"**), and conveys certain license rights to TerraPower, LLC and its affiliates (**"TerraPower"**) for Your contributions to TerraPower open source projects. This Agreement is effective as of the latest signature date below. ## 1. Definitions. **"Code"** means the computer software code, whether in human-readable or machine-executable form, that is delivered by You to TerraPower under this Agreement. **"Project"** means any of the projects owned or managed by TerraPower in which software is offered under a license approved by the Open Source Initiative (OSI) ([www.opensource.org](http://www.opensource.org)) and documentation offered under an OSI or a Creative Commons license (https://creativecommons.org/licenses). **"Submit"** is the act of uploading, submitting, transmitting, or distributing code or other content to any Project, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Project for the purpose of discussing and improving that Project, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Submission." **"Submission"** means the Code and any other copyrightable material Submitted by You, including any associated comments and documentation. ## 2. Your Submission. 
You must agree to the terms of this Agreement before making a Submission to any Project. This Agreement covers any and all Submissions that You, now or in the future (except as described in Section 4 below), Submit to any Project. ## 3. Originality of Work. You represent that each of Your Submissions is entirely Your original work. Should You wish to Submit materials that are not Your original work, You may Submit them separately to the Project if You (a) retain all copyright and license information that was in the materials as You received them, (b) in the description accompanying Your Submission, include the phrase "Submission containing materials of a third party:" followed by the names of the third party and any licenses or other restrictions of which You are aware, and (c) follow any other instructions in the Project’s written guidelines concerning Submissions. ## 4. Your Employer. References to "employer" in this Agreement include Your employer or anyone else for whom You are acting in making Your Submission, e.g. as a contractor, vendor, or agent. If Your Submission is made in the course of Your work for an employer or Your employer has intellectual property rights in Your Submission by contract or applicable law, You must secure permission from Your employer to make the Submission before signing this Agreement. In that case, the term "You" in this Agreement will refer to You and the employer collectively. If You change employers in the future and desire to Submit additional Submissions for the new employer, then You agree to sign a new Agreement and secure permission from the new employer before Submitting those Submissions. ## 5. Licenses. ### a. Copyright License. 
You grant TerraPower, and those who receive the Submission directly or indirectly from TerraPower, a perpetual, worldwide, non-exclusive, royalty-free, irrevocable license in the Submission to reproduce, prepare derivative works of, publicly display, publicly perform, and distribute the Submission and such derivative works, and to sublicense any or all of the foregoing rights to third parties. ### b. Patent License. You grant TerraPower, and those who receive the Submission directly or indirectly from TerraPower, a perpetual, worldwide, non-exclusive, royalty-free, irrevocable license under Your patent claims that are necessarily infringed by the Submission or the combination of the Submission with the Project to which it was Submitted to make, have made, use, offer to sell, sell and import or otherwise dispose of the Submission alone or with the Project. ### c. Other Rights Reserved. Each party reserves all rights not expressly granted in this Agreement. No additional licenses or rights whatsoever (including, without limitation, any implied licenses) are granted by implication, exhaustion, estoppel or otherwise. ## 6. Representations and Warranties. You represent that You are legally entitled to grant the above licenses. You represent that each of Your Submissions is entirely Your original work (except as You may have disclosed under Section 3). You represent that You have secured permission from Your employer to make the Submission in cases where Your Submission is made in the course of Your work for Your employer or Your employer has intellectual property rights in Your Submission by contract or applicable law. If You are signing this Agreement on behalf of Your employer, You represent and warrant that You have the necessary authority to bind the listed employer to the obligations contained in this Agreement. You are not expected to provide support for Your Submission, unless You choose to do so. 
UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING, AND EXCEPT FOR THE WARRANTIES EXPRESSLY STATED IN SECTIONS 3, 4, AND 6, THE SUBMISSION PROVIDED UNDER THIS AGREEMENT IS PROVIDED WITHOUT WARRANTY OF ANY KIND, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY OF NONINFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. ## 7. Notice to TerraPower. You agree to notify TerraPower in writing of any facts or circumstances of which You later become aware that would make Your representations in this Agreement inaccurate in any respect. ## 8. Information about Submissions. You agree that contributions to Projects and information about contributions may be maintained indefinitely and disclosed publicly, including Your name and other information that You submit with Your Submission. ## 9. Governing Law/Jurisdiction. This Agreement is governed by the laws of the State of Washington, USA and the parties consent to exclusive jurisdiction and venue in the federal courts located in King County, Washington, USA unless no federal subject matter jurisdiction exists, in which case the parties consent to exclusive jurisdiction and venue in the Superior Court of King County, Washington, USA. The parties waive all defenses of lack of personal jurisdiction and forum non-conveniens. ## 10. Entire Agreement/Assignment. This Agreement is the entire agreement between the parties, and supersedes any and all prior agreements, understandings or communications, written or oral, between the parties relating to the subject matter hereof. This Agreement may be assigned by TerraPower. Please select one of the options below and sign as indicated. By signing, You accept and agree to the terms of this Contribution License Agreement for Your present and future Submissions to TerraPower. 
================================================ FILE: LICENSE.md ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2020 TerraPower, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.rst ================================================ |Build Status| |Code Coverage| |Commit Activity| |Good First Issues| ################# ARMI Introduction ################# The Advanced Reactor Modeling Interface (ARMI\ :sup:`®`) is an open-source tool that streamlines your nuclear reactor design/analysis needs by providing a software *reactor at your fingertips* and a rich ecosystem of utilities working in concert. It is made for and by professional reactor analysis teams and is maintained by `TerraPower LLC `_, a nuclear technology development company. 
ARMI: * Provides a hub-and-spoke mechanism to standardize communication and coupling between physics kernels and the specialist analysts who use them, * Facilitates the creation and execution of detailed models and complex analysis methodologies, * Provides an ecosystem within which to rapidly and collaboratively build new analysis and physics simulation capabilities, and * Provides useful utilities to assist in reactor development. A few demos of ARMI can be seen in the `ARMI example gallery `_. Using ARMI plus a collection of ARMI-aware physics plugins, an engineering team can perform a full analysis of a reactor system and then repeat the same level of analysis with some changed input parameters for almost no additional cost. Even better, thousands of perturbed cases can be executed in parallel on large clusters, helping conceptual design teams home in on an optimal design. Or design teams can analyze sensitivities all the way from, for example, an impurity in a control material to the peak structural temperature in a design-basis transient. .. note:: ARMI does not come with a full selection of physics kernels. They will need to be acquired or developed for your specific project in order to make full use of this tool. Many of the example use-cases discussed in this manual require functionality that is not included in the open-source ARMI Framework. In general, ARMI aims to enhance the quality, ease, and rigor of computational nuclear reactor design and analysis. Additional high-level overview about this system can be found in [#touranarmi]_. .. 
list-table:: Quick links :widths: 30 70 * - Source code - https://github.com/terrapower/armi * - Documentation - https://terrapower.github.io/armi * - First time contributor's guide - https://terrapower.github.io/armi/developer/first_time_contributors.html * - Bug tracker - https://github.com/terrapower/armi/issues * - Plugin directory - https://github.com/terrapower/armi-plugin-directory * - Contact - armi-devs@terrapower.com Quick start *********** Before starting, you need to have `Python `_ 3.9+. Get the ARMI code, install the prerequisites, and fire up the launcher with the following commands. You probably want to do this in a virtual environment as described in the `Installation documentation `_. Otherwise, the dependencies could conflict with your system dependencies. First, upgrade your version of pip:: $ pip install -U pip>=22.1 Now clone and install ARMI:: $ git clone https://github.com/terrapower/armi $ cd armi $ pip install -e . $ armi --help The ARMI tests are meant to be run using `pytest `_ locally:: $ pip install -e ".[test]" $ pytest -n 4 armi From here, we recommend going through a few of our `gallery examples `_ and `tutorials `_ to start touring the features and capabilities and then move on to the `User Manual `_. Background ********** Nuclear reactor design requires, among other things, answers to the following questions: * Where are the neutrons? How fast are they moving? In which direction? * How quickly are atomic nuclei splitting? How long until the fuel runs out? How many atoms in the structure are being energetically displaced? * How much heat do these reactions produce? How quickly must coolant flow past the fuel to maintain appropriate temperatures? What are the temperatures of the fuel, coolant, and structure? * Can the structural arrangement support itself given the temperatures and pressures induced by the flowing coolant? For how long? 
* If a pump loses power or a control rod accidentally withdraws, how quickly will the chain reaction stop while keeping radiation contained? * How much used nuclear fuel is generated per useful energy produced? How long until it decays to stability? * Where and when should we move the fuel to most economically maintain the chain reaction? * What's the dose and activation above the head and in the secondary loop? * How does containment handle various postulated accidents? * How does the building handle earthquakes? Digital computers have assisted in nuclear technology development since the days of the ENIAC in the 1940s. We now understand reactor physics well enough to build detailed simulations, which can answer many of these design questions in a cost-effective and flexible manner. This allows us to simulate all kinds of different reactors with different fuels, coolants, moderators, power levels, safety systems, and power cycles. We can run our virtual reactors through the decades, tossing various off-normal conditions at them now and then, to see how they perform in terms of capability, economics, and safety. Perhaps surprisingly, some nuclear software written in the 1960s is still in use today. These codes are validated against physical experiments that no longer exist. Meanwhile, new cutting-edge nuclear software is being developed for today's powerful computers. Both old and new, these tools are often challenging to operate and coordinate to produce a full reactor analysis. The ARMI approach was born out of this situation: how can we best leverage an eclectic mix of legacy and modern tools with a small team to do full-scope analysis? We built a framework that lets us automate the tedious, uncoupled, and error-prone parts of reactor engineering/analysis work. We can turn around a very meaningful and detailed core analysis given a major change (e.g. change power by 50%) in just a few weeks. 
We can dispatch hundreds of parameter sweeps to multiple machines and then perform multi-objective optimization on the resulting design space. The ARMI system is largely written in the Python programming language. Its high-level nature allows nuclear and mechanical engineers to rapidly automate their analysis tasks from their sub-specialties. This helps eliminate the translation step between computer-scientists and power plant design engineers. This allows good division of labor: the computer scientists can focus on the overall performance and maintainability of the framework, while the power plant engineers focus on power plant engineering. We have spent over 10 years developing this system. Because of ARMI's high-level nature, we believe we can collaborate effectively with all ongoing reactor software developments. Communication and coupling ************************** ARMI provides a central place for all physics kernels to interact: the Reactor Model. All modules read *state* information from this Reactor and write their output to it. This common interface allows seamless communication and coupling between different physics sub-specialties. If you plug one new physics kernel into ARMI, it becomes coupled to N other kernels. The ARMI Framework, depicted in green below, is the majority of the open source package. Several skeletal analysis routines are included as well to perform basic data management and to help align efforts on external physics kernels. .. figure:: https://terrapower.github.io/armi/_static/armiSchematicView.png :figclass: align-center **Figure 1.** The schematic representation of the ARMI data model. Automation ********** ARMI can quickly and easily produce complex input files with high levels of detail in various approximations. This enables users to perform rapid high-fidelity analyses to make sure all important physics are captured. It also enables sensitivity studies of different modeling approximations (e.g. symmetries, transport vs. 
diffusion vs. Monte Carlo, subchannel vs. CFD, etc.). .. figure:: https://terrapower.github.io/armi/_static/armiGeometries.png :figclass: align-center **Figure 2.** A variety of approximations in hexagonal geometry (1/3-core, full core, pin detailed, etc.) are shown, all derived from one consistent input file. ARMI supports Cartesian, Hex, RZ, and RZTheta geometric grids and includes many geometric components. Additionally, users can provide custom geometric elements. New analysis and physics capabilities ************************************* The ARMI reactor model is fully accessible via a Python-based API, meaning that power-users and developers have full access to the details of the plant at all times. Developers adding new physics features can take advantage of the ARMI data management structure by simply reading and writing to the Reactor state. Leveraging the infrastructure of ARMI, progress can be made rapidly. Power-user analysts can modify the plant in many ways. For instance, removing all sodium coolant is a one-liner:: core.setNumberDensity('NA23',0.0) and finding the peak power density is easy:: core.getMaxParam('pdens') Any ARMI state can be written out to whichever format the user desires, meaning that nominally identical cases can be produced for multiple similar codes in sensitivity studies. To read power densities, simply read them off the assembly objects. Instead of producing spreadsheets and making plots manually, analysts may write scripts to generate output reports that run automatically. Writing a module within ARMI automatically features access to the ARMI API, including: * Cross section processing * Material properties * Thermal expansion * Database persistence * Data visualization * A code testing, documentation, and version control system Use cases ********* Given an input describing a reactor, a typical ARMI run loops over a set of plugins in a certain sequence. 
Some plugins trigger third-party simulation codes, producing input files for them, executing them, and translating the output back onto the reactor model. Other plugins perform physics simulations directly. For example, one ARMI sequence may involve the calculation of: * nuclear cross sections * global flux and power * subchannel temperatures * duct wall pressures * cladding strain and wastage * fission gas pressure * reactivity feedbacks * flow orificing * the equilibrium fuel cycle * control rod worth * shutdown margin * frequency stability margins * peak cladding temperature * transient analysis * total levelized cost of electricity for the run Another ARMI sequence may simply compute the cost of feed uranium and enrichment in an initial core and quit. Larger simulations may also run through the multi-objective design optimization system, which runs many cases with input perturbations to help find the best overall system, considering all important physics at the same time. Other interest may come from the following: The Research Scientist ====================== A nuclear reactor research scientist, at a national lab or university, may benefit from ARMI. An ARMI workflow can reduce the time spent on data management. ARMI can handle the tedium so that researchers can better focus on designing and testing their research. For example, if an ARMI input file describing the FFTF reactor is provided, the researcher can start running benchmark cases with their new code method very rapidly, rather than spending the time building their own FFTF model. If someone wants to try varying nuclear cross sections by a percent here and there to compute sensitivities, ARMI is a perfect platform upon which to operate. 
If a reactor designer wants to try out a new Machine Learning algorithm for fuel management, plugging it into ARMI and having it run on all the physics kernels of the ARMI ecosystem will be a great way to prove its true value (note that this requires a rich ARMI physics ecosystem). The Nuclear Startup Engineer ============================ As various companies evaluate their ideas, they need tools for analysis. They can pick up ARMI and save 10 years of development and hit the ground running by plugging in their design-specific physics kernels and proprietary design inputs. ARMI's parameter sweep features, reactor model, and parallel utilities will all come in handy immediately. Operating and Vendor Engineers ============================== People at well-established utilities or vendors can hook ARMI into their legacy systems and increase their overall productivity. The Enthusiast ============== If an enthusiast wants to try out a reactor idea they have, they can use ARMI (plus some physics kernels) to quickly get some performance metrics. They can see if their idea has wings, and if it does, they can then find a way to bring it to engineering and commercial reality. History of ARMI *************** ARMI was originally created by TerraPower, LLC near Seattle WA starting in 2009. Its founding mission was to determine the optimal fuel management operations required to transition a fresh Traveling Wave Reactor core from startup into an equilibrium state. It started out automating the Argonne National Lab (ANL) fast reactor neutronics codes, MC2 and REBUS. The reactor model design was made with the intention of adding other physics capabilities later. Soon, simple thermal hydraulics were added and it's grown ever since. It has continuously evolved towards a general reactor analysis framework. 
Following requests by outside parties to use ARMI, we started working on a more modular architecture for ARMI, allowing some of the intertwined physics capabilities to be separated out as plugins from the standalone framework. The nuclear industry is small, and it faces many challenges. It also has a tradition of secrecy. As a result, there is risk of overlapping work being done by other entities. We hypothesize that collaborating on software systems can help align some efforts worldwide, increasing quality and efficiency. In reactor development, the idea is generally cheap. It's the shakedown, technology and supply chain development, engineering demo, and commercial demo that are the hard parts. Thus, ARMI was released under an open-source license in 2019 to facilitate mutually beneficial collaboration across the nuclear industry, where many teams are independently developing similar reactor analysis/automation frameworks. We also hope that if more people can rapidly analyze the performance of their reactor ideas, limited available funding can be spent more effectively. System Requirements ******************* Being largely written in the Python programming language, the ARMI system works on most platforms. It can perform meaningful analysis on a single laptop, but the full value of design optimization and large problems is realized with parallel runs over large clusters (using the optional ``mpi4py`` library). .. _getting-help: Getting Help ************ You can get help with ARMI by either making issues on our `github page `_ or by e-mailing armi-devs@terrapower.com. Disclaimers *********** Due to TerraPower goals and priorities, many ARMI modules were developed with the sodium-cooled fast reactors as a target, and are not necessarily yet optimized for other plants. This is a known issue with code organization and we are working on it. 
On the other hand, the framework is sufficiently general that people have modeled other reactor types with ARMI, including thermal reactors. ARMI was developed within a rapidly changing R&D environment. It evolved accordingly, and naturally carries some legacy. We continuously attempt to identify and update problematic parts of the code. Users should understand that ARMI is not a polished consumer software product, but rather a powerful and flexible engineering tool. It has the potential to accelerate work on many kinds of reactors. ARMI has been written to support specific engineering/design tasks. As such, polish in the GUIs and output is somewhat lacking. The ARMI framework uses the ``camelCase`` style, which is not the standard style for Python. As this is an issue of style, it is not considered worth the API-breaking cost to our downstream users to change it. License ******* TerraPower and ARMI are registered trademarks of TerraPower, LLC. Other trademarks and registered trademarks used in this Manual are the property of the respective trademark holders. The ARMI system is licensed as follows: .. code-block:: none Copyright 2009 TerraPower, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Be careful when including any dependency in ARMI (say in the ``pyproject.toml`` file) not to include anything with a license that supersedes our Apache license. For instance, any third-party Python library included in ARMI with a GPL license will make the whole project fall under the GPL license. 
But a lot of potential users of ARMI will want to keep some of their work private, so we can't allow any GPL dependencies. For that reason, it is generally considered best-practice in the ARMI ecosystem to only use third-party Python libraries that have MIT or BSD licenses. .. [#touranarmi] Touran, Nicholas W., et al. "Computational tools for the integrated design of advanced nuclear reactors." Engineering 3.4 (2017): 518-526. https://doi.org/10.1016/J.ENG.2017.04.016 .. |Build Status| image:: https://github.com/terrapower/armi/actions/workflows/unittests.yaml/badge.svg?branch=main :target: https://github.com/terrapower/armi/actions/workflows/unittests.yaml .. |Code Coverage| image:: https://codecov.io/gh/terrapower/armi/branch/main/graph/badge.svg :target: https://app.codecov.io/gh/terrapower/armi/tree/main .. |Commit Activity| image:: https://img.shields.io/github/commit-activity/m/terrapower/armi :target: https://github.com/terrapower/armi/pulse .. |Good First Issues| image:: https://img.shields.io/github/issues/terrapower/armi/good%20first%20issue :target: https://github.com/terrapower/armi/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 ================================================ FILE: armi/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Welcome to the Advanced Reactor Modeling Interface (ARMI). This module initializes the ARMI platform. 
The bootstrap process is broken into several phases:

* Import fundamental dependencies in Python library and some third-party libs
* Investigate environment: Check Python version, code version, MPI situation, and TTY/GUI/interactivity,
* Set up temp dirs
* Set up printout table formats (in preparation of logging info)
* Initialize all possible nuclide objects in the nuclide directory
* Discover and register available built-in :py:mod:`plugins ` (custom ones are registered after inputs)
* Discover and define all potential configuration settings from available plugins
* Read input files
* Update :py:mod:`nuclide directory ` with depletion info based on config
* Discover and define all state :py:mod:`Parameters ` on data model (maybe dependent on config)
* Discover :py:mod:`Entry points ` from plugins
* Choose entry point based on user command

If using the ``run`` entry point, additional work is done:

* Build :py:mod:`reactor model ` based on input
* Build :py:mod:`operator object ` with specific calculation loop
* Build ordered interface stack based on configuration settings
* Begin looping over interface stack, operating upon data model according to operator design
* Loop until complete
* Wrap up
* Quit
"""

# ruff: noqa: F401
import atexit
import datetime
import importlib
import os
import signal
import subprocess
import sys
import traceback
import warnings
from typing import List, Optional, Type

import __main__ as main

# The _bootstrap module performs operations that may need to occur before it is necessarily safe to import the rest of
# the ARMI system. Things like:
# - configure the MPI environment
# - detect the nature of interaction with the user (terminal UI, GUI, unsupervised, etc)
# - Initialize the nuclide database
import armi._bootstrap
from armi import apps, cli, context, pluginManager, plugins, runLog
from armi.context import (
    APP_DATA,
    CURRENT_MODE,
    DOC,
    MPI_COMM,
    MPI_DISTRIBUTABLE,
    MPI_NODENAME,
    MPI_NODENAMES,
    MPI_RANK,
    MPI_SIZE,
    RES,
    ROOT,
    START_TIME,
    USER,
    Mode,
)
from armi.meta import __version__
from armi.nucDirectory import nuclideBases
from armi.reactor import flags, parameters

# ARMI does not configure its own application by default. This is mostly to catch issues involving calling code that
# requires the framework to be configured before that has explicitly taken place. An application should call
# `configure()` with its App class in order for ARMI to work properly
_app: Optional[apps.App] = None

# Holds the traceback of the first configure() call, for a helpful error on double-configure.
_ARMI_CONFIGURE_CONTEXT: Optional[str] = None

# Advanced flag used in documentation builds to avoid isConfigured guards.
_ignoreConfigures = False


def disableFutureConfigures():
    """Exposed function to ensure armi.configure() isn't called more than once."""
    global _ignoreConfigures
    _ignoreConfigures = True


def isStableReleaseVersion(version=None):
    """Determine if the version should be considered a stable release.

    Pre-release/development versions carry a "-" suffix (e.g. "1.0.0-rc1").
    """
    version = version or __version__
    return "-" not in version


def init(fName=None, cs=None, skipInspection=False, choice=None):
    """
    Scan a directory for armi inputs and load one to interact with.

    .. impl:: Settings are used to define an ARMI run.
        :id: I_ARMI_SETTING1
        :implements: R_ARMI_SETTING

        This method initializes an ARMI run, and if successful returns an Operator. That operator is designed to
        drive the reactor simulation through time steps to simulate its operation. This method takes in a settings
        file or object to initialize the operator. Whether a settings file or object is supplied, the operator will
        be built based on those settings. Because the total collection of settings can be modified by developers of
        ARMI applications, providing these settings allow ARMI end-users to granularly define their simulations.

    Parameters
    ----------
    fName : str, optional
        The path to a settings file to load: my_case.yaml
    cs : Settings, optional
        If supplied, this CS object will supersede the other case input methods and use the object directly.
    skipInspection : bool, optional
        Whether or not the inputs should be checked for valid settings. Default is False.
    choice : int, optional
        Automatically run with this item out of the menu that would be produced by the existing YAML files.

    Examples
    --------
    >>> o = armi.init()
    """
    # deferred import: cases/settings are heavy and not needed at armi import time
    from armi import cases, settings

    if cs is None:
        if fName is None:
            fName = settings.promptForSettingsFile(choice)
        cs = settings.Settings(fName)

    armiCase = cases.Case(cs=cs)
    if not skipInspection:
        armiCase.checkInputs()

    try:
        return armiCase.initializeOperator()
    except:
        # Catch any and all errors. Naked exception on purpose.
        # Concatenate errors to the primary log file.
        runLog.close()
        raise


def getDefaultPlugins() -> List[Type[plugins.ArmiPlugin]]:
    """
    Return a list containing the default set of ARMI Framework plugins.

    This is useful for an application to fold all of the ARMI Framework's capabilities into its own set of plugins.
    """
    from armi import bookkeeping, cli, reactor
    from armi.physics import fuelCycle, neutronics, safety

    defaultPlugins = [
        cli.EntryPointsPlugin,
        bookkeeping.BookkeepingPlugin,
        fuelCycle.FuelHandlerPlugin,
        neutronics.NeutronicsPlugin,
        safety.SafetyPlugin,
        reactor.ReactorPlugin,
    ]

    return defaultPlugins


def getDefaultPluginManager() -> pluginManager.ArmiPluginManager:
    """
    Return a plugin manager containing the default set of ARMI Framework plugins.

    This is useful when using standalone facilities of ARMI without a specific application.
    """
    pm = plugins.getNewPluginManager()
    for plugin in getDefaultPlugins():
        pm.register(plugin)

    return pm


def isConfigured():
    """Returns whether ARMI has been configured with an App."""
    return _app is not None


def getPluginManager() -> Optional[pluginManager.ArmiPluginManager]:
    """Return the plugin manager, if there is one."""
    global _app
    if _app is None:
        return None
    return _app.pluginManager


def getPluginManagerOrFail() -> pluginManager.ArmiPluginManager:
    """Return the plugin manager. Raise an error if there is none."""
    global _app
    assert _app is not None, (
        "The ARMI plugin manager was requested, no App has been configured. Ensure that `armi.configure()` has been "
        "called before attempting to interact with the plugin manager."
    )
    return _app.pluginManager


def getApp() -> Optional[apps.App]:
    """Return the currently-configured App, or None if armi.configure() has not been called."""
    global _app
    return _app


def _cleanupOnCancel(signum, _frame):
    """Helper function to clean up upon cancellation."""
    print(f"Caught Cancel signal ({signum}); cleaning temporary files and exiting...", file=sys.stderr)
    context.cleanFastPathAfterSimulation()
    sys.stdout.flush()
    sys.stderr.flush()
    sys.exit(1)  # since we're handling the signal we have to cancel


def _liveInterpreter():
    """Return whether we are running within a live/interactive python interpreter."""
    # scripts/modules define __main__.__file__; an interactive session does not
    return not hasattr(main, "__file__")


def configure(app: Optional[apps.App] = None, permissive=False):
    """
    Set the plugin manager for the Framework and configure internals to those plugins.

    Parameters
    ----------
    app :
        An :py:class:`armi.apps.App` instance with which the framework is to be configured. If it is not provided,
        then the default ARMI App will be used.
    permissive :
        Whether or not an error should be produced if ``configure`` is called more than once. This should only be set
        to ``True`` under testing or demonstration purposes, where the contents of otherwise independent scripts need
        to be run under the same python instance.

    Important
    ---------
    Since this affects the behavior of several modules at their import time, it is generally not safe to re-configure
    the ARMI framework once it has been configured. Therefore this will raise a ``RuntimeError`` if such a
    re-configuration is attempted, unless ``permissive`` is set to ``True``.

    Notes
    -----
    We are planning on encapsulating much of the global ARMI state that gets configured with an App into the App
    object itself (with some other things going into the Case object). This will provide a number of benefits, the
    main one being that it will become trivial to re-configure the framework, which is currently not possible.
    """
    global _app
    global _ARMI_CONFIGURE_CONTEXT

    if _ignoreConfigures:
        return

    app = app or apps.App()

    if _app is not None:
        # already configured: silently allow only when explicitly permissive
        if permissive and isinstance(app, apps.App):
            return
        else:
            raise RuntimeError(
                f"Multiple calls to armi.configure() are not allowed. Previous call from:\n{_ARMI_CONFIGURE_CONTEXT}"
            )

    assert not context.BLUEPRINTS_IMPORTED, (
        "ARMI can no longer be configured after blueprints have been imported. Blueprints were imported from"
        f":\n{context.BLUEPRINTS_IMPORT_CONTEXT}"
    )

    # remember where configuration happened, to make the double-configure error actionable
    _ARMI_CONFIGURE_CONTEXT = "".join(traceback.format_stack())

    _app = app

    context.APP_NAME = app.name

    if _liveInterpreter():
        runLog.LOG.startLog(name=f"interactive-{app.name}")
        cli.splash()

    pm = app.pluginManager
    parameters.collectPluginParameters(pm)
    parameters.applyAllParameters()
    _app.registerPluginFlags()


def applyAsyncioWindowsWorkaround() -> None:
    """
    Apply Asyncio workaround for Windows and Python 3.8.

    This prevents a NotImplementedError on Windows with Python 3.8. This error showed up during jupyter notebook
    build tests and documentation.

    See https://bugs.python.org/issue37373
    """
    import asyncio

    if sys.version_info[0] == 3 and sys.version_info[1] >= 8 and sys.platform.startswith("win"):
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())


applyAsyncioWindowsWorkaround()

# The ``atexit`` handler is like putting it in a finally after everything.
atexit.register(context.cleanFastPathAfterSimulation)

# register cleanups upon HPC cancellations. Linux clusters will send a different signal. SIGBREAK doesn't exist on
# non-windows. This actually doesn't work in mpi runs because MSMPI's mpiexec does not pass signals.
if os.name == "nt":
    signal.signal(signal.SIGBREAK, _cleanupOnCancel)
signal.signal(signal.SIGINT, _cleanupOnCancel)


================================================
FILE: armi/__main__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Primary entry point into ARMI.

There are a variety of entry points in the ``cli`` package that define the various run options. This invokes them
according to command-line user input.
""" import sys import traceback from armi import apps, configure, context, isConfigured, runLog from armi.cli import ArmiCLI def main(): # Main entry point into ARMI try: if not isConfigured(): configure(apps.App()) code = ArmiCLI().run() # sys exit interprets None as 0 sys.exit(code) except Exception: # Make sure not to catch all BaseExceptions, lest we catch the expected SystemExit exception runLog.error( f"Unhandled exception in __main__, rank {context.MPI_RANK} on {context.MPI_NODENAME}.", file=sys.__stderr__, ) runLog.error(traceback.format_exc(), file=sys.__stderr__) if context.MPI_SIZE > 1: runLog.error( f"Killing all MPI tasks from __main__, rank {context.MPI_RANK}.", file=sys.__stderr__, ) # cleanFastPathAfterSimulation has @atexit.register so it should be called at the end, but mpi. Abort # in main will not allow for @atexit.register or except/finally code to be called so # calling here as well context.cleanFastPathAfterSimulation() # .Abort will not allow for @atexit.register or except/finally code to be called context.MPI_COMM.Abort(errorcode=-1) raise SystemExit(1) if __name__ == "__main__": main() ================================================ FILE: armi/_bootstrap.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Code that needs to be executed before most ARMI components are safe to import.""" from armi.nucDirectory import nuclideBases # noqa: E402 # Nuclide bases get built explicitly here to have better determinism # about when they get instantiated. The burn chain is not applied # at this point, but only after input is read. Nuclides need to be built super early # because some import-time code needs them to function. Namely, Block parameter # collection uses them to create number density params. nuclideBases.factory() ================================================ FILE: armi/apps.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The base ARMI App class. This module defines the :py:class:`App` class, which is used to configure the ARMI Framework for a specific application. An ``App`` implements a simple interface for customizing much of the Framework's behavior. """ # ruff: noqa: E402 import collections import importlib import sys from typing import Dict, List, Optional, Tuple from armi import context, meta, pluginManager, plugins, settings from armi.reactor import parameters from armi.reactor.flags import Flags from armi.settings import Setting, fwSettings class App: """ The highest-level of abstraction for defining what happens during an ARMI run. .. impl:: An App has a plugin manager. 
:id: I_ARMI_APP_PLUGINS :implements: R_ARMI_APP_PLUGINS The App class is intended to be subclassed in order to customize the functionality and look-and-feel of the ARMI Framework for a specific use case. An App contains a plugin manager, which should be populated in ``__init__()`` with a collection of plugins that are deemed suitable for a given application, as well as other methods which provide further customization. The base App class is also a good place to expose some more convenient ways to get data out of the Plugin API; calling the ``pluggy`` hooks directly can sometimes be a pain, as the results returned by the individual plugins may need to be merged and/or checked for errors. Adding that logic here reduces boilerplate throughout the rest of the code. """ name = "armi" """ The program name of the app. This should be the actual name of the python entry point that loads the app, or the name of the module that contains the appropriate __main__ function. For example, if the app is expected to be invoked with ``python -m myapp``, ``name`` should be ``"myapp"`` """ def __init__(self): """ This mostly initializes the default plugin manager. Subclasses are free to adopt this plugin manager and register more plugins of their own, or to throw it away and start from scratch if they do not wish to use the default Framework plugins. For a description of the things that an ARMI plugin can do, see the :py:mod:`armi.plugins` module. 
""" self._pluginFlagsRegistered: bool = False self._pm: Optional[pluginManager.ArmiPluginManager] = None self._paramRenames: Optional[Tuple[Dict[str, str], int]] = None self.__initNewPlugins() def __initNewPlugins(self): from armi import bookkeeping, cli, reactor from armi.physics import ( fuelCycle, fuelPerformance, neutronics, safety, thermalHydraulics, ) self._pm = plugins.getNewPluginManager() for plugin in ( cli.EntryPointsPlugin, bookkeeping.BookkeepingPlugin, fuelCycle.FuelHandlerPlugin, fuelPerformance.FuelPerformancePlugin, neutronics.NeutronicsPlugin, safety.SafetyPlugin, thermalHydraulics.ThermalHydraulicsPlugin, reactor.ReactorPlugin, ): self._pm.register(plugin) self._paramRenames = None @property def version(self) -> str: """Grab the version of this app (defaults to ARMI version). Notes ----- This is designed to be over-ridable by Application developers. """ return meta.__version__ @property def pluginManager(self) -> pluginManager.ArmiPluginManager: """Return the App's PluginManager.""" return self._pm def getSettings(self) -> Dict[str, Setting]: """Return a dictionary containing all Settings defined by the framework and all plugins.""" # Start with framework settings settingDefs = {setting.name: setting for setting in fwSettings.getFrameworkSettings()} # The optionsCache stores options that may have come from a plugin before the setting to # which they apply. Whenever a new setting is added, we check to see if there are any # options in the cache, popping them out and adding them to the setting. If all plugins' # settings have been processed and the cache is not empty, that's an error, because a plugin # must have provided options to a setting that doesn't exist. 
optionsCache: Dict[str, List[settings.Option]] = collections.defaultdict(list) defaultsCache: Dict[str, settings.Default] = {} for pluginSettings in self._pm.hook.defineSettings(): for pluginSetting in pluginSettings: if isinstance(pluginSetting, settings.Setting): name = pluginSetting.name if name in settingDefs: raise ValueError(f"The setting {pluginSetting.name} already exists and cannot be redefined.") settingDefs[name] = pluginSetting # handle when new setting has modifier in the cache (modifier loaded first) if name in optionsCache: settingDefs[name].addOptions(optionsCache.pop(name)) if name in defaultsCache: settingDefs[name].changeDefault(defaultsCache.pop(name)) elif isinstance(pluginSetting, settings.Option): if pluginSetting.settingName in settingDefs: # modifier loaded after setting, so just apply it (no cache needed) settingDefs[pluginSetting.settingName].addOption(pluginSetting) else: # no setting yet, cache it and apply when it arrives optionsCache[pluginSetting.settingName].append(pluginSetting) elif isinstance(pluginSetting, settings.Default): if pluginSetting.settingName in settingDefs: # modifier loaded after setting, so just apply it (no cache needed) settingDefs[pluginSetting.settingName].changeDefault(pluginSetting) else: # no setting yet, cache it and apply when it arrives defaultsCache[pluginSetting.settingName] = pluginSetting else: raise TypeError( "Invalid setting definition found: {} ({})".format(pluginSetting, type(pluginSetting)) ) if optionsCache: raise ValueError( "The following options were provided for settings that do " "not exist. Make sure that the set of active plugins is " "consistent.\n{}".format(optionsCache) ) if defaultsCache: raise ValueError( "The following defaults were provided for settings that do " "not exist. 
Make sure that the set of active plugins is " "consistent.\n{}".format(defaultsCache) ) return settingDefs def getParamRenames(self) -> Dict[str, str]: """ Return the parameter renames from all registered plugins. This renders a merged dictionary containing all parameter renames from all of the registered plugins. It also performs simple error checking. The result of this operation is cached, since it is somewhat expensive to perform. If the App detects that its plugin manager's set of registered plugins has changed, the cache will be invalidated and recomputed. """ cacheInvalid = False if self._paramRenames is not None: renames, counter = self._paramRenames if counter != self._pm.counter: cacheInvalid = True else: cacheInvalid = True if cacheInvalid: currentNames = {pd.name for pd in parameters.ALL_DEFINITIONS} renames = dict() for pluginRenames in self._pm.hook.defineParameterRenames(): collisions = currentNames & pluginRenames.keys() if collisions: raise plugins.PluginError( "The following parameter renames from a plugin collide with " "currently-defined parameters:\n{}".format(collisions) ) pluginCollisions = renames.keys() & pluginRenames.keys() if pluginCollisions: raise plugins.PluginError( "The following parameter renames are already defined by another plugin:\n{}".format( pluginCollisions ) ) renames.update(pluginRenames) self._paramRenames = renames, self._pm.counter return renames def registerPluginFlags(self): """ Apply flags specified in the passed ``PluginManager`` to the ``Flags`` class. See Also -------- armi.plugins.ArmiPlugin.defineFlags """ if self._pluginFlagsRegistered: raise RuntimeError("Plugin flags have already been registered. Cannot do it twice!") for pluginFlags in self._pm.hook.defineFlags(): Flags.extend(pluginFlags) self._pluginFlagsRegistered = True def registerUserPlugins(self, pluginPaths): r""" Register additional plugins passed in by importable paths. These plugins may be provided e.g. 
        by an application during startup based on user input.

        Format expected to be a list of full namespaces to plugin classes. There should be a comma between individual
        plugins and dots representing the file path or importable python namespace.

        Examples
        --------
        importable namespace: ``armi.stuff.plugindir.pluginMod.pluginCls,armi.whatever.plugMod2.plugCls2``
        or on Linux/Unix: ``/path/to/pluginMod.py:pluginCls,/path/to/plugMod2.py:plugCls2``
        or on Windows: ``C:\\path\\to\\pluginMod.py:pluginCls,C:\\\\path\\to\\plugMod2.py:plugCls2``

        Notes
        -----
        These paths are meant to be taken from a settings file, though this method is public. The idea is that these
        "user plugins" differ from regular plugins because they are defined during run time, not import time. As
        such, we restrict their flexibility and power as compared to the usual ArmiPlugins.
        """
        for pluginPath in pluginPaths:
            # registering the same plugin twice is a no-op
            if self._isPluginRegistered(pluginPath):
                continue

            if ".py:" in pluginPath:
                # The path is of the form: /path/to/why.py:MyPlugin
                self.__registerUserPluginsAbsPath(pluginPath)
            else:
                # The path is of the form: armi.thing.what.MyPlugin
                self.__registerUserPluginsInternalImport(pluginPath)

    def _isPluginRegistered(self, pluginPath: str):
        r"""
        Check if the plugin at the provided path is already registered.

        The expected path formats are:
        ------------------------------
        importable namespace: ``armi.stuff.plugindir.pluginMod.pluginCls``
        or on Linux/Unix: ``/path/to/pluginMod.py:pluginCls``
        or on Windows: ``C:\\path\\to\\pluginMod.py:pluginCls``

        Parameters
        ----------
        pluginPath : str
            String path to a userPlugin.

        Returns
        -------
        bool
            Whether or not the plugin name is already registered with the manager.
        """
        # the plugin class name is the last segment, after either ":" (file path) or "." (namespace)
        if ":" in pluginPath:
            pluginName = pluginPath.strip().split(":")[-1]
        else:
            pluginName = pluginPath.strip().split(".")[-1]

        return self._pm.has_plugin(pluginName)

    def __registerUserPluginsAbsPath(self, pluginPath):
        """Helper method to register a single UserPlugin via absolute path.

        Here the given path is of the form: /path/to/why.py:MyPlugin
        """
        assert pluginPath.count(".py:") == 1, f"Invalid plugin path: {pluginPath}"

        # split the settings string into file path and class name
        filePath, className = pluginPath.split(".py:")
        filePath += ".py"

        # load the module directly from its file, outside of any package
        spec = importlib.util.spec_from_file_location(className, filePath)
        mod = importlib.util.module_from_spec(spec)
        sys.modules[spec.name] = mod
        spec.loader.exec_module(mod)

        plugin = getattr(mod, className)
        assert issubclass(plugin, plugins.UserPlugin)
        self._pm.register(plugin)

        # ensure UserPlugin flags are loaded
        newFlags = plugin.defineFlags()
        if newFlags:
            Flags.extend(newFlags)

    def __registerUserPluginsInternalImport(self, pluginPath):
        """Helper method to register a single UserPlugin via internal import.

        Here the given path is of the form: armi.thing.what.MyPlugin
        """
        names = pluginPath.strip().split(".")
        modPath = ".".join(names[:-1])
        clsName = names[-1]

        mod = importlib.import_module(modPath)
        plugin = getattr(mod, clsName)
        assert issubclass(plugin, plugins.UserPlugin)
        self._pm.register(plugin)

        # ensure UserPlugin flags are loaded
        newFlags = plugin.defineFlags()
        if newFlags:
            Flags.extend(newFlags)

    @property
    def splashText(self):
        """
        Return a textual splash screen.

        Specific applications will want to customize this, but by default the ARMI one is produced, with extra data
        on the App name and version, if available.
""" # typical ARMI splash text splash = r""" +===================================================+ | _ ____ __ __ ___ | | / \ | _ \ | \/ | |_ _| | | / _ \ | |_) | | |\/| | | | | | / ___ \ | _ < | | | | | | | | /_/ \_\ |_| \_\ |_| |_| |___| | | Advanced Reactor Modeling Interface | | | | version {0:10s} | | |""".format(meta.__version__) # add the name/version of the current App, if it's not the default if context.APP_NAME != "armi": from armi import getApp splash += r""" |---------------------------------------------------| | {0:>17s} app version {1:10s} |""".format(context.APP_NAME, getApp().version) # bottom border of the splash splash += r""" +===================================================+ """ return splash ================================================ FILE: armi/bookkeeping/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""The bookkeeping package handles data persistence, reporting, and some debugging.""" from armi import plugins class BookkeepingPlugin(plugins.ArmiPlugin): @staticmethod @plugins.HOOKIMPL def exposeInterfaces(cs): from armi.bookkeeping import ( historyTracker, mainInterface, memoryProfiler, snapshotInterface, ) from armi.bookkeeping.db import databaseInterface from armi.bookkeeping.report import reportInterface interfaceInfo = [] interfaceInfo += plugins.collectInterfaceDescriptions(mainInterface, cs) interfaceInfo += plugins.collectInterfaceDescriptions(databaseInterface, cs) interfaceInfo += plugins.collectInterfaceDescriptions(historyTracker, cs) interfaceInfo += plugins.collectInterfaceDescriptions(memoryProfiler, cs) interfaceInfo += plugins.collectInterfaceDescriptions(reportInterface, cs) interfaceInfo += plugins.collectInterfaceDescriptions(snapshotInterface, cs) return interfaceInfo @staticmethod @plugins.HOOKIMPL def defineEntryPoints(): from armi.bookkeeping import visualization from armi.cli import database entryPoints = [] entryPoints.append(database.ExtractInputs) entryPoints.append(database.InjectInputs) entryPoints.append(visualization.VisFileEntryPoint) return entryPoints @staticmethod @plugins.HOOKIMPL def defineCaseDependencies(case, suite): if case.cs["loadStyle"] == "fromDB": # the ([^\/]) capture basically gets the file name portion and excludes any # directory separator return case.getPotentialParentFromSettingValue( case.cs["reloadDBName"], r"^(?P.*[\/\\])?(?P[^\/\\]+?)(\.[hH]5)?$", ) return None @staticmethod @plugins.HOOKIMPL def mpiActionRequiresReset(cmd) -> bool: """ Prevent reactor resets after certain mpi actions. * Memory profiling is small enough that we don't want to reset * distributing state would be undone by this so we don't want that. 
See Also -------- armi.operators.operatorMPI.OperatorMPI.workerOperate """ from armi import mpiActions from armi.bookkeeping import memoryProfiler if isinstance(cmd, mpiActions.MpiAction): for donotReset in ( mpiActions.DistributeStateAction, mpiActions.DistributionAction, memoryProfiler.PrintSystemMemoryUsageAction, memoryProfiler.ProfileMemoryUsageAction, ): if isinstance(cmd, donotReset): return False return True ================================================ FILE: armi/bookkeeping/db/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The db package is responsible for reading and writing the state of the reactor to/from disk. As an ARMI run progresses, this is periodically updated as the primary output file. It can also be an input file for follow-on analysis or restart runs. This module contains factories for selecting and building DB-related objects. When updating a db version -------------------------- The code associated with reading and writing database files may not benefit from Don't Repeat Yourself (DRY) practices in the same way as other code. Therefore, do not share code between different major versions of the databases. Create a new module if you are creating a new major database version. Database revision changelog --------------------------- - 1: Originally, calculation results were stored in a SQL database. - 2: The storage format was changed to HDF5. 
This required less external infrastructure than SQL. However, the implementation did not store a complete model of a reactor, but a ghost of assembly, block, and reactor parameters that could be applied to an existing reactor model (so long as the dimensions were consistent). This was inconvenient and error prone. - 3: The HDF5 format was kept, but the schema was made more flexible to permit storing the entire reactor model. All objects in the ARMI Composite Model are written to the database, and the model can be completely recovered from just the HDF5 file. - 3.1: Improved the handling of reading/writing grids. - 3.2: Changed the strategy for storing large attributes to using a special string starting with an "@" symbol (e.g., "@/c00n00/attrs/5_linkedDims"). This was done to support copying time node datasets from one file to another without invalidating the references. Support was maintained for reading previous versions, by performing a ``mergeHistory()`` and converting to the new naming strategy, but the old version cannot be written. - 3.3: Compressed the way locations are stored in the database and allow MultiIndex locations to be read and written. - 3.4: Modified the way locations are stored in the database to include complete indices for indices that can be composed from multiple grids. Having complete indices allows for more efficient means of extracting information based on location, without having to compose the full model. 
""" import os from armi import runLog from armi.bookkeeping.db.compareDB3 import compareDatabases # re-export package components for easier import from armi.bookkeeping.db.database import Database from armi.bookkeeping.db.databaseInterface import DatabaseInterface from armi.bookkeeping.db.factory import databaseFactory __all__ = [ "Database", "DatabaseInterface", "compareDatabases", "databaseFactory", ] def loadOperator( pathToDb, loadCycle, loadNode, statePointName=None, allowMissing=False, handleInvalids=True, callReactorConstructionHook=False, ): """ Return an operator given the path to a database. Parameters ---------- pathToDb : str The path of the database to load from. loadCycle : int The cycle to load the reactor state from. loadNode : int The time node to load the reactor from. statePointName: str State point name at the end, E.G. `EOC` or `EOL`. Full name would be C0N2EOC, see database.getH5GroupName allowMissing : bool Whether to emit a warning, rather than crash if reading a database with undefined parameters. Default False. handleInvalids : bool Whether to check for invalid settings. Default True. callReactorConstructionHook : bool Flag for whether the beforeReactorConstruction plugin hook should be executed. Default is False. See Also -------- armi.operator.Operator.loadState: A method for loading reactor state that is useful if you already have an operator and a reactor object. loadOperator varies in that it supplies these given only a database file. loadState should be used if you are in the middle of an ARMI calculation and need load a different time step. Notes ----- The operator will have a reactor attached that is loaded to the specified cycle and node. The operator will not be in the same state that it was at that cycle and node, only the reactor. Examples -------- >>> o = db.loadOperator(r"pathToDatabase", 0, 1) >>> r = o.r >>> cs = o.cs >>> r.p.timeNode 1 >>> r.getFPMass() # Note since it is loaded from step 1 there are fission products. 
12345.67 """ # `import armi` doesn't work if imported at top from armi import cases if not os.path.exists(pathToDb): raise ValueError( f"Specified database at path {pathToDb} does not exist. \n\n" "Double check that escape characters were correctly processed.\n" "Consider sending the full path, or change directory to be the directory " "of the database." ) db = Database(pathToDb, "r") with db: # init Case here as it keeps track of execution time and assigns a reactor # attribute. This attribute includes the time it takes to initialize the reactor # so creating a reactor from the database should be included. cs = db.loadCS(handleInvalids=handleInvalids) thisCase = cases.Case(cs) r = db.load( loadCycle, loadNode, cs=cs, statePointName=statePointName, allowMissing=allowMissing, handleInvalids=handleInvalids, callReactorConstructionHook=callReactorConstructionHook, ) o = thisCase.initializeOperator(r=r) runLog.important( "The operator will not be in the same state that it was at that cycle and " "node, only the reactor.\n" "The operator should have access to the same interface stack, but the " "interfaces will not be in the same state (they will be fresh instances " "of each interface as if __init__ was just called rather than the state " "during the run at this time node.)\n" "ARMI does not support loading operator states, as they are not stored." ) return o def _getH5File(db): """Return the underlying h5py File that provides the backing storage for a database. This is done here because HDF5 isn't an official aspect of the base Database abstraction, and thus making this part of the base Database class interface wouldn't be ideal. **However**, we violate this assumption when working with "auxiliary" data, which use HDF5 features directly. To be able to convert, we need to be able to access and copy these groups, so we need access to the HDF5 file under the hood. 
To avoid this, we would need to come up with our own formalization of what a storage-agnostic aux data concept looks like. We can tackle that if/when we decode that we want to start using protobufs or whatever. All this being said, we are probably violating this already with genAuxiliaryData, but we have to start somewhere. """ if isinstance(db, Database): return db.h5db else: raise TypeError("Unsupported Database type ({})!".format(type(db))) ================================================ FILE: armi/bookkeeping/db/compareDB3.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Use the generic database class to compare two ARMI databases. This assumes some intimate knowledge about how the database is structured internally. For instance, it knows that the database is composed of HDF5 data (the attrs of a dataset are used, and h5py Groups are indexed), and it knows how special data is structured within the HDF5 dataset and what the corresponding attributes are used for. Some of this could be easily pulled up to the public interfaces of the Database class, which may allow for cross-version database checking, but there is probably little value in doing so if one is able to convert between versions. Speaking of conversions, there are some common issues that may arise from comparing against databases that were converted from an old version. 
The process of reading in the old database values can sometimes lead to more parameters being written out to the new database than were in the original database (set to the parameter's default value). That means that one generally should not be worried about a converted database having more parameters in it that the one produced directly may not, assuming that the extra converted parameters are the default. Also, especially at the Component level, some of the parameters are expected to be different. Specifically the following: * temperatures: The old database format simply did not store these on the component level, so when converting a database, the components in a block will uniformly get whatever the Block temperature was. * serial numbers: At all levels, we cannot really expect the serial numbers to line up from object to object. These are not really supposed to be the same. * volume: Component volumes also are not stored on the database, and come from temperatures * memory usage: Relatively self-evident. Resource usage will vary from run to run, even if the code hasn't changed. 
""" import collections import os import re import traceback from typing import Optional, Pattern, Sequence, Tuple import h5py import numpy as np from armi import runLog from armi.bookkeeping.db import database from armi.bookkeeping.db.database import Database from armi.bookkeeping.db.factory import databaseFactory from armi.bookkeeping.db.permissions import Permissions from armi.reactor.composites import ArmiObject from armi.utils.tabulate import tabulate class OutputWriter: """Basically a tee to writeln to runLog and the output file.""" def __init__(self, fname): self.fname = fname self._stream = None def __enter__(self): self._stream = open(self.fname, "w") return self def __exit__(self, *args): self._stream.close() def writeln(self, msg: str) -> None: runLog.info(msg) self._stream.write(msg) self._stream.write("\n") class DiffResults: """Utility class for storing differences between database data. This class is used to store the differences between reference data and other ("source") data. It is configured with a tolerance, below which differences are ignored. Differences that exceed the tolerance are stored in a collection of differences, organized by time step to be outputted later. It also keeps track of the number of issues that may have been encountered in attempting to compare two databases. For instance, missing datasets on one database or the other, or datasets with incompatible dimensions and the like. All differences are based on a weird type of relative difference, which uses the mean of the reference and source data elements as the normalization value: 2*(C-E)/(C+E). This is somewhat strange, in that if the two are very different, the reported relative difference will be smaller than expected. It does have the useful property that if the reference value is zero and the source value is non-zero, the diff will not be infinite. 
    We do not typically report these in any rigorous manner, so this should be fine, though we may
    wish to revisit this in the future.
    """

    def __init__(self, tolerance):
        # column labels (one per compared time step) and per-time-step structure-diff counters
        self._columns = []
        self._structureDiffs = []
        self.tolerance = tolerance

        # diffs is a dictionary, keyed on strings describing the object to which the
        # diffs apply, and the different diff metrics that we use (e.g. mean(abs(diff)),
        # max(abs(diff))), with the values being a list of diffs by time step. If the
        # diff doesn't exceed the tolerance, a None is inserted instead.
        self.diffs = collections.defaultdict(self._getDefault)

    def addDiff(self, compType: str, paramName: str, absMean: float, mean: float, absMax: float) -> None:
        """Add a collection of diffs to the diff dictionary if they exceed the tolerance."""
        # values at or below tolerance are recorded as None so they can be filtered later
        absMean = absMean if absMean > self.tolerance else None
        self.diffs["{}/{} mean(abs(diff))".format(compType, paramName)].append(absMean)

        mean = mean if abs(mean) > self.tolerance else None
        self.diffs["{}/{} mean(diff)".format(compType, paramName)].append(mean)

        absMax = absMax if absMax > self.tolerance else None
        self.diffs["{}/{} max(abs(diff))".format(compType, paramName)].append(absMax)

    def addStructureDiffs(self, nDiffs: int) -> None:
        # accumulate structural (missing group/dataset) diffs onto the current time step
        if not self._structureDiffs:
            self._structureDiffs = [0]

        self._structureDiffs[-1] += nDiffs

    def addTimeStep(self, tsName: str) -> None:
        # start a fresh structure-diff counter and column for this time step
        self._structureDiffs.append(0)
        self._columns.append(tsName)

    def _getDefault(self) -> list:
        # new diff rows are back-filled with None for time steps already processed
        return [None] * (len(self._columns) - 1)

    def reportDiffs(self, stream: OutputWriter) -> None:
        """Print out a well-formatted table of the non-zero diffs."""
        # filter out empty rows (rows where every time step was within tolerance)
        diffsToPrint = {key: value for key, value in self.diffs.items() if not all(v is None for v in value)}
        stream.writeln(
            tabulate(
                [k.split() + val for k, val in sorted(diffsToPrint.items())],
                headers=self._columns,
            )
        )

    def nDiffs(self) -> int:
        """Return the number of differences that exceeded the tolerance."""
        # each row with at least one over-tolerance entry counts once, plus all structure diffs
        return sum(1 for _, value in self.diffs.items() if any(v is not None for v in value)) + sum(
            self._structureDiffs
        )


def compareDatabases(
    refFileName: str,
    srcFileName: str,
    exclusions: Optional[Sequence[str]] = None,
    tolerance: float = 0.0,
    timestepCompare: Optional[Sequence[Tuple[int, int]]] = None,
) -> Optional[DiffResults]:
    """High-level method to compare two ARMI H5 files, given file paths."""
    compiledExclusions = None
    if exclusions is not None:
        compiledExclusions = [re.compile(ex) for ex in exclusions]

    outputName = os.path.basename(refFileName) + "_vs_" + os.path.basename(srcFileName) + ".txt"

    diffResults = DiffResults(tolerance)
    with OutputWriter(outputName) as out:
        ref = databaseFactory(refFileName, Permissions.READ_ONLY_FME)
        src = databaseFactory(srcFileName, Permissions.READ_ONLY_FME)
        if not isinstance(ref, Database) or not isinstance(src, Database):
            raise TypeError(
                "This database comparer only knows how to deal with database version 3; received {} and {}".format(
                    type(ref), type(src)
                )
            )

        with ref, src:
            if not timestepCompare:
                # differing time step structure makes element-wise comparison meaningless
                _, nDiff = _compareH5Groups(out, ref, src, "timesteps")
                if nDiff > 0:
                    runLog.warning(
                        "{} and {} have differing timestep groups, and are "
                        "probably not safe to compare. This is likely due to one of "
                        "the cases having failed to complete.".format(ref, src)
                    )
                    return None
            for refGroup, srcGroup in zip(
                ref.genTimeStepGroups(timeSteps=timestepCompare),
                src.genTimeStepGroups(timeSteps=timestepCompare),
            ):
                runLog.info(
                    f"Comparing ref time step {refGroup.name.split('/')[1]} to src time "
                    f"step {srcGroup.name.split('/')[1]}"
                )
                diffResults.addTimeStep(refGroup.name)
                _compareTimeStep(out, refGroup, srcGroup, diffResults, exclusions=compiledExclusions)

        diffResults.reportDiffs(out)

    return diffResults


def _compareH5Groups(out: OutputWriter, ref: h5py.Group, src: h5py.Group, name: str) -> Tuple[Sequence[str], int]:
    # returns the sorted intersection of keys plus the number of mismatched keys
    refGroups = set(ref.keys())
    srcGroups = set(src.keys())
    n = _compareSets(srcGroups, refGroups, out, name)
    return sorted(refGroups & srcGroups), n


def _compareTimeStep(
    out: OutputWriter,
    refGroup: h5py.Group,
    srcGroup: h5py.Group,
    diffResults: DiffResults,
    exclusions: Optional[Sequence[Pattern]] = None,
):
    groupNames, structDiffs = _compareH5Groups(out, refGroup, srcGroup, "composite objects/auxiliary data")
    diffResults.addStructureDiffs(structDiffs)

    # split the shared groups into ArmiObject component types and auxiliary data;
    # "layout" is structural metadata and is not diffed here
    componentTypes = {gn for gn in groupNames if gn in ArmiObject.TYPES}
    auxData = set(groupNames) - componentTypes
    auxData.discard("layout")

    for componentType in componentTypes:
        refTypeGroup = refGroup[componentType]
        srcTypeGroup = srcGroup[componentType]
        _compareComponentData(out, refTypeGroup, srcTypeGroup, diffResults, exclusions=exclusions)

    for aux in auxData:
        _compareAuxData(out, refGroup[aux], srcGroup[aux], diffResults)


def _compareAuxData(
    out: OutputWriter,
    refGroup: h5py.Group,
    srcGroup: h5py.Group,
    diffResults: DiffResults,
):
    """
    Compare auxiliary datasets, which aren't stored as Parameters on the Composite model.

    Some parts of ARMI directly create HDF5 groups under the time step group to store arbitrary
    data. These still need to be compared. Missing datasets will be treated as structure
    differences and reported.
""" data = dict() def visitor(name, obj): if isinstance(obj, h5py.Dataset): data[name] = obj refGroup.visititems(visitor) refData = data data = dict() srcGroup.visititems(visitor) srcData = data n = _compareSets(set(srcData.keys()), set(refData.keys()), out, name="auxiliary dataset") diffResults.addStructureDiffs(n) matchedSets = set(srcData.keys()) & set(refData.keys()) for name in matchedSets: _diffSimpleData(refData[name], srcData[name], diffResults) def _compareSets(src: set, ref: set, out: OutputWriter, name: Optional[str] = None) -> int: nDiffs = 0 printName = "" if name is None else name + " " if ref - src: nDiffs += len(ref - src) out.writeln("ref has {}not in src: {}".format(printName, list(ref - src))) if src - ref: nDiffs += len(src - ref) out.writeln("src has {}not in ref: {}".format(printName, list(src - ref))) return nDiffs def _diffSpecialData( refData: h5py.Dataset, srcData: h5py.Dataset, out: OutputWriter, diffResults: DiffResults, ): """ Compare specially-formatted datasets. This employs the pack/unpackSpecialData functions to reconstitute complicated datasets for comparison. These usually don't behave well as giant numpy arrays, so we go element-by-element to calculate the diffs, then concatenate them. 
""" name = refData.name paramName = refData.name.split("/")[-1] compName = refData.name.split("/")[-2] nDiffs = _compareSets(set(srcData.attrs.keys()), set(refData.attrs.keys()), out, "formatting data") keysMatch = nDiffs == 0 diffResults.addStructureDiffs(nDiffs) if not keysMatch: diffResults.addDiff(name, name, np.inf, np.inf, np.inf) return if srcData.attrs.get("dict", False): out.writeln(f"Not comparing {name} as it is a dictionary.") return attrsMatch = True for k, srcAttr in srcData.attrs.items(): refAttr = refData.attrs[k] if isinstance(srcAttr, np.ndarray) and isinstance(refAttr, np.ndarray): srcFlat = srcAttr.flatten() refFlat = refAttr.flatten() if len(srcFlat) != len(refFlat): same = False else: same = all(srcFlat == refFlat) else: same = srcAttr == refAttr if not same: attrsMatch = False out.writeln( "Special formatting parameters for {} do not match for {}. Src: {} Ref: {}".format( name, k, srcData.attrs[k], refData.attrs[k] ) ) break if not attrsMatch: diffResults.addDiff(compName, paramName, np.inf, np.inf, np.inf) return try: src = database.unpackSpecialData(srcData[()], srcData.attrs, paramName) ref = database.unpackSpecialData(refData[()], refData.attrs, paramName) except Exception: runLog.error( f"Unable to unpack special data for paramName {paramName}. {traceback.format_exc()}", ) return diff = [] for dSrc, dRef in zip(src.tolist(), ref.tolist()): if isinstance(dSrc, np.ndarray) and isinstance(dRef, np.ndarray): if dSrc.shape != dRef.shape: out.writeln("Shapes did not match for {}".format(refData)) diffResults.addDiff(compName, paramName, np.inf, np.inf, np.inf) return if dSrc.dtype.type == np.bytes_ or dRef.dtype.type == np.bytes_: # data is byte strings; can't be diffed like numbers if np.array_equal(dSrc, dRef): diffResults.addDiff(name, name, 0.0, 0.0, 0.0) else: diffResults.addDiff(name, name, np.inf, np.inf, np.inf) return # Make sure not to try to compare empty arrays. 
Numpy is mediocre at these; # they are super degenerate and cannot participate in concatenation. if 0 not in dSrc.shape: # Use the mean of the two to calc relative error. This is more robust to # changes that cause one of the values to be zero, while the other is # non-zero, leading to infinite relative error dMean = (dSrc + dRef) / 2 diff.append((dSrc - dRef) / dMean) continue if (dSrc is None) ^ (dRef is None): out.writeln("Mismatched Nones for {} in {}".format(paramName, compName)) diff.append([np.inf]) continue if dSrc is None: diff.append([0.0]) continue try: # Use mean to avoid some infinities; see above dMean = (dSrc + dRef) / 2 diff.append([(dSrc - dRef) / dMean]) except ZeroDivisionError: if dSrc == dRef: diff.append([0.0]) else: diff.append([np.inf]) if diff: try: diff = [np.array(d).flatten() for d in diff] diff = np.concatenate(diff) except ValueError as e: out.writeln("Failed to concatenate diff data for {} in {}: {}".format(paramName, compName, diff)) out.writeln("Because: {}".format(e)) return absDiff = np.abs(diff) mean = np.nanmean(diff) absMax = np.nanmax(absDiff) absMean = np.nanmean(absDiff) diffResults.addDiff(compName, paramName, absMean, mean, absMax) def _diffSimpleData(ref: h5py.Dataset, src: h5py.Dataset, diffResults: DiffResults): paramName = ref.name.split("/")[-1] compName = ref.name.split("/")[-2] try: # use mean to avoid some unnecessary infinities mean = (src[()] + ref[()]) / 2.0 diff = (src[()] - ref[()]) / mean except TypeError: # Strings are persnickety if src.dtype.kind == ref.dtype.kind and src.dtype.kind in {"U", "S"}: return else: runLog.error("Failed to compare {} in {}".format(paramName, compName)) runLog.error("source: {}".format(src)) runLog.error("reference: {}".format(ref)) diff = np.array([np.inf]) except ValueError: runLog.error("Failed to compare {} in {}".format(paramName, compName)) runLog.error("source: {}".format(src)) runLog.error("reference: {}".format(ref)) diff = np.array([np.inf]) if 0 in diff.shape: # Empty 
list, no diff return absDiff = np.abs(diff) mean = np.nanmean(diff) absMax = np.nanmax(absDiff) absMean = np.nanmean(absDiff) diffResults.addDiff(compName, paramName, absMean, mean, absMax) def _compareComponentData( out: OutputWriter, refGroup: h5py.Group, srcGroup: h5py.Group, diffResults: DiffResults, exclusions: Optional[Sequence[Pattern]] = None, ): exclusions = exclusions or [] compName = refGroup.name paramNames, nDiff = _compareH5Groups(out, refGroup, srcGroup, "{} parameters".format(compName)) diffResults.addStructureDiffs(nDiff) for paramName in paramNames: fullName = "/".join((refGroup.name, paramName)) if any(pattern.match(fullName) for pattern in exclusions): runLog.debug("Skipping comparison of {} since it is being ignored.".format(fullName)) continue refDataset = refGroup[paramName] srcDataset = srcGroup[paramName] srcSpecial = srcDataset.attrs.get("specialFormatting", False) refSpecial = refDataset.attrs.get("specialFormatting", False) if srcSpecial ^ refSpecial: out.writeln( "Could not compare data for parameter {} because one uses special " "formatting, and the other does not. Ref: {} Src: {}".format(paramName, refSpecial, srcSpecial) ) diffResults.addDiff(refGroup.name, paramName, np.inf, np.inf, np.inf) continue if srcSpecial or refSpecial: _diffSpecialData(refDataset, srcDataset, out, diffResults) else: _diffSimpleData(refDataset, srcDataset, diffResults) ================================================ FILE: armi/bookkeeping/db/database.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ARMI Database implementation, version 3.4. A reactor model should be fully recoverable from the database; all the way down to the component level. As a result, the structure of the underlying data is bound to the hierarchical Composite Reactor Model. Furthermore, this database format is intended to be more dynamic, permitting as-yet undeveloped levels and classes in the Composite Reactor Model to be supported as they are added. More high-level discussion is contained in :ref:`database-file`. The :py:class:`Database` class contains most of the functionality for interacting with the underlying data. This includes things like dumping a Reactor state to the database and loading it back again, as well as extracting historical data for a given object or collection of object from the database file. However, for the nitty-gritty details of how the hierarchical Composite Reactor Model is translated to the flat file database, please refer to :py:mod:`armi.bookkeeping.db.layout`. Refer to :py:mod:`armi.bookkeeping.db` for information about versioning. 
""" import collections import copy import gc import io import itertools import os import pathlib import re import shutil import subprocess import sys from platform import uname from typing import ( Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union, ) import h5py import numpy as np from armi import context, getApp, getPluginManagerOrFail, meta, runLog, settings from armi.bookkeeping.db.jaggedArray import JaggedArray from armi.bookkeeping.db.layout import ( DB_VERSION, LOC_COORD, Layout, replaceNonesWithNonsense, replaceNonsenseWithNones, ) from armi.bookkeeping.db.typedefs import Histories, History from armi.physics.neutronics.settings import CONF_LOADING_FILE from armi.reactor import grids, parameters from armi.reactor.assemblies import Assembly from armi.reactor.blocks import Block from armi.reactor.components import Component from armi.reactor.composites import ArmiObject from armi.reactor.parameters import parameterCollections from armi.reactor.reactorParameters import makeParametersReadOnly from armi.reactor.reactors import Core, Reactor from armi.settings.fwSettings.globalSettings import ( CONF_GROW_TO_FULL_CORE_AFTER_LOAD, CONF_SORT_REACTOR, ) from armi.utils import getNodesPerCycle, safeCopy, safeMove from armi.utils.textProcessors import resolveMarkupInclusions # CONSTANTS _SERIALIZER_NAME = "serializerName" _SERIALIZER_VERSION = "serializerVersion" def getH5GroupName(cycle: int, timeNode: int, statePointName: str = None) -> str: """ Naming convention specifier. ARMI defines the naming convention cXXnYY for groups of simulation data. That is, data is grouped by cycle and time node information during a simulated run. """ return "c{:0>2}n{:0>2}{}".format(cycle, timeNode, statePointName or "") class Database: """ ARMI Database, handling serialization and loading of Reactor states. This implementation of the database pushes all objects in the Composite Reactor Model into the database. 
    This process is aided by the ``Layout`` class, which handles the packing and unpacking of the
    structure of the objects, their relationships, and their non-parameter attributes.

    .. impl:: The database files are H5, and thus language agnostic.
        :id: I_ARMI_DB_H51
        :implements: R_ARMI_DB_H5

        This class implements a light wrapper around H5 files, so they can be used to store ARMI
        outputs. H5 files are commonly used in scientific applications in Fortran and C++. As
        such, they are entirely language agnostic binary files. The implementation here is that
        ARMI wraps the ``h5py`` library, and uses its extensive tooling, instead of re-inventing
        the wheel.

    See Also
    --------
    `doc/user/outputs/database` for more details.
    """

    # Allows matching for, e.g., c01n02EOL
    timeNodeGroupPattern = re.compile(r"^c(\d\d)n(\d\d).*$")

    def __init__(self, fileName: os.PathLike, permission: str = "r"):
        """
        Create a new Database object.

        Parameters
        ----------
        fileName:
            name of the file

        permission:
            file permissions, write ("w") or read ("r")
        """
        self._fileName = fileName
        # No full path yet; we will determine this based on FAST_PATH and permissions
        self._fullPath: Optional[str] = None
        self._permission = permission
        self.h5db: Optional[h5py.File] = None

        # Allows context management on open files. If context management is used on a file that is
        # already open, it will not reopen and it will also not close after leaving that context.
        # This allows the treatment of all databases the same whether they are open or closed.
        self._openCount: int = 0

        if permission == "w":
            # write mode always produces the current DB format version
            self.version = DB_VERSION
        else:
            # will be set upon read
            self._version = None
            self._versionMajor = None
            self._versionMinor = None

    @property
    def version(self) -> str:
        # Full "major.minor" DB format version string (None until read, for read-mode files).
        return self._version

    @version.setter
    def version(self, value: str):
        # Parse and cache major/minor components; only major version 3 is supported.
        self._version = value
        self._versionMajor, self._versionMinor = (int(v) for v in value.split("."))
        if self.versionMajor != 3:
            raise ValueError(f"This version of ARMI only supports version 3 of the ARMI DB, found {self.versionMajor}.")

    @property
    def versionMajor(self):
        # Major component of the DB format version, set by the ``version`` setter.
        return self._versionMajor

    @property
    def versionMinor(self):
        # Minor component of the DB format version, set by the ``version`` setter.
        return self._versionMinor

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, repr(self.h5db).replace("<", "").replace(">", ""))

    def open(self):
        """Open the underlying HDF5 file according to this object's permission mode."""
        if self.h5db is not None:
            raise ValueError("This database is already open; make sure to close it before trying to open it again.")
        filePath = self._fileName
        self._openCount += 1

        if self._permission in {"r", "a"}:
            # existing file: open in place and read its format version from the attrs
            self._fullPath = os.path.abspath(filePath)
            self.h5db = h5py.File(filePath, self._permission)
            self.version = self.h5db.attrs["databaseVersion"]
            return

        if self._permission == "w":
            # assume fast path!
            filePath = os.path.join(context.getFastPath(), filePath)
            self._fullPath = os.path.abspath(filePath)
        else:
            runLog.error(f"Unrecognized file permissions `{self._permission}`")
            raise ValueError(f"Cannot open database with permission `{self._permission}`")

        # open the database, and write a bunch of metadata to it
        runLog.info("Opening database file at {}".format(os.path.abspath(filePath)))
        self.h5db = h5py.File(filePath, self._permission)
        # flipped to True in close() if the run finishes cleanly
        self.h5db.attrs["successfulCompletion"] = False
        self.h5db.attrs["version"] = meta.__version__
        self.h5db.attrs["databaseVersion"] = self.version
        self.writeSystemAttributes(self.h5db)

        # store app and plugin data
        app = getApp()
        self.h5db.attrs["appName"] = app.name
        plugins = app.pluginManager.list_name_plugin()
        ps = [(os.path.abspath(sys.modules[p[1].__module__].__file__), p[1].__name__) for p in plugins]
        ps = np.array([str(p[0]) + ":" + str(p[1]) for p in ps]).astype("S")
        self.h5db.attrs["pluginPaths"] = ps
        self.h5db.attrs["localCommitHash"] = Database.grabLocalCommitHash()
    def isOpen(self):
        """Return True if the underlying HDF5 file is currently open."""
        return self.h5db is not None

    @staticmethod
    def writeSystemAttributes(h5db):
        """Write system attributes to the database.

        .. impl:: Add system attributes to the database.
            :id: I_ARMI_DB_QA
            :implements: R_ARMI_DB_QA

            This method writes some basic system information to the H5 file. This is designed as
            a starting point, so users can see information about the system their simulations
            were run on. As ARMI is used on Windows and Linux, the tooling here has to be
            platform independent. The two major sources of information are the ARMI
            :py:mod:`context <armi.context>` module and the Python standard library ``platform``.

        Parameters
        ----------
        h5db : h5py.File
            Open, writable H5 file; its root attributes are populated in place.
        """
        h5db.attrs["user"] = context.USER
        h5db.attrs["python"] = sys.version
        h5db.attrs["armiLocation"] = os.path.dirname(context.ROOT)
        h5db.attrs["startTime"] = context.START_TIME
        h5db.attrs["machines"] = np.array(context.MPI_NODENAMES).astype("S")

        # store platform data
        platform_data = uname()
        h5db.attrs["platform"] = platform_data.system
        h5db.attrs["hostname"] = platform_data.node
        h5db.attrs["platformRelease"] = platform_data.release
        h5db.attrs["platformVersion"] = platform_data.version
        h5db.attrs["platformArch"] = platform_data.processor
""" h5db.attrs["user"] = context.USER h5db.attrs["python"] = sys.version h5db.attrs["armiLocation"] = os.path.dirname(context.ROOT) h5db.attrs["startTime"] = context.START_TIME h5db.attrs["machines"] = np.array(context.MPI_NODENAMES).astype("S") # store platform data platform_data = uname() h5db.attrs["platform"] = platform_data.system h5db.attrs["hostname"] = platform_data.node h5db.attrs["platformRelease"] = platform_data.release h5db.attrs["platformVersion"] = platform_data.version h5db.attrs["platformArch"] = platform_data.processor @staticmethod def grabLocalCommitHash(): """ Try to determine the local Git commit. We have to be sure to handle the errors where the code is run on a system that doesn't have Git installed. Or if the code is simply not run from inside a repo. Returns ------- str The commit hash if it exists, otherwise "unknown". """ unknown = "unknown" if not shutil.which("git"): # no git available. cannot check git info return unknown repo_exists = ( subprocess.run( "git rev-parse --git-dir".split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ).returncode == 0 and subprocess.run( ["git", "describe"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ).returncode == 0 ) if repo_exists: try: commit_hash = subprocess.check_output(["git", "describe"]) return commit_hash.decode("utf-8").strip() except Exception: return unknown else: return unknown def close(self, completedSuccessfully=False): """Close the DB and perform cleanups and auto-conversions.""" self._openCount = 0 if self.h5db is None: return if self._permission == "w": self.h5db.attrs["successfulCompletion"] = completedSuccessfully # a bit redundant to call flush, but with unreliable IO issues, why not? 
    def splitDatabase(self, keepTimeSteps: Sequence[Tuple[int, int]], label: str) -> str:
        """
        Discard all data except for specific time steps, retaining old data in a separate file.

        This is useful when performing more exotic analyses, where each "time step" may not
        represent a specific point in time, but something more nuanced. For example, equilibrium
        cases store a new "cycle" for each iteration as it attempts to converge the equilibrium
        cycle. At the end of the run, the last "cycle" is the converged equilibrium cycle,
        whereas the previous cycles constitute the path to convergence, which we typically wish
        to discard before further analysis.

        Parameters
        ----------
        keepTimeSteps
            A collection of the time steps to retain

        label
            An informative label for the backed-up database. Usually something like
            "-all-iterations". Will be interposed between the source name and the ".h5"
            extension.

        Returns
        -------
        str
            The name of the new, backed-up database file.
        """
        if self.h5db is None:
            raise ValueError("There is no open database to split.")

        self.h5db.close()

        backupDBPath = os.path.abspath(label.join(os.path.splitext(self._fileName)))
        runLog.info(f"Retaining full database history in {backupDBPath}")
        if self._fullPath is not None:
            safeMove(self._fullPath, backupDBPath)

        # Re-create the working file at the original path; the full history now lives in the
        # backup, and selected time steps are copied back below.
        self.h5db = h5py.File(self._fullPath, self._permission)
        dbOut = self.h5db

        with h5py.File(backupDBPath, "r") as dbIn:
            dbOut.attrs.update(dbIn.attrs)

            # Copy everything except time node data
            timeSteps = set()
            for groupName, _ in dbIn.items():
                m = self.timeNodeGroupPattern.match(groupName)
                if m:
                    timeSteps.add((int(m.group(1)), int(m.group(2))))
                else:
                    dbIn.copy(groupName, dbOut)

            if not set(keepTimeSteps).issubset(timeSteps):
                raise ValueError(f"Not all desired time steps ({keepTimeSteps}) are even present in the database")

            # Renumber the retained cycles so the first kept cycle becomes cycle 0.
            minCycle = next(iter(sorted(keepTimeSteps)))[0]
            for cycle, node in keepTimeSteps:
                offsetCycle = cycle - minCycle
                offsetGroupName = getH5GroupName(offsetCycle, node)
                dbIn.copy(getH5GroupName(cycle, node), dbOut, name=offsetGroupName)
                dbOut[offsetGroupName + "/Reactor/cycle"][()] = offsetCycle

        return backupDBPath

    @property
    def fileName(self):
        # The database file name as given at construction (or via the setter).
        return self._fileName

    @fileName.setter
    def fileName(self, fName):
        if self.h5db is not None:
            raise RuntimeError("Cannot change Database file name while it's opened!")
        self._fileName = fName

    def loadCS(self, handleInvalids=True):
        """Attempt to load settings from the database file.

        Parameters
        ----------
        handleInvalids : bool
            Whether to check for invalid settings. Default True.

        Notes
        -----
        There are no guarantees here. If the database was written from a different version of
        ARMI than you are using, these results may not be usable. Or if the database was written
        using a custom Application you do not have access to, the DB may not be usable.
        """
        cs = settings.Settings()
        cs.path = self.fileName
        cs.loadFromString(self.h5db["inputs/settings"].asstr()[()], handleInvalids=handleInvalids)
        return cs
""" cs = settings.Settings() cs.path = self.fileName cs.loadFromString(self.h5db["inputs/settings"].asstr()[()], handleInvalids=handleInvalids) return cs def loadBlueprints(self, cs=None): """Attempt to load reactor blueprints from the database file. Notes ----- There are no guarantees here. If the database was written from a different version of ARMI than you are using, these results may not be usable. Or if the database was written using a custom Application you do not have access to, the DB may not be usable. """ # Blueprints use the yamlize package, which uses class attributes to define much of the class's behavior through # metaclassing. Therefore, we need to be able to import all plugins before importing blueprints. from armi.reactor.blueprints import Blueprints bpString = None try: bpString = self.h5db["inputs/blueprints"].asstr()[()] # Need to update the blueprint file to be the database so that its not pointing at a source that doesn't # exist anymore (the original blueprints yaml). if cs: # Update the settings to point at where the file was actually read from cs[CONF_LOADING_FILE] = os.path.basename(self.fileName) except KeyError: # not all reactors need to be created from blueprints, so they may not exist pass if not bpString: # looks like no blueprints contents return None stream = io.StringIO(bpString) stream = Blueprints.migrate(stream) return Blueprints.load(stream) def writeInputsToDB(self, cs, csString=None, bpString=None): """ Write inputs into the database based the Settings. This is not DRY on purpose. The goal is that any particular Database implementation should be very stable, so we dont want it to be easy to change one Database implementation's behavior when trying to change another's. .. impl:: The run settings are saved the settings file. :id: I_ARMI_DB_CS :implements: R_ARMI_DB_CS A ``Settings`` object is passed into this method, and then the settings are converted into a YAML string stream. That stream is then written to the H5 file. 
Optionally, this method can take a pre-build settings string to be written directly to the file. .. impl:: The reactor blueprints are saved the settings file. :id: I_ARMI_DB_BP :implements: R_ARMI_DB_BP A ``Blueprints`` string is optionally passed into this method, and then written to the H5 file. If it is not passed in, this method will attempt to find the blueprints input file in the settings, and read the contents of that file into a stream to be written to the H5 file. Notes ----- This is hard-coded to read the entire file contents into memory and write that directly into the database. We could have the cs/blueprints/geom write to a string, however the ARMI log file contains a hash of each files' contents. In the future, we should be able to reproduce a calculation with confidence that the inputs are identical. """ caseTitle = cs.caseTitle if cs is not None else os.path.splitext(self.fileName)[0] self.h5db.attrs["caseTitle"] = caseTitle if csString is None: # Don't read file; use what's in the cs now. Sometimes settings are modified in tests. stream = io.StringIO() cs.writeToYamlStream(stream) stream.seek(0) csString = stream.read() if bpString is None: bpPath = pathlib.Path(cs.inputDirectory) / cs[CONF_LOADING_FILE] if bpPath.suffix.lower() in (".h5", ".hdf5"): # The blueprints are in a database file, they need to be read try: db = h5py.File(bpPath, "r") bpString = db["inputs/blueprints"].asstr()[()] except KeyError: # not all reactors need to be created from blueprints, so they may not exist bpString = "" else: # The blueprints are a standard blueprints yaml that can be read. if bpPath.exists() and bpPath.is_file(): # Only store blueprints if we actually loaded from them. 
    def readInputsFromDB(self):
        """Return the stored (settings, blueprints) strings as a tuple."""
        return (
            self.h5db["inputs/settings"].asstr()[()],
            self.h5db["inputs/blueprints"].asstr()[()],
        )

    def mergeHistory(self, inputDB, startCycle, startNode):
        """
        Copy time step data up to, but not including the passed cycle and node.

        Notes
        -----
        This is used for restart runs with the standard operator for example. The current time
        step (being loaded from) should not be copied, as that time steps data will be written at
        the end of the time step.
        """
        # both databases must be major version 3
        if self.versionMajor != 3:
            raise ValueError(f"Only version 3 of the ARMI DB is supported, found {self.versionMajor}.")
        elif inputDB.versionMajor != 3:
            raise ValueError(f"Only version 3 of the ARMI DB is supported, found {inputDB.versionMajor}.")

        # iterate over the top level H5Groups and copy
        for time, h5ts in zip(inputDB.genTimeSteps(), inputDB.genTimeStepGroups()):
            cyc, tn = time
            if cyc == startCycle and tn == startNode:
                # all data up to current state are merged
                return
            self.h5db.copy(h5ts, h5ts.name)

    def __enter__(self):
        """Context management support."""
        if self._openCount == 0:
            # open also increments _openCount
            self.open()
        else:
            self._openCount += 1
        return self

    def __exit__(self, type, value, traceback):
        """Typically we don't care why it broke but we want the DB to close."""
        self._openCount -= 1
        # always close if there is a traceback.
        if self._openCount == 0 or traceback:
            # success flag is True only when no exception escaped the with-block
            self.close(all(i is None for i in (type, value, traceback)))
    def __del__(self):
        # Last-ditch cleanup: if the DB was never closed, close it flagged as unsuccessful.
        if self.h5db is not None:
            self.close(False)

    def __delitem__(self, tn: Tuple[int, int, Optional[str]]):
        # Delete one (cycle, timeNode, statePointName) group from the open file.
        cycle, timeNode, statePointName = tn
        name = getH5GroupName(cycle, timeNode, statePointName)
        if self.h5db is not None:
            del self.h5db[name]

    def genTimeStepGroups(
        self, timeSteps: Sequence[Tuple[int, int]] = None
    ) -> Generator[h5py._hl.group.Group, None, None]:
        """Returns a generator of HDF5 Groups for all time nodes, or for the passed selection."""
        assert self.h5db is not None, "Must open the database before calling genTimeStepGroups"
        if timeSteps is None:
            # yield every group whose name matches the cXXnYY convention, in sorted order
            for groupName, h5TimeNodeGroup in sorted(self.h5db.items()):
                match = self.timeNodeGroupPattern.match(groupName)
                if match:
                    yield h5TimeNodeGroup
        else:
            for step in timeSteps:
                yield self.h5db[getH5GroupName(*step)]

    def getLayout(self, cycle, node):
        """Return a Layout object representing the requested cycle and time node."""
        version = (self._versionMajor, self._versionMinor)
        timeGroupName = getH5GroupName(cycle, node)
        return Layout(version, self.h5db[timeGroupName])

    def genTimeSteps(self) -> Generator[Tuple[int, int], None, None]:
        """Returns a generator of (cycle, node) tuples that are present in the DB."""
        assert self.h5db is not None, "Must open the database before calling genTimeSteps"
        for groupName in sorted(self.h5db.keys()):
            match = self.timeNodeGroupPattern.match(groupName)
            if match:
                cycle = int(match.groups()[0])
                node = int(match.groups()[1])
                yield (cycle, node)

    def genAuxiliaryData(self, ts: Tuple[int, int]) -> Generator[str, None, None]:
        """Returns a generator of names of auxiliary data on the requested time point."""
        assert self.h5db is not None, "Must open the database before calling genAuxiliaryData"
        cycle, node = ts
        groupName = getH5GroupName(cycle, node)
        timeGroup = self.h5db[groupName]
        # anything that is not "layout" or one of the known composite-type groups is auxiliary
        exclude = set(ArmiObject.TYPES.keys())
        exclude.add("layout")
        return (groupName + "/" + key for key in timeGroup.keys() if key not in exclude)
    @staticmethod
    def getAuxiliaryDataPath(ts: Tuple[int, int], name: str) -> str:
        """Return the in-file path for auxiliary data ``name`` at time step ``ts``."""
        return getH5GroupName(*ts) + "/" + name

    def keys(self):
        # Names of all time-node groups present in the file.
        return (g.name for g in self.genTimeStepGroups())

    def getH5Group(self, r, statePointName=None):
        """
        Get the H5Group for the current ARMI timestep.

        This method can be used to allow other interfaces to place data into the database at the
        correct timestep.
        """
        groupName = getH5GroupName(r.p.cycle, r.p.timeNode, statePointName)
        if groupName in self.h5db:
            return self.h5db[groupName]
        else:
            # first visit to this time step: create the group and tag it with cycle/node
            group = self.h5db.create_group(groupName, track_order=True)
            group.attrs["cycle"] = r.p.cycle
            group.attrs["timeNode"] = r.p.timeNode
            return group

    def hasTimeStep(self, cycle, timeNode, statePointName=""):
        """Returns True if (cycle, timeNode, statePointName) is contained in the database."""
        return getH5GroupName(cycle, timeNode, statePointName) in self.h5db

    def writeToDB(self, reactor, statePointName=None):
        """Write the reactor model state (layout plus all parameters) to the open database."""
        assert self.h5db is not None, "Database must be open before writing."
        # _createLayout is recursive
        h5group = self.getH5Group(reactor, statePointName)
        runLog.info("Writing to database for statepoint: {}".format(h5group.name))
        layout = Layout((self.versionMajor, self.versionMinor), comp=reactor)
        layout.writeToDB(h5group)
        groupedComps = layout.groupedComps

        # one parameter-dataset group per composite type
        for comps in groupedComps.values():
            self._writeParams(h5group, comps)

    def syncToSharedFolder(self):
        """
        Copy DB to run working directory.

        Needed when multiple MPI processes need to read the same db, for example when a history
        is needed from independent runs (e.g. for fuel performance on a variety of assemblies).

        Notes
        -----
        At some future point, we may implement a client-server like DB system which would render
        this kind of operation unnecessary.
        """
        runLog.extra("Copying DB to shared working directory.")
        self.h5db.flush()

        # Close the h5 file so it can be copied
        self.h5db.close()
        self.h5db = None
        safeCopy(self._fullPath, self._fileName)

        # Garbage collect so we don't have multiple databases hanging around in memory
        gc.collect()

        # Reload the file in append mode and continue on our merry way
        self.h5db = h5py.File(self._fullPath, "r+")
""" runLog.extra("Copying DB to shared working directory.") self.h5db.flush() # Close the h5 file so it can be copied self.h5db.close() self.h5db = None safeCopy(self._fullPath, self._fileName) # Garbage collect so we don't have multiple databases hanging around in memory gc.collect() # Reload the file in append mode and continue on our merry way self.h5db = h5py.File(self._fullPath, "r+") def load( self, cycle, node, cs=None, bp=None, statePointName=None, allowMissing=False, handleInvalids=True, callReactorConstructionHook=False, ): """Load a new reactor from a DB at (cycle, node). Case settings and blueprints can be provided, or read from the database. Providing can be useful for snapshot runs or when you want to change settings mid-simulation. Geometry is read from the database. .. impl:: Users can load a reactor from a DB. :id: I_ARMI_DB_TIME1 :implements: R_ARMI_DB_TIME This method creates a ``Reactor`` object by reading the reactor state out of an ARMI database file. This is done by passing in mandatory arguments that specify the exact place in time you want to load the reactor from. (That is, the cycle and node numbers.) Users can either pass the settings and blueprints directly into this method, or it will attempt to read them from the database file. The primary work done here is to read the hierarchy of reactor objects from the data file, then reconstruct them in the correct order. Parameters ---------- cycle : int Cycle number node : int Time node. If value is negative, will be indexed from EOC backwards like a list. cs : armi.settings.Settings, optional If not provided one is read from the database bp : armi.reactor.Blueprints, optional If not provided one is read from the database statePointName : str, optional Statepoint name (e.g., "special" for "c00n00-special/") allowMissing : bool, optional Whether to emit a warning, rather than crash if reading a database with undefined parameters. Default False. 
handleInvalids : bool Whether to check for invalid settings. Default True. callReactorConstructionHook : bool Flag for whether the beforeReactorConstruction plugin hook should be executed. Default is False. Returns ------- root : Reactor The top-level object stored in the database; a Reactor. """ runLog.info(f"Loading reactor state for time node ({cycle}, {node})") if cs is None: cs = self.loadCS(handleInvalids=handleInvalids) if bp is None: bp = self.loadBlueprints(cs) if callReactorConstructionHook: getPluginManagerOrFail().hook.beforeReactorConstruction(cs=cs) if node < 0: numNodes = getNodesPerCycle(cs)[cycle] if (node + numNodes) < 0: raise ValueError(f"Node {node} specified does not exist for cycle {cycle}") node = numNodes + node h5group = self.h5db[getH5GroupName(cycle, node, statePointName)] layout = Layout((self.versionMajor, self.versionMinor), h5group=h5group) comps, groupedComps = layout._initComps(cs.caseTitle, bp) # populate data onto initialized components for compType, compTypeList in groupedComps.items(): self._readParams(h5group, compType, compTypeList, allowMissing=allowMissing) # assign params from blueprints if bp is not None: self._assignBlueprintsParams(bp, groupedComps) # stitch together self._compose(iter(comps), cs) # also, make sure to update the global serial number so we don't reuse a number parameterCollections.GLOBAL_SERIAL_NUM = max(parameterCollections.GLOBAL_SERIAL_NUM, layout.serialNum.max()) root = comps[0][0] # return a Reactor object if cs[CONF_SORT_REACTOR]: root.sort() else: runLog.warning( "DeprecationWarning: This Reactor is not being sorted on DB load. Due to the setting " f"{CONF_SORT_REACTOR}, this Reactor is unsorted. But this feature is temporary and will be removed by " "2024." ) if cs[CONF_GROW_TO_FULL_CORE_AFTER_LOAD] and not root.core.isFullCore: root.core.growToFullCore(cs) return root def loadReadOnly(self, cycle, node, statePointName=None): """Load a new reactor, in read-only mode from a DB at (cycle, node). 
    @staticmethod
    def _setParamsBeforeFreezing(r: Reactor):
        """Set some special case parameters before they are made read-only."""
        for child in r.iterChildren(deep=True, predicate=lambda c: isinstance(c, Component)):
            # calling Component.getVolume() sets the volume parameter
            child.getVolume()

    @staticmethod
    def _assignBlueprintsParams(blueprints, groupedComps):
        # Push parameters flagged with Category.assignInBlueprints from the block/assembly
        # designs onto the freshly loaded composites.
        for compType, designs in (
            (Block, blueprints.blockDesigns),
            (Assembly, blueprints.assemDesigns),
        ):
            paramsToSet = {pDef.name for pDef in compType.pDefs.inCategory(parameters.Category.assignInBlueprints)}

            for comp in groupedComps[compType]:
                design = designs[comp.p.type]
                for pName in paramsToSet:
                    val = getattr(design, pName)
                    if val is not None:
                        comp.p[pName] = val

    def _compose(self, comps, cs, parent=None):
        """Given a flat collection of all of the ArmiObjects in the model, reconstitute the hierarchy."""
        # consume the next flat entry; children are consumed recursively below
        comp, _, numChildren, location, locationType = next(comps)

        # attach the parent early, if provided; some cases need the parent attached for the rest
        # of _compose to work properly.
        comp.parent = parent

        # The Reactor adds a Core child by default, this is not ideal
        for spontaneousChild in list(comp):
            comp.remove(spontaneousChild)

        if isinstance(comp, Core):
            pass
        elif isinstance(comp, Assembly):
            # Assemblies force their name to be something based on assemNum. When the assembly is
            # created it gets a new assemNum, and throws out the correct name read from the DB.
            comp.name = comp.makeNameFromAssemNum(comp.p.assemNum)
            comp.lastLocationLabel = Assembly.DATABASE

        # set the spatialLocators on each component
        if location is not None:
            if parent is not None and parent.spatialGrid is not None:
                if locationType != LOC_COORD:
                    # We can directly index into the spatial grid for IndexLocation and
                    # MultiIndexLocators to get equivalent spatial locators
                    comp.spatialLocator = parent.spatialGrid[location]
                else:
                    comp.spatialLocator = grids.CoordinateLocation(
                        location[0], location[1], location[2], parent.spatialGrid
                    )
            else:
                comp.spatialLocator = grids.CoordinateLocation(location[0], location[1], location[2], None)

        # Need to keep a collection of Component instances for linked dimension resolution,
        # before they can be add()ed to their parents. Not just filtering out of `children`,
        # since resolveLinkedDims() needs a dict
        childComponents = collections.OrderedDict()
        children = []

        for _ in range(numChildren):
            child = self._compose(comps, cs, parent=comp)
            children.append(child)
            if isinstance(child, Component):
                childComponents[child.name] = child

        # resolve dimension links before the components are attached to their parent
        for _childName, child in childComponents.items():
            child.resolveLinkedDims(childComponents)

        for child in children:
            comp.add(child)

        # type-specific post-load finalization
        if isinstance(comp, Core):
            comp.processLoading(cs, dbLoad=True)
        elif isinstance(comp, Assembly):
            comp.calculateZCoords()
        elif isinstance(comp, Component):
            comp.finalizeLoadingFromDB()

        return comp
    @staticmethod
    def _getArrayShape(arr: Union[np.ndarray, List, Tuple]):
        """Get the shape of a np.ndarray, list, or tuple."""
        if isinstance(arr, np.ndarray):
            return arr.shape
        elif isinstance(arr, (list, tuple)):
            return (len(arr),)
        else:
            # not a list, tuple, or array (likely int, float, or None)
            return 1

    def _writeParams(self, h5group, comps) -> tuple:
        """Write one dataset per to-DB parameter for a group of same-typed composites."""
        c = comps[0]
        groupName = c.__class__.__name__
        if groupName not in h5group:
            # Only create the group if it doesn't already exist. This happens when re-writing
            # params in the same time node (e.g. something changed between EveryNode and EOC).
            g = h5group.create_group(groupName, track_order=True)
        else:
            g = h5group[groupName]

        for paramDef in c.p.paramDefs.toWriteToDB():
            attrs = {}

            if hasattr(c, "DIMENSION_NAMES") and paramDef.name in c.DIMENSION_NAMES:
                # Dimension params may be linked to another component's dimension; record the
                # link as "name.dimension" and store the resolved numeric value as data.
                linkedDims = []
                data = []

                # NOTE(review): this loop rebinds the outer ``c``; presumably harmless since all
                # comps in a group share a class, but fragile — confirm before relying on ``c``
                # below.
                for _, c in enumerate(comps):
                    val = c.p[paramDef.name]
                    if isinstance(val, tuple):
                        linkedDims.append("{}.{}".format(val[0].name, val[1]))
                        data.append(val[0].getDimension(val[1]))
                    else:
                        linkedDims.append("")
                        data.append(val)

                data = np.array(data)
                if any(linkedDims):
                    attrs["linkedDims"] = np.array(linkedDims).astype("S")
            else:
                # NOTE: after loading, the previously unset values will be defaulted
                temp = [c.p.get(paramDef.name, paramDef.default) for c in comps]
                if paramDef.serializer is not None:
                    data, sAttrs = paramDef.serializer.pack(temp)
                    assert data.dtype.kind != "O", "{} failed to convert {} to a numpy-supported type.".format(
                        paramDef.serializer.__name__, paramDef.name
                    )
                    attrs.update(sAttrs)
                    attrs[_SERIALIZER_NAME] = paramDef.serializer.__name__
                    attrs[_SERIALIZER_VERSION] = paramDef.serializer.version
                else:
                    # check if temp is a jagged array
                    if any(isinstance(x, (np.ndarray, list)) for x in temp):
                        jagged = len(set([self._getArrayShape(x) for x in temp])) != 1
                    else:
                        jagged = False
                    data = JaggedArray(temp, paramDef.name) if jagged else np.array(temp)
                del temp

            # - Check to see if the array is jagged. If so, flatten, store the data offsets and
            #   array shapes, and None locations as attrs.
            # - If not jagged, all top-level ndarrays are the same shape, so it is easier to
            #   replace Nones with ndarrays filled with special values.
            if isinstance(data, JaggedArray):
                data, specialAttrs = packSpecialData(data, paramDef.name)
                attrs.update(specialAttrs)
            else:  # np.ndarray
                # Convert Unicode to byte-string
                if data.dtype.kind == "U":
                    data = data.astype("S")

                if data.dtype.kind == "O":
                    # Something was added to the data array that caused np to want to treat it as
                    # a general-purpose Object array. This usually happens because:
                    # - the data contain NoDefaults
                    # - the data contain one or more Nones,
                    # - the data contain special types like tuples, dicts, etc
                    # - there is some sort of honest-to-goodness weird object
                    # We want to support the first two cases with minimal intrusion, since these
                    # should be pretty easy to faithfully represent in the db. The last case
                    # isn't really worth supporting.
                    if parameters.NoDefault in data:
                        data = None
                    else:
                        data, specialAttrs = packSpecialData(data, paramDef.name)
                        attrs.update(specialAttrs)

            if data is None:
                # nothing representable to store for this parameter
                continue

            try:
                if paramDef.name in g:
                    raise ValueError(f"`{paramDef.name}` was already in `{g}`. This time node should have been empty")

                dataset = g.create_dataset(paramDef.name, data=data, compression="gzip", track_order=True)
                if any(attrs):
                    Database._writeAttrs(dataset, h5group, attrs)
            except Exception:
                runLog.error(f"Failed to write {paramDef.name} to database. Data: {data}")
                raise

        if isinstance(c, Block):
            self._addHomogenizedNumberDensityParams(comps, g)

    @staticmethod
    def _addHomogenizedNumberDensityParams(blocks, h5group):
        """
        Create on-the-fly block homog. number density params for XTVIEW viewing.

        See Also
        --------
        collectBlockNumberDensities
        """
        nDens = collectBlockNumberDensities(blocks)

        for nucName, numDens in nDens.items():
            h5group.create_dataset(nucName, data=numDens, compression="gzip", track_order=True)
    @staticmethod
    def _readParams(h5group, compTypeName, comps, allowMissing=False):
        """Read each parameter dataset for one composite type and assign values onto ``comps``."""
        g = h5group[compTypeName]

        renames = getApp().getParamRenames()

        pDefs = comps[0].pDefs

        # this can also be made faster by specializing the method by type
        for paramName, dataSet in g.items():
            # Honor historical databases where the parameters may have changed names since.
            # NOTE(review): a cyclic rename chain would loop forever here — presumed acyclic.
            while paramName in renames:
                paramName = renames[paramName]

            try:
                pDef = pDefs[paramName]
            except KeyError:
                if re.match(r"^n[A-Z][a-z]?\d*", paramName):
                    # This is a temporary viz param (number density) made by
                    # _addHomogenizedNumberDensityParams ignore it safely
                    continue
                else:
                    # If a parameter exists in the database but not in the application reading
                    # it, we can technically keep going. Since this may lead to potential
                    # correctness issues, raise a warning
                    if allowMissing:
                        runLog.warning(
                            "Found `{}` parameter `{}` in the database, which is not defined. Ignoring it.".format(
                                compTypeName, paramName
                            )
                        )
                        continue
                    else:
                        raise

            data = dataSet[:]
            attrs = Database._resolveAttrs(dataSet.attrs, h5group)

            if pDef.serializer is not None:
                # serialized params must carry matching serializer name/version attrs
                assert _SERIALIZER_NAME in dataSet.attrs
                assert dataSet.attrs[_SERIALIZER_NAME] == pDef.serializer.__name__
                assert _SERIALIZER_VERSION in dataSet.attrs

                data = np.array(pDef.serializer.unpack(data, dataSet.attrs[_SERIALIZER_VERSION], attrs))

            # nuclides are a special case where we want to keep in np.bytes_ format
            if data.dtype.type is np.bytes_ and "nuclides" not in paramName.lower():
                data = np.char.decode(data)

            if attrs.get("specialFormatting", False):
                data = unpackSpecialData(data, attrs, paramName)

            linkedDims = []
            if "linkedDims" in attrs:
                linkedDims = np.char.decode(attrs["linkedDims"])

            unpackedData = data.tolist()
            if len(comps) != len(unpackedData):
                msg = (
                    "While unpacking special data for {}, encountered composites and parameter "
                    "data with unmatched sizes.\nLength of composites list = {}\nLength of data "
                    "list = {}\nThis could indicate an error in data unpacking, which could "
                    "result in faulty data on the resulting reactor model.".format(
                        paramName, len(comps), len(unpackedData)
                    )
                )
                runLog.error(msg)
                raise ValueError(msg)

            if paramName == "numberDensities" and attrs.get("dict", False):
                Database._applyComponentNumberDensitiesMigration(comps, unpackedData)
            else:
                # iterating of np is not fast...
                for c, val, linkedDim in itertools.zip_longest(comps, unpackedData, linkedDims, fillvalue=""):
                    try:
                        # a non-empty linked dimension string wins over the raw value
                        if linkedDim != "":
                            c.p[paramName] = linkedDim
                        else:
                            c.p[paramName] = val
                    except AssertionError as ae:
                        # happens when a param was deprecated but being loaded from old DB
                        runLog.warning(
                            f"{str(ae)}\nSkipping load of invalid param `{paramName}` (possibly loading from old DB)\n"
                        )

    def getHistoryByLocation(
        self,
        comp: ArmiObject,
        params: Optional[List[str]] = None,
        timeSteps: Optional[Sequence[Tuple[int, int]]] = None,
    ) -> History:
        """Get the parameter histories at a specific location."""
        return self.getHistoriesByLocation([comp], params=params, timeSteps=timeSteps)[comp]

    def getHistoriesByLocation(
        self,
        comps: Sequence[ArmiObject],
        params: Optional[List[str]] = None,
        timeSteps: Optional[Sequence[Tuple[int, int]]] = None,
    ) -> Histories:
        """
        Get the parameter histories at specific locations.

        This has a number of limitations, which should in practice not be too limiting:

        - The passed objects must have IndexLocations. This type of operation doesn't make much
          sense otherwise.
        - The passed objects must exist in a hierarchy that leads to a Core object, which serves
          as an anchor that can fully define all index locations. This could possibly be made
          more general by extending grids, but that gets a little more complicated.
        - All requested objects must exist under the **same** anchor object, and at the same
          depth below it.
        - All requested objects must have the same type.

        Parameters
        ----------
        comps : list of ArmiObject
            The components/composites that currently occupy the location that you want histories
            at. ArmiObjects are passed, rather than locations, because this makes it easier to
            figure out things related to layout.
        params : List of str, optional
            The parameter names for the parameters that we want the history of. If None, all
            parameter history is given.
        timeSteps : List of (cycle, node) tuples, optional
            The time nodes that you want history for.
If None, all available time nodes will be returned. """ if self.versionMajor != 3: raise ValueError(f"This version of ARMI only supports version 3 of the ARMI DB, found {self.versionMajor}.") elif self.versionMinor < 4: raise ValueError( "Location-based histories are only supported for db version 3.4 and greater. This database is version " f"{self.versionMajor}, {self.versionMinor}." ) locations = [c.spatialLocator.getCompleteIndices() for c in comps] histData: Histories = {c: collections.defaultdict(collections.OrderedDict) for c in comps} # Check our assumptions about the passed locations: All locations must have the same parent and bear the same # relationship to the anchor object. anchors = {obj.getAncestorAndDistance(lambda a: isinstance(a, Core)) for obj in comps} if len(anchors) != 1: raise ValueError( "The passed objects do not have the same anchor or distance to that anchor; encountered the following: " f"{anchors}" ) anchorInfo = anchors.pop() if anchorInfo is not None: anchor, anchorDistance = anchorInfo else: raise ValueError("Could not determine an anchor object for the passed components") anchorSerialNum = anchor.p.serialNum # All objects of the same type objectTypes = {type(obj) for obj in comps} if len(objectTypes) != 1: raise TypeError(f"The passed objects must be the same type; got objects of types `{objectTypes}`") compType = objectTypes.pop() objClassName = compType.__name__ locToComp = {c.spatialLocator.getCompleteIndices(): c for c in comps} for h5TimeNodeGroup in self.genTimeStepGroups(timeSteps): if "layout" not in h5TimeNodeGroup: # Layout hasn't been written for this time step, so we can't get anything useful here. Perhaps the # current value is of use, in which case the DatabaseInterface should be used. 
continue cycle = h5TimeNodeGroup.attrs["cycle"] timeNode = h5TimeNodeGroup.attrs["timeNode"] layout = Layout((self.versionMajor, self.versionMinor), h5group=h5TimeNodeGroup) ancestors = layout.computeAncestors(layout.serialNum, layout.numChildren, depth=anchorDistance) lLocation = layout.location # filter for objects that live under the desired ancestor and at a desired location objectIndicesInLayout = np.array( [ i for i, (ancestor, loc) in enumerate(zip(ancestors, lLocation)) if ancestor == anchorSerialNum and loc in locations ] ) # This could also be way more efficient if lLocation were a numpy array objectLocationsInLayout = [lLocation[i] for i in objectIndicesInLayout] objectIndicesInData = np.array(layout.indexInData)[objectIndicesInLayout].tolist() try: h5GroupForType = h5TimeNodeGroup[objClassName] except KeyError as ee: runLog.error(f"{objClassName} not found in {h5TimeNodeGroup} of {self}") raise ee for paramName in params or h5GroupForType.keys(): if paramName == "location": # location is special, since it is stored in layout/ data = np.array(layout.location)[objectIndicesInLayout] elif paramName in h5GroupForType: dataSet = h5GroupForType[paramName] try: data = dataSet[objectIndicesInData] except: runLog.error(f"Failed to load index {objectIndicesInData} from {dataSet}@{(cycle, timeNode)}") raise if data.dtype.type is np.bytes_: data = np.char.decode(data) if dataSet.attrs.get("specialFormatting", False): if dataSet.attrs.get("nones", False): data = replaceNonsenseWithNones(data, paramName) else: raise ValueError( "History tracking for non-None, special-formatted parameters is not supported: " "{}, {}".format(paramName, {k: v for k, v in dataSet.attrs.items()}) ) else: # Nothing in the database for this param, so use the default value data = np.repeat( parameters.byNameAndType(paramName, compType).default, len(comps), ) # store data to the appropriate comps. 
This is where taking components as the argument (rather than # locations) is a little bit peculiar. # # At this point, `data` are arranged by the order of elements in `objectIndicesInData`, which # corresponds to the order of `objectIndicesInLayout` for loc, val in zip(objectLocationsInLayout, data.tolist()): comp = locToComp[loc] histData[comp][paramName][cycle, timeNode] = val return histData def getHistory( self, comp: ArmiObject, params: Optional[Sequence[str]] = None, timeSteps: Optional[Sequence[Tuple[int, int]]] = None, ) -> History: """ Get parameter history for a single ARMI Object. Parameters ---------- comps An individual ArmiObject params parameters to gather Returns ------- dict Dictionary of str/list pairs. """ return self.getHistories([comp], params, timeSteps)[comp] def getHistories( self, comps: Sequence[ArmiObject], params: Optional[Sequence[str]] = None, timeSteps: Optional[Sequence[Tuple[int, int]]] = None, ) -> Histories: """ Get the parameter histories for a sequence of ARMI Objects. This implementation is unaware of the state of the reactor outside of the database itself, and is therefore not usually what client code should be calling directly during normal ARMI operation. It only knows about historical data that have actually been written to the database. Usually one wants to be able to get historical, plus current data, for which the similar method on the DatabaseInterface may be more useful. Parameters ---------- comps Something that is iterable multiple times params parameters to gather. timeSteps Selection of time nodes to get data for. If omitted, return full history Returns ------- dict Dictionary ArmiObject (input): dict of str/list pairs containing ((cycle, node), value). 
""" histData: Histories = {c: collections.defaultdict(collections.OrderedDict) for c in comps} types = {c.__class__ for c in comps} compsByTypeThenSerialNum: Dict[Type[ArmiObject], Dict[int, ArmiObject]] = {t: dict() for t in types} for c in comps: compsByTypeThenSerialNum[c.__class__][c.p.serialNum] = c for h5TimeNodeGroup in self.genTimeStepGroups(timeSteps): if "layout" not in h5TimeNodeGroup: # Layout hasn't been written for this time step, so whatever is in there didn't come from the # DatabaseInterface. Probably because it's the current time step and something has created the group to # store aux data continue # might save as int or np.int64, so forcing int keeps things predictable cycle = int(h5TimeNodeGroup.attrs["cycle"]) timeNode = int(h5TimeNodeGroup.attrs["timeNode"]) layout = Layout((self.versionMajor, self.versionMinor), h5group=h5TimeNodeGroup) for compType, compsBySerialNum in compsByTypeThenSerialNum.items(): compTypeName = compType.__name__ try: h5GroupForType = h5TimeNodeGroup[compTypeName] except KeyError as ee: runLog.error("{} not found in {} of {}".format(compTypeName, h5TimeNodeGroup, self)) raise ee layoutIndicesForType = np.where(layout.type == compTypeName)[0] serialNumsForType = layout.serialNum[layoutIndicesForType].tolist() layoutIndexInData = layout.indexInData[layoutIndicesForType].tolist() indexInData = [] reorderedComps = [] for ii, sn in zip(layoutIndexInData, serialNumsForType): d = compsBySerialNum.get(sn, None) if d is not None: indexInData.append(ii) reorderedComps.append(d) if not indexInData: continue # note this is very similar to _readParams but there are some important differences. # 1) we are not assigning to p[paramName] # 2) not using linkedDims at all # 3) not performing parameter renaming. 
This may become necessary for paramName in params or h5GroupForType.keys(): if paramName == "location": locs = [] for id in indexInData: locs.append((layout.location[layoutIndicesForType[id]])) data = np.array(locs) elif paramName in h5GroupForType: dataSet = h5GroupForType[paramName] try: data = dataSet[indexInData] except: runLog.error( "Failed to load index {} from {}@{}".format(indexInData, dataSet, (cycle, timeNode)) ) raise if data.dtype.type is np.bytes_: data = np.char.decode(data) if dataSet.attrs.get("specialFormatting", False): if dataSet.attrs.get("nones", False): data = replaceNonsenseWithNones(data, paramName) else: raise ValueError( "History tracking for non-none special formatting not supported: {}, {}".format( paramName, {k: v for k, v in dataSet.attrs.items()}, ) ) else: # Nothing in the database, so use the default value data = np.repeat( parameters.byNameAndType(paramName, compType).default, len(reorderedComps), ) # iterating of np is not fast.. for c, val in zip(reorderedComps, data.tolist()): if paramName == "location": val = tuple(val) elif isinstance(val, list): val = np.array(val) histData[c][paramName][cycle, timeNode] = val r = comps[0].getAncestor(lambda c: isinstance(c, Reactor)) cycleNode = r.p.cycle, r.p.timeNode for c, paramHistories in histData.items(): for paramName, hist in paramHistories.items(): if cycleNode not in hist: try: hist[cycleNode] = c.p[paramName] except Exception: if paramName == "location": hist[cycleNode] = tuple(c.spatialLocator.indices) return histData @staticmethod def _writeAttrs(obj, group, attrs): """ Handle safely writing attributes to a dataset, handling large data if necessary. This will attempt to store attributes directly onto an HDF5 object if possible, falling back to proper datasets and reference attributes if necessary. This is needed because HDF5 tries to fit attributes into the object header, which has limited space. If an attribute is too large, h5py raises a RuntimeError. 
In such cases, this will store the attribute data in a proper dataset and place a reference to that dataset in the attribute instead. In practice, this takes ``linkedDims`` attrs from a particular component type (like ``c00n00/Circle/id``) and stores them in new datasets (like ``c00n00/attrs/1_linkedDims``, ``c00n00/attrs/2_linkedDims``) and then sets the object's attrs to links to those datasets. """ for key, value in attrs.items(): try: obj.attrs[key] = value except RuntimeError as err: if "object header message is too large" not in err.args[0]: raise runLog.info(f"Storing attribute `{key}` for `{obj}` into it's own dataset within `{group}/attrs`") if "attrs" not in group: attrGroup = group.create_group("attrs") else: attrGroup = group["attrs"] dataName = str(len(attrGroup)) + "_" + key attrGroup[dataName] = value # using a soft link here allows us to cheaply copy time nodes without needing to crawl through and # update object references. linkName = attrGroup[dataName].name obj.attrs[key] = "@{}".format(linkName) @staticmethod def _resolveAttrs(attrs, group): """ Reverse the action of _writeAttrs. This reads actual attrs and looks for the real data in the datasets that the attrs were pointing to. """ attr_link = re.compile("^@(.*)$") resolved = {} for key, val in attrs.items(): try: if isinstance(val, h5py.h5r.Reference): # Old style object reference. If this cannot be dereferenced, it is likely because mergeHistory was # used to get the current database, which does not preserve references. resolved[key] = group[val] elif isinstance(val, str): m = attr_link.match(val) if m: # dereference the path to get the data out of the dataset. 
                        resolved[key] = group[m.group(1)][()]
                    else:
                        # ordinary string attribute (no "@" link prefix): use as-is
                        resolved[key] = val
                else:
                    resolved[key] = val
            except ValueError:
                # dereferencing can fail, e.g. a dangling link; report which attr/group broke
                runLog.error(f"HDF error loading {key} : {val}\nGroup: {group}")
                raise
        return resolved

    @staticmethod
    def _applyComponentNumberDensitiesMigration(comps, unpackedData):
        """
        Special migration from <= v0.5.1 component numberDensities parameter data type.

        old format: dict[str: float]
        new format: two numpy arrays
            - nuclides = np.array(dtype="S6")
            - numberDensities = np.array(dtype=np.float64)
        """
        # Each entry of unpackedData is one component's {nuclideName: numberDensity} dict;
        # split it into the parallel nuclides/numberDensities arrays used by newer versions.
        for c, ndensDict in zip(comps, unpackedData):
            nuclides = np.array(list(ndensDict.keys()), dtype="S6")
            numberDensities = np.array(list(ndensDict.values()), dtype=np.float64)
            c.p.nuclides = nuclides
            c.p.numberDensities = numberDensities

    @staticmethod
    def getCycleNodeAtTime(dbPath, startTime, endTime, errorIfNotExactlyOne=True):
        """Given the path to an ARMI database file and a start and end time (in years), return the full set of all
        time nodes that correspond to that time period in the database.

        Parameters
        ----------
        dbPath : str
            File path to an ARMI database.
        startTime : int or float
            In years, start of the desired interval. Must be non-negative.
        endTime : int or float
            In years, end of the desired interval. Must not precede ``startTime``.
        errorIfNotExactlyOne : boolean
            Raise an error if more than one cycle/node combination is returned. Default is True.

        Returns
        -------
        list of strings
            A list of strings to the desired time interval, e.g.: ["c01n08", "c14n18EOL"]
        """
        # basic sanity checks
        assert startTime >= 0.0, f"The start time cannot be negative: {startTime}."
        assert endTime >= startTime, f"The end time ({endTime}) is not greater than the start time ({startTime})."
# open the H5 file directly with h5py.File(dbPath, "r") as h5: # read time steps in H5 file thisTime = 0.0 cycleNodes = [] for h5Key in h5.keys(): if h5Key == "inputs": continue thisTime = h5[h5Key]["Reactor"]["time"][0] if thisTime >= endTime: cycleNodes.append(h5Key) break elif thisTime >= startTime: cycleNodes.append(h5Key) # more validation if not cycleNodes: raise ValueError(f"Provided start time ({startTime}) was greater than the modeled period: {thisTime}.") elif errorIfNotExactlyOne and len(cycleNodes) != 1: raise ValueError(f"Did not find exactly one cycle/node pair: {cycleNodes}") return cycleNodes def packSpecialData( arrayData: [np.ndarray, JaggedArray], paramName: str ) -> Tuple[Optional[np.ndarray], Dict[str, Any]]: """ Reduce data that wouldn't otherwise play nicely with HDF5/numpy arrays to a format that will. This is the main entry point for conforming "strange" data into something that will both fit into a numpy array/HDF5 dataset, and be recoverable to its original-ish state when reading it back in. This is accomplished by detecting a handful of known offenders and using various HDF5 attributes to store necessary auxiliary data. It is important to keep in mind that the data that is passed in has already been converted to a numpy array, so the top dimension is always representing the collection of composites that are storing the parameters. For instance, if we are dealing with a Block parameter, the first index in the numpy array of data is the block index; so if each block has a parameter that is a dictionary, ``data`` would be a ndarray, where each element is a dictionary. This routine supports a number of different things: * Dict[str, float]: These are stored by finding the set of all keys for all instances, and storing those keys as a list in an attribute. The data themselves are stored as arrays indexed by object, then key index. Dictionaries lacking data for a key store a nan in it's place. 
    This will work well in instances where most objects have data for most keys.

    * Jagged arrays: These are stored by concatenating all of the data into a single, one-
      dimensional array, and storing attributes to describe the shapes of each object's data,
      and an offset into the beginning of each object's data.

    * Arrays with ``None`` in them: These are stored by replacing each instance of ``None``
      with a magical value that shouldn't be encountered in realistic scenarios.

    Parameters
    ----------
    arrayData
        An ndarray or JaggedArray object storing the data that we want to stuff into the
        database. If the data is jagged, a special JaggedArray instance is passed in, which
        contains a 1D array with offsets and shapes.
    paramName
        The parameter name that we are trying to store. This is mostly used for diagnostics.

    See Also
    --------
    unpackSpecialData
    """
    if isinstance(arrayData, JaggedArray):
        # jagged data arrives pre-flattened; layout metadata lives on the JaggedArray itself
        data = arrayData.flattenedArray
    else:
        # Check to make sure that we even need to do this. If the numpy data type is not "O",
        # chances are we have nice, clean data.
        if arrayData.dtype != "O":
            return arrayData, {}
        else:
            data = arrayData

    attrs: Dict[str, Any] = {"specialFormatting": True}

    # make a copy of the data, so that the original is unchanged
    data = copy.copy(data)

    # Find locations of Nones.
    nones = np.where([d is None for d in data])[0]

    if len(nones) == data.shape[0]:
        # Everything is None, so why bother?
        return None, attrs

    if len(nones) > 0:
        attrs["nones"] = True

    # Pack different types of data
    if any(isinstance(d, dict) for d in data):
        # We're assuming that a dict is {str: float}.
        attrs["dict"] = True
        keys = sorted({k for d in data for k in d})
        # one row per object, one column per key; missing keys become NaN
        data = np.array([[d.get(k, np.nan) for k in keys] for d in data])
        if data.dtype == "O":
            raise TypeError(f"Unable to coerce dictionary data into usable numpy array for {paramName}")
        # We store the union of all of the keys for all of the objects as a special "keys"
        # attribute, and store a value for all of those keys for all objects, whether or not
        # there is actually data associated with that key
        attrs["keys"] = np.array(keys).astype("S")

        return data, attrs
    elif isinstance(arrayData, JaggedArray):
        # record the flattening metadata so unpackSpecialData can reconstruct the shapes
        attrs["jagged"] = True
        attrs["offsets"] = arrayData.offsets
        attrs["shapes"] = arrayData.shapes
        attrs["noneLocations"] = arrayData.nones
        return data, attrs

    # conform non-numpy arrays to numpy
    for i, val in enumerate(data):
        if isinstance(val, (list, tuple)):
            data[i] = np.array(val)

    if not any(isinstance(d, np.ndarray) for d in data):
        # looks like 1-D plain-old-data
        data = replaceNonesWithNonsense(data, paramName, nones)
        return data, attrs
    elif any(isinstance(d, (tuple, list, np.ndarray)) for d in data):
        data = replaceNonesWithNonsense(data, paramName, nones)
        return data, attrs

    if len(nones) == 0:
        raise TypeError(f"Cannot write {paramName} to the database, it did not resolve to a numpy/HDF5 type.")

    runLog.error(f"Data unable to find special none value: {data}")
    raise TypeError(f"Failed to process special data for {paramName}")


def unpackSpecialData(data: np.ndarray, attrs, paramName: str) -> np.ndarray:
    """
    Extract data from a specially-formatted HDF5 dataset into a numpy array.

    This should invert the operations performed by :py:func:`packSpecialData`.

    Parameters
    ----------
    data
        Specially-formatted data array straight from the database.
    attrs
        The attributes associated with the dataset that contained the data.
    paramName
        The name of the parameter that is being unpacked. Only used for diagnostics.
    Returns
    -------
    np.ndarray
        An ndarray containing the closest possible representation of the data that was
        originally written to the database.

    See Also
    --------
    packSpecialData
    """
    if not attrs.get("specialFormatting", False):
        # The data were not subjected to any special formatting; short circuit.
        assert data.dtype != "O"
        return data

    unpackedData: List[Any]
    if attrs.get("nones", False) and not attrs.get("jagged", False):
        # plain data where Nones were replaced by sentinel values on write; restore them
        data = replaceNonsenseWithNones(data, paramName)
        return data
    if attrs.get("jagged", False):
        # rebuild the JaggedArray from the flattened 1-D data plus the stored layout metadata
        offsets = attrs["offsets"]
        shapes = attrs["shapes"]
        nones = attrs["noneLocations"]
        data = JaggedArray.fromH5(data, offsets, shapes, nones, data.dtype, paramName)
        return data
    if attrs.get("dict", False):
        # each row holds one object's values over the union of keys; NaN marks a missing key
        keys = np.char.decode(attrs["keys"])
        unpackedData = []
        assert data.ndim == 2
        for d in data:
            unpackedData.append({key: value for key, value in zip(keys, d) if not np.isnan(value)})
        return np.array(unpackedData)

    raise ValueError(
        "Do not recognize the type of special formatting that was applied to {}. Attrs: {}".format(
            paramName, {k: v for k, v in attrs.items()}
        )
    )


def collectBlockNumberDensities(blocks) -> Dict[str, np.ndarray]:
    """
    Collect block-by-block homogenized number densities for each nuclide.

    Homogenize the component-level to the block level. These are written to the database
    and useful for visualization.
    """
    # find the NuclidesBases object on the Reactor
    nuclideBases = None
    for b in blocks:
        if b.nuclideBases is not None:
            nuclideBases = b.nuclideBases
            break
    if not nuclideBases:
        # no blocks, or no block carries nuclide data: nothing to homogenize
        return {}
    # union of all nuclide names present on any block, in a stable sorted order
    nucNames = sorted(list(set(nucName for b in blocks for nucName in b.getNuclides())))
    nucBases = [nuclideBases.byName[nn] for nn in nucNames]
    # It's faster to loop over blocks first and get all number densities from each than it is to get one nuclide at a
    # time from each block because of area fraction calculations. So we use some RAM here instead.
nucDensityMatrix = [] for block in blocks: nucDensityMatrix.append(block.getNuclideNumberDensities(nucNames)) nucDensityMatrix = np.array(nucDensityMatrix) dataDict = dict() for ni, nb in enumerate(nucBases): # the nth column is a vector of nuclide densities for this nuclide across all blocks dataDict[nb.getDatabaseName()] = nucDensityMatrix[:, ni] return dataDict ================================================ FILE: armi/bookkeeping/db/databaseInterface.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The database interface provides a way to save the reactor state to a file, throughout a simulation. 
""" import copy import os import pathlib import time from typing import ( MutableSequence, Optional, Sequence, Tuple, ) from armi import context, interfaces, runLog from armi.bookkeeping.db.database import Database, getH5GroupName from armi.bookkeeping.db.typedefs import Histories, History from armi.reactor.composites import ArmiObject from armi.reactor.parameters import parameterDefinitions from armi.settings.fwSettings.databaseSettings import ( CONF_FORCE_DB_PARAMS, CONF_SYNC_AFTER_WRITE, ) from armi.utils import getPreviousTimeNode, getStepLengths ORDER = interfaces.STACK_ORDER.BOOKKEEPING def describeInterfaces(cs): """Function for exposing interface(s) to other code.""" return (DatabaseInterface, {"enabled": cs["db"]}) class DatabaseInterface(interfaces.Interface): """ Handles interactions between the ARMI data model and the persistent data storage system. This reads/writes the ARMI state to/from the database and helps derive state information that can be derived. """ name = "database" def __init__(self, r, cs): interfaces.Interface.__init__(self, r, cs) self._db = None self._dbPath: Optional[pathlib.Path] = None if cs[CONF_FORCE_DB_PARAMS]: toSet = {paramName: set() for paramName in cs[CONF_FORCE_DB_PARAMS]} for (name, _), pDef in parameterDefinitions.ALL_DEFINITIONS.items(): if name in toSet.keys(): toSet[name].add(pDef) for name, pDefs in toSet.items(): runLog.info("Forcing parameter {} to be written to the database, per user input".format(name)) for pDef in pDefs: pDef.saveToDB = True def __repr__(self): return "<{} '{}' {} >".format(self.__class__.__name__, self.name, repr(self._db)) @property def database(self): """Presents the internal database object, if it exists.""" if self._db is not None: return self._db else: raise RuntimeError( "The Database interface has not yet created a database " "object. InteractBOL or loadState must be called first." ) def interactBOL(self): """Initialize the database if the main interface was not available. 
(Beginning of Life).""" if not self._db: self.initDB() def initDB(self, fName: Optional[os.PathLike] = None): """ Open the underlying database to be written to, and write input files to DB. Notes ----- Main Interface calls this so that the database is available as early as possible in the run. The database interface interacts near the end of the interface stack (so that all the parameters have been updated) while the Main Interface interacts first. """ if fName is None: self._dbPath = pathlib.Path(self.cs.caseTitle + ".h5") else: self._dbPath = pathlib.Path(fName) if self.cs["reloadDBName"].lower() == str(self._dbPath).lower(): raise ValueError( "It appears that reloadDBName is the same as the case title. " "This could lead to data loss! Rename the reload DB or the case." ) self._db = Database(self._dbPath, "w") self._db.open() self._db.writeInputsToDB(self.cs) def interactEveryNode(self, cycle, node): """ Write to database. DBs should receive the state information of the run at each node. Notes ----- - If tight coupling is enabled, the DB will be written in ``Operator::_timeNodeLoop`` via writeDBEveryNode. """ if self.o.cs["tightCoupling"]: # h5 can't handle overwriting so we skip here and write once the tight coupling loop has completed return self.writeDBEveryNode() def writeDBEveryNode(self): """Write the database at the end of the time node.""" self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0 self._db.writeToDB(self.r) if self.cs[CONF_SYNC_AFTER_WRITE]: self._db.syncToSharedFolder() def interactEOC(self, cycle=None): """ Do not write; this state doesn't tend to be important since its decay only step. Notes ----- The same time is available at start of next cycle. """ return def interactEOL(self): """DB's should be closed at run's end. (End of Life).""" # minutesSinceStarts should include as much of the ARMI run as possible so EOL is necessary, too. 
self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0 self._db.writeToDB(self.r, "EOL") self.closeDB() def closeDB(self): """Close the DB, writing to file.""" self._db.close(True) def interactError(self): """Get shutdown state information even if the run encounters an error.""" try: self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0 # this can result in a double-error if the error occurred in the database # writing self._db.writeToDB(self.r, "error") self._db.close(False) except Exception: # we're already responding to an error pass def interactDistributeState(self) -> None: """ Reconnect to pre-existing database. DB is created and managed by the primary node only but we can still connect to it from workers to enable things like history tracking. """ if context.MPI_RANK > 0: # DB may not exist if distribute state is called early. if self._dbPath is not None and os.path.exists(self._dbPath): self._db = Database(self._dbPath, "r") self._db.open() def distributable(self): return self.Distribute.SKIP def prepRestartRun(self): """ Load the data history from the database requested in the case setting `reloadDBName`. Reactor state is put at the cycle/node requested in the case settings `startCycle` and `startNode`, having loaded the state from all cycles prior to that in the requested database. .. impl:: Runs at a particular timenode can be re-instantiated for a snapshot. :id: I_ARMI_SNAPSHOT_RESTART :implements: R_ARMI_SNAPSHOT_RESTART This method loads the state of a reactor from a particular point in time from a standard ARMI :py:class:`Database <armi.bookkeeping.db.database.Database>`. This is a major use-case for having ARMI databases in the first case. And restarting from such a database is easy, you just need to set a few settings:: * reloadDBName - Path to existing H5 file to reload from. * startCycle - Operational cycle to restart from. * startNode - Time node to start from. 
Notes ----- Mixing the use of simple vs detailed cycles settings is allowed, provided that the cycle histories prior to `startCycle`/`startNode` are equivalent. ARMI expects the reload DB to have been made in the same version of ARMI as you are running. ARMI does not guarantee that a DB from a decade ago will be easily used to restart a run. """ reloadDBName = self.cs["reloadDBName"] runLog.info(f"Merging database history from {reloadDBName} for restart analysis.") startCycle = self.cs["startCycle"] startNode = self.cs["startNode"] with Database(reloadDBName, "r") as inputDB: loadDbCs = inputDB.loadCS() # pull the history up to the cycle/node prior to `startCycle`/`startNode` dbCycle, dbNode = getPreviousTimeNode( startCycle, startNode, self.cs, ) self._checkThatCyclesHistoriesAreEquivalentUpToRestartTime(loadDbCs, dbCycle, dbNode) self._db.mergeHistory(inputDB, startCycle, startNode) self.loadState(dbCycle, dbNode) def _checkThatCyclesHistoriesAreEquivalentUpToRestartTime(self, loadDbCs, dbCycle, dbNode): """Check that cycle histories are equivalent up to this point.""" dbStepLengths = getStepLengths(loadDbCs) currentCaseStepLengths = getStepLengths(self.cs) dbStepHistory = [] currentCaseStepHistory = [] try: for cycleIdx in range(dbCycle + 1): if cycleIdx == dbCycle: # truncate it at dbNode dbStepHistory.append(dbStepLengths[cycleIdx][:dbNode]) currentCaseStepHistory.append(currentCaseStepLengths[cycleIdx][:dbNode]) else: dbStepHistory.append(dbStepLengths[cycleIdx]) currentCaseStepHistory.append(currentCaseStepLengths[cycleIdx]) except IndexError: runLog.error(f"DB cannot be loaded to this time: cycle={dbCycle}, node={dbNode}") raise if dbStepHistory != currentCaseStepHistory: raise ValueError("The cycle history up to the restart cycle/node must be equivalent.") def _getLoadDB(self, fileName): """ Return the database to load from in order of preference. 
Notes ----- If filename is present only returns one database since specifically instructed to load from that database. """ if fileName is not None: # only yield 1 database if the file name is specified if self._db is not None and fileName == self._db._fileName: yield self._db elif os.path.exists(fileName): yield Database(fileName, "r") else: if self._db is not None: yield self._db if os.path.exists(self.cs["reloadDBName"]): yield Database(self.cs["reloadDBName"], "r") def loadState(self, cycle, timeNode, timeStepName="", fileName=None): """ Loads a fresh reactor and applies it to the Operator. Notes ----- Will load preferentially from the ``fileName`` if passed. Otherwise will load from existing database in memory or ``cs["reloadDBName"]`` in that order. Raises ------ RuntimeError If fileName is specified and that file does not have the time step. If fileName is not specified and neither the database in memory, nor the ``cs["reloadDBName"]`` have the time step specified. """ for potentialDatabase in self._getLoadDB(fileName): with potentialDatabase as loadDB: if loadDB.hasTimeStep(cycle, timeNode, statePointName=timeStepName): newR = loadDB.load( cycle, timeNode, statePointName=timeStepName, cs=self.cs, allowMissing=True, ) self.o.reattach(newR, self.cs) break else: # reactor was never set so fail if fileName: raise RuntimeError( "Cannot load state from specified file {} @ {}".format( fileName, getH5GroupName(cycle, timeNode, timeStepName) ) ) raise RuntimeError( "Cannot load state from <unspecified file> @ {}".format(getH5GroupName(cycle, timeNode, timeStepName)) ) def getHistory( self, comp: ArmiObject, params: Optional[Sequence[str]] = None, timeSteps: Optional[MutableSequence[Tuple[int, int]]] = None, byLocation: bool = False, ) -> History: """ Get historical parameter values for a single object. This is mostly a wrapper around the same function on the ``Database`` class, but knows how to return the current value as well. 
See Also -------- Database.getHistory """ # make a copy so that we can potentially remove timesteps without affecting the caller timeSteps = copy.copy(timeSteps) now = (self.r.p.cycle, self.r.p.timeNode) nowRequested = timeSteps is None if timeSteps is not None and now in timeSteps: nowRequested = True timeSteps.remove(now) if byLocation: history = self.database.getHistoryByLocation(comp, params, timeSteps) else: history = self.database.getHistory(comp, params, timeSteps) if nowRequested: for param in params or history.keys(): if param == "location": # might save as int or np.int64, so forcing int keeps things predictable history[param][now] = tuple(int(i) for i in comp.spatialLocator.indices) else: history[param][now] = comp.p[param] return history def getHistories( self, comps: Sequence[ArmiObject], params: Optional[Sequence[str]] = None, timeSteps: Optional[MutableSequence[Tuple[int, int]]] = None, byLocation: bool = False, ) -> Histories: """ Get historical parameter values for one or more objects. This is mostly a wrapper around the same function on the ``Database`` class, but knows how to return the current value as well. 
See Also -------- Database.getHistories """ now = (self.r.p.cycle, self.r.p.timeNode) nowRequested = timeSteps is None if timeSteps is not None: # make a copy so that we can potentially remove timesteps without affecting # the caller timeSteps = copy.copy(timeSteps) if timeSteps is not None and now in timeSteps: nowRequested = True timeSteps.remove(now) if byLocation: histories = self.database.getHistoriesByLocation(comps, params, timeSteps) else: histories = self.database.getHistories(comps, params, timeSteps) if nowRequested: for c in comps: for param in params or histories[c].keys(): if param == "location": histories[c][param][now] = tuple(int(i) for i in c.spatialLocator.indices) else: histories[c][param][now] = c.p[param] return histories ================================================ FILE: armi/bookkeeping/db/factory.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import Optional import h5py from armi.bookkeeping.db import permissions from armi.bookkeeping.db.database import Database def databaseFactory(dbName: str, permission: str, version: Optional[str] = None): """ Return an appropriate object for interacting with a database file. Parameters ---------- dbName: str Path to db file, e.g. `baseCase.h5` permission: str String defining permission, `r` for read only. 
See armi.bookkeeping.db.permissions version: str, optional Version of database you want to read or write. In most cases ARMI will auto-detect. For advanced users. Notes ----- This is not a proper factory, as the different database versions do not present a common interface. However, this is useful code, since it at least creates an object based on some knowledge of how to probe around. This allows client code to just interrogate the type of the returned object to figure out to do based on whatever it needs. """ dbPath = pathlib.Path(dbName) # if it's not an hdf5 file, we dont even know where to start... if dbPath.suffix != ".h5": raise RuntimeError("Unknown database format for {}".format(dbName)) if permission in permissions.Permissions.read: if version is not None: raise ValueError("Cannot specify version when reading a database.") if not dbPath.exists() or not dbPath.is_file(): raise ValueError("Database file `{}` does not appear to be a file.".format(dbName)) # probe for the database version. We started adding these with "database 3", so if # databaseVersion is not present, assume it's the "old" version version = "2" tempDb = h5py.File(dbPath, "r") if "databaseVersion" in tempDb.attrs: version = tempDb.attrs["databaseVersion"] del tempDb majorversion = version.split(".")[0] if version else "2" if majorversion == "2": raise ValueError( 'Database version 2 ("XTView database") is no longer ' "supported. To migrate to a newer version, use version 0.1.5." ) if majorversion == "3": return Database(dbPath, permission) raise ValueError("Unable to determine Database version for {}".format(dbName)) elif permission in permissions.Permissions.write: majorversion = version.split(".")[0] if version else "3" if majorversion == "2": raise ValueError( 'Database version 2 ("XTView database") is no longer ' "supported. To migrate to a newer version, use version 0.1.5 to migrate." 
) if majorversion == "3": return Database(dbPath, permission) return None ================================================ FILE: armi/bookkeeping/db/jaggedArray.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tooling to help flatten jagged (non rectangular) data into rectangular arrays. The goal here is to support jagged data for NumPy arrays to be written into the ARMI databases. """ from typing import List, Optional import numpy as np from armi import runLog class JaggedArray: """ Take a list of numpy arrays or lists and flatten them into a single 1D array. This implementation can preserve the structure of a multi-dimensional numpy array by storing the dimensions in self.shapes and then re-populating a numpy array of that shape from the flattened 1D array. However, it can only preserve one layer of jaggedness in a list of lists (or other iterables). For example, a list of tuples with varying lengths can be flattened and reconstituted exactly. But, if a list of lists of tuples is passed in, the tuples in that final layer of nesting will all be flattened to a single 1D numpy array after a round trip. No structure is retained from nested lists of jagged lists or tuples. """ def __init__(self, jaggedData, paramName): """ JaggedArray constructor. 
Parameters ---------- jaggedData: list of np.ndarray A list of numpy arrays (or lists or tuples) to be flattened into a single array paramName: str The name of the parameter represented by this data """ offset = 0 flattenedArray = [] offsets = [] shapes = [] nones = [] for i, arr in enumerate(jaggedData): if isinstance(arr, (np.ndarray, list, tuple)): if len(arr) == 0: nones.append(i) else: offsets.append(offset) try: numpyArray = np.array(arr) shapes.append(numpyArray.shape) offset += numpyArray.size flattenedArray.extend(numpyArray.flatten()) except: # noqa: E722 # numpy might fail if it's jagged flattenedList = self.flatten(arr) shapes.append( len(flattenedList), ) offset += len(flattenedList) flattenedArray.extend(flattenedList) elif isinstance(arr, (int, float)): offsets.append(offset) shapes.append((1,)) offset += 1 flattenedArray.append(arr) elif arr is None: nones.append(i) self.flattenedArray = np.array(flattenedArray) self.offsets = np.array(offsets) try: self.shapes = np.array(shapes) except ValueError as ee: runLog.error( "Error! It seems like ARMI may have tried to flatten a jagged array " "where the elements have different numbers of dimensions. `shapes` " "attribute of the JaggedArray for {} cannot be made into a numpy " "array; it might be jagged.".format(paramName) ) runLog.error(shapes) raise ValueError(ee) self.nones = np.array(nones) self.dtype = self.flattenedArray.dtype self.paramName = paramName def __iter__(self): """Iterate over the unpacked list.""" return iter(self.unpack()) def __contains__(self, other): return other in self.flattenedArray @staticmethod def flatten(x): """ Recursively flatten an iterable (list, tuple, or numpy.ndarray). x : list, tuple, np.ndarray An iterable. Can be a nested iterable in which the elements themselves are also iterable. 
""" if isinstance(x, (list, tuple, np.ndarray)): if len(x) == 0: return [] first, rest = x[0], x[1:] return JaggedArray.flatten(first) + JaggedArray.flatten(rest) else: return [x] @classmethod def fromH5(cls, data, offsets, shapes, nones, dtype, paramName): """ Create a JaggedArray instance from an HDF5 dataset. The JaggedArray is stored in HDF5 as a flat 1D array with accompanying attributes of "offsets" and "shapes" to define how to reconstitute the original data. Parameters ---------- data: np.ndarray A flattened 1D numpy array read in from an HDF5 file offsets: np.ndarray Offset indices for the zeroth element of each constituent array shapes: np.ndarray The shape of each constituent array nones: np.ndarray The location of Nones dtype: np.dtype The data type for the array paramName: str The name of the parameter represented by this data Returns ------- obj: JaggedArray An instance of JaggedArray populated with the input data """ obj = cls([], paramName) obj.flattenedArray = np.array(data) obj.offsets = np.array(offsets) obj.shapes = np.array(shapes) obj.nones = np.array(nones) obj.dtype = dtype obj.paramName = paramName return obj def tolist(self): """Alias for unpack() to make this class respond like a np.ndarray.""" return self.unpack() def unpack(self): """ Unpack a JaggedArray object into a list of arrays. 
Returns ------- unpackedJaggedData: list of np.ndarray List of numpy arrays with varying dimensions (i.e., jagged arrays) """ unpackedJaggedData: List[Optional[np.ndarray]] = [] shapeIndices = [i for i, x in enumerate(self.shapes) if sum(x) != 0] numElements = len(shapeIndices) + len(self.nones) j = 0 # non-None element counter for i in range(numElements): if i in self.nones: unpackedJaggedData.append(None) else: k = shapeIndices[j] unpackedJaggedData.append( np.ndarray( self.shapes[k], dtype=self.dtype, buffer=self.flattenedArray[self.offsets[k] :], ) ) j += 1 return unpackedJaggedData ================================================ FILE: armi/bookkeeping/db/layout.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Groundwork for ARMI Database, version 3.4. When interacting with the database file, the :py:class:`Layout` class is used to help map the hierarchical Composite Reactor Model to the flat representation in :py:class:`Database <armi.bookkeeping.db.database.Database>`. This module also stores packing/packing tools to support :py:class:`Database <armi.bookkeeping.db.database.Database>`, as well as database versioning information. 
""" import collections from typing import ( Any, Dict, List, Optional, Tuple, Type, ) import numpy as np from armi import runLog from armi.reactor import grids from armi.reactor.components import Component from armi.reactor.composites import ArmiObject from armi.reactor.excoreStructure import ExcoreStructure from armi.reactor.reactors import Core, Reactor # Here we store the Database version information. DB_MAJOR = 3 DB_MINOR = 4 DB_VERSION = f"{DB_MAJOR}.{DB_MINOR}" # CONSTANTS USED TO PACK AND UNPACK DATA LOC_NONE = "N" LOC_COORD = "C" LOC_INDEX = "I" LOC_MULTI = "M:" LOCATION_TYPE_LABELS = { type(None): LOC_NONE, grids.CoordinateLocation: LOC_COORD, grids.IndexLocation: LOC_INDEX, grids.MultiIndexLocation: LOC_MULTI, } # NOTE: Here we assume no one assigns min(int)+2 as a meaningful value NONE_MAP = {float: float("nan"), str: "<!None!>"} NONE_MAP.update( { intType: np.iinfo(intType).min + 2 for intType in ( int, np.int8, np.int16, np.int32, np.int64, ) } ) NONE_MAP.update( { intType: np.iinfo(intType).max - 2 for intType in ( np.uint, np.uint8, np.uint16, np.uint32, np.uint64, ) } ) NONE_MAP.update({floatType: floatType("nan") for floatType in (float, np.float64)}) class Layout: """ The Layout class describes the hierarchical layout of the Composite Reactor model in a flat representation for :py:class:`Database <armi.bookkeeping.db.database.Database>`. A Layout is built by starting at the root of a composite tree and recursively appending each node in the tree to a list of data. So the data will be ordered by depth-first search: [r, c, a1, a1b1, a1b1c1, a1b1c2, a1b2, a1b2c1, ..., a2, ...]. The layout is also responsible for storing Component attributes, like location, material, and temperatures, which aren't stored as Parameters. Temperatures, specifically, are rather complicated in ARMI. Notes ----- * Elements in Layout are stored in depth-first order. 
This permits use of algorithms such as Pre-Order Tree Traversal for efficient traversal of regions of the model. * ``indexInData`` increases monotonically within each object ``type``. For example, the data for all ``HexBlock`` children of a given parent are stored contiguously within the ``HexBlock`` group, and will not be interleaved with data from the ``HexBlock`` children of any of the parent's siblings. * Aside from the hierarchy, there is no guarantee what order objects are stored in the layout. The ``Core`` is not necessarily the first child of the ``Reactor``, and is not guaranteed to use the zeroth grid. """ def __init__(self, version: Tuple[int, int], h5group=None, comp=None): self.type: List[str] = [] self.name: List[str] = [] self.serialNum: List[int] = [] # The index into the parameter datasets corresponding to each object's class. # E.g., the 5th HexBlock object in the tree would get 5; to look up its # "someParameter" value, you would extract cXXnYY/HexBlock/someParameter[5]. self.indexInData: List[int] = [] # The number of direct children this object has. self.numChildren: List[int] = [] # The type of location that specifies the object's physical location; see the # associated pack/unpackLocation functions for more information about how # locations are handled. self.locationType: List[str] = [] # There is a minor asymmetry here in that before writing to the DB, this is # truly a flat list of tuples. However when reading, this may contain lists of # tuples, which represent MI locations. This comes from the fact that we map the # tuples to Location objects in Database._compose, but map from Locations to # tuples in Layout._createLayout. Ideally we would handle both directions in the # same place so this can be less surprising. Resolving this would require # changing the interface of the various pack/unpack functions, which have # multiple versions, so the update would need to be done with care. 
        self.location: List[Tuple[int, int, int]] = []
        # Which grid, as stored in the database, this object uses to arrange its
        # children
        self.gridIndex: List[int] = []
        self.temperatures: List[float] = []
        self.material: List[str] = []
        # Used to cache all of the spatial locators so that we can pack them all at
        # once. The benefit here is that the version checking can happen up front and
        # less branching down below
        self._spatialLocators: List[grids.LocationBase] = []
        # set of grid parameters that have been seen in _createLayout. For efficient
        # checks for uniqueness
        self._seenGridParams: Dict[Any, Any] = dict()
        # actual list of grid parameters, with stable order for safe indexing
        self.gridParams: List[Any] = []
        self.version = version

        self.groupedComps: Dict[Type[ArmiObject], List[ArmiObject]] = collections.defaultdict(list)

        # it should be noted, one of the two inputs must be non-None: comp/h5group
        if comp is not None:
            # writing: walk the live composite tree
            self._createLayout(comp)
            self.locationType, self.location = _packLocations(self._spatialLocators)
        else:
            # reading: populate from the HDF5 group
            self._readLayout(h5group)

        self._snToLayoutIndex = {sn: i for i, sn in enumerate(self.serialNum)}

        # find all subclasses of Grid
        self.gridClasses = {c.__name__: c for c in Layout.allSubclasses(grids.Grid)}
        self.gridClasses["Grid"] = grids.Grid

    def __getitem__(self, sn):
        """Look up a composite's layout record (tuple of attributes) by serial number."""
        layoutIndex = self._snToLayoutIndex[sn]
        return (
            self.type[layoutIndex],
            self.name[layoutIndex],
            self.serialNum[layoutIndex],
            self.indexInData[layoutIndex],
            self.numChildren[layoutIndex],
            self.locationType[layoutIndex],
            self.location[layoutIndex],
            self.temperatures[layoutIndex],
            self.material[layoutIndex],
        )

    def _createLayout(self, comp):
        """
        Populate a hierarchical representation and group the reactor model items by
        type.

        This is used when writing a reactor model to the database.

        Notes
        -----
        This is recursive.

        See Also
        --------
        _readLayout : does the opposite
        """
        compList = self.groupedComps[type(comp)]
        compList.append(comp)

        self.type.append(comp.__class__.__name__)
        self.name.append(comp.name)
        self.serialNum.append(comp.p.serialNum)
        self.indexInData.append(len(compList) - 1)
        self.numChildren.append(len(comp))

        # determine how many components have been read in, to set the grid index
        if comp.spatialGrid is not None:
            gridType = type(comp.spatialGrid).__name__
            gridParams = (gridType, comp.spatialGrid.reduce())
            # deduplicate grids: many composites share identical grid definitions
            if gridParams not in self._seenGridParams:
                self._seenGridParams[gridParams] = len(self.gridParams)
                self.gridParams.append(gridParams)
            self.gridIndex.append(self._seenGridParams[gridParams])
        else:
            self.gridIndex.append(None)

        self._spatialLocators.append(comp.spatialLocator)

        # set the materials and temperatures
        try:
            self.temperatures.append((comp.inputTemperatureInC, comp.temperatureInC))
            self.material.append(comp.material.__class__.__name__)
        except Exception:
            # non-Component composites have no material/temperature
            self.temperatures.append((-900, -900))  # an impossible temperature
            self.material.append("")

        try:
            comps = sorted(list(comp))
        except ValueError:
            runLog.error(
                "Failed to sort some collection of ArmiObjects for database output: {} value {}".format(
                    type(comp), list(comp)
                )
            )
            raise

        # depth-first search recursion of all components
        for c in comps:
            self._createLayout(c)

    def _readLayout(self, h5group):
        """
        Populate a hierarchical representation and group the reactor model items by
        type.

        This is used when reading a reactor model from a database.
        See Also
        --------
        _createLayout : does the opposite
        """
        try:
            # location is either an index, or a point
            # iter over list is faster
            locations = h5group["layout/location"][:].tolist()
            self.locationType = np.char.decode(h5group["layout/locationType"][:]).tolist()
            self.location = _unpackLocations(self.locationType, locations, self.version[1])
            self.type = np.char.decode(h5group["layout/type"][:])
            self.name = np.char.decode(h5group["layout/name"][:])
            self.serialNum = h5group["layout/serialNum"][:]
            self.indexInData = h5group["layout/indexInData"][:]
            self.numChildren = h5group["layout/numChildren"][:]
            self.material = np.char.decode(h5group["layout/material"][:])
            self.temperatures = h5group["layout/temperatures"][:]
            # gridIndex was stored with None replaced by a sentinel; undo that here
            self.gridIndex = replaceNonsenseWithNones(h5group["layout/gridIndex"][:], "layout/gridIndex")

            gridGroup = h5group["layout/grids"]
            gridTypes = [t.decode() for t in gridGroup["type"][:]]

            self.gridParams = []
            for iGrid, gridType in enumerate(gridTypes):
                thisGroup = gridGroup[str(iGrid)]

                unitSteps = thisGroup["unitSteps"][:]
                bounds = []
                # up to 3 axes of bounds; missing datasets mean that axis had None
                for ibound in range(3):
                    boundName = "bounds_{}".format(ibound)
                    if boundName in thisGroup:
                        bounds.append(thisGroup[boundName][:])
                    else:
                        bounds.append(None)
                unitStepLimits = thisGroup["unitStepLimits"][:]
                # the "offset" attr is a boolean flag recording whether an offset
                # dataset was written (see writeToDB)
                offset = thisGroup["offset"][:] if thisGroup.attrs["offset"] else None
                geomType = thisGroup["geomType"].asstr()[()] if "geomType" in thisGroup else None
                symmetry = thisGroup["symmetry"].asstr()[()] if "symmetry" in thisGroup else None

                self.gridParams.append(
                    (
                        gridType,
                        grids.GridParameters(
                            unitSteps,
                            bounds,
                            unitStepLimits,
                            offset,
                            geomType,
                            symmetry,
                        ),
                    )
                )
        except KeyError as e:
            runLog.error("Failed to get layout information from group: {}".format(h5group.name))
            raise e

    def _initComps(self, caseTitle, bp):
        """
        Instantiate unlinked composite objects for every entry in this Layout.

        Returns the flat list of (comp, serialNum, numChildren, location, locationType)
        tuples plus the same comps grouped by type name; the caller is responsible for
        stitching the hierarchy back together.
        """
        comps = []
        groupedComps = collections.defaultdict(list)

        for (
            compType,
            name,
            serialNum,
            numChildren,
            location,
            locationType,
            material,
            temperatures,
            gridIndex,
        ) in zip(
            self.type,
            self.name,
            self.serialNum,
            self.numChildren,
            self.location,
            self.locationType,
            self.material,
            self.temperatures,
            self.gridIndex,
        ):
            Klass = ArmiObject.TYPES[compType]

            # each composite family has a different constructor signature
            if issubclass(Klass, Reactor):
                comp = Klass(caseTitle, bp)
            elif issubclass(Klass, Core):
                comp = Klass(name)
            elif issubclass(Klass, ExcoreStructure):
                comp = Klass(name)
            elif issubclass(Klass, Component):
                # init all dimensions to 0, they will be loaded and assigned after load
                kwargs = dict.fromkeys(Klass.DIMENSION_NAMES, 0)
                kwargs["modArea"] = None
                kwargs["material"] = material
                kwargs["name"] = name
                kwargs["Tinput"] = temperatures[0]
                kwargs["Thot"] = temperatures[1]
                comp = Klass(**kwargs)
            else:
                comp = Klass(name)

            if gridIndex is not None:
                gridParams = self.gridParams[gridIndex]
                comp.spatialGrid = self.gridClasses[gridParams[0]](*gridParams[1], armiObject=comp)

            comps.append((comp, serialNum, numChildren, location, locationType))
            groupedComps[compType].append(comp)

        return comps, groupedComps

    def writeToDB(self, h5group):
        """Write a chunk of data to the database.

        .. impl:: Write data to the DB for a given time step.
            :id: I_ARMI_DB_TIME0
            :implements: R_ARMI_DB_TIME

            This method writes a snapshot of the current state of the reactor to the
            database. It takes a pointer to an existing HDF5 file as input, and it
            writes the reactor data model to the file in depth-first search order.

            Other than this search order, there are no guarantees as to what order the
            objects are written to the file. Though, this turns out to still be very
            powerful. For instance, the data for all ``HexBlock`` children of a given
            parent are stored contiguously within the ``HexBlock`` group, and will not
            be interleaved with data from the ``HexBlock`` children of any of the
            parent's siblings.
""" if "layout/type" in h5group: # It looks like we have already written the layout to DB, skip for now return try: h5group.create_dataset( "layout/type", data=np.array(self.type).astype("S"), compression="gzip", ) h5group.create_dataset( "layout/name", data=np.array(self.name).astype("S"), compression="gzip", ) h5group.create_dataset("layout/serialNum", data=self.serialNum, compression="gzip") h5group.create_dataset("layout/indexInData", data=self.indexInData, compression="gzip") h5group.create_dataset( "layout/numChildren", data=self.numChildren, compression="gzip", track_order=True, ) h5group.create_dataset( "layout/location", data=self.location, compression="gzip", track_order=True, ) h5group.create_dataset( "layout/locationType", data=np.array(self.locationType).astype("S"), compression="gzip", track_order=True, ) h5group.create_dataset( "layout/material", data=np.array(self.material).astype("S"), compression="gzip", track_order=True, ) h5group.create_dataset( "layout/temperatures", data=self.temperatures, compression="gzip", track_order=True, ) h5group.create_dataset( "layout/gridIndex", data=replaceNonesWithNonsense(np.array(self.gridIndex), "layout/gridIndex"), compression="gzip", ) gridsGroup = h5group.create_group("layout/grids", track_order=True) gridsGroup.attrs["nGrids"] = len(self.gridParams) gridsGroup.create_dataset( "type", data=np.array([gp[0] for gp in self.gridParams]).astype("S"), track_order=True, ) for igrid, gridParams in enumerate(gp[1] for gp in self.gridParams): thisGroup = gridsGroup.create_group(str(igrid), track_order=True) thisGroup.create_dataset("unitSteps", data=gridParams.unitSteps, track_order=True) for ibound, bound in enumerate(gridParams.bounds): if bound is not None: bound = np.array(bound) thisGroup.create_dataset("bounds_{}".format(ibound), data=bound, track_order=True) thisGroup.create_dataset("unitStepLimits", data=gridParams.unitStepLimits, track_order=True) offset = gridParams.offset thisGroup.attrs["offset"] = offset 
is not None if offset is not None: thisGroup.create_dataset("offset", data=offset, track_order=True) thisGroup.create_dataset("geomType", data=gridParams.geomType, track_order=True) thisGroup.create_dataset("symmetry", data=gridParams.symmetry, track_order=True) except RuntimeError: runLog.error("Failed to create datasets in: {}".format(h5group)) raise @staticmethod def computeAncestors(serialNum, numChildren, depth=1) -> List[Optional[int]]: """ Return a list containing the serial number of the parent corresponding to each object at the given depth. Depth in this case means how many layers to reach up to find the desired ancestor. A depth of 1 will yield the direct parent of each element, depth of 2 would yield the elemen's parent's parent, and so on. The zero-th element will always be None, as the first object is the root element and so has no parent. Subsequent depths will result in more Nones. This function is useful for forming a lightweight sense of how the database contents stitch together, without having to go to the trouble of fully unpacking the Reactor model. Parameters ---------- serialNum : List of int List of serial numbers for each object/element, as laid out in Layout numChildren : List of int List of numbers of children for each object/element, as laid out in Layout Note ---- This is not using a recursive approach for a couple of reasons. First, the iterative form isn't so bad; we just need two stacks. Second, the interface of the recursive function would be pretty unwieldy. We are progressively consuming two lists, of which we would need to keep passing down with an index/cursor, or progressively slice them as we go, which would be pretty inefficient. 
""" ancestors: List[Optional[int]] = [None] snStack = [serialNum[0]] ncStack = [numChildren[0]] for sn, nc in zip(serialNum[1:], numChildren[1:]): ncStack[-1] -= 1 if nc > 0: ancestors.append(snStack[-1]) snStack.append(sn) ncStack.append(nc) else: ancestors.append(snStack[-1]) while ncStack and ncStack[-1] == 0: snStack.pop() ncStack.pop() if depth > 1: # handle deeper scenarios. This is a bit tricky. Store the original # ancestors for the first generation, since that ultimately contains all of # the information that we need. Then in a loop, keep hopping one more layer # of indirection, and indexing into the corresponding location in the # original ancestor array indexMap = {sn: i for i, sn in enumerate(serialNum)} origAncestors = ancestors for _ in range(depth - 1): ancestors = [origAncestors[indexMap[ia]] if ia is not None else None for ia in ancestors] return ancestors @staticmethod def allSubclasses(cls) -> set: """Find all subclasses of the given class, in any namespace.""" return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in Layout.allSubclasses(c)]) def _packLocations( locations: List[grids.LocationBase], minorVersion: int = DB_MINOR ) -> Tuple[List[str], List[Tuple[int, int, int]]]: """ Extract information from a location needed to write it to this DB. Each locator has one locationType and up to N location-defining datums, where N is the number of entries in a possible multiindex, or just 1 for everything else. Shrink grid locator names for storage efficiency. Notes ----- Contains some conditionals to still load databases made before db version 3.3 which can be removed once no users care about those DBs anymore. 
""" if minorVersion <= 2: locationTypes, locationData = _packLocationsV1(locations) elif minorVersion == 3: locationTypes, locationData = _packLocationsV2(locations) elif minorVersion > 3: locationTypes, locationData = _packLocationsV3(locations) else: raise ValueError("Unsupported minor version: {}".format(minorVersion)) return locationTypes, locationData def _packLocationsV1( locations: List[grids.LocationBase], ) -> Tuple[List[str], List[Tuple[int, int, int]]]: """Delete when reading v <=3.2 DB's no longer wanted.""" locTypes = [] locData: List[Tuple[int, int, int]] = [] for loc in locations: locationType = loc.__class__.__name__ if loc is None: locationType = "None" locDatum = [(0.0, 0.0, 0.0)] elif isinstance(loc, grids.IndexLocation): locDatum = [loc.indices] else: raise ValueError(f"Invalid location type: {loc}") locTypes.append(locationType) locData.extend(locDatum) return locTypes, locData def _packLocationsV2( locations: List[grids.LocationBase], ) -> Tuple[List[str], List[Tuple[int, int, int]]]: """Location packing implementation for minor version 3. See module docstring above.""" locTypes = [] locData: List[Tuple[int, int, int]] = [] for loc in locations: locationType = LOCATION_TYPE_LABELS[type(loc)] if loc is None: locDatum = [(0.0, 0.0, 0.0)] elif loc.__class__ is grids.CoordinateLocation: locDatum = [loc.indices] elif loc.__class__ is grids.IndexLocation: locDatum = [loc.indices] elif loc.__class__ is grids.MultiIndexLocation: # encode number of sub-locations to allow in-line unpacking. locationType += f"{len(loc)}" locDatum = [subloc.indices for subloc in loc] else: raise ValueError(f"Invalid location type: {loc}") locTypes.append(locationType) locData.extend(locDatum) return locTypes, locData def _packLocationsV3( locations: List[grids.LocationBase], ) -> Tuple[List[str], List[Tuple[int, int, int]]]: """Location packing implementation for minor version 4. 
    See module docstring above."""
    locTypes = []
    locData: List[Tuple[int, int, int]] = []
    for loc in locations:
        locationType = LOCATION_TYPE_LABELS[type(loc)]
        if loc is None:
            # placeholder datum keeps locData aligned with locTypes
            locDatum = [(0.0, 0.0, 0.0)]
        elif type(loc) is grids.IndexLocation:
            locDatum = [loc.getCompleteIndices()]
        elif type(loc) is grids.CoordinateLocation:
            # CoordinateLocations do not implement getCompleteIndices properly, and we
            # do not really have a motivation to store them as we do with index
            # locations.
            locDatum = [loc.indices]
        elif type(loc) is grids.MultiIndexLocation:
            # encode the number of sub-locations in the type label, e.g. "M:3"
            locationType += f"{len(loc)}"
            locDatum = [subloc.indices for subloc in loc]
        else:
            raise ValueError(f"Invalid location type: {loc}")

        locTypes.append(locationType)
        locData.extend(locDatum)

    return locTypes, locData


def _unpackLocations(locationTypes, locData, minorVersion: int = DB_MINOR):
    """
    Convert location data as read from DB back into data structure for building
    reactor model.

    location and locationType will only have different lengths when multiindex
    locations are used.
    """
    if minorVersion < 3:
        return _unpackLocationsV1(locationTypes, locData)
    else:
        return _unpackLocationsV2(locationTypes, locData)


def _unpackLocationsV1(locationTypes, locData):
    """Delete when reading v <=3.2 DB's no longer wanted."""
    locsIter = iter(locData)
    unpackedLocs = []
    for lt in locationTypes:
        if lt == "None":
            # consume and discard the placeholder datum
            loc = next(locsIter)
            unpackedLocs.append(None)
        elif lt == "IndexLocation":
            loc = next(locsIter)
            # the data is stored as float, so cast back to int
            unpackedLocs.append(tuple(int(i) for i in loc))
        else:
            loc = next(locsIter)
            unpackedLocs.append(tuple(loc))

    return unpackedLocs


def _unpackLocationsV2(locationTypes, locData):
    """Location unpacking implementation for minor version 3+.
""" if nones is None: nones = np.where([d is None for d in data])[0] try: # loop to find what the default value should be. This is the first non-None # value that we can find. defaultValue = None realType = None val = None for val in data: if isinstance(val, np.ndarray): # if multi-dimensional, val[0] could still be an array, val.flat is # a flattened iterator, so next(val.flat) gives the first value in # an n-dimensional array realType = type(next(val.flat)) if realType is type(None): continue defaultValue = np.reshape(np.repeat(NONE_MAP[realType], val.size), val.shape) break else: realType = type(val) if realType is type(None): continue defaultValue = NONE_MAP[realType] break else: # Couldn't find any non-None entries, so it really doesn't matter what type we # use. Using float, because NaN is nice. realType = float defaultValue = NONE_MAP[realType] if isinstance(val, np.ndarray): data = np.array([d if d is not None else defaultValue for d in data]) else: data[nones] = defaultValue except Exception as ee: runLog.error( "Error while attempting to determine default for {}.\nvalue: {}\nError: {}".format(paramName, val, ee) ) raise TypeError( "Could not determine None replacement for {} with type {}, val {}, default {}".format( paramName, realType, val, defaultValue ) ) try: data = data.astype(realType) except Exception: raise ValueError("Could not coerce data for {} to {}, data:\n{}".format(paramName, realType, data)) if data.dtype.kind == "O": raise TypeError("Failed to convert data to valid HDF5 type {}, data:{}".format(paramName, data)) return data def replaceNonsenseWithNones(data: np.ndarray, paramName: str) -> np.ndarray: """ Replace special nonsense values with ``None``. This essentially reverses the operations performed by :py:func:`replaceNonesWithNonsense`. Parameters ---------- data The array from the database that contains special ``None`` nonsense values. paramName The param name who's data we are dealing with. Only used for diagnostics. 
See Also -------- replaceNonesWithNonsense """ # NOTE: This is closely-related to the NONE_MAP. if np.issubdtype(data.dtype, np.floating): isNone = np.isnan(data) elif np.issubdtype(data.dtype, np.integer): isNone = data == np.iinfo(data.dtype).min + 2 elif np.issubdtype(data.dtype, np.str_): isNone = data == "<!None!>" else: raise TypeError("Unable to resolve values that should be None for `{}`".format(paramName)) if data.ndim > 1: result = np.ndarray(data.shape[0], dtype=np.dtype("O")) for i in range(data.shape[0]): if isNone[i].all(): result[i] = None elif isNone[i].any(): # This is the meat of the logic to replace "nonsense" with None. result[i] = np.array(data[i], dtype=np.dtype("O")) result[i][isNone[i]] = None else: result[i] = data[i] else: result = np.ndarray(data.shape, dtype=np.dtype("O")) result[:] = data result[isNone] = None return result ================================================ FILE: armi/bookkeeping/db/passiveDBLoadPlugin.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Provides the ability to ignore parameters sections of blueprint files. This plugin can allow you to more easily open a database, because you can ignore sections of the blueprint files, and ignore any parameters as you want. This was designed to allow loading an ARMI database without the application that created it. 
""" import yamlize from armi import plugins from armi.reactor import parameters from armi.reactor.parameters import ParamLocation from armi.utils import units class PassThroughYamlize(yamlize.Object): """Just a helper for PassiveDBLoadPlugin, to allow for ignore unknown blueprints sections.""" @classmethod def from_yaml(cls, loader, node, round_trip_data=None): node.value = [] return yamlize.Object.from_yaml.__func__(PassThroughYamlize, loader, node, round_trip_data) class PassiveDBLoadPlugin(plugins.ArmiPlugin): """Provides the ability to passively load a reactor data model from an ARMI DB even if there are unknown parameters and blueprint sections. This plugin allows you two define two things: 1. Sections of blueprint files to ignore entirely. 2. A collection of unknown parameters that will be loaded without units or underlying metadata. To use this plugin, you need to set two class variables before instantiating the ARMI App: 1. Set ``SKIP_BP_SECTIONS`` to a list of BP section names (strings). 2. Set ``UNKNOWN_PARAMS`` to a mapping from param class to name: ``{Core: ["a", "b", "c"]}`` Notes ----- Obviously, if you are loading huge numbers of unknown parameters and ignoring whole sections of blueprints, you are losing information. There is no way to use this plugin and still claim full fidelity of your understanding of the reactor. ARMI does not support any such claims. 
""" SKIP_BP_SECTIONS = [] UNKNOWN_PARAMS = {} @staticmethod @plugins.HOOKIMPL def defineBlueprintsSections(): """Ignore a pre-determined set of blueprint sections.""" skips = [] for skippedBp in PassiveDBLoadPlugin.SKIP_BP_SECTIONS: skips.append( ( skippedBp.replace(" ", ""), yamlize.Attribute(key=skippedBp, type=PassThroughYamlize, default=None), PassThroughYamlize, ) ) return skips @staticmethod @plugins.HOOKIMPL def defineParameters(): """Define parameters for the plugin.""" # build all the parameters we are missing in default ARMI params = {} for dataClass, paramNames in PassiveDBLoadPlugin.UNKNOWN_PARAMS.items(): if len(paramNames): params[dataClass] = PassiveDBLoadPlugin.buildParamColl(paramNames) return params @staticmethod def buildParamColl(names): """Try replacing any missing parameters with unitless nonsense.""" # build a collection of defaulted parameters to passively ignore desc = "This is just a placeholder Parameter; it's meaning is unknown." pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE) as pb: for param in names: pb.defParam(param, units=units.UNITLESS, description=desc, saveToDB=False) return pDefs ================================================ FILE: armi/bookkeeping/db/permissions.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
class Permissions: """Mappings to HDF5 permissions flags.""" READ_ONLY_FME = "r" # File Must Exist READ_WRITE_FME = "r+" # File Must Exist CREATE_FILE_TIE = "w" # Truncate If Exists CREATE_FILE_FIE = "w-" # Fail If Exists CREATE_FILE_FIE2 = "x" # Fail If Exists, Alternate option READ_WRITE_CREATE = "a" DEFAULT = READ_WRITE_CREATE # Strictly reading, not writing or creating a file if it doesn't exist read = {READ_ONLY_FME, READ_WRITE_FME} write = { READ_WRITE_FME, CREATE_FILE_TIE, CREATE_FILE_FIE, CREATE_FILE_FIE2, READ_WRITE_CREATE, } create = {CREATE_FILE_TIE, CREATE_FILE_FIE, CREATE_FILE_FIE2, READ_WRITE_CREATE} all = { READ_ONLY_FME, READ_WRITE_FME, CREATE_FILE_TIE, CREATE_FILE_FIE, CREATE_FILE_FIE2, READ_WRITE_CREATE, } ================================================ FILE: armi/bookkeeping/db/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Database tests.""" ================================================ FILE: armi/bookkeeping/db/tests/test_comparedb3.py ================================================ # Copyright 2021 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the compareDB3 module.""" import unittest import warnings from unittest.mock import patch import h5py import numpy as np from armi.bookkeeping.db.compareDB3 import ( DiffResults, OutputWriter, _compareAuxData, _compareSets, _diffSimpleData, _diffSpecialData, compareDatabases, ) from armi.bookkeeping.db.databaseInterface import DatabaseInterface from armi.reactor.tests import test_reactors from armi.tests import TEST_ROOT, mockRunLogs from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestCompareDB3(unittest.TestCase): """Tests for the compareDB3 module.""" def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_outputWriter(self): fileName = "test_outputWriter.txt" with OutputWriter(fileName) as out: out.writeln("Rubber Baby Buggy Bumpers") txt = open(fileName, "r").read() self.assertIn("Rubber", txt) def test_compareSets(self): shorter = set({1, 2, 3}) longer = set({1, 2, 3, 4}) fileName = "fakeOutWriter.txt" with OutputWriter(fileName) as out: nDiffs = _compareSets(shorter, longer, out, name="number") self.assertEqual(nDiffs, 1) nDiffs = _compareSets(longer, shorter, out, name="number") self.assertEqual(nDiffs, 1) def test_diffResultsBasic(self): # init an instance of the class dr = DiffResults(0.01) self.assertEqual(len(dr._columns), 0) self.assertEqual(len(dr._structureDiffs), 0) self.assertEqual(len(dr.diffs), 0) # simple test of addDiff dr.addDiff("thing", "what", 123.4, 122.2345, 555) self.assertEqual(len(dr._columns), 0) 
self.assertEqual(len(dr._structureDiffs), 0) self.assertEqual(len(dr.diffs), 3) self.assertEqual(dr.diffs["thing/what mean(abs(diff))"][0], 123.4) self.assertEqual(dr.diffs["thing/what mean(diff)"][0], 122.2345) self.assertEqual(dr.diffs["thing/what max(abs(diff))"][0], 555) # simple test of addTimeStep dr.addTimeStep("timeStep") self.assertEqual(dr._structureDiffs[0], 0) self.assertEqual(dr._columns[0], "timeStep") # simple test of addStructureDiffs dr.addStructureDiffs(7) self.assertEqual(len(dr._structureDiffs), 1) self.assertEqual(dr._structureDiffs[0], 7) # simple test of _getDefault self.assertEqual(len(dr._getDefault()), 0) # simple test of nDiffs self.assertEqual(dr.nDiffs(), 10) def test_compareDatabaseDuplicate(self): """End-to-end test of compareDatabases() on a photocopy database.""" # build two super-simple H5 files for testing o, r = test_reactors.loadTestReactor( TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}, inputFileName="smallestTestReactor/armiRunSmallest.yaml", ) # create two DBs, identical but for file names dbs = [] for i in range(2): # create the tests DB dbi = DatabaseInterface(r, o.cs) dbi.initDB(fName=self._testMethodName + str(i) + ".h5") db = dbi.database # validate the file exists, and force it to be readable again b = h5py.File(db._fullPath, "r") self.assertEqual(list(b.keys()), ["inputs"]) self.assertEqual(sorted(b["inputs"].keys()), ["blueprints", "settings"]) b.close() # append to lists dbs.append(db) # end-to-end validation that comparing a photocopy database works diffs = compareDatabases(dbs[0]._fullPath, dbs[1]._fullPath) self.assertEqual(len(diffs.diffs), 0) self.assertEqual(diffs.nDiffs(), 0) def test_compareDatabaseSim(self): """End-to-end test of compareDatabases() on very similar databases.""" # build two super-simple H5 files for testing o, r = test_reactors.loadTestReactor( TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}, inputFileName="smallestTestReactor/armiRunSmallest.yaml", ) # create two 
DBs, identical but for file names and cycle lengths dbs = [] for lenCycle in range(1, 3): # build some test data days = 100 cs = o.cs.modified( newSettings={ "cycles": [{"step days": [days, days], "power fractions": [1, 0.5]}], "reloadDBName": "something_fake.h5", } ) # create the tests DB dbi = DatabaseInterface(r, cs) dbi.initDB(fName=self._testMethodName + str(lenCycle) + ".h5") db = dbi.database # populate the db with something r.p.cycle = 0 for node in range(2): r.p.timeNode = node r.p.cycleLength = days * lenCycle db.writeToDB(r) # validate the file exists, and force it to be readable again b = h5py.File(db._fullPath, "r") dbKeys = sorted(b.keys()) self.assertEqual(len(dbKeys), 3) self.assertIn("inputs", dbKeys) self.assertIn("c00n00", dbKeys) self.assertEqual(sorted(b["inputs"].keys()), ["blueprints", "settings"]) b.close() # append to lists dbs.append(db) # end-to-end validation that comparing a photocopy database works with warnings.catch_warnings(): warnings.filterwarnings("ignore") diffs = compareDatabases( dbs[0]._fullPath, dbs[1]._fullPath, timestepCompare=[(0, 0), (0, 1)], ) # spot check the diffs self.assertGreater(len(diffs.diffs), 200) self.assertLess(len(diffs.diffs), 800) self.assertIn("/c00n00", diffs._columns) self.assertIn("/c00n01", diffs._columns) self.assertIn(0, diffs._structureDiffs) self.assertEqual(sum(diffs._structureDiffs), 0) self.assertEqual(diffs.tolerance, 0) self.assertIn("SpentFuelPool/flags max(abs(diff))", diffs.diffs) self.assertIn("Circle/volume mean(diff)", diffs.diffs) self.assertIn("Reactor/flags mean(diff)", diffs.diffs) self.assertEqual(diffs.nDiffs(), 3) def test_diffSpecialData(self): dr = DiffResults(0.01) fileName = "test_diffSpecialData.txt" with OutputWriter(fileName) as out: # spin up one example H5 Dataset f1 = h5py.File("test_diffSpecialData1.hdf5", "w") a1 = np.arange(100, dtype="<f8") refData = f1.create_dataset("numberDensities", data=a1) refData.attrs["1"] = 1 refData.attrs["2"] = 22 
refData.attrs["numDens"] = a1 # spin up an identical example H5 Dataset f2 = h5py.File("test_diffSpecialData2.hdf5", "w") srcData = f2.create_dataset("numberDensities", data=a1) srcData.attrs["1"] = 1 srcData.attrs["2"] = 22 srcData.attrs["numDens"] = a1 # there should be no difference _diffSpecialData(refData, srcData, out, dr) self.assertEqual(dr.nDiffs(), 0) # spin up a different size example H5 Dataset f3 = h5py.File("test_diffSpecialData3.hdf5", "w") a2 = np.arange(90, dtype="<f8") srcData3 = f3.create_dataset("numberDensities", data=a2) srcData3.attrs["1"] = 1 srcData3.attrs["2"] = 22 srcData3.attrs["numDens"] = a2 # there should a logged error with mockRunLogs.BufferLog() as mock: _diffSpecialData(refData, srcData3, out, dr) self.assertIn("Special formatting parameters for", mock.getStdout()) # make an H5 datasets that will cause unpackSpecialData to fail f4 = h5py.File("test_diffSpecialData4.hdf5", "w") refData4 = f4.create_dataset("numberDensities", data=a2) refData4.attrs["shapes"] = "2" refData4.attrs["numDens"] = a2 refData4.attrs["specialFormatting"] = True f5 = h5py.File("test_diffSpecialData5.hdf5", "w") srcData5 = f5.create_dataset("numberDensities", data=a2) srcData5.attrs["shapes"] = "2" srcData5.attrs["numDens"] = a2 srcData5.attrs["specialFormatting"] = True # there should a log message with mockRunLogs.BufferLog() as mock: _diffSpecialData(refData4, srcData5, out, dr) self.assertIn("Unable to unpack special data for", mock.getStdout()) # make an H5 datasets that will add a np.inf diff because keys don't match f6 = h5py.File("test_diffSpecialData6.hdf5", "w") refData6 = f6.create_dataset("numberDensities", data=a2) refData6.attrs["shapes"] = "2" refData6.attrs["numDens"] = a2 f7 = h5py.File("test_diffSpecialData7.hdf5", "w") srcData7 = f7.create_dataset("densities", data=a2) srcData7.attrs["colors"] = "2" srcData7.attrs["numberDens"] = a2 _diffSpecialData(refData6, srcData7, out, dr) def test_diffSimpleData(self): dr = DiffResults(0.01) # spin 
up one example H5 Dataset f1 = h5py.File("test_diffSimpleData1.hdf5", "w") a1 = np.arange(1, 101, dtype="<f8") refData = f1.create_dataset("numberDensities", data=a1) refData.attrs["1"] = 1 refData.attrs["2"] = 22 refData.attrs["numDens"] = a1 # spin up an identical example H5 Dataset f2 = h5py.File("test_diffSimpleData2.hdf5", "w") srcData = f2.create_dataset("numberDensities", data=a1) srcData.attrs["1"] = 1 srcData.attrs["2"] = 22 srcData.attrs["numDens"] = a1 # there should be no difference _diffSimpleData(refData, srcData, dr) self.assertEqual(dr.nDiffs(), 0) # spin up a different size example H5 Dataset f3 = h5py.File("test_diffSimpleData3.hdf5", "w") a2 = np.arange(1, 91, dtype="<f8") srcData3 = f3.create_dataset("numberDensities", data=a2) srcData3.attrs["1"] = 1 srcData3.attrs["2"] = 22 srcData3.attrs["numDens"] = a2 # there should be a small difference _diffSimpleData(refData, srcData3, dr) self.assertEqual(dr.nDiffs(), 3) def test_compareAuxData(self): dr = DiffResults(0.01) fileName = "test_diffSpecialData.txt" with OutputWriter(fileName) as out: # spin up one example H5 Dataset f1 = h5py.File("test_compareAuxData1.hdf5", "w") a1 = np.arange(100, dtype="<f8") refData = f1.create_group("numberDensities") refData.attrs["1"] = 1 refData.attrs["2"] = 22 refData.attrs["numDens"] = a1 # spin up an identical example H5 Dataset f2 = h5py.File("test_compareAuxData2.hdf5", "w") srcData = f2.create_group("numberDensities") srcData.attrs["1"] = 1 srcData.attrs["2"] = 22 srcData.attrs["numDens"] = a1 # there should be no difference _compareAuxData(out, refData, srcData, dr) self.assertEqual(dr.nDiffs(), 0) def test_differentlySizedSpecialData(self): """Ensure that special formatting data that are differently sized report a diff.""" differ = DiffResults(0.0) with h5py.File(self._testMethodName + ".h5", "w") as f, OutputWriter(self._testMethodName + ".txt") as out: # Create two datasets with no data, but with different attributes # The attributes are used in the 
special data checks short = f.create_dataset("short", dtype=float) short.attrs["offsets"] = np.arange(10) long = f.create_dataset("long", dtype=float) long.attrs["offsets"] = np.arange(100) with patch.object(out, "writeln") as writeln: _diffSpecialData(short, long, out, differ) # Ensure the user is alerted the datasets have different parameters writeln.assert_called_once() # Ensure this is treated as a diff self.assertGreater(differ.nDiffs(), 0) def test_nothingForDictionaries(self): """Ensure we alert the user we do not perform diffs on dictionaries.""" differ = DiffResults(0.0) with h5py.File(self._testMethodName + ".h5", "w") as f, OutputWriter(self._testMethodName + ".txt") as out: first = f.create_dataset("first_dictionary", dtype=float) first.attrs["dict"] = True second = f.create_dataset("second_dictionary", dtype=float) second.attrs["dict"] = True with patch.object(out, "writeln") as writeln: _diffSpecialData(first, second, out, differ) # Not considered a diff self.assertEqual(differ.nDiffs(), 0) # But we've let the user know writeln.assert_called_once() # And the parameter is in the printed message msg = writeln.call_args.args[0] # NOTE If you try to grab first.name on the closed DB, you get None which is not helpful self.assertIn(first.name, msg) ================================================ FILE: armi/bookkeeping/db/tests/test_database.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for the Database class.""" import io import os import shutil import subprocess import unittest from glob import glob from unittest.mock import Mock, patch import h5py import numpy as np from armi.bookkeeping.db import _getH5File, database, loadOperator from armi.bookkeeping.db.database import Database from armi.bookkeeping.db.databaseInterface import DatabaseInterface from armi.bookkeeping.db.jaggedArray import JaggedArray from armi.reactor import parameters from armi.reactor.excoreStructure import ExcoreCollection, ExcoreStructure from armi.reactor.grids import CoordinateLocation, MultiIndexLocation from armi.reactor.reactors import Core, Reactor from armi.reactor.spentFuelPool import SpentFuelPool from armi.reactor.tests.test_blocks import loadTestBlock from armi.settings.fwSettings.globalSettings import ( CONF_GROW_TO_FULL_CORE_AFTER_LOAD, CONF_SORT_REACTOR, ) from armi.testing import TESTING_ROOT, loadTestReactor from armi.tests import TEST_ROOT, mockRunLogs from armi.utils import getPreviousTimeNode, safeCopy from armi.utils.directoryChangers import TemporaryDirectoryChanger # determine if this is a parallel run, and git is installed GIT_EXE = None if shutil.which("git") is not None: GIT_EXE = "git" elif shutil.which("git.exe") is not None: GIT_EXE = "git.exe" class TestDatabase(unittest.TestCase): """Tests for the Database class that require a large, complicated reactor.""" def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() self.o, self.r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", customSettings={"reloadDBName": "reloadingDB.h5"}, ) self.dbi = DatabaseInterface(self.r, self.o.cs) self.dbi.initDB(fName=self._testMethodName + ".h5") self.db: Database = self.dbi.database self.stateRetainer = self.r.retainState().__enter__() # used to test location-based history. 
see details below self.centralAssemSerialNums = [] self.centralTopBlockSerialNums = [] def tearDown(self): self.db.close() self.stateRetainer.__exit__() self.td.__exit__(None, None, None) def makeShuffleHistory(self): """Walk the reactor through a few time steps with some shuffling.""" # Serial numbers *are not stable* (i.e., they can be different between test runs due to parallelism and test run # order). However, they are the simplest way to check correctness of location-based history tracking. So we # stash the serial numbers at the location of interest so we can use them later to check our work. self.centralAssemSerialNums = [] self.centralTopBlockSerialNums = [] grid = self.r.core.spatialGrid t = 0 for cycle in range(2): a1 = self.r.core.childrenByLocator[grid[cycle, 0, 0]] a2 = self.r.core.childrenByLocator[grid[0, 0, 0]] olda1Loc = a1.spatialLocator a1.moveTo(a2.spatialLocator) a2.moveTo(olda1Loc) c = self.r.core.childrenByLocator[grid[0, 0, 0]] self.centralAssemSerialNums.append(c.p.serialNum) self.centralTopBlockSerialNums.append(c[-1].p.serialNum) for node in range(2): # something that splitDatabase won't change, so that we can make sure that the right data went to the # right new groups/cycles self.r.p.cycleLength = cycle self.r.p.cycle = cycle self.r.p.timeNode = node t += 1.0 self.r.p.time = t self.db.writeToDB(self.r) # Add some more data that isn't written to the database to test the DatabaseInterface API. self.r.p.cycle = 2 self.r.p.timeNode = 0 self.r.p.cycleLength = cycle self.r.core[0].p.chargeTime = 2 # add some fake missing parameter data to test allowMissing self.db.h5db["c00n00/Reactor/missingParam"] = "i don't exist" def test_load(self): """Load a reactor at different time steps, from the database. .. test:: Load the reactor from the database. 
:id: T_ARMI_DB_TIME1 :tests: R_ARMI_DB_TIME """ self.makeShuffleHistory() with self.assertRaises(KeyError): _r = self.db.load(0, 0) # Default load, should pass without error _r = self.db.load(0, 0, allowMissing=True) # Show that we can use negative indices to load r = self.db.load(0, -2, allowMissing=True) self.assertEqual(r.p.timeNode, 1) with self.assertRaises(ValueError): # makeShuffleHistory only populates 2 nodes, but the case settings defines 3, so we must check -4 before # getting an error self.db.load(0, -4, allowMissing=True) # show we can delete a specify H5 key. del self.db.h5db["c00n00/Reactor/missingParam"] _r = self.db.load(0, 0, allowMissing=False) # show we can delete an entire time now from the DB. del self.db[0, 0, ""] with self.assertRaises(KeyError): self.db.load(0, 0, allowMissing=False) # We should not be able to set the fileName if a file is open. with self.assertRaises(RuntimeError): self.db.fileName = "whatever.h5" def test_loadSortSetting(self): self.makeShuffleHistory() # default load, should pass without error r0 = self.db.load(0, 0, allowMissing=True) # test that the reactor loads differently, dependent on the setting cs = self.db.loadCS() cs = cs.modified(newSettings={CONF_SORT_REACTOR: False}) r1 = self.db.load(0, 0, cs=cs, allowMissing=True) # the reactor / core should be the same size self.assertEqual(len(r0), len(r1)) self.assertEqual(len(r0.core), len(r1.core)) def test_history(self): self.makeShuffleHistory() grid = self.r.core.spatialGrid testAssem = self.r.core.childrenByLocator[grid[0, 0, 0]] testBlock = testAssem[-1] # Test assem hist = self.db.getHistoryByLocation(testAssem, params=["chargeTime", "serialNum"]) expectedSn = {(c, n): self.centralAssemSerialNums[c] for c in range(2) for n in range(2)} self.assertEqual(expectedSn, hist["serialNum"]) # test block hists = self.db.getHistoriesByLocation([testBlock], params=["serialNum"], timeSteps=[(0, 0), (1, 0)]) expectedSn = {(c, 0): self.centralTopBlockSerialNums[c] for c in 
range(2)} self.assertEqual(expectedSn, hists[testBlock]["serialNum"]) # can't mix blocks and assems, since they are different distance from core with self.assertRaises(ValueError): self.db.getHistoriesByLocation([testAssem, testBlock], params=["serialNum"]) # if requested time step isn't written, return no content hist = self.dbi.getHistory(self.r.core[0], params=["chargeTime", "serialNum"], byLocation=True) self.assertIn((2, 0), hist["chargeTime"].keys()) self.assertEqual(hist["chargeTime"][(2, 0)], 2) # test edge case: ancient DB file with patch.object(self.db, "_versionMinor", 3), self.assertRaises(ValueError): self.db.getHistoriesByLocation([testBlock], params=["serialNum"], timeSteps=[(0, 0), (1, 0)]) # test edge case: DB is not version 3 with patch.object(self.db, "_versionMajor", 2), self.assertRaises(ValueError): self.db.getHistoryByLocation(testAssem, params=["chargeTime", "serialNum"]) with patch.object(self.db, "_versionMajor", 4), self.assertRaises(ValueError): self.db.getHistoryByLocation(testAssem, params=["chargeTime", "serialNum"]) def test_fullCoreOnDbLoad(self): """Test we can expand a reactor to full core when loading from DB via settings.""" self.assertFalse(self.r.core.isFullCore) self.db.writeToDB(self.r) cs = self.db.loadCS() cs = cs.modified(newSettings={CONF_GROW_TO_FULL_CORE_AFTER_LOAD: True}) r: Reactor = self.db.load(0, 0, cs=cs) self.assertTrue(r.core.isFullCore) def test_dontExpandIfFullCoreInDB(self): """Test that a full core reactor in the database is not expanded further.""" self.assertFalse(self.r.core.isFullCore) self.db.writeToDB(self.r) cs = self.db.loadCS() cs = cs.modified(newSettings={CONF_GROW_TO_FULL_CORE_AFTER_LOAD: True}) mockGrow = Mock() with ( patch("armi.reactor.cores.Core.isFullCore", Mock(return_value=True)), patch("armi.reactor.cores.Core.growToFullCore", mockGrow), ): self.db.load(0, 0, cs=cs) mockGrow.assert_not_called() def test_getCycleNodeAtTime(self): self.makeShuffleHistory() self.db.close() # test that the 
math works correctly cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0, 0.87, False) self.assertEqual(cycleNodes, ["c00n00"]) cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0.23, 1.2, False) self.assertEqual(cycleNodes, ["c00n00", "c00n01"]) cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0.001, 2.345, False) self.assertEqual(cycleNodes, ["c00n00", "c00n01", "c01n00"]) cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0, 3.123, False) self.assertEqual(cycleNodes, ["c00n00", "c00n01", "c01n00", "c01n01"]) cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0.123, 4.0, False) self.assertEqual(cycleNodes, ["c00n00", "c00n01", "c01n00", "c01n01"]) # test some exceptions are correctly raised with self.assertRaises(AssertionError): Database.getCycleNodeAtTime(self.db.fileName, -1, 1, False) with self.assertRaises(AssertionError): Database.getCycleNodeAtTime(self.db.fileName, 3, 1, False) with self.assertRaises(ValueError): Database.getCycleNodeAtTime(self.db.fileName, 5, 6, False) with self.assertRaises(ValueError): Database.getCycleNodeAtTime(self.db.fileName, 1, 140, True) class TestDatabaseSmaller(unittest.TestCase): """Tests for the Database class, that can use a smaller test reactor.""" def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() self.o, self.r = loadTestReactor( TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}, inputFileName="smallestTestReactor/armiRunSmallest.yaml", ) self.dbi = DatabaseInterface(self.r, self.o.cs) self.dbi.initDB(fName=self._testMethodName + ".h5") self.db: Database = self.dbi.database self.stateRetainer = self.r.retainState().__enter__() def tearDown(self): self.db.close() self.stateRetainer.__exit__() self.td.__exit__(None, None, None) def makeHistory(self): """Walk the reactor through a few time steps and write them to the db.""" for cycle, node in ((cycle, node) for cycle in range(2) for node in range(2)): self.r.p.cycle = cycle self.r.p.timeNode = 
node # something that splitDatabase won't change, so that we can make sure that # the right data went to the right new groups/cycles self.r.p.cycleLength = cycle self.db.writeToDB(self.r) def test_loadOperator(self): self.makeHistory() self.db.close() # Write a bad setting to the DB with h5py.File(self.db.fileName, "r+") as hf: settingz = hf["inputs/settings"].asstr()[()] settingz += " fakeTerminator: I'll be back" stream = io.StringIO(settingz) csString = stream.read() del hf["inputs/settings"] hf["inputs/settings"] = csString # Test with no complaints with mockRunLogs.BufferLog() as mock: _o = loadOperator( self._testMethodName + ".h5", 0, 0, allowMissing=True, handleInvalids=False, ) self.assertNotIn("fakeTerminator", mock.getStdout()) # Test with complaints with mockRunLogs.BufferLog() as mock: _o = loadOperator( self._testMethodName + ".h5", 0, 0, allowMissing=True, handleInvalids=True, ) self.assertIn("Ignoring invalid settings", mock.getStdout()) self.assertIn("fakeTerminator", mock.getStdout()) def _compareArrays(self, ref, src): """ Compare two numpy arrays. Comparing numpy arrays that may have unsavory data (NaNs, Nones, jagged data, etc.) is really difficult. For now, convert to a list and compare element-by-element. 
""" self.assertEqual(type(ref), type(src)) if isinstance(ref, np.ndarray): ref = ref.tolist() src = src.tolist() for v1, v2 in zip(ref, src): # Entries may be None if isinstance(v1, np.ndarray): v1 = v1.tolist() if isinstance(v2, np.ndarray): v2 = v2.tolist() self.assertEqual(v1, v2) def _compareRoundTrip(self, data): """Make sure that data is unchanged by packing/unpacking.""" packed, attrs = database.packSpecialData(data, "testing") roundTrip = database.unpackSpecialData(packed, attrs, "testing") self._compareArrays(data, roundTrip) def test_getArrayShape(self): """Tests a helper method for ``_writeParams``.""" base = [1, 2, 3, 4] self.assertEqual(Database._getArrayShape(base), (4,)) self.assertEqual(Database._getArrayShape(tuple(base)), (4,)) arr = np.array(base) self.assertEqual(Database._getArrayShape(arr), (4,)) arr = np.array([base]) self.assertEqual(Database._getArrayShape(arr), (1, 4)) # not array type self.assertEqual(Database._getArrayShape(1), 1) self.assertEqual(Database._getArrayShape(None), 1) def test_writeToDB(self): """Test writing to the database. .. test:: Write a single time step of data to the database. 
:id: T_ARMI_DB_TIME0 :tests: R_ARMI_DB_TIME """ self.r.p.cycle = 0 self.r.p.cycleLength = 1 self.r.p.time = 0 self.r.p.timeNode = 0 # Adding some nonsense in, to test NoDefault params self.r.p.availabilityFactor = parameters.NoDefault # validate that the H5 file gets bigger after the write self.assertEqual(list(self.db.h5db.keys()), ["inputs"]) self.db.writeToDB(self.r) self.assertEqual(sorted(self.db.h5db.keys()), ["c00n00", "inputs"]) # check the keys for a single time step keys = [ "Circle", "Core", "DerivedShape", "Helix", "HexAssembly", "HexBlock", "Hexagon", "Reactor", "SpentFuelPool", "layout", ] self.assertEqual(sorted(self.db.h5db["c00n00"].keys()), sorted(keys)) # validate availabilityFactor did not make it into the H5 file, but the time parameters did rKeys = [ "cycle", "cycleLength", "time", "timeNode", ] h5Keys = sorted(self.db.h5db["c00n00"]["Reactor"].keys()) for rKey in rKeys: self.assertIn(rKey, h5Keys) def test_getH5File(self): """ Get the h5 file for the database, because that file format is language-agnostic. .. test:: Show the database is H5-formatted. 
:id: T_ARMI_DB_H5 :tests: R_ARMI_DB_H5 """ with self.assertRaises(TypeError): _getH5File(None) h5 = _getH5File(self.db) self.assertEqual(type(h5), h5py.File) def test_auxData(self): path = self.db.getAuxiliaryDataPath((2, 0), "test_stuff") self.assertEqual(path, "c02n00/test_stuff") with self.assertRaises(KeyError): self.db.genAuxiliaryData((-1, -1)) def test_replaceNones(self): """Super basic test that we handle Nones correctly in database read/writes.""" data3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) data1 = np.array([1, 2, 3, 4, 5, 6, 7, 8]) data1iNones = np.array([1, 2, None, 5, 6]) data1fNones = np.array([None, 2.0, None, 5.0, 6.0]) data2fNones = np.array([None, [[1.0, 2.0, 6.0], [2.0, 3.0, 4.0]]], dtype=object) twoByTwo = np.array([[1, 2], [3, 4]]) twoByOne = np.array([[1], [None]]) threeByThree = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) dataJag = JaggedArray([twoByTwo, threeByThree], "testParam") dataJagNones = JaggedArray([twoByTwo, twoByOne, threeByThree], "testParam") dataDict = np.array([{"bar": 2, "baz": 3}, {"foo": 4, "baz": 6}, {"foo": 7, "bar": 8}]) self._compareRoundTrip(data3) self._compareRoundTrip(data1) self._compareRoundTrip(data1iNones) self._compareRoundTrip(data1fNones) self._compareRoundTrip(data2fNones) self._compareRoundTrip(dataJag) self._compareRoundTrip(dataJagNones) self._compareRoundTrip(dataDict) def test_mergeHistory(self): self.makeHistory() # put some big data in an HDF5 attribute. This will exercise the code that pulls such attributes into a formal # dataset and a reference. 
    def test_mergeHistory(self):
        """Merge a DB's history into a fresh DB and verify attrs/datasets carry over."""
        self.makeHistory()

        # put some big data in an HDF5 attribute. This will exercise the code that pulls such attributes into a formal
        # dataset and a reference.
        self.r.p.cycle = 1
        self.r.p.timeNode = 0
        tnGroup = self.db.getH5Group(self.r)
        randomText = "this isn't a reference to another dataset"
        Database._writeAttrs(
            tnGroup["layout/serialNum"],
            tnGroup,
            {
                "fakeBigData": np.eye(8),
                "someString": randomText,
            },
        )

        dbPath = "restartDB.h5"
        db2 = Database(dbPath, "w")
        with db2:
            db2.mergeHistory(self.db, 2, 2)
            self.r.p.cycle = 1
            self.r.p.timeNode = 0
            tnGroup = db2.getH5Group(self.r)

            # this test is a little bit implementation-specific, but nice to be explicit
            self.assertEqual(tnGroup["layout/serialNum"].attrs["someString"], randomText)

            # exercise the _resolveAttrs function
            attrs = Database._resolveAttrs(tnGroup["layout/serialNum"].attrs, tnGroup)
            self.assertTrue(np.array_equal(attrs["fakeBigData"], np.eye(8)))

            keys = sorted(db2.keys())
            self.assertEqual(len(keys), 4)
            self.assertEqual(keys[:3], ["/c00n00", "/c00n01", "/c01n00"])

        # check edge case: major version is not 3
        db3 = Database("restartDBedgeCase1.h5", "w")
        with patch.object(db3, "_versionMajor", 2), self.assertRaises(ValueError):
            with db3:
                db3.mergeHistory(self.db, 2, 2)

    def test_splitDatabase(self):
        """Split a subset of time nodes into a new DB and validate its contents/metadata."""
        self.makeHistory()

        self.db.splitDatabase([(c, n) for c in (0, 1) for n in range(2)], "-all-iterations")

        # Closing to copy back from fast path
        self.db.close()

        with h5py.File("test_splitDatabase.h5", "r") as newDb:
            self.assertEqual(newDb["c00n00/Reactor/cycle"][()], 0)
            self.assertEqual(newDb["c00n00/Reactor/cycleLength"][()][0], 0)
            # (3, 0) was not in the requested split, so its group must not exist
            self.assertNotIn("c03n00", newDb)
            self.assertEqual(newDb.attrs["databaseVersion"], database.DB_VERSION)

            # validate that the min set of meta data keys exists
            meta_data_keys = [
                "appName",
                "armiLocation",
                "databaseVersion",
                "hostname",
                "localCommitHash",
                "machines",
                "platform",
                "platformArch",
                "platformRelease",
                "platformVersion",
                "pluginPaths",
                "python",
                "startTime",
                "successfulCompletion",
                "user",
                "version",
            ]
            for meta_key in meta_data_keys:
                self.assertIn(meta_key, newDb.attrs)
                self.assertIsNotNone(newDb.attrs[meta_key])

        # test an edge case - no DB to split
        with self.assertRaises(ValueError):
            self.db.h5db = None
            self.db.splitDatabase([(c, n) for c in (0, 1) for n in range(2)], "-all-iterations")

    @unittest.skipIf(GIT_EXE is None, "This test needs Git.")
    def test_grabLocalCommitHash(self):
        """Test of static method to grab a local commit hash with ARMI version."""
        # 1. test outside a Git repo
        localHash = Database.grabLocalCommitHash()
        self.assertEqual(localHash, "unknown")

        # 2. test inside an empty git repo
        try:
            code = subprocess.run(
                ["git", "init", "."],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            ).returncode
        except FileNotFoundError:
            print("Skipping this test because it is being run outside a git repo.")
            return
        self.assertEqual(code, 0)
        localHash = Database.grabLocalCommitHash()
        self.assertEqual(localHash, "unknown")

        # 3. test inside a git repo with one tag
        # commit the empty repo
        code = subprocess.run(
            ["git", "commit", "--allow-empty", "-m", '"init"', "--author", '"sam <>"'],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        ).returncode
        if code == 128:
            # GitHub Actions blocks certain kinds of Git commands
            return

        # create a tag off our new commit
        code = subprocess.run(
            ["git", "tag", "thanks", "-m", '"you_rock"'],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        ).returncode
        self.assertEqual(code, 0)

        # test that we recover the correct commit hash
        localHash = Database.grabLocalCommitHash()
        self.assertEqual(localHash, "thanks")

        # delete the .git directory
        code = subprocess.run(["git", "clean", "-f"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode
        self.assertEqual(code, 0)
        code = subprocess.run(
            ["git", "clean", "-f", "-d"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        ).returncode
        self.assertEqual(code, 0)

    def test_fileName(self):
        # test the file name getter
        self.assertEqual(str(self.db.fileName), "test_fileName.h5")

        # test the file name setter (only legal while the DB is closed)
        self.db.close()
        self.db.fileName = "thing.h5"
        self.assertEqual(str(self.db.fileName), "thing.h5")
test_readInputsFromDB(self): """Test that we can read inputs from the database. .. test:: Save and retrieve settings from the database. :id: T_ARMI_DB_CS :tests: R_ARMI_DB_CS .. test:: Save and retrieve blueprints from the database. :id: T_ARMI_DB_BP :tests: R_ARMI_DB_BP """ inputs = self.db.readInputsFromDB() self.assertEqual(len(inputs), 2) # settings self.assertGreater(len(inputs[0]), 100) self.assertIn("settings:", inputs[0]) # blueprints self.assertGreater(len(inputs[1]), 2400) self.assertIn("blocks:", inputs[1]) def test_deleting(self): self.assertTrue(isinstance(self.db, Database)) del self.db self.assertFalse(hasattr(self, "db")) self.db = self.dbi.database def test_open(self): self.assertTrue(self.db.isOpen()) with self.assertRaises(ValueError): self.db.open() def test_loadCS(self): cs = self.db.loadCS() self.assertEqual(cs["nTasks"], 1) self.assertEqual(cs["nCycles"], 2) def test_loadBlueprints(self): bp = self.db.loadBlueprints() self.assertIsNone(bp.nuclideFlags) self.assertEqual(len(bp.assemblies), 0) def test_prepRestartRun(self): """ This test is based on the armiRun.yaml case that is loaded during the `setUp` above. In that cs, `reloadDBName` is set to 'reloadingDB.h5', `startCycle` = 1, and `startNode` = 2. The nonexistent 'reloadingDB.h5' must first be created here for this test. .. test:: Runs can be restarted from a snapshot. 
    def test_prepRestartRun(self):
        """
        This test is based on the armiRun.yaml case that is loaded during the `setUp` above. In that cs,
        `reloadDBName` is set to 'reloadingDB.h5', `startCycle` = 1, and `startNode` = 2. The nonexistent
        'reloadingDB.h5' must first be created here for this test.

        .. test:: Runs can be restarted from a snapshot.
            :id: T_ARMI_SNAPSHOT_RESTART
            :tests: R_ARMI_SNAPSHOT_RESTART
        """
        # first successfully call to prepRestartRun
        o, r = loadTestReactor(
            inputFilePath=TESTING_ROOT,
            inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml",
            customSettings={"reloadDBName": "reloadingDB.h5"},
        )
        cs = o.cs
        ratedPower = cs["power"]
        startCycle = cs["startCycle"]
        startNode = cs["startNode"]
        cyclesSetting = [
            {"step days": [1000, 1000], "power fractions": [1, 1]},
            {"step days": [1000, 1000], "power fractions": [1, 1]},
            {"step days": [1000, 1000], "power fractions": [1, 1]},
        ]
        # mark the node just before the restart point with a distinctive power fraction
        cycleP, nodeP = getPreviousTimeNode(startCycle, startNode, cs)
        cyclesSetting[cycleP]["power fractions"][nodeP] = 0.5
        numCycles = 2
        numNodes = 2
        cs = cs.modified(
            newSettings={
                "nCycles": numCycles,
                "cycles": cyclesSetting,
                "reloadDBName": "something_fake.h5",
            }
        )

        # create a db based on the cs
        dbi = DatabaseInterface(r, cs)
        dbi.initDB(fName="reloadingDB.h5")
        db = dbi.database

        # populate the db with some things
        for cycle, node in ((cycle, node) for cycle in range(numCycles) for node in range(numNodes)):
            r.p.cycle = cycle
            r.p.timeNode = node
            r.p.cycleLength = sum(cyclesSetting[cycle]["step days"])
            r.core.p.power = ratedPower * cyclesSetting[cycle]["power fractions"][node]
            db.writeToDB(r)
        self.assertTrue(db.isOpen())
        db.close()
        self.assertFalse(db.isOpen())

        self.dbi.prepRestartRun()

        # prove that the reloaded reactor has the correct power
        self.assertEqual(self.o.r.p.cycle, cycleP)
        self.assertEqual(self.o.r.p.timeNode, nodeP)
        self.assertEqual(cyclesSetting[cycleP]["power fractions"][nodeP], 0.5)
        self.assertEqual(
            self.o.r.core.p.power,
            ratedPower * cyclesSetting[cycleP]["power fractions"][nodeP],
        )

        # now make the cycle histories clash and confirm that an error is thrown
        cs = cs.modified(
            newSettings={
                "cycles": [
                    {"step days": [666, 666], "power fractions": [1, 1]},
                    {"step days": [666, 666], "power fractions": [1, 1]},
                    {"step days": [666, 666], "power fractions": [1, 1]},
                ],
            }
        )

        # create a db based on the cs
        dbi = DatabaseInterface(r, cs)
        dbi.initDB(fName="reloadingDB.h5")
        db = dbi.database

        # populate the db with something
        for cycle, node in ((cycle, node) for cycle in range(numCycles) for node in range(numNodes)):
            r.p.cycle = cycle
            r.p.timeNode = node
            r.p.cycleLength = 2000
            db.writeToDB(r)
        self.assertTrue(db.isOpen())
        db.close()
        self.assertFalse(db.isOpen())
        with self.assertRaises(ValueError):
            self.dbi.prepRestartRun()
    def test_computeParents(self):
        """Check ``Layout.computeAncestors`` at several ancestor depths against a hand-built tree."""
        # The below arrays represent a tree structure like this:
        #         71 -----------------------.
        #         |                          \
        #        12--.-----.------.          72
        #       /    |      \      \    \
        #     22    30       4---.  6   18-.
        #    / |     |      / |   \    / | \
        #   8  17    2    32  52  62  1  9  10
        #
        # This should cover a handful of corner cases
        # NOTE(review): the diagram shows 52 where serialNums contains 53 — presumably a
        # diagram typo; the specific values do not affect the ancestor logic under test.
        numChildren = [2, 5, 2, 0, 0, 1, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0]
        serialNums = [71, 12, 22, 8, 17, 30, 2, 4, 32, 53, 62, 6, 18, 1, 9, 10, 72]

        # depth 1: immediate parents (root has None)
        expected_1 = [None, 71, 12, 22, 22, 12, 30, 12, 4, 4, 4, 12, 12, 18, 18, 18, 71]
        # depth 2: grandparents
        expected_2 = [
            None,
            None,
            71,
            12,
            12,
            71,
            12,
            71,
            12,
            12,
            12,
            71,
            71,
            12,
            12,
            12,
            None,
        ]
        # depth 3: great-grandparents
        expected_3 = [
            None,
            None,
            None,
            71,
            71,
            None,
            71,
            None,
            71,
            71,
            71,
            None,
            None,
            71,
            71,
            71,
            None,
        ]

        self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren), expected_1)
        self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren, 2), expected_2)
        self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren, 3), expected_3)
    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

        # copy these test files over, so we can edit them
        thisDir = self.td.destination
        yamls = glob(os.path.join(TEST_ROOT, "smallestTestReactor", "*.yaml"))
        for yam in yamls:
            safeCopy(os.path.join(TEST_ROOT, "smallestTestReactor", yam), thisDir)

        # Add an EVST to this reactor
        with open("refSmallestReactor.yaml", "w") as f:
            f.write(self.SMALL_YAML)

        self.o, self.r = loadTestReactor(thisDir, inputFileName="armiRunSmallest.yaml")
        self.dbi = DatabaseInterface(self.r, self.o.cs)
        # one DB file per test method to keep tests independent
        self.dbi.initDB(fName=f"{self._testMethodName}.h5")
        self.db: Database = self.dbi.database

    def tearDown(self):
        self.db.close()
        self.td.__exit__(None, None, None)

    def test_readWriteRoundTrip(self):
        """Test DB some round tripping, writing some data to a DB, then reading from it.

        In particular, we test some parameters on the reactor, core, and blocks. And we move an assembly from
        the core to an EVST between timenodes, and test that worked.
        """
        # put some data in the DB, for timenode 0
        self.r.p.cycle = 0
        self.r.p.timeNode = 0
        self.r.core.p.keff = 0.99
        b = self.r.core.getFirstBlock()
        self.assertIsInstance(b[0].spatialLocator, MultiIndexLocation)
        self.assertIsInstance(b[-1].spatialLocator, CoordinateLocation)
        b.p.power = 12345.6
        self.db.writeToDB(self.r)

        # put some data in the DB, for timenode 1
        self.r.p.timeNode = 1
        self.r.core.p.keff = 1.01

        # move the assembly from the core to the EVST
        a = self.r.core.getFirstAssembly()
        loc = self.r.excore.evst.spatialGrid[(0, 0, 0)]
        self.r.core.remove(a)
        self.r.excore.evst.add(a, loc)
        self.db.writeToDB(self.r)

        # close the DB
        self.db.close()

        # open the DB and verify, the first timenode
        with Database(self.db.fileName) as db:
            r0 = db.load(0, 0, allowMissing=True)
            self.assertEqual(r0.p.cycle, 0)
            self.assertEqual(r0.p.timeNode, 0)
            self.assertEqual(r0.core.p.keff, 0.99)

            # check the types of the data model objects
            self.assertTrue(isinstance(r0, Reactor))
            self.assertTrue(isinstance(r0.core, Core))
            self.assertTrue(isinstance(r0.excore, ExcoreCollection))
            self.assertTrue(isinstance(r0.excore.evst, ExcoreStructure))
            self.assertTrue(isinstance(r0.excore.sfp, SpentFuelPool))

            # Prove our one special block is in the core
            self.assertEqual(len(r0.core.getChildren()), 1)
            b0 = r0.core.getFirstBlock()
            self.assertEqual(b0.p.power, 12345.6)
            # spatial locators (both multi-index and coordinate) must survive the round trip
            self.assertIsInstance(b0[0].spatialLocator, MultiIndexLocation)
            np.testing.assert_array_equal(b[0].spatialLocator.indices, b0[0].spatialLocator.indices)
            self.assertIsInstance(b0[-1].spatialLocator, CoordinateLocation)
            np.testing.assert_array_equal(b[-1].spatialLocator.indices, b0[-1].spatialLocator.indices)

            # the ex-core structures should be empty
            self.assertEqual(len(r0.excore["sfp"].getChildren()), 0)
            self.assertEqual(len(r0.excore["evst"].getChildren()), 0)

        # open the DB and verify, the second timenode
        with Database(self.db.fileName, "r") as db:
            r1 = db.load(0, 1, allowMissing=True)
            self.assertEqual(r1.p.cycle, 0)
            self.assertEqual(r1.p.timeNode, 1)
            self.assertEqual(r1.core.p.keff, 1.01)

            # check the types of the data model objects
            self.assertTrue(isinstance(r1, Reactor))
            self.assertTrue(isinstance(r1.core, Core))
            self.assertTrue(isinstance(r1.excore, ExcoreCollection))
            self.assertTrue(isinstance(r1.excore.evst, ExcoreStructure))
            self.assertTrue(isinstance(r1.excore.sfp, SpentFuelPool))

            # Prove our one special block is NOT in the core, or the SFP
            self.assertEqual(len(r1.core.getChildren()), 0)
            self.assertEqual(len(r1.excore["sfp"].getChildren()), 0)
            self.assertEqual(len(r1.excore.sfp.getChildren()), 0)

            # Prove our one special block is in the EVST
            evst = r1.excore["evst"]
            self.assertEqual(len(evst.getChildren()), 1)
            b1 = evst.getChildren()[0].getChildren()[0]
            self.assertEqual(b1.p.power, 12345.6)
    def test_badData(self):
        """Corrupt one dataset in a written DB and verify loading fails loudly."""
        # create a DB to be modified
        self.db.writeToDB(self.r)
        self.db.close()

        # modify the HDF5 file to corrupt a dataset
        with h5py.File(self.db.fileName, "r+") as hf:
            circleGroup = hf["c00n00"]["Circle"]
            circleMass = np.array(circleGroup["massHmBOL"][()])
            # drop the last entry so the dataset no longer matches the layout
            badData = circleMass[:-1]
            del circleGroup["massHmBOL"]
            circleGroup.create_dataset("massHmBOL", data=badData)

        with self.assertRaises(ValueError):
            with Database(self.db.fileName, "r") as db:
                _r = db.load(0, 0, allowMissing=True)


class TestSimplestDatabaseItems(unittest.TestCase):
    """The tests here are simple, direct tests of Database, that don't need a DatabaseInterface or Reactor."""

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_open(self):
        """An unrecognized permission mode must be rejected on open."""
        dbPath = "test_open.h5"
        db = Database(dbPath, "w")
        self.assertFalse(db.isOpen())

        db._permission = "mock"
        with self.assertRaises(ValueError):
            db.open()


class TestStaticDatabaseItems(unittest.TestCase):
    def test_applyComponentNumberDensitiesMigration(self):
        """Verify the migration packs nuclide names/densities onto component params with correct dtypes."""
        b = loadTestBlock()
        comps = [b[0], b[1]]
        unpacked = [
            {"U235": 1.23e-3, "U238": 2.34e-3},
            {"PU239": 5.6e-4, "PU240": 7.8e-4},
        ]

        Database._applyComponentNumberDensitiesMigration(comps, unpacked)

        for comp, orig in zip(comps, unpacked):
            expected_nucs = np.array(list(orig.keys()), dtype="S6")
            expected_nds = np.array(list(orig.values()), dtype=np.float64)

            # verify nuclide names and dtype
            self.assertTrue(np.array_equal(comp.p["nuclides"], expected_nucs))
            self.assertEqual(comp.p["nuclides"].dtype, np.dtype("S6"))

            # verify number densities and dtype
            self.assertTrue(np.allclose(comp.p["numberDensities"], expected_nds))
            self.assertEqual(comp.p["numberDensities"].dtype, np.float64)
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests of the Database Interface.""" import os import types import unittest import h5py import numpy as np from numpy.testing import assert_allclose, assert_equal from armi import __version__ as version from armi import interfaces, runLog, settings from armi.bookkeeping.db.database import Database from armi.bookkeeping.db.databaseInterface import DatabaseInterface from armi.cases import case from armi.context import PROJECT_ROOT from armi.physics.neutronics.settings import CONF_LOADING_FILE from armi.reactor import blueprints, grids from armi.reactor.blueprints import loadFromCs from armi.reactor.flags import Flags from armi.reactor.reactors import Reactor from armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings from armi.tests import TEST_ROOT from armi.utils import directoryChangers def getSimpleDBOperator(cs): """ Return a very simple operator that covers most of the database interactions. Notes ----- This reactor has only 1 assembly with 1 type of block. It's used to make the db unit tests run very quickly. 
""" newSettings = {} newSettings[CONF_LOADING_FILE] = "smallestTestReactor/refSmallestReactor.yaml" newSettings["verbosity"] = "important" newSettings["db"] = True newSettings["runType"] = "Standard" newSettings["nCycles"] = 1 cs = cs.modified(newSettings=newSettings) genDBCase = case.Case(cs) runLog.setVerbosity("info") o = genDBCase.initializeOperator() o.interfaces = [interface for interface in o.interfaces if interface.name in ["database", "main"]] return o, cs class MockInterface(interfaces.Interface): name = "mockInterface" def __init__(self, r, cs, action=None): interfaces.Interface.__init__(self, r, cs) self.action = action def interactEveryNode(self, cycle, node): self.action(cycle, node) class TestDatabaseInterfaceBOL(unittest.TestCase): """Test the DatabaseInterface class at the BOL.""" def test_interactBOL(self): """This test is in its own class, because of temporary directory issues.""" with directoryChangers.TemporaryDirectoryChanger(): self.o, self.r = loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml") self.dbi = DatabaseInterface(self.r, self.o.cs) dbName = f"{self._testMethodName}.h5" self.dbi.initDB(fName=dbName) self.db: Database = self.dbi.database self.stateRetainer = self.r.retainState().__enter__() self.assertIsNotNone(self.dbi._db) self.dbi.interactBOL() self.dbi.closeDB() self.dbi._db = None self.assertIsNone(self.dbi._db) if os.path.exists(dbName): os.remove(dbName) class TestDatabaseInterface(unittest.TestCase): """Tests for the DatabaseInterface class.""" def setUp(self): self.td = directoryChangers.TemporaryDirectoryChanger() self.td.__enter__() self.o, self.r = loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml") self.dbi = DatabaseInterface(self.r, self.o.cs) self.dbi.initDB(fName=self._testMethodName + ".h5") self.db: Database = self.dbi.database self.stateRetainer = self.r.retainState().__enter__() def tearDown(self): self.db.close() self.stateRetainer.__exit__() 
    def tearDown(self):
        self.db.close()
        self.stateRetainer.__exit__()
        self.td.__exit__(None, None, None)

        # test_interactBOL leaves behind some dirt (accessible after db close) that the
        # TempDirChanger is not catching
        bolDirt = [
            os.path.join(PROJECT_ROOT, "armiRun.h5"),
            os.path.join(PROJECT_ROOT, "armiRunSmallest.h5"),
        ]
        for dirt in bolDirt:
            if os.path.exists(dirt):
                os.remove(dirt)

    def test_distributable(self):
        # distributable() should be stable across a distribute-state interaction
        self.assertEqual(self.dbi.distributable(), 4)
        self.dbi.interactDistributeState()
        self.assertEqual(self.dbi.distributable(), 4)

    def test_demonstrateWritingInteractions(self):
        """Test what nodes are written to the database during the interaction calls."""
        self.o.cs["burnSteps"] = 2  # make test insensitive to burn steps

        r = self.r
        # BOC/BOL doesn't write anything
        r.p.cycle, r.p.timeNode = 0, 0
        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
        self.dbi.interactBOL()
        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
        self.dbi.interactBOC(0)
        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))

        # but the first time node does
        self.dbi.interactEveryNode(0, 0)
        self.assertTrue(self.dbi.database.hasTimeStep(0, 0))

        # EOC 0 shouldn't write, its written by last time node
        r.p.cycle, r.p.timeNode = 0, self.o.cs["burnSteps"]
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
        self.dbi.interactEOC(r.p.cycle)
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))

        # The last node of the step should write though
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
        self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)
        self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))

        # EOL should also write, but lets write last time node first
        r.p.cycle, r.p.timeNode = self.o.cs["nCycles"] - 1, self.o.cs["burnSteps"]
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
        self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)
        self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))

        # now write EOL
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode, "EOL"))
        self.dbi.interactEOL()  # this also saves and closes db

        # reopen db to show EOL is written
        with Database(self._testMethodName + ".h5", "r") as db:
            self.assertTrue(db.hasTimeStep(r.p.cycle, r.p.timeNode, "EOL"))

            # and confirm that last time node is still there/separate
            self.assertTrue(db.hasTimeStep(r.p.cycle, r.p.timeNode))

    def test_interactEveryNodeReturnTightCoupling(self):
        """Test that the DB is NOT written to if cs["tightCoupling"] = True."""
        self.o.cs["tightCoupling"] = True
        self.dbi.interactEveryNode(0, 0)
        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))

    def test_timeNodeLoop_tightCoupling(self):
        """Test that database is written out after the coupling loop has completed."""
        # clear out interfaces (no need to run physics) but leave database
        self.o.interfaces = [self.dbi]
        self.o.cs["tightCoupling"] = True
        self.assertFalse(self.dbi._db.hasTimeStep(0, 0))
        self.o._timeNodeLoop(0, 0)
        self.assertTrue(self.dbi._db.hasTimeStep(0, 0))

    def test_syncDbAfterWrite(self):
        """
        Test to ensure that the fast-path database is copied to working directory at every time node when
        ``syncDbAfterWrite`` is ``True``.
        """
        r = self.r

        self.o.cs["syncDbAfterWrite"] = True
        self.o.cs["burnSteps"] = 2  # make test insensitive to burn steps

        self.dbi.interactBOL()
        self.assertFalse(os.path.exists(self.dbi.database.fileName))

        # Go through a few time nodes to ensure appending is working
        for timeNode in range(self.o.cs["burnSteps"]):
            r.p.cycle = 0
            r.p.timeNode = timeNode
            self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)

            # The file should have been copied to working directory
            self.assertTrue(os.path.exists(self.dbi.database.fileName))

            # The copied file should have the newest time node
            with Database(self.dbi.database.fileName, "r") as db:
                for tn in range(timeNode + 1):
                    self.assertTrue(db.hasTimeStep(r.p.cycle, tn))

            # The in-memory database should have been reloaded properly
            for tn in range(timeNode + 1):
                self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, tn))

        # Make sure EOL runs smoothly
        self.dbi.interactEOL()
        self.assertTrue(os.path.exists(self.dbi.database.fileName))

    def test_noSyncDbAfterWrite(self):
        """
        Test to ensure that the fast-path database is NOT copied to working directory at every time node when
        ``syncDbAfterWrite`` is ``False``.
        """
        self.o.cs["syncDbAfterWrite"] = False

        self.dbi.interactBOL()
        self.assertFalse(os.path.exists(self.dbi.database.fileName))
        self.dbi.interactEveryNode(0, 0)
        self.assertFalse(os.path.exists(self.dbi.database.fileName))
        # only EOL copies the file back to the working directory
        self.dbi.interactEOL()
        self.assertTrue(os.path.exists(self.dbi.database.fileName))
    def test_writeDBFromDBLoadSameDir(self):
        """
        Test to ensure that a reactor loaded from a database can be written to a working database file (one that
        has case settings and blueprints if applicable).
        """
        # Write this reactor to a database file.
        dbi = DatabaseInterface(self.r, self.o.cs)
        dbi.initDB(fName="testDB1.h5")
        db = dbi.database
        db.writeToDB(self.r)
        db.close()

        # Now load the db again
        with Database("testDB1.h5", "r") as db:
            cs2 = db.loadCS()
            r2 = db.load(0, 0, cs=cs2)
        self.assertIsInstance(cs2, settings.Settings)
        self.assertIsInstance(r2, Reactor)

        # Now write this db to this folder
        dbi = DatabaseInterface(r2, cs2)
        dbi.initDB(fName="testDB2.h5")
        db = dbi.database
        db.writeToDB(r2)
        db.close()

        # Now load this db. It should load
        with Database("testDB2.h5", "r") as db:
            cs3 = db.loadCS()
            bp3 = loadFromCs(cs3)
            self.assertIsInstance(bp3, blueprints.Blueprints)
            r3 = db.load(0, 0, cs=cs3, bp=bp3)
        self.assertIsInstance(cs3, settings.Settings)
        self.assertIsInstance(r3, Reactor)

    def test_writeDBFromDBLoadDifDir(self):
        """
        Test to ensure that a reactor loaded from a database can be written to a working database file (one that
        has case settings and blueprints if applicable). The directory is changed between writing and loading.
        """
        # Write this reactor to a database file.
        dbi = DatabaseInterface(self.r, self.o.cs)
        dbi.initDB(fName="testDB1.h5")
        db = dbi.database
        db.writeToDB(self.r)
        db.close()

        # Let's move to a different folder
        os.makedirs("sub", exist_ok=True)
        os.chdir("sub")

        # Now load the db again
        with Database(os.path.join(os.pardir, "testDB1.h5"), "r") as db:
            cs2 = db.loadCS()
            r2 = db.load(0, 0, cs=cs2)
        self.assertIsInstance(cs2, settings.Settings)
        self.assertIsInstance(r2, Reactor)

        # Now write this db to this folder
        dbi = DatabaseInterface(r2, cs2)
        dbi.initDB(fName="testDB2.h5")
        db = dbi.database
        db.writeToDB(r2)
        db.close()

        # Now load this db. It should load
        with Database("testDB2.h5", "r") as db:
            cs3 = db.loadCS()
            r3 = db.load(0, 0, cs=cs3)
        self.assertIsInstance(cs3, settings.Settings)
        self.assertIsInstance(r3, Reactor)
It should load with Database("testDB2.h5", "r") as db: cs3 = db.loadCS() r3 = db.load(0, 0, cs=cs3) self.assertIsInstance(cs3, settings.Settings) self.assertIsInstance(r3, Reactor) class TestDatabaseWriter(unittest.TestCase): def setUp(self): self.td = directoryChangers.TemporaryDirectoryChanger() self.td.__enter__() cs = settings.Settings(os.path.join(TEST_ROOT, "armiRun.yaml")) cs = cs.modified(newSettings={"power": 0.0, "powerDensity": 9e4}) self.o, cs = getSimpleDBOperator(cs) self.r = self.o.r self.stateRetainer = self.r.retainState().__enter__() def tearDown(self): self.td.__exit__(None, None, None) self.stateRetainer.__exit__() def test_writeSystemAttributes(self): """Test the writeSystemAttributes method. .. test:: Validate that we can directly write system attributes to a database file. :id: T_ARMI_DB_QA0 :tests: R_ARMI_DB_QA """ with h5py.File("test_writeSystemAttributes.h5", "w") as h5: Database.writeSystemAttributes(h5) with h5py.File("test_writeSystemAttributes.h5", "r") as h5: self.assertIn("user", h5.attrs) self.assertIn("python", h5.attrs) self.assertIn("armiLocation", h5.attrs) self.assertIn("startTime", h5.attrs) self.assertIn("machines", h5.attrs) self.assertIn("platform", h5.attrs) self.assertIn("hostname", h5.attrs) self.assertIn("platformRelease", h5.attrs) self.assertIn("platformVersion", h5.attrs) self.assertIn("platformArch", h5.attrs) def test_metaData_endSuccessfully(self): """Test databases have the correct metadata in them. .. test:: Validate that databases have system attributes written to them during the usual workflow. 
    def test_metaDataEndFail(self):
        """A failed run must still record metadata, with successfulCompletion False."""

        def failMethod(cycle, node):
            # force a failure partway through the run
            if cycle == 0 and node == 1:
                raise Exception("forcing failure")

        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, failMethod))

        with self.assertRaises(Exception):
            with self.o:
                self.o.operate()

        # the run stopped at the node that raised
        self.assertEqual(0, self.r.p.cycle)
        self.assertEqual(1, self.r.p.timeNode)

        with h5py.File(self.o.cs.caseTitle + ".h5", "r") as h5:
            self.assertFalse(h5.attrs["successfulCompletion"])
            self.assertEqual(h5.attrs["version"], version)
            self.assertIn("caseTitle", h5.attrs)
        # use as attribute so it is accessible within getFluxAwesome
        self.called = False

        def getFluxAwesome(cycle, node):
            if cycle != 0 or node != 2:
                return

            b0 = next(self.r.core.iterBlocks())
            db = self.o.getInterface("database")._db

            # we are now in cycle 0, node 2 ... AFTER setFluxAwesome, but BEFORE writeToDB
            # (the original comment said "cycle 1", but the guard above runs this at cycle 0)
            actualFluxes0 = db.getHistory(b0)["flux"]
            self.assertEqual(expectedFluxes0, actualFluxes0)
            self.called = True

        self.o.interfaces.insert(0, MockInterface(self.o.r, self.o.cs, setFluxAwesome))
        self.o.interfaces.insert(1, MockInterface(self.o.r, self.o.cs, getFluxAwesome))

        with self.o:
            self.o.operate()

        self.assertTrue(self.called)

    def test_getHistoryByLocation(self):
        def setFluxAwesome(cycle, node):
            for bi, b in enumerate(self.r.core.iterBlocks()):
                b.p.flux = 1e6 * bi + 1e3 * cycle + node

        def getFluxAwesome(cycle, node):
            if cycle != 1 or node != 2:
                return

            b = next(self.r.core.iterBlocks())
            db = self.o.getInterface("database").database

            # we are now in cycle 1, node 2 ... AFTER setFluxAwesome
            _fluxes = db.getHistory(b, params=["flux"])

        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, setFluxAwesome))
        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, getFluxAwesome))

        with self.o:
            self.o.operate()

        with h5py.File(self.o.cs.caseTitle + ".h5", "r") as h5:
            self.assertEqual(h5.attrs["version"], version)
newSettings = {"verbosity": "extra"} cls.nCycles = 2 newSettings["nCycles"] = cls.nCycles newSettings["burnSteps"] = 2 o, r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", customSettings=newSettings, ) reduceTestReactorRings(r, o.cs, 3) o.interfaces = [i for i in o.interfaces if isinstance(i, (DatabaseInterface))] dbi = o.getInterface("database") dbi.enabled(True) dbi.initDB() # Main Interface normally does this # update a few parameters def writeFlux(cycle, node): for bi, b in enumerate(o.r.core.iterBlocks()): b.p.flux = 1e6 * bi + cycle * 100 + node b.p.mgFlux = np.repeat(b.p.flux / 33, 33) o.interfaces.insert(0, MockInterface(o.r, o.cs, writeFlux)) with o: o.operate() cls.cs = o.cs cls.bp = o.r.blueprints cls.dbName = o.cs.caseTitle + ".h5" # needed for test_readWritten cls.r = o.r @classmethod def tearDownClass(cls): cls.td.__exit__(None, None, None) del cls.r cls.r = None def _fullCoreSizeChecker(self, r): self.assertEqual(r.core.numRings, 3) self.assertEqual(r.p.cycle, 0) self.assertEqual(len(r.core.assembliesByName), 19) self.assertEqual(len(r.core.circularRingList), 0) self.assertEqual(len(r.core.blocksByName), 57) def test_loadReadOnly(self): with Database(self.dbName, "r") as db: r = db.loadReadOnly(0, 0) # now show we can no longer edit those parameters with self.assertRaises(RuntimeError): r.core.p.keff = 0.99 b = r.core.getFirstBlock() with self.assertRaises(RuntimeError): b.p.power = 432.1 for c in b: self.assertGreater(c.getVolume(), 0) def test_growToFullCore(self): with Database(self.dbName, "r") as db: r = db.load(0, 0, allowMissing=True) # test partial core values self.assertEqual(r.core.numRings, 3) self.assertEqual(r.p.cycle, 0) self.assertEqual(len(r.core.assembliesByName), 7) self.assertEqual(len(r.core.circularRingList), 0) self.assertEqual(len(r.core.blocksByName), 21) r.core.growToFullCore(None) self._fullCoreSizeChecker(r) def test_growToFullCoreWithCS(self): with 
Database(self.dbName, "r") as db: r = db.load(0, 0, allowMissing=True) r.core.growToFullCore(self.cs) self._fullCoreSizeChecker(r) def test_growToFullCoreFromFactory(self): from armi.bookkeeping.db import databaseFactory db = databaseFactory(self.dbName, "r") with db: r = db.load(0, 0, allowMissing=True) r.core.growToFullCore(None) self._fullCoreSizeChecker(r) def test_growToFullCoreFromFactoryWithCS(self): from armi.bookkeeping.db import databaseFactory db = databaseFactory(self.dbName, "r") with db: r = db.load(0, 0, allowMissing=True) r.core.growToFullCore(self.cs) self._fullCoreSizeChecker(r) def test_readWritten(self): with Database(self.dbName, "r") as db: r2 = db.load(0, 0, self.cs) for a1, a2 in zip(self.r.core, r2.core): # assemblies assign a name based on assemNum at initialization self.assertEqual(a1.name, a2.name) assert_equal(a1.spatialLocator.indices, a2.spatialLocator.indices) self.assertEqual(a1.p.assemNum, a2.p.assemNum) self.assertEqual(a1.p.serialNum, a2.p.serialNum) for b1, b2 in zip(a1, a2): # blocks assign a name based on assemNum at initialization self.assertEqual(b1.name, b2.name) assert_equal(b1.spatialLocator.indices, b2.spatialLocator.indices) self.assertEqual(b1.p.serialNum, b2.p.serialNum) for c1, c2 in zip(sorted(b1), sorted(b2)): self.assertEqual(c1.name, c2.name) if isinstance(c1.spatialLocator, grids.MultiIndexLocation): assert_equal( np.array(c1.spatialLocator.indices), np.array(c2.spatialLocator.indices), ) else: assert_equal(c1.spatialLocator.indices, c2.spatialLocator.indices) self.assertEqual(c1.p.serialNum, c2.p.serialNum) # volume is pretty difficult to get right. 
it relies upon linked dimensions v1 = b1.getVolume() v2 = b2.getVolume() assert_allclose(v1, v2) self.assertEqual(b1.p.serialNum, b2.p.serialNum) self.assertEqual( self.r.core.childrenByLocator[0, 0, 0].p.serialNum, r2.core.childrenByLocator[0, 0, 0].p.serialNum, ) def test_readWithoutInputs(self): with Database(self.dbName, "r") as db: r2 = db.load(0, 0) for b1, b2 in zip(self.r.core.iterBlocks(), r2.core.iterBlocks()): for c1, c2 in zip(sorted(b1), sorted(b2)): self.assertEqual(c1.name, c2.name) for bi, b in enumerate(r2.core.iterBlocks()): assert_allclose(b.p.flux, 1e6 * bi) def test_variousTypesWork(self): with Database(self.dbName, "r") as db: r2 = db.load(1, 1) b1 = self.r.core.getFirstBlock(Flags.FUEL) b2 = r2.core.getFirstBlock(Flags.FUEL) self.assertIsInstance(b1.p.mgFlux, np.ndarray) self.assertIsInstance(b2.p.mgFlux, np.ndarray) assert_allclose(b1, b2) c1 = b1.getComponent(Flags.FUEL) c2 = b2.getComponent(Flags.FUEL) for i, v1 in enumerate(c1.p.numberDensities): self.assertAlmostEqual(v1, c2.p.numberDensities[i]) def test_timesteps(self): with Database(self.dbName, "r") as db: # build time steps in the DB file timesteps = [] for cycle in range(self.nCycles): for bStep in range(3): timesteps.append(f"/c0{cycle}n0{bStep}") timesteps.append("/c01n02EOL") # verify the timesteps are correct, including the EOL self.assertEqual(list(db.keys()), timesteps) class TestBadName(unittest.TestCase): def test_badDBName(self): cs = settings.Settings(os.path.join(TEST_ROOT, "armiRun.yaml")) cs = cs.modified(newSettings={"reloadDBName": "aRmIRuN.h5"}) dbi = DatabaseInterface(None, cs) with self.assertRaises(ValueError): # an error should be raised when the database loaded from # has the same name as the run to avoid overwriting. 
            dbi.initDB()


class TestStandardFollowOn(unittest.TestCase):
    """Tests related to doing restart runs (loading from DB with Standard operator)."""

    @classmethod
    def setUpClass(cls):
        cls.td = directoryChangers.TemporaryDirectoryChanger()
        cls.td.__enter__()

        # make DB to load from
        o = cls._getOperatorThatChangesVariables(settings.Settings(os.path.join(TEST_ROOT, "armiRun.yaml")))
        with o:
            o.operate()
        cls.FIRST_END_TIME = o.r.p.time
        if cls.FIRST_END_TIME == 0:
            # Can't use self.assertEqual in the class method but we still need this information
            raise RuntimeError("Time should have advanced by the end of the run.")

        cls.LOAD_DB_PATH = "loadFrom.h5"
        os.rename("armiRun.h5", cls.LOAD_DB_PATH)

    @classmethod
    def tearDownClass(cls):
        cls.td.__exit__(None, None, None)

    @staticmethod
    def _getOperatorThatChangesVariables(cs):
        """
        Return an operator that advances time so that restart runs can be tested.

        Notes
        -----
        Ensures that parameters are consistent between Standard runs and restart runs.
        """
        o, cs = getSimpleDBOperator(cs)

        mock = MockInterface(o.r, o.cs, None)

        def interactEveryNode(self, cycle, node):
            # Could use just += 1 but this will show more errors since it is less
            # susceptible to cancellation of errors off by one.
            self.r.p.time += self.r.p.timeNode + 1

        # Magic to change the method only on this instance of the class.
        mock.interactEveryNode = types.MethodType(interactEveryNode, mock)

        # insert 1 before the database interface so that changes are written to db.
        o.interfaces.insert(1, mock)
        return o

    def test_standardRestart(self):
        """Restart from the saved DB and check the run reaches the same end time."""
        o = self._getRestartOperator()
        # the interact BOL has historically failed due to trying to write inputs
        # which are already in the DB from the _mergeStandardRunDB call
        with o:
            o.operate()
        self.assertEqual(
            self.FIRST_END_TIME,
            o.r.p.time,
            "End time should have been the same for the restart run.\n"
            "First end time: {},\nSecond End time: {}".format(self.FIRST_END_TIME, o.r.p.time),
        )

    def _getRestartOperator(self):
        """Build an operator configured to load from the saved DB at cycle 0, node 1."""
        cs = settings.Settings(os.path.join(TEST_ROOT, "armiRun.yaml"))
        newSettings = {}
        newSettings["loadStyle"] = "fromDB"
        newSettings["reloadDBName"] = self.LOAD_DB_PATH
        newSettings["startCycle"] = 0
        newSettings["startNode"] = 1
        cs = cs.modified(newSettings=newSettings)

        o = self._getOperatorThatChangesVariables(cs)
        return o


================================================
FILE: armi/bookkeeping/db/tests/test_jaggedArray.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the JaggedArray class.""" import unittest import h5py import numpy as np from armi.bookkeeping.db.jaggedArray import JaggedArray from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestJaggedArray(unittest.TestCase): """Tests for the JaggedArray class.""" def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_roundTrip(self): """Basic test that we handle Nones correctly in database read/writes.""" dataSet = [1, 2.0, None, [], [3, 4], (5, 6, 7), np.array([8, 9, 10, 11])] self._compareRoundTrip(dataSet, "test-numbers") def test_roundTripBool(self): """Basic test that we handle Nones correctly in database read/writes.""" dataSet = [True, True, [False, True, False]] self._compareRoundTrip(dataSet, "test-bool") def test_flatten(self): """Test the recursive flattening static method.""" testdata = [(1, 2), [3, 4, 5], [], None, 6, np.array([7, 8, 9])] flatArray = JaggedArray.flatten(testdata) self.assertEqual(flatArray, [1, 2, 3, 4, 5, None, 6, 7, 8, 9]) def test_backwardsCompatible(self): """ Test that the new JaggedArray can unpack the old database jagged data format. The "old" database format contains shapes and offsets for locations that have None. The "new" database format only contains shapes and offsets for non-None values. The "new" unpacking routine is able to read either format. 
""" paramName = "test_old" data = [[1, 2], None, [3, 4, 5], None, None, [6, 7, 8, 9]] flattenedArray = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]) shapes = [(2,), (0,), (3,), (0,), (0,), (4,)] offsets = [0, 2, 2, 5, 5, 5, 5] nones = [1, 3, 4] h5file = "test_oldFormat.h5" with h5py.File(h5file, "w") as hf: dset = hf.create_dataset( data=flattenedArray, name=paramName, ) dset.attrs["jagged"] = True dset.attrs["offsets"] = offsets dset.attrs["shapes"] = shapes dset.attrs["noneLocations"] = nones with h5py.File(h5file, "r") as hf: dataset = hf[paramName] values = dataset[()] offsets = dataset.attrs["offsets"] shapes = dataset.attrs["shapes"] nones = dataset.attrs["noneLocations"] roundTrip = JaggedArray.fromH5( values, offsets, shapes, nones, dtype=flattenedArray.dtype, paramName=paramName, ) self._compareArrays(data, roundTrip) def _compareRoundTrip(self, data, paramName): """Make sure that data is unchanged by packing/unpacking.""" jaggedArray = JaggedArray(data, paramName) # write to HDF5 h5file = "test_jaggedArray.h5" with h5py.File(h5file, "w") as hf: dset = hf.create_dataset( data=jaggedArray.flattenedArray, name=jaggedArray.paramName, ) dset.attrs["jagged"] = True dset.attrs["offsets"] = jaggedArray.offsets dset.attrs["shapes"] = jaggedArray.shapes dset.attrs["noneLocations"] = jaggedArray.nones with h5py.File(h5file, "r") as hf: dataset = hf[paramName] values = dataset[()] offsets = dataset.attrs["offsets"] shapes = dataset.attrs["shapes"] nones = dataset.attrs["noneLocations"] roundTrip = JaggedArray.fromH5( values, offsets, shapes, nones, dtype=jaggedArray.flattenedArray.dtype, paramName=paramName, ) self._compareArrays(data, roundTrip) def _compareArrays(self, ref, src): """ Compare two numpy arrays. Comparing numpy arrays that may have unsavory data (NaNs, Nones, jagged data, etc.) is really difficult. For now, convert to a list and compare element-by-element. Several types of data do not survive a round trip. 
The if-elif branch here converts the initial data into the format expected to be produced by the round trip. The conversions are: - For scalar values (int, float, etc.), the data becomes a numpy array with a dimension of 1 after the round trip. - Tuples and lists become numpy arrays - Empty lists become `None` """ self.assertEqual(type(src), JaggedArray) if isinstance(ref, np.ndarray): ref = ref.tolist() src = src.tolist() for v1, v2 in zip(ref, src): # Entries may be None if isinstance(v1, np.ndarray): v1 = v1.tolist() elif isinstance(v1, tuple): v1 = list(v1) elif isinstance(v1, int): v1 = np.array([v1]) elif isinstance(v1, float): v1 = np.array([v1], dtype=np.float64) elif v1 is None: pass elif len(v1) == 0: v1 = None if isinstance(v2, np.ndarray): v2 = v2.tolist() self.assertEqual(v1, v2) ================================================ FILE: armi/bookkeeping/db/tests/test_layout.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for the db Layout and associated tools.""" import os import unittest from armi import context from armi.bookkeeping.db import database, layout from armi.reactor import grids from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestLocationPacking(unittest.TestCase): """Tests for database location.""" def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_locationPacking(self): loc1 = grids.IndexLocation(1, 2, 3, None) loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None) loc3 = grids.MultiIndexLocation(None) loc3.append(grids.IndexLocation(7, 8, 9, None)) loc3.append(grids.IndexLocation(10, 11, 12, None)) locs = [loc1, loc2, loc3] tp, data = layout._packLocations(locs) self.assertEqual(tp[0], layout.LOC_INDEX) self.assertEqual(tp[1], layout.LOC_COORD) self.assertEqual(tp[2], layout.LOC_MULTI + "2") unpackedData = layout._unpackLocations(tp, data) self.assertEqual(unpackedData[0], (1, 2, 3)) self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0)) self.assertEqual(unpackedData[2], [(7, 8, 9), (10, 11, 12)]) def test_locationPackingOlderVersions(self): for version in [1, 2]: loc1 = grids.IndexLocation(1, 2, 3, None) loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None) loc3 = grids.MultiIndexLocation(None) loc3.append(grids.IndexLocation(7, 8, 9, None)) loc3.append(grids.IndexLocation(10, 11, 12, None)) locs = [loc1, loc2, loc3] tp, data = layout._packLocations(locs, minorVersion=version) self.assertEqual(tp[0], "IndexLocation") self.assertEqual(tp[1], "CoordinateLocation") self.assertEqual(tp[2], "MultiIndexLocation") unpackedData = layout._unpackLocations(tp, data, minorVersion=version) self.assertEqual(unpackedData[0], (1, 2, 3)) self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0)) self.assertEqual(unpackedData[2][0].tolist(), [7, 8, 9]) self.assertEqual(unpackedData[2][1].tolist(), [10, 11, 12]) def test_locationPackingOldVersion(self): version = 3 loc1 = 
grids.IndexLocation(1, 2, 3, None) loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None) loc3 = grids.MultiIndexLocation(None) loc3.append(grids.IndexLocation(7, 8, 9, None)) loc3.append(grids.IndexLocation(10, 11, 12, None)) locs = [loc1, loc2, loc3] tp, data = layout._packLocations(locs, minorVersion=version) self.assertEqual(tp[0], "I") self.assertEqual(tp[1], "C") self.assertEqual(tp[2], "M:2") unpackedData = layout._unpackLocations(tp, data, minorVersion=version) self.assertEqual(unpackedData[0], (1, 2, 3)) self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0)) self.assertEqual(unpackedData[2][0], (7, 8, 9)) self.assertEqual(unpackedData[2][1], (10, 11, 12)) def test_close(self): intendedFileName = "xyz.h5" db = database.Database(intendedFileName, "w") self.assertEqual(db._fileName, intendedFileName) self.assertIsNone(db._fullPath) # this isn't set until the db is opened db.open() self.assertEqual(db._fullPath, os.path.join(context.getFastPath(), intendedFileName)) db.close() # this should move the file out of the FAST_PATH self.assertEqual(db._fullPath, os.path.join(os.path.abspath("."), intendedFileName)) ================================================ FILE: armi/bookkeeping/db/tests/test_passiveDBLoadPlugin.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Provides functionality for testing the PassiveDBLoadPlugin.""" import unittest from copy import deepcopy from io import StringIO from ruamel.yaml import RoundTripLoader from ruamel.yaml.nodes import MappingNode, ScalarNode from armi import context, getApp from armi.bookkeeping.db.passiveDBLoadPlugin import ( PassiveDBLoadPlugin, PassThroughYamlize, ) from armi.reactor.blocks import Block class TestPassiveDBLoadPlugin(unittest.TestCase): def setUp(self): """ Manipulate the standard App. We can't just configure our own, since the pytest environment bleeds between tests. """ self.app = getApp() self._backupApp = deepcopy(self.app) self._cacheBPSections = PassiveDBLoadPlugin.SKIP_BP_SECTIONS self._cacheUnkownParams = PassiveDBLoadPlugin.UNKNOWN_PARAMS PassiveDBLoadPlugin.SKIP_BP_SECTIONS = [] PassiveDBLoadPlugin.UNKNOWN_PARAMS = {} def tearDown(self): """Restore the App to its original state.""" import armi armi._app = self._backupApp context.APP_NAME = "armi" PassiveDBLoadPlugin.SKIP_BP_SECTIONS = self._cacheBPSections PassiveDBLoadPlugin.UNKNOWN_PARAMS = self._cacheUnkownParams def test_passiveDBLoadPlugin(self): plug = PassiveDBLoadPlugin() # default case bpSections = plug.defineBlueprintsSections() self.assertEqual(len(bpSections), 0) params = plug.defineParameters() self.assertEqual(len(params), 0) # non-empty cases PassiveDBLoadPlugin.SKIP_BP_SECTIONS = ["hi", "mom"] PassiveDBLoadPlugin.UNKNOWN_PARAMS = {Block: ["fake1", "fake2"]} bpSections = plug.defineBlueprintsSections() self.assertEqual(len(bpSections), 2) self.assertTrue(type(bpSections[0]), tuple) self.assertEqual(bpSections[0][0], "hi") self.assertTrue(type(bpSections[1]), tuple) self.assertEqual(bpSections[1][0], "mom") params = plug.defineParameters() self.assertEqual(len(params), 1) self.assertIn(Block, params) class TestPassThroughYamlize(unittest.TestCase): def test_passThroughYamlizeExample1(self): # create node from known BP-style YAML object node = MappingNode( 
"test_passThroughYamlizeExample1", [ ( ScalarNode(tag="tag:yaml.org,2002:str", value="core-wide"), MappingNode( tag="tag:yaml.org,2002:map", value=[ ( ScalarNode( tag="tag:yaml.org,2002:str", value="fuel axial expansion", ), ScalarNode(tag="tag:yaml.org,2002:bool", value="False"), ), ( ScalarNode( tag="tag:yaml.org,2002:str", value="grid plate radial expansion", ), ScalarNode(tag="tag:yaml.org,2002:bool", value="True"), ), ], ), ) ], ) # test that node is non-zero and has the "core-wide" section self.assertEqual(node.value[0][0].value, "core-wide") # pass the YAML string through the known YAML pty = PassThroughYamlize() loader = RoundTripLoader(StringIO("")) _p = pty.from_yaml(loader, node) # prove the section has been cleared self.assertEqual(len(node.value), 0) ================================================ FILE: armi/bookkeeping/db/typedefs.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Any, Dict, Tuple from armi.reactor.composites import ArmiObject from armi.reactor.grids import LocationBase # Return type for the getHistories() method # param time node value History = Dict[str, Dict[Tuple[int, int], Any]] Histories = Dict[ArmiObject, History] LocationHistories = Dict[LocationBase, History] ================================================ FILE: armi/bookkeeping/historyTracker.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The History Tracker is a bookkeeping interface that accesses and reports time-dependent state information from the database. At the end of a run, these write text files to show the histories for various follow-on mechanical analysis, fuel performance analysis, etc. Other interfaces may find this useful as well, to get an assembly history for fuel performance analysis, etc. This is particularly useful in equilibrium runs, where the ``EqHistoryTrackerInterface`` will unravel the full history from a single equilibrium cycle. 
Getting history information
---------------------------
Loop over blocks, keys, and timesteps of interest and use commands like this::

    history.getBlockHistoryVal(armiBlock.getName(), key, ts)

Using the database-based history trackers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can pre-load information before gathering it to get much better performance::

    history.preloadBlockHistoryVals(blockNames, historyKeys, timeSteps)

This is essential for performance when history information is going to be accessed in loops over assemblies or blocks.
Reading each param directly from the database individually in loops is paralyzingly slow.

Specifying parameters to add to the EOL history report
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To add state parameters to the list of things that get their history reported, you need to define an interface method
called `getHistoryParams`. It should return a list of block parameters that will become available. For example::

    def getHistoryParams(self):
        return ["flux", "percentBu"]

When you'd like to access history information, you need to grab the history interface. The history interface is
present by default in your interface stack. To get it, just call::

    history = self.getInterface("history")

Now you can do a few things, such as::

    # get some info about what's stored in the history
    assemsWithHistory = history.getDetailAssemblies()
    timeStepsAvailable = history.getTimeIndices()

    # now go out and get some time-dependent block params:
    fluxAtTimeStep3 = history.getBlockHistoryVal("B1003A", "flux", 3)

Specifying blocks and assemblies to track
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
See :ref:`detail-assems`.
""" import traceback from typing import TYPE_CHECKING, List from armi import interfaces, operators, runLog from armi.reactor import grids from armi.reactor.flags import Flags from armi.utils import tabulate ORDER = 2 * interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.BOOKKEEPING if TYPE_CHECKING: from armi.reactor.assemblies import Assembly from armi.reactor.blocks import Block def describeInterfaces(cs): """Function for exposing interface(s) to other code.""" if cs["runType"] not in (operators.RunTypes.EQUILIBRIUM): klass = HistoryTrackerInterface return (klass, {}) return None class HistoryTrackerInterface(interfaces.Interface): """ Makes reports of the state that individual assemblies encounter. .. impl:: This interface allows users to retrieve run data from somewhere other than the database. :id: I_ARMI_HIST_TRACK :implements: R_ARMI_HIST_TRACK This is a special :py:class:`Interface <armi.interfaces.Interface>` that is designed to store assembly and cross section data throughout time. This is done directly, with time-based lists of assembly data, and dictionaries of cross- section data. Users turn this feature on or off using the ``"detailAllAssems"`` setting. Notes ----- This pre-dates the ARMI database system, and we would like to stop supporting this. Please do not find new uses for this; use the databases. Attributes ---------- detailAssemblyNames : list List of detail assembly names in the reactor time : list list of reactor time in years """ name = "history" DETAILED_ASSEMBLY_FLAGS = [Flags.FUEL, Flags.CONTROL] def __init__(self, r, cs): """ HistoryTracker that uses the database to look up parameter history rather than storing them in memory. Warning ------- If the current timestep history is requested and the database has not yet been written this timestep, the current value of the requested parameter is provided. 
        It is possible that this is not the value that will be written to the database during this time step since
        many interfaces that change parameters may interact between this call and the database write.
        """
        interfaces.Interface.__init__(self, r, cs)
        # names of assemblies flagged for detailed tracking
        self.detailAssemblyNames = []
        # cache populated by preloadBlockHistoryVals; None when nothing is preloaded
        self._preloadedBlockHistory = None

    def interactBOL(self):
        self.addDetailAssembliesBOL()

    def interactBOC(self, cycle=None):
        """Look for any new assemblies that are asked for and add them to tracking."""
        self.addDetailAssemsByAssemNums()
        if self.cs["detailAllAssems"]:
            self.addAllDetailedAssems()

    def interactEOL(self):
        """Generate the history reports."""
        self._writeDetailAssemblyHistories()

    def addDetailAssembliesBOL(self):
        """Find and activate assemblies that the user requested detailed treatment of."""
        if self.cs["detailAssemLocationsBOL"]:
            for locLabel in self.cs["detailAssemLocationsBOL"]:
                ring, pos, _axial = grids.locatorLabelToIndices(locLabel)
                i, j = self.r.core.spatialGrid.getIndicesFromRingAndPos(ring, pos)
                aLoc = self.r.core.spatialGrid[i, j, 0]
                try:
                    a = self.r.core.childrenByLocator[aLoc]
                except KeyError:
                    runLog.error(
                        f"Detail assembly in location {locLabel} (requested via `detailAssemLocationsBOL`) is not in "
                        "the core. Update settings."
                    )
                    raise
                self.addDetailAssembly(a)

        if self.cs["detailAllAssems"]:
            self.addAllDetailedAssems()

        # This also gets called at BOC but we still do it here for operators that do not call BOC.
        self.addDetailAssemsByAssemNums()

    def addAllDetailedAssems(self):
        """Add all assems who have the DETAILED_ASSEMBLY_FLAGS as detail assems."""
        for a in self.r.core:
            if a.hasFlags(self.DETAILED_ASSEMBLY_FLAGS):
                self.addDetailAssembly(a)

    def addDetailAssemsByAssemNums(self):
        """
        Activate detail assemblies from input based on assembly number.

        This is used to activate detail assembly tracking on assemblies that are not present in the core at BOL.

        See Also
        --------
        addDetailAssembliesBOL : Similar but for BOL
        """
        detailAssemNums = self.cs["detailAssemNums"]
        if not detailAssemNums:
            return
        for a in self.r.core:
            thisNum = a.getNum()
            # check for new detail assemblies
            if thisNum in detailAssemNums:
                self.addDetailAssembly(a)

    def _writeDetailAssemblyHistories(self):
        """Write data file with assembly histories."""
        detailAssems = self.getDetailAssemblies()
        if len(detailAssems) == 0:
            return
        allBlockHistories = self.getAssemHistories(detailAssems)
        dbi = self.getInterface("database")
        # location history is fetched per assembly (blocks are handled in getAssemHistories)
        locHistory = dbi.getHistories(detailAssems, ["location"])
        assemLocations = {a: locHistory[a]["location"] for a in detailAssems}
        self.writeAssemHistories(detailAssems, allBlockHistories, assemLocations)

    def _getAssemHistoryFileName(self, assem):
        # "a" marks this as an assembly history file
        return self._getHistoryFileName(assem.getName(), "a")

    def _getHistoryFileName(self, label, letter):
        return f"{self.cs.caseTitle}-{label}-{letter}Hist.txt"

    def getTrackedParams(self):
        """Give the list of block parameters that are being tracked."""
        trackedParams = {"residence", "ztop", "zbottom"}

        # loop through interfaces to allow them to add custom params.
        for i in self.o.getInterfaces():
            for newParam in i.getHistoryParams():
                if newParam not in trackedParams:
                    trackedParams.add(newParam)
        return sorted(trackedParams)

    def addDetailAssembly(self, a: "Assembly"):
        """Track the name of assemblies that are flagged for detailed treatment."""
        aName = a.getName()
        if aName not in self.detailAssemblyNames:
            self.detailAssemblyNames.append(aName)

    def getDetailAssemblies(self) -> list["Assembly"]:
        """Returns the assemblies that have been signaled as detail assemblies."""
        assems = []
        if not self.detailAssemblyNames:
            runLog.info("No detail assemblies HistoryTrackerInterface")

        for name in self.detailAssemblyNames:
            try:
                assems.append(self.r.core.getAssemblyByName(name))
            except KeyError:
                # the name lookup table missed, but the assembly may still be in the core
                if name in {a.name for a in self.r.core}:
                    raise Exception("Found it")
                runLog.warning(
                    "Cannot find detail assembly {} in assemblies-by-name lookup table, which has {} entries".format(
                        name, len(self.r.core.assembliesByName)
                    )
                )

        return assems

    def getDetailBlocks(self) -> list["Block"]:
        """Get all blocks in all detail assemblies."""
        return [block for a in self.getDetailAssemblies() for block in a]

    def nonStationaryBlocks(self, a: "Assembly"):
        """Return the blocks of ``a`` that do not carry any of the core's stationary block flags."""
        return [b for b in a if not any(b.hasFlags(sbf) for sbf in self.r.core.stationaryBlockFlagsList)]

    def getAssemHistories(self, assemList: List["Assembly"]):
        """Get the histories for all blocks in detailed assemblies."""
        return self.getInterface("database").getHistories(
            [b for a in assemList for b in self.nonStationaryBlocks(a)],
            self.getTrackedParams(),
        )

    def writeAssemHistories(self, detailAssems, allBlockHistories, assemLocations):
        """Write detailed assembly histories to text files."""
        dbi = self.getInterface("database")
        times = dbi.getHistory(self.r, ["time"])["time"]
        params = self.getTrackedParams()
        for a in detailAssems:
            fName = self._getAssemHistoryFileName(a)
            with open(fName, "w") as out:
                # ts is a tuple, remove the spaces from the string representation so it is easy to load into a
                # spreadsheet or whatever
                headers = [str(ts).replace(" ", "") for ts in times.keys()]
                out.write(
                    tabulate.tabulate(
                        data=(times.values(),),
                        headers=headers,
                        tableFmt="plain",
                        floatFmt="11.5E",
                    )
                )
                out.write("\n")

                # one table per tracked param: rows are non-stationary blocks, columns are time steps
                for param in params:
                    out.write("\n\nkey: {0}\n".format(param))
                    data = [allBlockHistories[b][param].values() for b in self.nonStationaryBlocks(a)]
                    out.write(tabulate.tabulate(data, tableFmt="plain", floatFmt="11.5E"))
                    out.write("\n")

                # loc is a tuple, remove the spaces from the string representation so it is easy to load into a
                # spreadsheet or whatever
                location = [str(loc).replace(" ", "") for loc in assemLocations[a].values()]
                out.write("\n\nkey: location\n")
                out.write(tabulate.tabulate((location,), tableFmt="plain"))
                out.write("\n\n\n")

                # final axial geometry snapshot for each non-stationary block
                headers = "EOL bottom top center".split()
                data = [("", b.p.zbottom, b.p.ztop, b.p.z) for b in self.nonStationaryBlocks(a)]
                out.write(tabulate.tabulate(data, headers=headers, tableFmt="plain", floatFmt="10.3f"))

                out.write("\n\n\nAssembly info\n")
                out.write(f"{a.getName()} {a.getType()}\n")
                for b in self.nonStationaryBlocks(a):
                    out.write(f'"{b.getType()}" {b.p.xsType} {b.p.envGroup}\n')

    def preloadBlockHistoryVals(self, names, keys, timesteps):
        """
        Pre-load block data so it can be more quickly accessed in the future.

        Notes
        -----
        Pre-loading has value because the database is organized in a fashion that is easy/inexpensive to look up data
        for many of time steps simultaneously. These can then be stored and provided when the specific timestep is
        requested. The method ``getBlockHistoryVal`` still looks at the database if the preloaded values don't have
        the needed data, so the same results should be given if this method is not called.
        """
        try:
            dbi = self.getInterface("database")
            blocks = [self.r.core.getBlockByName(name) for name in names]
            # weird special stuff for loc, just leave it be.
            keys = [key for key in keys if key != "loc"]
            data = dbi.getHistories(blocks, keys, timesteps)
            self._preloadedBlockHistory = data
        except Exception:
            # fails during the beginning of standard runs, but that's ok
            runLog.info(f"Unable to pre-load block history values due to error:\n{traceback.format_exc()}")
            self.unloadBlockHistoryVals()

    def unloadBlockHistoryVals(self):
        """Remove all cached db reads."""
        self._preloadedBlockHistory = None

    def getBlockHistoryVal(self, name: str, paramName: str, ts: tuple[int, int]):
        """
        Use the database interface to return the parameter values for the supplied block names, and timesteps.

        Notes
        -----
        If the current timestep history is requested and the database has not yet been written this timestep, the
        current value of the requested parameter is returned.

        Parameters
        ----------
        name
            name of block
        paramName
            parameter keys of interest
        ts
            cycle and node from which to load data

        Raises
        ------
        KeyError
            When param not found in database.
        """
        block = self.r.core.getBlockByName(name)

        if self._isCurrentTimeStep(ts) and not self._databaseHasDataForTimeStep(ts):
            # Current timenode may not have been written to the DB. Use the current value in the param system. Works
            # for fuel performance, for some params, e.g. burnup, dpa.
            return block.p[paramName]

        try:
            val = self._preloadedBlockHistory[block][paramName][ts]
        # not in preloaded or preloaded failed
        except (TypeError, ValueError, KeyError, IndexError):
            dbi = self.getInterface("database")
            try:
                data = dbi.database.getHistory(block, [paramName], [ts])
                val = data[paramName][ts]
            except KeyError:
                runLog.error(f"No value in DB. param name: {paramName} requested index: {ts}")
                raise
        return val

    def _isCurrentTimeStep(self, ts: tuple[int, int]) -> bool:
        """Return True if the timestep requested is the current time step."""
        return ts == (self.r.p.cycle, self.r.p.timeNode)

    def _databaseHasDataForTimeStep(self, ts) -> bool:
        """Return True if the database has data for the requested time step."""
        dbi = self.getInterface("database")
        return ts in dbi.database.genTimeSteps()

    def getTimeSteps(self, a: "Assembly" = None) -> list[float]:
        """
        Given a fuel assembly, return list of time steps values (in years) that are available.

        Parameters
        ----------
        a
            A fuel assembly that has been designated a detail assem. If passed, only timesteps where this assembly is
            in the core will be tracked.

        Returns
        -------
        timeSteps
            times in years that are available in the history

        See Also
        --------
        getTimeIndices : gets indices where an assembly is in the core
        """
        dbi = self.getInterface("database")
        timeInYears = dbi.getHistory(self.r, ["time"])["time"]
        # remove the time step info. Clients don't want it
        timeInYears = [t[1] for t in timeInYears]
        if a:
            b = self._getBlockInAssembly(a)
            # NOTE(review): getHistory is called elsewhere as getHistory(obj, params)
            # (e.g. dbi.getHistory(self.r, ["time"]) above) but here receives only
            # ["id"] — confirm whether this should be dbi.getHistory(b, ["id"]).
            # Also note the loop variable ``ids`` below shadows this outer ``ids``.
            ids = dbi.getHistory(["id"])["id"]
            timeInYears = [time for time, ids in zip(timeInYears, ids) if b.p.id in ids]
        return timeInYears

    @staticmethod
    def _getBlockInAssembly(a: "Assembly") -> "Block":
        """Get a representative fuel block from a fuel assembly."""
        b = a.getFirstBlock(Flags.FUEL)
        if not b:
            runLog.error(f"Assembly {a} does not contain fuel")
            for b in a:
                runLog.error(f"Block {b}")
            raise RuntimeError(
                "A tracked assembly does not contain fuel and has caused this error, see the details in stdout."
            )
        return b


================================================
FILE: armi/bookkeeping/mainInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module performs some file manipulations, cleanups, state loads, etc.

It's a bit of a catch-all interface, and its name is admittedly not very descriptive.
"""

import glob
import itertools
import os
import re

from armi import context, interfaces, runLog, utils
from armi.bookkeeping.db.database import Database
from armi.settings.fwSettings.globalSettings import (
    CONF_COPY_FILES_FROM,
    CONF_COPY_FILES_TO,
    CONF_ZONE_DEFINITIONS,
    CONF_ZONES_FILE,
)
from armi.utils import pathTools
from armi.utils.customExceptions import InputError

# Runs in the preprocessing slot of the interface stack (see class Notes below).
ORDER = interfaces.STACK_ORDER.PREPROCESSING


def describeInterfaces(_cs):
    """Function for exposing interface(s) to other code."""
    return (MainInterface, {"reverseAtEOL": True})


class MainInterface(interfaces.Interface):
    """
    Do some basic manipulations, calls, Instantiates the database.

    Notes
    -----
    Interacts early so that the database is accessible as soon as possible in the run.
    The database interfaces runs near the end of the interface stack, but the main
    interface interacts first.
    """

    name = "main"

    @staticmethod
    def specifyInputs(cs):
        # The only input file this interface may consume is the (optional) zones file.
        return {CONF_ZONES_FILE: [cs[CONF_ZONES_FILE]]}

    def interactBOL(self):
        # Beginning-of-life hook: copy any user-requested files into place before the run.
        interfaces.Interface.interactBOL(self)
        self._moveFiles()

    def _moveFiles(self):
        """
        At the start of each run, arbitrary lists of user-defined files can be copied around.

        This logic is controlled by the settings ``copyFilesFrom`` & ``copyFilesTo``.

        ``copyFilesFrom`` :
            - List of files to copy (cannot be directories).
            - Can be of length zero (that just means no files will be copied).
            - The file names listed can use the ``*`` glob syntax, to reference multiple files.

        ``copyFilesTo`` :
            - List of directories to copy the files into.
            - Can be of length zero; all files will be copied to the local dir.
            - Can be of length one; all files will be copied to that dir.
            - The only other valid length for this list _must_ be the same length as the "from" list.

        Notes
        -----
        If a provided "from" file is missing, this method will silently pass over that. It will only
        check if the length of the "from" and "to" lists are valid in the end.
        """
        # handle a lot of asterisks and missing files
        copyFilesFrom = [
            filePath for possiblePath in self.cs[CONF_COPY_FILES_FROM] for filePath in glob.glob(possiblePath)
        ]
        copyFilesTo = self.cs[CONF_COPY_FILES_TO]

        if len(copyFilesTo) in (len(copyFilesFrom), 0, 1):
            # if any files to copy, then use the first as the default, i.e. len() == 1,
            # otherwise assume '.'
            default = copyFilesTo[0] if any(copyFilesTo) else "."
            # zip_longest pads the (possibly shorter) "to" list with the default destination
            for filename, dest in itertools.zip_longest(copyFilesFrom, copyFilesTo, fillvalue=default):
                pathTools.copyOrWarn(CONF_COPY_FILES_FROM, filename, dest)
        else:
            runLog.error(
                f"cs['{CONF_COPY_FILES_TO}'] must either be length 0, 1, or have the same number "
                f"of entries as cs['{CONF_COPY_FILES_FROM}']. Actual values:\n"
                f" {CONF_COPY_FILES_TO} : {copyFilesTo}\n"
                f" {CONF_COPY_FILES_FROM} : {copyFilesFrom}"
            )
            raise InputError(f"Failed to process {CONF_COPY_FILES_FROM}/{CONF_COPY_FILES_TO}")

    def interactBOC(self, cycle=None):
        """Typically the first interface to interact beginning of cycle."""
        runLog.important(f"Beginning of Cycle {cycle}")
        runLog.LOG.clearSingleLogs()

        if self.cs["rmExternalFilesAtBOC"]:
            self.cleanLastCycleFiles()

    def interactEveryNode(self, cycle, node):
        """Loads from db if necessary."""
        if self.cs["loadStyle"] == "fromDB" and self.cs["loadFromDBEveryNode"]:
            if cycle == 0 and node == 0:
                # skip at BOL because interactBOL handled it.
                pass
            else:
                # reload the reactor state from the DB and hand it back to the operator
                with Database(self.cs["reloadDBName"], "r") as db:
                    r = db.load(cycle, node, self.cs)
                self.o.reattach(r, self.cs)

        if self.cs[CONF_ZONES_FILE] or self.cs[CONF_ZONE_DEFINITIONS]:
            # rebuild user-defined zones after any state reload
            self.r.core.buildManualZones(self.cs)

    def interactEOL(self):
        # End-of-life hook: optionally clean temp files, then summarize warnings.
        if self.cs["rmExternalFilesAtEOL"]:
            # successful run with rmExternalFilesAtEOL activated. Clean things up.
            self.cleanARMIFiles()
        runLog.warningReport()

    def cleanARMIFiles(self):
        """
        Delete temporary ARMI run files like simulation inputs/outputs.

        Useful if running a clean job that doesn't require restarts.
        """
        if context.MPI_RANK != 0:
            # avoid inadvertently calling from worker nodes which could cause filesystem lockups.
            raise ValueError("Only the master node is allowed to clean files here.")
        runLog.important("Cleaning ARMI files due to rmExternalFilesAtEOL option")
        for fileName in os.listdir(os.getcwd()):
            # clean simulation inputs and outputs
            for candidate in [".BCD", ".inp", ".out", "ISOTXS-"]:
                if candidate in fileName:
                    if ".htos.out" in fileName:
                        continue
                    if "sassys.inp" in fileName:
                        continue

                    os.remove(fileName)

            if re.search("ISO..F?$", fileName):
                # clean intermediate XS
                os.remove(fileName)

        for snapText in self.cs["dumpSnapshot"]:
            # snapText is a CCCNNN with C=cycle and N=node
            cycle = int(snapText[0:3])
            node = int(snapText[3:])
            newFolder = "snapShot{0}_{1}".format(cycle, node)
            utils.pathTools.cleanPath(newFolder, forceClean=True)

        # delete database if it's SQLlite
        # no need to delete because the database won't have copied it back if using fastpath.

        # clean temp directories.
        if os.path.exists("shuffleBranches"):
            utils.pathTools.cleanPath("shuffleBranches")
            # Potentially, wait for all the processes to catch up.

        if os.path.exists("failedRuns"):
            utils.pathTools.cleanPath("failedRuns")

    def cleanLastCycleFiles(self):
        """Delete ARMI files from previous cycle that aren't necessary for the next cycle.

        Unless you're doing reloads, of course.
        """
        runLog.important("Cleaning ARMI files due to rmExternalFilesAtBOC option")
        for fileName in os.listdir(os.getcwd()):
            # clean MC**2 and REBUS inputs and outputs
            for candidate in [".BCD", ".inp", ".out", "ISOTXS-"]:
                if candidate in fileName:
                    # Do not remove .htos.out files.
                    if ".htos.out" in fileName:
                        continue
                    if re.search(r"mcc[A-Z0-9]+\.inp", fileName):
                        continue
                    # don't remove mccIA1.inp stuff in case we go out of a burnup bound.
                    try:
                        os.remove(fileName)
                    except OSError:
                        # windows issue, being worked on
                        runLog.warning(
                            "Error removing file {0} during cleanup. It is still in use, probably".format(fileName)
                        )



================================================
FILE: armi/bookkeeping/memoryProfiler.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Interface to help diagnose memory issues during debugging/development.

There are many approaches to memory profiling.

1. You can ask psutil for the memory used by the process from an OS perspective. This is great for
top-down analysis. This module provides printouts that show info from every process running. This
is very fast.

2. You can use ``gc.get_objects()`` to list all objects that the garbage collector is tracking. If
you want, you can filter it down and get the counts and sizes of objects of interest (e.g. all armi
objects).

This module has tools to do all of this. It should help you out.

NOTE: Psutil and sys.getsizeof will certainly report slightly different results.
NOTE: In Windows, it seems that even if your garbage is collected, Windows does not de-allocate all the memory. So if you are a worker and you just got a 2GB reactor but then deleted it, Windows will keep you at 2GB for a while. See Also -------- https://pythonhosted.org/psutil/ https://docs.python.org/3/library/gc.html#gc.garbage """ import gc import sys from os import cpu_count from typing import Optional from armi import context, interfaces, mpiActions, runLog from armi.reactor.composites import ArmiObject from armi.utils import tabulate from armi.utils.customExceptions import NonexistentSetting try: # psutil is an optional requirement, since it doesn't support MacOS very well import psutil _havePsutil = True except ImportError: runLog.warning("Failed to import psutil; MemoryProfiler will not provide meaningful data.") _havePsutil = False ORDER = interfaces.STACK_ORDER.POSTPROCESSING REPORT_COUNT = 100000 def describeInterfaces(cs): """Function for exposing interface(s) to other code.""" return (MemoryProfiler, {}) def getTotalJobMemory(nTasks, cpusPerTask): """Function to calculate the total memory of a job. This is a constant during a simulation.""" cpuPerNode = cpu_count() ramPerCpuGB = psutil.virtual_memory().total / (1024**3) / cpuPerNode jobMem = nTasks * cpusPerTask * ramPerCpuGB return jobMem def getCurrentMemoryUsage(): """This scavenges the memory profiler in ARMI to get the current memory usage.""" memUsageAction = PrintSystemMemoryUsageAction() memUsageAction.broadcast() smpu = SystemAndProcessMemoryUsage() memUsages = memUsageAction.gather(smpu) # Grab virtual memory instead of physical. 
There is a large discrepancy, we will be conservative memoryUsageInMB = sum([mu.processVirtualMemoryInMB for mu in memUsages]) return memoryUsageInMB class MemoryProfiler(interfaces.Interface): name = "memoryProfiler" def __init__(self, r, cs): interfaces.Interface.__init__(self, r, cs) self.sizes = {} def interactBOL(self): interfaces.Interface.interactBOL(self) self.printCurrentMemoryState() mpiAction = PrintSystemMemoryUsageAction() mpiAction.broadcast().invoke(self.o, self.r, self.cs) mpiAction.printUsage("BOL SYS_MEM") # so we can debug mem profiler quickly if self.cs["debugMem"]: mpiAction = ProfileMemoryUsageAction("EveryNode") mpiAction.broadcast().invoke(self.o, self.r, self.cs) def interactEveryNode(self, cycle, node): self.printCurrentMemoryState() mp = PrintSystemMemoryUsageAction() mp.broadcast() mp.invoke(self.o, self.r, self.cs) mp.printUsage("c{} n{} SYS_MEM".format(cycle, node)) self.r.core.p.minProcessMemoryInMB = round(mp.minProcessMemoryInMB * 10) / 10.0 self.r.core.p.maxProcessMemoryInMB = round(mp.maxProcessMemoryInMB * 10) / 10.0 if self.cs["debugMem"]: mpiAction = ProfileMemoryUsageAction("EveryNode") mpiAction.broadcast().invoke(self.o, self.r, self.cs) def interactEOL(self): """End of life hook. Good place to wrap up or print out summary outputs.""" if self.cs["debugMem"]: mpiAction = ProfileMemoryUsageAction("EOL") mpiAction.broadcast().invoke(self.o, self.r, self.cs) def printCurrentMemoryState(self): """Print the current memory footprint and available memory.""" try: cpusPerTask = self.cs["cpusPerTask"] except NonexistentSetting: runLog.extra( "To view memory consumed, remaining available, and total allocated for a case, " "add the setting 'cpusPerTask' to your application." 
) return nTasks = self.cs["nTasks"] totalMemoryInGB = getTotalJobMemory(nTasks, cpusPerTask) currentMemoryUsageInGB = getCurrentMemoryUsage() / 1024 availableMemoryInGB = totalMemoryInGB - currentMemoryUsageInGB runLog.info( f"Currently using {currentMemoryUsageInGB} GB of memory. " f"There is {availableMemoryInGB} GB of memory left. " f"There is a total allocation of {totalMemoryInGB} GB." ) def displayMemoryUsage(self, timeDescription): r""" Print out some information to stdout about the memory usage of ARMI. Useful when the debugMem setting is set to True. Turn these on as appropriate to find all your problems. """ runLog.important("----- Memory Usage Report at {} -----".format(timeDescription)) self._printFullMemoryBreakdown(reportSize=self.cs["debugMemSize"]) self._reactorAssemblyTrackingBreakdown() runLog.important("----- End Memory Usage Report at {} -----".format(timeDescription)) def _reactorAssemblyTrackingBreakdown(self): runLog.important("Reactor attribute ArmiObject tracking count") for attrName, attrObj in self.r.core.__dict__.items(): if not attrObj: continue if isinstance(attrObj, list) and isinstance(attrObj[0], ArmiObject): runLog.important("List {:30s} has {:4d} ArmiObjects".format(attrName, len(attrObj))) if isinstance(attrObj, dict) and isinstance(list(attrObj.values())[0], ArmiObject): runLog.important("Dict {:30s} has {:4d} ArmiObjects".format(attrName, len(attrObj))) if self.r.excore.get("sfp") is not None: runLog.important("SFP has {:4d} ArmiObjects".format(len(self.r.excore["sfp"]))) def checkForDuplicateObjectsOnArmiModel(self, attrName, refObject): """Scans through ARMI model for duplicate objects.""" if self.r is None: return uniqueIds = set() uniqueObjTypes = set() def checkAttr(subObj): if getattr(subObj, attrName, refObject) != refObject: uniqueIds.add(id(getattr(subObj, attrName))) uniqueObjTypes.add(subObj.__class__.__name__) for a in self.r.core.getAssemblies(includeAll=True): checkAttr(a) for b in a: checkAttr(b) for c in b: 
checkAttr(c) checkAttr(c.material) for i in self.o.getInterfaces(): checkAttr(i) if i.name == "xsGroups": for _, block in i.representativeBlocks.items(): checkAttr(block) if len(uniqueIds) == 0: runLog.important("There are no duplicate `.{}` attributes".format(attrName)) else: runLog.error( "There are {} unique objects stored as `.{}` attributes!\n" "Expected id {}, but got {}.\nExpected object:{}\n" "These types of objects had unique attributes: {}".format( len(uniqueIds) + 1, attrName, id(refObject), uniqueIds, refObject, ", ".join(uniqueObjTypes), ) ) raise RuntimeError def _printFullMemoryBreakdown(self, reportSize=True, printReferrers=False): """ Looks for any class from any module in the garbage collector and prints their count and size. Parameters ---------- reportSize : bool, optional calculate size as well as counting individual objects. Notes ----- Just because you use startsWith=armi doesn't mean you'll capture all ARMI objects. Some are in lists and dictionaries. """ cs = self.cs operator = self.o reactor = self.r if reportSize: self.o.detach() gc.collect() allObjects = gc.get_objects() runLog.info("GC returned {} objects".format(len(allObjects))) instanceCounters = KlassCounter(reportSize) instanceCounters.countObjects(allObjects) for counter in sorted(instanceCounters.counters.values()): runLog.info( "UNIQUE_INSTANCE_COUNT: {:60s} {:10d} {:10.1f} MB".format( counter.classType.__name__, counter.count, counter.memSize / (1024**2.0), ) ) if printReferrers and counter.memSize / (1024**2.0) > 100: referrers = gc.get_referrers(counter.first) runLog.info(" Referrers of first one: ") for referrer in referrers: runLog.info(" {}".format(repr(referrer)[:150])) runLog.info("gc garbage: {}".format(gc.garbage)) if printReferrers: # if you want more info on the garbage referrers, run this. WARNING, it's generally like 1000000 lines. 
runLog.info("referrers") for o in gc.garbage: for r in gc.get_referrers(o): runLog.info("ref for {}: {}".format(o, r)) if reportSize: operator.reattach(reactor, cs) @staticmethod def getReferrers(obj): """Print referrers in a useful way (as opposed to gigabytes of text.""" runLog.info("Printing first 100 character of first 100 referrers") for ref in gc.get_referrers(obj)[:100]: runLog.important("ref for {}: {}".format(obj, repr(ref)[:100])) class KlassCounter: """ Helper class, to allow us to count instances of various classes in the Python standard library garbage collector (gc). Counting can be done simply, or by memory footprint. """ def __init__(self, reportSize): self.counters = dict() self.reportSize = reportSize self.count = 0 def __getitem__(self, classType): if classType not in self.counters: self.counters[classType] = InstanceCounter(classType, self.reportSize) return self.counters[classType] def countObjects(self, ao): """ Recursively find objects inside arbitrarily-deeply-nested containers. This is designed to work with the garbage collector, so it focuses on objects potentially being held in dict, tuple, list, or sets. 
""" counter = self[type(ao)] if counter.add(ao): self.count += 1 if self.count % REPORT_COUNT == 0: runLog.info("Counted {} items".format(self.count)) if isinstance(ao, dict): for k, v in ao.items(): self.countObjects(k) self.countObjects(v) elif isinstance(ao, (list, tuple, set)): for v in iter(ao): self.countObjects(v) class InstanceCounter: def __init__(self, classType, reportSize): self.classType = classType self.count = 0 self.reportSize = reportSize if reportSize: self.memSize = 0 else: self.memSize = float("nan") self.items = set() self.ids = set() self.first = None def add(self, item): itemId = id(item) if itemId in self.ids: return False self.ids.add(itemId) if self.reportSize: self.memSize += sys.getsizeof(item) self.count += 1 return True def __cmp__(self, that): return (self.count > that.count) - (self.count < that.count) def __ls__(self, that): return self.count < that.count def __gt__(self, that): return self.count > that.count class ProfileMemoryUsageAction(mpiActions.MpiAction): def __init__(self, timeDescription): mpiActions.MpiAction.__init__(self) self.timeDescription = timeDescription def invokeHook(self): mem = self.o.getInterface("memoryProfiler") mem.displayMemoryUsage(self.timeDescription) class SystemAndProcessMemoryUsage: def __init__(self): self.nodeName = context.MPI_NODENAME self.percentNodeRamUsed: Optional[float] = None self.processMemoryInMB: Optional[float] = None self.processVirtualMemoryInMB: Optional[float] = None # no psutil, no memory diagnostics if _havePsutil: self.percentNodeRamUsed = psutil.virtual_memory().percent self.processMemoryInMB = psutil.Process().memory_info().rss / (1024.0**2) self.processVirtualMemoryInMB = psutil.Process().memory_info().vms / (1024.0**2) def __isub__(self, other): if self.percentNodeRamUsed is not None and other.percentNodeRamUsed is not None: self.percentNodeRamUsed -= other.percentNodeRamUsed self.processMemoryInMB -= other.processMemoryInMB self.processVirtualMemoryInMB -= 
other.processVirtualMemoryInMB return self class PrintSystemMemoryUsageAction(mpiActions.MpiAction): def __init__(self): mpiActions.MpiAction.__init__(self) self.usages = [] self.percentNodeRamUsed: Optional[float] = None def __iter__(self): return iter(self.usages) def __isub__(self, other): if self.percentNodeRamUsed is not None and other.percentNodeRamUsed is not None: self.percentNodeRamUsed -= other.percentNodeRamUsed for mine, theirs in zip(self, other): mine -= theirs return self @property def minProcessMemoryInMB(self): if len(self.usages) == 0: return 0.0 return min(mu.processMemoryInMB or 0.0 for mu in self) @property def maxProcessMemoryInMB(self): if len(self.usages) == 0: return 0.0 return max(mu.processMemoryInMB or 0.0 for mu in self) def invokeHook(self): spmu = SystemAndProcessMemoryUsage() self.percentNodeRamUsed = spmu.percentNodeRamUsed self.usages = self.gather(spmu) def printUsage(self, description=None): """This method prints the usage of all MPI nodes. The printout looks something like: SYS_MEM HOSTNAME 14.4% RAM. Proc mem (MB): 491 472 471 471 471 470 SYS_MEM HOSTNAME 13.9% RAM. Proc mem (MB): 474 473 472 471 460 461 SYS_MEM HOSTNAME ... SYS_MEM HOSTNAME ... 
""" printedNodes = set() prefix = description or "SYS_MEM" memoryData = [] for memoryUsage in self: if memoryUsage.nodeName in printedNodes: continue printedNodes.add(memoryUsage.nodeName) nodeUsages = [mu for mu in self if mu.nodeName == memoryUsage.nodeName] sysMemAvg = sum(mu.percentNodeRamUsed or 0.0 for mu in nodeUsages) / len(nodeUsages) memoryData.append( ( "{:<24}".format(memoryUsage.nodeName), "{:5.1f}%".format(sysMemAvg), "{}".format(" ".join("{:5.0f}".format(mu.processMemoryInMB or 0.0) for mu in nodeUsages)), ) ) runLog.info( "Summary of the system memory usage at `{}`:\n".format(prefix) + tabulate.tabulate( memoryData, headers=[ "Machine", "Average System RAM Usage", "Processor Memory Usage (MB)", ], tableFmt="armi", ) ) ================================================ FILE: armi/bookkeeping/report/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Package for generating reports as printable groups and HTML in ARMI.""" from armi.bookkeeping.report import data def setData(name, value, group=None, reports=None): """ Stores data in accordance with the specified parameters for use later. Parameters ---------- name : str value : Object Any value desired. 
group : data.Group reports : data.Report """ from armi.bookkeeping.report.reportInterface import ReportInterface if not name or not isinstance(name, str): raise AttributeError(f"Given name {name} not acceptable.") group = group or UNGROUPED if not isinstance(group, data.Group): raise AttributeError(f"Given group {group} not acceptable/approved.") reports = reports or [] if not isinstance(reports, (list, set, tuple)): reports = [reports] if ALL not in reports: reports.append(ALL) if not all(isinstance(tag, data.Report) for tag in reports): raise AttributeError(f"Unapproved reports for {name}") for report in reports: if report not in ReportInterface.reports: ReportInterface.reports.add(report) report.addToReport(group, name, value) # -------------------------------------------- # GROUP DEFINITIONS # -------------------------------------------- BLOCK_AREA_FRACS = data.Table( "Assembly Area Fractions", " Of First Fuel Block", header=["Component", "Area (cm^2)", "Fraction"], ) BOND_DIMS = data.Table("Bond Dimensions", " Of First Fuel Block") CASE_CONTROLS = data.Table("Case Controls") CASE_PARAMETERS = data.Table("Case Parameters") CLAD_DIMS = data.Table("Cladding Dimensions", " Of First Fuel Block") COOLANT_DIMS = data.Table("Coolant Dimensions", " Of First Fuel Block") DUCT_DIMS = data.Table("Duct Dimensions", " Of First Fuel Block") FUEL_DIMS = data.Table("Fuel Dimensions", " Of First Fuel Block") GAP_DIMS = data.Table("Gap Dimensions", " Of First Fuel Block") INTERCOOLANT_DIMS = data.Table("Intercoolant Dimensions", " Of First Fuel Block") LINER_DIMS = data.Table("Liner Dimensions", " Of First Fuel Block") NEUT_LOSS = data.Table("Neutron Loss") NEUT_PROD = data.Table("Full Core Neutron Production", header=["", "n/s"]) PIN_ASSEM_DESIGN = data.Table("Pin/Assembly Design Summary (averages)") RUN_META = data.Table("Run Meta") UNGROUPED = data.Table("Ungrouped", "No grouping specified for the following information.") WIRE_DIMS = data.Table("Wire Dimensions", " Of First 
Fuel Block") # ----------------------------------------- ASSEM_TYPES = data.Image( "Assembly Types", "The axial block and enrichment distributions of assemblies in the core at " "beginning of life. The percentage represents the block enrichment (U-235 or B-10), where as " "the additional character represents the cross section id of the block. " "The number of fine-mesh subdivisions are provided on the secondary y-axis.", ) FACE_MAP = data.Image("Reactor Face Map", "The surface map of the reactor.") FLUX_PLOT = data.Image("Plot of flux", "flux plot") KEFF_PLOT = data.Image("Plot of K-Effective vs. Time", "k-eff vs. time") MOVES_PLOT = data.Image("Plot of Moves vs. Time", "moves vs. time") TIME_PLOT = data.Image("Plot of Value vs. Time", "value vs. time") TIMELINE = data.Image("Timeline", "Time occupied by certain method invocations in run") XS_PLOT = data.Image("Plot of Xs vs. Time", "xs vs. time") # -------------------------------------------- # REPORT DEFINITIONS # -------------------------------------------- ALL = data.Report( "Comprehensive Core Report", "Every piece of reported information about the ARMI run.", ) DESIGN = data.Report("Core Design Report", "Information related to the core design parameters") # -------------------------------------------- # FURTHER STYLIZATION # -------------------------------------------- # have every report render these in the following order if present data.Report.groupsOrderFirst = [ FACE_MAP, RUN_META, CASE_PARAMETERS, CASE_CONTROLS, ASSEM_TYPES, ] # This a grouping of components which span the entire html page rather than being sectioned into # smaller columns. 
data.Report.componentWellGroups = [
    FACE_MAP,
    ASSEM_TYPES,
    CLAD_DIMS,
    WIRE_DIMS,
    DUCT_DIMS,
    COOLANT_DIMS,
    INTERCOOLANT_DIMS,
    FUEL_DIMS,
    BOND_DIMS,
]



================================================
FILE: armi/bookkeeping/report/data.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data formats for reports."""

import collections
import copy
import re

from armi import runLog


class Report:
    """Storage for data separated out for a particular kind of user."""

    # stubs for "further stylization" in the report package init
    groupsOrderFirst = []
    componentWellGroups = []

    def __init__(self, title, description):
        self.title = title
        self.description = description
        self.groups = {}  # {Global Instance : Local Instance}

    @property
    def _groupRenderOrder(self):
        """Helper method to the rendering methods on this class for rendering order of contained info."""
        # groups named in groupsOrderFirst come first (only if actually present)
        presentGroupsOrderFirst = [group for group in self.groupsOrderFirst if group in self.groups]
        completeGroupOrder = presentGroupsOrderFirst + [
            group for group in self.groups.keys() if group not in presentGroupsOrderFirst
        ]
        # "component well" groups are rendered separately and excluded here
        specialsRemovedOrder = [group for group in completeGroupOrder if group not in self.componentWellGroups]
        return specialsRemovedOrder

    def __str__(self):
        str_ = "\n{} - (REPORT) {}\n".format(self.title, self.description)
        for global_group in self.groups.values():
            # Don't use subclassed methods
            str_ += re.sub("\n", "\n\t", "{}".format(Group.__str__(global_group)))
        return str_

    def addToReport(self, group, name, value):
        """Inserts the datum into the correct group of the report."""
        if group not in self.groups:
            # deep-copy the global group so this report owns a local instance
            self.groups[group] = copy.deepcopy(group)
        self.groups[group][name] = value

    def __getitem__(self, group):
        try:
            return self.groups[group]
        except KeyError:
            runLog.warning("Cannot locate group {} in report {}".format(group.title, self.title))
            return None


class Group:
    """Abstract class, when extended is used for storage for data within a report.

    Only accepts things wrapped in the ReportDatum class.
    """

    def __init__(self, title, description=""):
        self.title = title
        self.description = description
        self.data = collections.OrderedDict()
        # inline CSS used when the group is rendered to HTML
        self.descStyle = "font-weight: normal; font-style: italic; font-size: 14px; padding-left: 5px;"
        self.titleStyle = "font-weight: bold; padding-top: 20px;"

    def __str__(self):
        str_ = "\n{} - (GROUP) {}\n".format(self.title, self.description)
        for name, value in self.data.items():
            str_ += "\t{:<30} {}\n".format(name, value)
        return str_

    def __getitem__(self, name):
        try:
            return self.data[name]
        except KeyError:
            runLog.warning("Given name {} not present in report group {}".format(name, self.title))
            return None

    def __setitem__(self, name, value):
        self.data[name] = value


class Table(Group):
    """A group rendered as a table with an optional column header."""

    def __init__(self, title, description="", header=None):
        Group.__init__(self, title, description=description)
        self.header = header

    def __str__(self):
        """Truer to content representation."""
        # error handling
        if not len(self.data):
            return ""

        # set up
        prototypical_data = list(self.data.values())[0]
        # +1 column for the row name itself
        num_cols = len(prototypical_data) + 1
        border_dashes = "-" * (num_cols * 31) + "\n"

        # create header
        str_ = border_dashes
        str_ += "{} - {}\n".format(self.title, self.description)
        if self.header:
            for column_title in self.header:
                str_ += "{:<30} ".format(column_title)
            str_ += "\n"
        str_ += border_dashes

        # create table body
        for name, value in sorted(self.data.items(), key=self._lowerCaseSortForTuples):
            str_ += "{:<30} ".format(name)
            for item in value:
                str_ += "{:<30} ".format(item)
            str_ += "\n"

        return str_

    @staticmethod
    def _lowerCaseSortForTuples(nameValPair):
        """Force the key in a key-value pair to lower case."""
        return nameValPair[0].lower()

    def __setitem__(self, name, value):
        # every row is stored as a list so single values render as one-column rows
        if not isinstance(value, list):
            value = [value]
        Group.__setitem__(self, name, value)


class Image(Group):
    """A group holding image entries, rendered as figures rather than rows."""

    def __init__(self, title, description=""):
        Group.__init__(self, title, description=description)
        # html-anchor-friendly version of the title
        self._shortformTitle = title.replace(" ", "").lower()



================================================
FILE: armi/bookkeeping/report/reportInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This interface serves the reporting needs of ARMI.

If there is any information that a user desires to show in PDF form to others this is the place to do it.
""" import re from armi import interfaces, runLog from armi.bookkeeping import report from armi.bookkeeping.report import reportingUtils from armi.physics import neutronics from armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE from armi.reactor.flags import Flags from armi.utils import reportPlotting, units ORDER = interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.BOOKKEEPING def describeInterfaces(cs): """Function for exposing interface(s) to other code.""" if cs["genReports"]: return (ReportInterface, {}) return None class ReportInterface(interfaces.Interface): """An interface to manage the use of the report system.""" name = "report" reports = set() def __init__(self, r, cs): interfaces.Interface.__init__(self, r, cs) self.fuelCycleSummary = {"bocFissile": 0.0} def distributable(self): """Disables distributing of this report by broadcast MPI.""" return self.Distribute.SKIP def interactBOL(self): interfaces.Interface.interactBOL(self) runLog.important("Beginning of BOL Reports") reportingUtils.makeCoreAndAssemblyMaps(self.r, self.cs) reportingUtils.writeAssemblyMassSummary(self.r) if self.cs["summarizeAssemDesign"]: reportingUtils.summarizePinDesign(self.r.core) runLog.info(report.ALL[report.RUN_META]) def interactEveryNode(self, cycle, node): self.r.core.calcBlockMaxes() reportingUtils.summarizePowerPeaking(self.r.core) runLog.important("Cycle {}, node {} Summary: ".format(cycle, node)) runLog.important( " time= {0:8.2f} years, keff= {1:.12f} maxPD= {2:-8.2f} MW/m^2, maxBuI= {3:-8.4f} maxBuF= {4:8.4f}".format( self.r.p.time, self.r.core.p.keff, self.r.core.p.maxPD, self.r.core.p.maxBuI, self.r.core.p.maxBuF, ) ) if self.cs["plots"]: adjoint = self.cs[CONF_NEUTRONICS_TYPE] == neutronics.ADJREAL_CALC figName = self.cs.caseTitle + "_{0}_{1}".format(cycle, node) + ".mgFlux." 
+ self.cs["outputFileExtension"] if self.r.core.getFirstBlock(Flags.FUEL).p.mgFlux is not None: from armi.reactor import blocks blocks.Block.plotFlux(self.r.core, fName=figName, peak=True, adjoint=adjoint) else: runLog.warning("No mgFlux to plot in reports") def interactBOC(self, cycle=None): self.fuelCycleSummary["bocFissile"] = self.r.core.getTotalBlockParam("kgFis") def interactEOC(self, cycle=None): reportingUtils.writeCycleSummary(self.r.core) runLog.info(self.o.timer.report(inclusionCutoff=0.001)) def generateDesignReport(self, generateFullCoreMap, showBlockAxMesh): reportingUtils.makeCoreDesignReport(self.r.core, self.cs) reportingUtils.makeCoreAndAssemblyMaps(self.r, self.cs, generateFullCoreMap, showBlockAxMesh) reportingUtils.makeBlockDesignReport(self.r) def interactEOL(self): """Adds the data to the report, and generates it.""" b = self.r.core.getFirstBlock(Flags.FUEL) b.setAreaFractionsReport() dbi = self.o.getInterface("database") buGroups = self.cs["buGroups"] history = self.o.getInterface("history") reportPlotting.plotReactorPerformance( self.r, dbi, buGroups, extension=self.cs["outputFileExtension"], history=history, ) reportingUtils.setNeutronBalancesReport(self.r.core) self.writeRunSummary() self.o.timer.stopAll() # consider the run done runLog.info(self.o.timer.report(inclusionCutoff=0.001, totalTime=True)) _timelinePlot = self.o.timer.timeline(self.cs.caseTitle, 0.03, totalTime=True) runLog.info(self.printReports()) def printReports(self): """Report Interface Specific.""" str_ = "" for report_ in self.reports: str_ += re.sub("\n", "\n\t", "{}".format(report_)) return "---------- REPORTS BEGIN ----------\n" + str_ + "\n----------- REPORTS END -----------" def writeRunSummary(self): """Make a summary of the run.""" # spent fuel pool report if self.r.excore.get("sfp") is not None: self.reportSFP(self.r.excore["sfp"]) self.countAssembliesSFP(self.r.excore["sfp"]) @staticmethod def reportSFP(sfp): """A high-level summary of the Spent Fuel Pool.""" 
title = "SpentFuelPool Report" runLog.important("-" * len(title)) runLog.important(title) runLog.important("-" * len(title)) totFis = 0.0 for a in sfp: runLog.important( "{assembly:15s} discharged at t={dTime:10f} after {residence:10f} yrs. It entered at cycle: {cycle}. " "It has {fiss:10f} kg (x {mult}) fissile and peak BU={bu:.2f} %.".format( assembly=a, dTime=a.p.dischargeTime, residence=(a.p.dischargeTime - a.p.chargeTime), cycle=a.p.chargeCycle, fiss=a.getFissileMass(), bu=a.getMaxParam("percentBu"), mult=a.p.multiplicity, ) ) totFis += a.getFissileMass() * a.p.multiplicity / 1000 # convert to kg runLog.important("Total SFP fissile inventory of {0} is {1:.4E} MT".format(sfp, totFis / 1000.0)) @staticmethod def countAssembliesSFP(sfp): """Report on the count of assemblies in the SFP at each timestep.""" if not len(sfp): return runLog.important("Count:") totCount = 0 thisTimeCount = 0 a = sfp[0] lastTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime for a in sfp: thisTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime if thisTime != lastTime: runLog.important( "Number of assemblies moved at t={0:6.2f}: {1:04d}. Cumulative: {2:04d}".format( lastTime, thisTimeCount, totCount ) ) lastTime = thisTime thisTimeCount = 0 totCount += 1 # noqa: SIM113 thisTimeCount += 1 ================================================ FILE: armi/bookkeeping/report/reportingUtils.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """A collection of miscellaneous functions used by ReportInterface to generate various reports.""" import collections import os import pathlib import re import subprocess import sys import textwrap import time from copy import copy import numpy as np from armi import context, interfaces, runLog from armi.bookkeeping import report from armi.operators import RunTypes from armi.reactor.components import ComponentType from armi.reactor.flags import Flags from armi.utils import ( getFileSHA1Hash, iterables, plotting, tabulate, textProcessors, units, ) # Set to prevent the image and text from being too small to read. MAX_ASSEMS_PER_ASSEM_PLOT = 6 # String constants Operator_CaseTitle = "Case Title:" Operator_TypeOfRun = "Run Type:" Operator_NumProcessors = "Number of Processors:" Operator_WorkingDirectory = "Working Directory:" Operator_CurrentUser = "Current User:" Operator_PythonInterperter = "Python Interpreter:" Operator_PythonExecutable = "Python Executable:" Operator_ArmiCodebase = "ARMI Location:" Operator_MasterMachine = "Master Machine:" Operator_Date = "Date and Time:" Operator_CaseDescription = "Case Description:" def writeWelcomeHeaders(o, cs): """Write welcome information using the Operator and the Case Settings.""" def _writeCaseInformation(o, cs): """Create a table that contains basic case information.""" caseInfo = [ (Operator_CaseTitle, cs.caseTitle), ( Operator_CaseDescription, "{0}".format(textwrap.fill(cs["comment"], break_long_words=False)), ), ( Operator_TypeOfRun, "{} - {}".format(cs["runType"], o.__class__.__name__), ), (Operator_CurrentUser, context.USER), (Operator_ArmiCodebase, context.ROOT), (Operator_WorkingDirectory, os.getcwd()), (Operator_PythonInterperter, sys.version), (Operator_PythonExecutable, sys.executable), (Operator_MasterMachine, getNodeName()), (Operator_NumProcessors, context.MPI_SIZE), (Operator_Date, context.START_TIME), ] 
runLog.header("=========== Case Information ===========") runLog.info(tabulate.tabulate(caseInfo, tableFmt="armi")) def _listInputFiles(cs): """ Gathers information about the input files of this case. Returns ------- inputInfo : list (label, fileName, shaHash) tuples """ from armi.physics.neutronics.settings import CONF_LOADING_FILE pathToLoading = pathlib.Path(cs.inputDirectory) / cs[CONF_LOADING_FILE] if pathToLoading.is_file(): if pathToLoading.suffix.lower() in (".h5", ".hdf5"): # The blueprints are in a database, there aren't multiple included files includedBlueprints = [pathToLoading] else: includedBlueprints = [inclusion[0] for inclusion in textProcessors.findYamlInclusions(pathToLoading)] else: includedBlueprints = [] inputInfo = [] inputFiles = [ ( "Case Settings", os.path.basename(cs.path) if cs.path else cs.caseTitle + ".yaml", ), # This could be a YAML or an h5. ("Blueprints", cs[CONF_LOADING_FILE]), ] + [("Included blueprints", inclBp) for inclBp in includedBlueprints] activeInterfaces = interfaces.getActiveInterfaceInfo(cs) for klass, kwargs in activeInterfaces: if not kwargs.get("enabled", True): # Don't consider disabled interfaces continue interfaceFileNames = klass.specifyInputs(cs) for label, fileNames in interfaceFileNames.items(): for fName in fileNames: inputFiles.append((label, fName)) if cs["reloadDBName"] and cs["runType"] == RunTypes.SNAPSHOTS: inputFiles.append(("Database", cs["reloadDBName"])) for label, fName in inputFiles: shaHash = "MISSING" if (not fName or not os.path.exists(fName)) else getFileSHA1Hash(fName, digits=10) inputInfo.append((label, fName, shaHash)) # bonus: grab the files stored in the crossSectionControl section for xsID, xsSetting in cs["crossSectionControl"].items(): fNames = [] # Users shouldn't ever have both of these defined, but this is not the place # for code to fail if they do. Allow for both to not be None. 
if xsSetting.xsFileLocation is not None: # possibly a list of files if isinstance(xsSetting.xsFileLocation, list): fNames.extend(xsSetting.xsFileLocation) else: fNames.append(xsSetting.xsFileLocation) if xsSetting.fluxFileLocation is not None: # single file fNames.append(xsSetting.fluxFileLocation) for fName in fNames: label = f"crossSectionControl-{xsID}" if fName and os.path.exists(fName): shaHash = getFileSHA1Hash(os.path.abspath(fName), digits=10) inputInfo.append((label, fName, shaHash)) return inputInfo def _writeInputFileInformation(cs): """Create a table that contains basic input file information.""" inputFileData = [] for label, fileName, shaHash in _listInputFiles(cs): inputFileData.append((label, fileName, shaHash)) runLog.header("=========== Input File Information ===========") runLog.info( tabulate.tabulate( inputFileData, headers=["Input Type", "Path", "SHA-1 Hash"], tableFmt="armi", ) ) def _writeMachineInformation(): """Create a table that contains basic machine and rank information.""" processorNames = context.MPI_NODENAMES uniqueNames = set(processorNames) nodeMappingData = [] sysInfo = "" for uniqueName in uniqueNames: matchingProcs = [str(rank) for rank, procName in enumerate(processorNames) if procName == uniqueName] numProcessors = str(len(matchingProcs)) nodeMappingData.append((uniqueName, numProcessors, ", ".join(matchingProcs))) sysInfo += getSystemInfo() runLog.header("=========== Machine Information ===========") runLog.info( tabulate.tabulate( nodeMappingData, headers=["Machine", "Number of Processors", "Ranks"], tableFmt="armi", ) ) if sysInfo: runLog.header("=========== System Information ===========") runLog.info(sysInfo) def _writeReactorCycleInformation(o, cs): """Verify that all the operating parameters are defined for the same number of cycles.""" operatingData = [ ("Reactor Thermal Power (MW):", cs["power"] / units.WATTS_PER_MW), ("Number of Cycles:", cs["nCycles"]), ] operatingParams = { "Cycle Lengths:": o.cycleLengths, 
"Availability Factors:": o.availabilityFactors, "Power Fractions:": o.powerFractions, "Step Lengths (days):": o.stepLengths, } for name, param in operatingParams.items(): paramStr = [str(p) for p in param] operatingData.append((name, textwrap.fill(", ".join(paramStr)))) runLog.header("=========== Reactor Cycle Information ===========") runLog.info(tabulate.tabulate(operatingData, tableFmt="armi")) if context.MPI_RANK > 0: return # prevent the worker nodes from printing the same thing _writeCaseInformation(o, cs) _writeInputFileInformation(cs) _writeMachineInformation() _writeReactorCycleInformation(o, cs) def getNodeName(): """Get the name of this compute node. First, look in context.py. Then try various Linux tools. Then try Windows commands. Returns ------- str Compute node name. """ hostNames = [ context.MPI_NODENAME, context.MPI_NODENAMES[0], subprocess.run("hostname", capture_output=True, text=True, shell=True).stdout, subprocess.run("uname -n", capture_output=True, text=True, shell=True).stdout, os.environ.get("COMPUTERNAME", context.LOCAL), ] for nodeName in hostNames: if nodeName and nodeName != context.LOCAL: return nodeName return context.LOCAL def _getSystemInfoWindows(): """Get system information, assuming the system is Windows. Returns ------- str Basic system information: OS name, OS version, basic processor information Examples -------- Example results: OS Name: Microsoft Windows 10 Enterprise OS Version: 10.0.19041 N/A Build 19041 Processor(s): 1 Processor(s) Installed. [01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~801 Mhz """ cmd = ( 'systeminfo | findstr /B /C:"OS Name" /B /C:"OS Version" /B /C:"Processor" && systeminfo | findstr /E /C:"Mhz"' ) return subprocess.run(cmd, capture_output=True, text=True, shell=True).stdout def _getSystemInfoMac(): """Get system information, assuming the system is MacOS. 
Returns ------- str Basic system information: OS name, OS version, basic processor information Examples -------- Example results: System Software Overview: System Version: macOS 12.1 (21C52) Kernel Version: Darwin 21.2.0 ... Hardware Overview: Model Name: MacBook Pro ... """ cmd = "system_profiler SPSoftwareDataType SPHardwareDataType" return subprocess.check_output(cmd, shell=True).decode("utf-8") def _getSystemInfoLinux(): """Get system information, assuming the system is Linux. This method uses multiple, redundant variations on common Linux command utilities to get the information necessary. While it is not possible to guarantee what programs or files will be available on "all Linux operating system", this collection of tools is widely supported and should provide a reasonably broad-distribution coverage. Returns ------- str Basic system information: OS name, OS version, basic processor information Examples -------- Example results: OS Info: Ubuntu 22.04.3 LTS Processor(s): processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 126 model name : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz ... 
""" # get OS name / version linuxOsCommands = [ 'cat /etc/os-release | grep "^PRETTY_NAME=" | cut -d = -f 2', "uname -a", "lsb_release -d | cut -d : -f 2", 'hostnamectl | grep "Operating System" | cut -d : -f 2', ] osInfo = "" for cmd in linuxOsCommands: osInfo = subprocess.run(cmd, capture_output=True, text=True, shell=True).stdout.strip() if osInfo: break if not osInfo: runLog.warning("Linux OS information not found.") return "" # get processor information linuxProcCommands = ["lscpu", "cat /proc/cpuinfo", "lshw -class CPU"] procInfo = "" for cmd in linuxProcCommands: procInfo = subprocess.run(cmd, capture_output=True, text=True, shell=True).stdout if procInfo: break if not procInfo: runLog.warning("Linux processor information not found.") return "" # build output string out = "OS Info: " out += osInfo.strip() out += "\nProcessor(s):\n " out += procInfo.strip().replace("\n", "\n ") out += "\n" return out def getSystemInfo(): """Get system information, assuming the system is Linux, MacOS, and Windows. Notes ----- The format of the system information will be different on Linux, MacOS, and Windows. Returns ------- str Basic system information: OS name, OS version, basic processor information """ # Get basic system information (on Linux, MacOS, and Windows) if "darwin" in sys.platform: return _getSystemInfoMac() elif "win" in sys.platform: return _getSystemInfoWindows() elif "linux" in sys.platform: return _getSystemInfoLinux() else: runLog.warning( f"Cannot get system information for {sys.platform} because ARMI only " + "supports Linux, MacOS, and Windows." 
) return "" def getInterfaceStackSummary(o): data = [] for ii, i in enumerate(o.interfaces, start=1): data.append( ( "{:02d}".format(ii), i.__class__.__name__.replace("Interface", ""), i.name, i.purpose, "Yes" if i.enabled() else "No", "Reversed" if i.reverseAtEOL else "Normal", "Yes" if i.bolForce() else "No", ) ) text = tabulate.tabulate( data, headers=( "Index", "Type", "Name", "Purpose", "Enabled", "EOL order", "BOL forced", ), tableFmt="armi", ) text = text return text def writeTightCouplingConvergenceSummary(convergenceSummary): runLog.info("Tight Coupling Convergence Summary") runLog.info(tabulate.tabulate(convergenceSummary, headers="keys", showIndex=True, tableFmt="armi")) def writeAssemblyMassSummary(r): """Print out things like Assembly weights to the runLog. Parameters ---------- r : armi.reactor.reactors.Reactor """ massSum = [] for a in r.blueprints.assemblies.values(): mass = 0.0 hmMass = 0.0 fissileMass = 0.0 coolantMass = 0.0 # to calculate wet vs. dry weight. types = [] for b in a: # get masses in kg # skip stationary blocks (grid plate doesn't count) if b.hasFlags(Flags.GRID_PLATE): continue mass += b.getMass() / 1000.0 hmMass += b.getHMMass() / 1000.0 fissileMass += b.getFissileMass() / 1000.0 coolants = b.getComponents(Flags.COOLANT, exact=True) + b.getComponents(Flags.INTERCOOLANT, exact=True) coolantMass += sum(coolant.getMass() for coolant in coolants) / 1000.0 blockType = b.getType() if blockType not in types: types.append(blockType) # If the BOL fuel assem is in the center of the core, its area is 1/3 of the full area b/c # its a sliced assem. # count assemblies core = r.core thisTypeList = core.getChildrenOfType(a.getType()) count = 0 for t in thisTypeList: ring, _pos = t.spatialLocator.getRingPos() if ring == 1: # only count center location once. count += 1 else: # add 3 if it's 1/3 core, etc. 
                count += core.powerMultiplier

        # Get the dominant materials
        pinMaterialKey = "pinMaterial"
        pinMaterialObj = a.getDominantMaterial([Flags.FUEL, Flags.CONTROL])
        if pinMaterialObj is None:
            # no fuel/control material; fall back to the overall dominant material
            pinMaterialObj = a.getDominantMaterial()
            pinMaterialKey = "dominantMaterial"
            pinMaterial = pinMaterialObj.name
        else:
            pinMaterial = pinMaterialObj.name
        struct = a.getDominantMaterial([Flags.CLAD, Flags.DUCT, Flags.SHIELD])
        if struct:
            structuralMaterial = struct.name
        else:
            structuralMaterial = "[None]"
        cool = a.getDominantMaterial([Flags.COOLANT])
        if cool:
            coolantMaterial = cool.name
        else:
            coolantMaterial = "[None]"

        # Get pins per assembly: first block type with a nonzero pin count wins
        pinsPerAssembly = 0
        for candidate in (Flags.FUEL, Flags.CONTROL, Flags.SHIELD):
            b = a.getFirstBlock(candidate)
            if b:
                pinsPerAssembly = b.getNumPins()
            if pinsPerAssembly:
                break

        massSum.append(
            {
                "type": a.getType(),
                "wetMass": mass,
                "hmMass": hmMass,
                "fissileMass": fissileMass,
                "dryMass": mass - coolantMass,
                "count": count,
                "components": types,
                pinMaterialKey: pinMaterial,
                "structuralMaterial": structuralMaterial,
                "coolantMaterial": coolantMaterial,
                "pinsPerAssembly": pinsPerAssembly,
            }
        )
    runLog.important(_makeBOLAssemblyMassSummary(massSum))
    runLog.important(_makeTotalAssemblyMassSummary(massSum))


def _makeBOLAssemblyMassSummary(massSum):
    # Build a fixed-width (25-char columns) text table: one column per assembly type,
    # one row per mass label, then up to 10 rows of block-type names.
    str_ = ["--- BOL Assembly Mass Summary (kg) ---"]
    dataLabels = ["wetMass", "dryMass", "fissileMass", "hmMass", "count"]
    # print header for the printout of each assembly type
    str_.append(" " * 12 + "".join(["{0:25s}".format(s["type"]) for s in massSum]))
    for val in dataLabels:
        line = ""
        for s in massSum:
            line += "{0:<25.3f}".format(s[val])
        str_.append("{0:12s}{1}".format(val, line))

    # print blocks in this assembly up to 10
    for i in range(10):
        line = " " * 12
        for s in massSum:
            try:
                line += "{0:25s}".format(s["components"][i])
            except IndexError:
                # this assembly type has fewer block types; pad the column
                line += " " * 25
        if re.search(r"\S", line):  # \S matches any non-whitespace character.
            str_.append(line)
    return "\n".join(str_)


def _makeTotalAssemblyMassSummary(massSum):
    # Sum each mass label over all assembly types, weighted by assembly count.
    massLabels = ["wetMass", "dryMass", "fissileMass", "hmMass"]
    totals = {}
    count = 0
    str_ = ["--Totals--"]
    for label in massLabels:
        totals[label] = 0.0
        for assemSum in massSum:
            totals[label] += assemSum[label] * assemSum["count"]
            count += assemSum["count"]
        str_.append("{0:12s} {1:.2f} MT".format(label, totals[label] / 1000.0))
    # count was accumulated once per mass label, hence the division
    str_.append("Total assembly count: {0}".format(count // len(massLabels)))
    return "\n".join(str_)


def writeCycleSummary(core):
    """Prints a cycle summary to the runLog.

    Parameters
    ----------
    core: armi.reactor.reactors.Core
    cs: armi.settings.caseSettings.Settings
    """
    # Would io be worth considering for this?
    cycle = core.r.p.cycle
    str_ = []
    runLog.important("Cycle {0} Summary:".format(cycle))
    avgBu = core.calcAvgParam("percentBu", typeSpec=Flags.FUEL, generationNum=2)
    str_.append("Core Average Burnup: {0}".format(avgBu))
    str_.append("End of Cycle {0:02d}. Timestamp: {1} ".format(cycle, time.ctime()))
    runLog.info("\n".join(str_))


def setNeutronBalancesReport(core):
    """Determines the various neutron balances over the full core.

    Parameters
    ----------
    core : armi.reactor.reactors.Core
    """
    if not core.getFirstBlock().p.rateCap:
        runLog.warning(
            "No rate information (rateCap, rateAbs, etc.) available on the blocks. Skipping balance summary."
) return cap = core.calcAvgParam("rateCap", volumeAveraged=False, generationNum=2) absorb = core.calcAvgParam("rateAbs", volumeAveraged=False, generationNum=2) fis = core.calcAvgParam("rateFis", volumeAveraged=False, generationNum=2) n2nProd = core.calcAvgParam("rateProdN2n", volumeAveraged=False, generationNum=2) fisProd = core.calcAvgParam("rateProdFis", volumeAveraged=False, generationNum=2) leak = n2nProd + fisProd - absorb report.setData( "Fission", "{0:.5e} ({1:.2%})".format(fisProd, fisProd / (fisProd + n2nProd)), report.NEUT_PROD, ) report.setData( "n, 2n", "{0:.5e} ({1:.2%})".format(n2nProd, n2nProd / (fisProd + n2nProd)), report.NEUT_PROD, ) report.setData( "Capture", "{0:.5e} ({1:.2%})".format(cap, cap / (absorb + leak)), report.NEUT_LOSS, ) report.setData( "Fission", "{0:.5e} ({1:.2%})".format(fis, fis / (absorb + leak)), report.NEUT_LOSS, ) report.setData( "Absorption", "{0:.5e} ({1:.2%})".format(absorb, absorb / (absorb + leak)), report.NEUT_LOSS, ) report.setData( "Leakage", "{0:.5e} ({1:.2%})".format(leak, leak / (absorb + leak)), report.NEUT_LOSS, ) runLog.info(report.ALL[report.NEUT_PROD]) runLog.info(report.ALL[report.NEUT_LOSS]) def summarizePinDesign(core): """Prints out some information about the pin assembly/duct design. Handles multiple types of dimensions simplistically by taking the average. 
Parameters ---------- core : armi.reactor.reactors.Core """ designInfo = collections.defaultdict(list) try: for b in core.iterBlocks(Flags.FUEL): fuel = b.getComponent(Flags.FUEL) duct = b.getComponent(Flags.DUCT) clad = b.getComponent(Flags.CLAD) wire = b.getComponent(Flags.WIRE) designInfo["hot sd"].append(b.getSmearDensity(cold=False)) designInfo["sd"].append(b.getSmearDensity()) designInfo["ductThick"].append( (duct.getDimension("op") - duct.getDimension("ip")) * 5.0 ) # convert to mm and divide by 2 designInfo["cladThick"].append((clad.getDimension("od") - clad.getDimension("id")) * 5.0) pinOD = clad.getDimension("od") * 10.0 wireOD = wire.getDimension("od") * 10.0 pitch = pinOD + wireOD # pitch has half a wire on each side. assemPitch = b.getPitch() * 10 # convert cm to mm. designInfo["pinOD"].append(pinOD) designInfo["wireOD"].append(wireOD) designInfo["pin pitch"].append(pitch) pinToDuctGap = b.getPinToDuctGap() if pinToDuctGap is not None: designInfo["pinToDuct"].append(b.getPinToDuctGap() * 10.0) designInfo["assemPitch"].append(assemPitch) designInfo["duct gap"].append(assemPitch - duct.getDimension("op") * 10.0) designInfo["nPins"].append(b.p.nPins) designInfo["zrFrac"].append(fuel.getMassFrac("ZR")) # assumption made that all lists contain only numerical data designInfo = {key: np.average(data) for key, data in designInfo.items()} dimensionless = {"sd", "hot sd", "zrFrac", "nPins"} for key, average_value in designInfo.items(): dim = "{0:10s}".format(key) val = "{0:.4f}".format(average_value) if key not in dimensionless: val += " mm" report.setData(dim, val, report.PIN_ASSEM_DESIGN) a = core.refAssem report.setData( "Fuel Height (cm):", "{0:.2f}".format(a.getHeight(Flags.FUEL)), report.PIN_ASSEM_DESIGN, ) report.setData( "Plenum Height (cm):", "{0:.2f}".format(a.getHeight(Flags.PLENUM)), report.PIN_ASSEM_DESIGN, ) runLog.info(report.ALL[report.PIN_ASSEM_DESIGN]) first_fuel_block = core.getFirstBlock(Flags.FUEL) runLog.info("Design & component information 
for first fuel block {}".format(first_fuel_block)) runLog.info(first_fuel_block.setAreaFractionsReport()) for component_ in sorted(first_fuel_block): runLog.info(component_.setDimensionReport()) except Exception as error: runLog.warning("Pin summarization failed to work") runLog.warning(error) def summarizePowerPeaking(core): """Prints reactor Fz, Fxy, Fq. Parameters ---------- core : armi.reactor.reactors.Core """ # Fz is the axial peaking of the highest power assembly _maxPow, maxPowBlock = core.getMaxParam("power", returnObj=True, generationNum=2) maxPowAssem = maxPowBlock.parent avgPDens = maxPowAssem.calcAvgParam("pdens") peakPDens = maxPowAssem.getMaxParam("pdens") if not avgPDens: # protect against divide-by-zero. Peaking doesn't make sense if there is no power return axPeakF = peakPDens / avgPDens # Fxy is the radial peaking factor, looking at ALL assemblies with axially integrated powers. power = 0.0 n = 0 for n, a in enumerate(core): power += a.calcTotalParam("power", typeSpec=Flags.FUEL) avgPow = power / (n + 1) radPeakF = maxPowAssem.calcTotalParam("power", typeSpec=Flags.FUEL) / avgPow runLog.important( "Power Peaking: Fz= {0:.3f} Fxy= {1:.3f} Fq= {2:.3f}".format(axPeakF, radPeakF, axPeakF * radPeakF) ) def makeCoreDesignReport(core, cs): """Builds report to summarize core design inputs. 
Parameters ---------- core: armi.reactor.reactors.Core cs: armi.settings.caseSettings.Settings """ coreDesignTable = report.data.Table("SUMMARY OF CORE: {}".format(cs.caseTitle.upper())) coreDesignTable.header = ["", "Input Parameter"] # Change the ordering of the core design table in the report relative to the other data report.data.Report.groupsOrderFirst.insert(0, coreDesignTable) report.data.Report.componentWellGroups.insert(0, coreDesignTable) _setGeneralCoreDesignData(cs, coreDesignTable) _setGeneralCoreParametersData(core, cs, coreDesignTable) _setGeneralSimulationData(core, cs, coreDesignTable) def _setGeneralCoreDesignData(cs, coreDesignTable): from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC from armi.physics.neutronics.settings import CONF_LOADING_FILE report.setData("Case Title", "{}".format(cs.caseTitle), coreDesignTable, report.DESIGN) report.setData("Run Type", "{}".format(cs["runType"]), coreDesignTable, report.DESIGN) report.setData( "Loading File", "{}".format(cs[CONF_LOADING_FILE]), coreDesignTable, report.DESIGN, ) report.setData( "Fuel Shuffling Logic File", "{}".format(cs[CONF_SHUFFLE_LOGIC]), coreDesignTable, report.DESIGN, ) report.setData( "Reactor State Loading", "{}".format(cs["loadStyle"]), coreDesignTable, report.DESIGN, ) if cs["loadStyle"] == "fromDB": report.setData( "Database File", "{}".format(cs["reloadDBName"]), coreDesignTable, report.DESIGN, ) report.setData( "Starting Cycle", "{}".format(cs["startCycle"]), coreDesignTable, report.DESIGN, ) report.setData( "Starting Node", "{}".format(cs["startNode"]), coreDesignTable, report.DESIGN, ) def _setGeneralCoreParametersData(core, cs, coreDesignTable): blocks = core.getBlocks() totalMass = sum(b.getMass() for b in blocks) fissileMass = sum(b.getFissileMass() for b in blocks) heavyMetalMass = sum(b.getHMMass() for b in blocks) totalVolume = sum(b.getVolume() for b in blocks) report.setData(" ", "", coreDesignTable, report.DESIGN) report.setData( "Core Power", "{:.2f} 
MWth".format(cs["power"] / units.WATTS_PER_MW), coreDesignTable, report.DESIGN, ) report.setData( "Base Capacity Factor", "{}".format(cs["availabilityFactor"]), coreDesignTable, report.DESIGN, ) # note this doesn't consider availabilityFactors report.setData( "Cycle Length", "{} days".format(cs["cycleLength"]), coreDesignTable, report.DESIGN, ) # note this doesn't consider cycleLengths report.setData("Burnup Cycles", "{}".format(cs["nCycles"]), coreDesignTable, report.DESIGN) report.setData( "Burnup Steps per Cycle", "{}".format(cs["burnSteps"]), coreDesignTable, report.DESIGN, ) # note this doesn't consider the detailed cycle input option corePowerMult = int(core.powerMultiplier) report.setData( "Core Total Volume", "{:.2f} cc".format(totalVolume * corePowerMult), coreDesignTable, report.DESIGN, ) report.setData( "Core Fissile Mass", "{:.2f} kg".format(fissileMass / units.G_PER_KG * corePowerMult), coreDesignTable, report.DESIGN, ) report.setData( "Core Heavy Metal Mass", "{:.2f} kg".format(heavyMetalMass / units.G_PER_KG * corePowerMult), coreDesignTable, report.DESIGN, ) report.setData( "Core Total Mass", "{:.2f} kg".format(totalMass / units.G_PER_KG * corePowerMult), coreDesignTable, report.DESIGN, ) report.setData( "Number of Assembly Rings", "{}".format(core.getNumRings()), coreDesignTable, report.DESIGN, ) report.setData( "Number of Assemblies", "{}".format(len(core.getAssemblies() * corePowerMult)), coreDesignTable, report.DESIGN, ) report.setData( "Number of Fuel Assemblies", "{}".format(len(core.getAssemblies(Flags.FUEL) * corePowerMult)), coreDesignTable, report.DESIGN, ) report.setData( "Number of Control Assemblies", "{}".format(len(core.getAssemblies(Flags.CONTROL) * corePowerMult)), coreDesignTable, report.DESIGN, ) report.setData( "Number of Reflector Assemblies", "{}".format(len(core.getAssemblies(Flags.REFLECTOR) * corePowerMult)), coreDesignTable, report.DESIGN, ) report.setData( "Number of Shield Assemblies", 
"{}".format(len(core.getAssemblies(Flags.SHIELD) * corePowerMult)), coreDesignTable, report.DESIGN, ) def _setGeneralSimulationData(core, cs, coreDesignTable): from armi.physics.neutronics.settings import CONF_GEN_XS, CONF_GLOBAL_FLUX_ACTIVE report.setData(" ", "", coreDesignTable, report.DESIGN) report.setData("Full Core Model", "{}".format(core.isFullCore), coreDesignTable, report.DESIGN) report.setData( "Tight Physics Coupling Enabled", "{}".format(bool(cs["tightCoupling"])), coreDesignTable, report.DESIGN, ) report.setData( "Lattice Physics Enabled for", "{}".format(cs[CONF_GEN_XS]), coreDesignTable, report.DESIGN, ) report.setData( "Neutronics Enabled for", "{}".format(cs[CONF_GLOBAL_FLUX_ACTIVE]), coreDesignTable, report.DESIGN, ) def makeBlockDesignReport(r): """Summarize the block designs from the loading file. Parameters ---------- r : armi.reactor.reactors.Reactor """ for bDesign in r.blueprints.blockDesigns: loadingFileTable = report.data.Table("SUMMARY OF BLOCK: {}".format(bDesign.name)) loadingFileTable.header = ["", "Input Parameter"] # Change the ordering of the loading file table in the report relative to the other data report.data.Report.groupsOrderFirst.append(loadingFileTable) report.data.Report.componentWellGroups.append(loadingFileTable) report.setData("Number of Components", [len(bDesign)], loadingFileTable, report.DESIGN) for i, cDesign in enumerate(bDesign): cType = cDesign.name componentSplitter = (i + 1) * " " + "\n" report.setData(componentSplitter, [""], loadingFileTable, report.DESIGN) dimensions = _getComponentInputDimensions(cDesign) for label, values in dimensions.items(): value, unit = values report.setData( "{} {}".format(cType, label), "{} {}".format(value, unit), loadingFileTable, report.DESIGN, ) def _getComponentInputDimensions(cDesign): """Get the input dimensions of a component and place them in a dictionary with labels and units.""" dims = collections.OrderedDict() dims["Shape"] = (cDesign.shape, "") dims["Material"] = 
(cDesign.material, "") dims["Cold Temperature"] = (cDesign.Tinput, "C") dims["Hot Temperature"] = (cDesign.Thot, "C") if cDesign.isotopics is not None: dims["Custom Isotopics"] = (cDesign.isotopics, "") for dimName in ComponentType.TYPES[cDesign.shape.lower()].DIMENSION_NAMES: value = getattr(cDesign, dimName) if value is not None: # if not default, add it to the report dims[dimName] = (getattr(cDesign, dimName).value, "cm") return dims def makeCoreAndAssemblyMaps(r, cs, generateFullCoreMap=False, showBlockAxMesh=True): """Create core and assembly design plots. Parameters ---------- r : armi.reactor.reactors.Reactor cs: armi.settings.caseSettings.Settings generateFullCoreMap : bool, default False showBlockAxMesh : bool, default True """ assems = [] blueprints = r.blueprints for aKey in blueprints.assemDesigns.keys(): a = blueprints.constructAssem(cs, name=aKey) # since we will be plotting cold input heights, we need to make sure that # that these new assemblies have access to a blueprints somewhere up the # composite chain. normally this would happen through an assembly's parent # reactor, but because these newly created assemblies are in the load queue, # they will not have a parent reactor. to get around this, we just attach # the blueprints to the assembly directly. 
a.blueprints = blueprints assems.append(a) core = r.core for plotNum, assemBatch in enumerate(iterables.chunk(assems, MAX_ASSEMS_PER_ASSEM_PLOT), start=1): assemPlotImage = copy(report.ASSEM_TYPES) assemPlotImage.title = assemPlotImage.title + " ({})".format(plotNum) report.data.Report.groupsOrderFirst.insert(-1, assemPlotImage) report.data.Report.componentWellGroups.insert(-1, assemPlotImage) assemPlotName = os.path.abspath(f"{core.name}AssemblyTypes{plotNum}.png") plotting.plotAssemblyTypes( assemBatch, assemPlotName, maxAssems=MAX_ASSEMS_PER_ASSEM_PLOT, showBlockAxMesh=showBlockAxMesh, hot=False, ) # Create radial core map if generateFullCoreMap: core.growToFullCore(cs) counts = { assemDesign.name: len(core.getChildrenOfType(assemDesign.name)) for assemDesign in r.blueprints.assemDesigns } # assemDesigns.keys is ordered based on input, assemOrder only contains types that are in the core assemOrder = [aType for aType in r.blueprints.assemDesigns.keys() if counts[aType] > 0] data = [assemOrder.index(a.p.type) for a in core] labels = [r.blueprints.assemDesigns[a.p.type].specifier for a in core] legendMap = [ ( ai, assemDesign.specifier, "{} ({})".format(assemDesign.name, counts[assemDesign.name]), ) for ai, assemDesign in enumerate(r.blueprints.assemDesigns) if counts[assemDesign.name] > 0 ] fName = "".join([cs.caseTitle, "RadialCoreMap.", cs["outputFileExtension"]]) plotting.plotFaceMap( core, title="{} Radial Core Map".format(cs.caseTitle), fName=fName, cmapName="RdYlBu", data=data, labels=labels, legendMap=legendMap, axisEqual=True, bare=True, titleSize=10, fontSize=8, ) report.setData("Radial Core Map", os.path.abspath(fName), report.FACE_MAP, report.DESIGN) COMPONENT_INFO = "Component Information" ================================================ FILE: armi/bookkeeping/report/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/bookkeeping/report/tests/test_report.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Really basic tests of the report Utils.""" import logging import os import subprocess import sys import unittest from glob import glob from unittest.mock import patch from armi import runLog, settings from armi.bookkeeping import report from armi.bookkeeping.report import data, reportInterface from armi.bookkeeping.report.reportingUtils import ( _getSystemInfoLinux, _getSystemInfoMac, _getSystemInfoWindows, getNodeName, getSystemInfo, makeBlockDesignReport, makeCoreDesignReport, setNeutronBalancesReport, summarizePinDesign, summarizePowerPeaking, writeAssemblyMassSummary, writeCycleSummary, writeWelcomeHeaders, ) from armi.testing import loadTestReactor from armi.tests import mockRunLogs from armi.utils.directoryChangers import TemporaryDirectoryChanger class _MockReturnResult: """Mocking the subprocess.run() return object.""" def __init__(self, stdout): self.stdout = stdout class TestReportingUtils(unittest.TestCase): def test_getSystemInfoLinux(self): """Test _getSystemInfoLinux() on any operating system, by mocking the system calls.""" osInfo = '"Ubuntu 22.04.3 LTS"' procInfo = """processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 126 model name : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz ... 
""" correctResult = """OS Info: "Ubuntu 22.04.3 LTS" Processor(s): processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 126 model name : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz ...""" def __mockSubprocessRun(*args, **kwargs): if "os-release" in args[0]: return _MockReturnResult(osInfo) else: return _MockReturnResult(procInfo) with patch.object(subprocess, "run", side_effect=__mockSubprocessRun): out = _getSystemInfoLinux() self.assertEqual(out.strip(), correctResult) @patch("subprocess.run") def test_getSystemInfoWindows(self, mockSubprocess): """Test _getSystemInfoWindows() on any operating system, by mocking the system call.""" windowsResult = """OS Name: Microsoft Windows 10 Enterprise OS Version: 10.0.19041 N/A Build 19041 Processor(s): 1 Processor(s) Installed. [01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~801 Mhz""" mockSubprocess.return_value = _MockReturnResult(windowsResult) out = _getSystemInfoWindows() self.assertEqual(out, windowsResult) @patch("subprocess.run") def test_getSystemInfoMac(self, mockSubprocess): """Test _getSystemInfoMac() on any operating system, by mocking the system call.""" macResult = b"""System Software Overview: System Version: macOS 12.1 (21C52) Kernel Version: Darwin 21.2.0 ... Hardware Overview: Model Name: MacBook Pro ...""" mockSubprocess.return_value = _MockReturnResult(macResult) out = _getSystemInfoMac() self.assertEqual(out, macResult.decode("utf-8")) def test_getSystemInfo(self): """Basic sanity check of getSystemInfo() running in the wild. This test should pass if it is run on Window or mainstream Linux distros. But we expect this to fail if the test is run on some other OS. 
""" if "darwin" in sys.platform: # too complicated to test MacOS in this method return out = getSystemInfo() substrings = ["OS ", "Processor(s):"] for sstr in substrings: self.assertIn(sstr, out) self.assertGreater(len(out), sum(len(sstr) + 5 for sstr in substrings)) def test_getNodeName(self): """Test that the getNodeName() method returns a non-empty string. It is hard to know what string SHOULD be return here, and it would depend on how the OS is set up on your machine or cluster. But this simple test needs to pass as-is on Windows and Linux. """ self.assertGreater(len(getNodeName()), 0) class TestReport(unittest.TestCase): def setUp(self): self.test_group = data.Table(settings.Settings(), "banana") def test_setData(self): report.setData("banana_1", ["sundae", "plain"]) report.setData("banana_2", ["sundae", "vanilla"], self.test_group) report.setData("banana_3", ["sundae", "chocolate"], self.test_group, [report.ALL]) with self.assertRaises(AttributeError): report.setData("banana_4", ["sundae", "strawberry"], "no_workie", [report.ALL]) with self.assertRaises(AttributeError): report.setData("banana_5", ["sundae", "peanut_butter"], self.test_group, "no_workie") ungroup_instance = report.ALL[report.UNGROUPED] self.assertEqual(ungroup_instance["banana_1"], ["sundae", "plain"]) filled_instance = report.ALL[self.test_group] self.assertEqual(filled_instance["banana_2"], ["sundae", "vanilla"]) self.assertEqual(filled_instance["banana_3"], ["sundae", "chocolate"]) def test_getData(self): # test the null case self.assertIsNone(self.test_group["fake"]) # insert some data self.test_group["banana_1"] = ["sundae", "plain"] # validate we can pull that data back out again data = self.test_group["banana_1"] self.assertEqual(len(data), 2) self.assertIn("sundae", data) self.assertIn("plain", data) def test_reactorSpecificReporting(self): """Test a number of reporting utils that require reactor/core information.""" o, r = 
loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") # make sure makeCoreDesignReport() doesn't fail, though it won't generate an output here makeCoreDesignReport(r.core, o.cs) self.assertEqual(len(glob("*.html")), 0) with mockRunLogs.BufferLog() as mock: # we should start with a clean slate self.assertEqual("", mock.getStdout()) runLog.LOG.startLog("test_reactorSpecificReporting") runLog.LOG.setVerbosity(logging.INFO) writeAssemblyMassSummary(r) self.assertIn("BOL Assembly Mass Summary", mock.getStdout()) self.assertIn("igniter fuel", mock.getStdout()) mock.emptyStdout() setNeutronBalancesReport(r.core) self.assertIn("No rate information", mock.getStdout()) mock.emptyStdout() r.core.getFirstBlock().p.rateCap = 1.0 r.core.getFirstBlock().p.rateProdFis = 1.02 r.core.getFirstBlock().p.rateFis = 1.01 r.core.getFirstBlock().p.rateAbs = 1.0 setNeutronBalancesReport(r.core) self.assertIn("Fission", mock.getStdout()) self.assertIn("Capture", mock.getStdout()) self.assertIn("Absorption", mock.getStdout()) self.assertIn("Leakage", mock.getStdout()) mock.emptyStdout() summarizePinDesign(r.core) self.assertIn("Assembly Design Summary", mock.getStdout()) self.assertIn("Design & component information", mock.getStdout()) self.assertIn("Multiplicity", mock.getStdout()) mock.emptyStdout() writeCycleSummary(r.core) self.assertIn("Core Average", mock.getStdout()) self.assertIn("End of Cycle", mock.getStdout()) mock.emptyStdout() # this report won't do much for the test reactor - improve test reactor makeBlockDesignReport(r) self.assertEqual(len(mock.getStdout()), 0) mock.emptyStdout() # this report won't do much for the test reactor - improve test reactor summarizePowerPeaking(r.core) self.assertEqual(len(mock.getStdout()), 0) def test_writeWelcomeHeaders(self): o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") # grab this file path randoFile = os.path.abspath(__file__) # pass that random file into the settings 
o.cs["crossSectionControl"]["DA"].xsFileLocation = randoFile o.cs["crossSectionControl"]["DA"].fluxFileLocation = randoFile with mockRunLogs.BufferLog() as mock: # we should start with a clean slate self.assertEqual("", mock.getStdout()) runLog.LOG.startLog("test_writeWelcomeHeaders") runLog.LOG.setVerbosity(logging.INFO) writeWelcomeHeaders(o, o.cs) # assert our random file (and a lot of other stuff) is in the welcome self.assertIn("Case Info", mock.getStdout()) self.assertIn("Input File Info", mock.getStdout()) self.assertIn("crossSectionControl-DA", mock.getStdout()) self.assertIn("Python Executable", mock.getStdout()) self.assertIn(randoFile, mock.getStdout()) class TestReportInterface(unittest.TestCase): @classmethod def setUpClass(cls): cls.td = TemporaryDirectoryChanger() cls.td.__enter__() @classmethod def tearDownClass(cls): cls.td.__exit__(None, None, None) def test_printReports(self): """Testing printReports method.""" repInt = reportInterface.ReportInterface(None, None) rep = repInt.printReports() self.assertIn("REPORTS BEGIN", rep) self.assertIn("REPORTS END", rep) def test_distributableReportInt(self): repInt = reportInterface.ReportInterface(None, None) self.assertEqual(repInt.distributable(), 4) def test_interactBOLReportInt(self): o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") repInt = reportInterface.ReportInterface(r, o.cs) with mockRunLogs.BufferLog() as mock: repInt.interactBOL() self.assertIn("Writing assem layout", mock.getStdout()) self.assertIn("BOL Assembly", mock.getStdout()) self.assertIn("wetMass", mock.getStdout()) def test_interactEveryNode(self): o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") repInt = reportInterface.ReportInterface(r, o.cs) with mockRunLogs.BufferLog() as mock: repInt.interactEveryNode(0, 0) self.assertIn("Cycle 0", mock.getStdout()) self.assertIn("node 0", mock.getStdout()) self.assertIn("keff=", mock.getStdout()) def test_interactBOC(self): o, r = 
loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") repInt = reportInterface.ReportInterface(r, o.cs) self.assertEqual(repInt.fuelCycleSummary["bocFissile"], 0.0) repInt.interactBOC(1) self.assertAlmostEqual(repInt.fuelCycleSummary["bocFissile"], 4.290603409612653) def test_interactEOC(self): o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") repInt = reportInterface.ReportInterface(r, o.cs) with mockRunLogs.BufferLog() as mock: repInt.interactEOC(0) self.assertIn("Cycle 0", mock.getStdout()) self.assertIn("TIMER REPORTS", mock.getStdout()) def test_interactEOL(self): o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") repInt = reportInterface.ReportInterface(r, o.cs) with mockRunLogs.BufferLog() as mock: repInt.interactEOL() self.assertIn("Comprehensive Core Report", mock.getStdout()) self.assertIn("Assembly Area Fractions", mock.getStdout()) ================================================ FILE: armi/bookkeeping/snapshotInterface.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Controls points during a calculation where snapshots will be triggered, signaling more detailed treatments. Snapshots are user-defined cycle/timenode points where something special is to be done. 
What in particular is done is dependent on the case settings and the collection of active plugins * At the very basic level, third-party code input files are dumped out and stored in special snapshot folders at these times. This can be useful when you are sharing third-party input files with another party (e.g. for review or collaboration). * You may want to run extra long-running physics simulations only at a few time points (e.g. BOL, EOL). This is useful for detailed transient analysis, or other follow-on analysis. Snapshots can be requested through the settings: ``dumpSnapshot`` and/or ``defaultSnapshots``. """ from armi import interfaces, operators, runLog from armi.utils import getStepLengths ORDER = interfaces.STACK_ORDER.POSTPROCESSING def describeInterfaces(cs): """Function for exposing interface(s) to other code.""" return (SnapshotInterface, {}) class SnapshotInterface(interfaces.Interface): """ Snapshot managerial interface. .. impl:: Save extra data to be saved from a run, at specified time nodes. :id: I_ARMI_SNAPSHOT0 :implements: R_ARMI_SNAPSHOT This is a special :py:class:`Interface <armi.interfaces.Interface>` that is designed to run along all the other Interfaces during a simulation, to save off important or helpful data. By default, this is designed to be used with the ``"defaultSnapshots"`` and ``""dumpSnapshot""`` settings. These settings were added so users can control if snapshot data will be recorded during their run. Broadly, this class is implemented to run the Operator method :py:meth:`o.snapshotRequest <armi.operators.Operator.snapshotRequest>`. 
""" name = "snapshot" def interactBOL(self): """Active the default snapshots at BOL.""" interfaces.Interface.interactBOL(self) if self.cs["defaultSnapshots"]: self.activateDefaultSnapshots() def interactEveryNode(self, cycle, node): """Call the snapshot interface to copy files at each node, if requested.""" snapText = getCycleNodeStamp(cycle, node) # CCCNNN if self.cs["dumpSnapshot"] and snapText in self.cs["dumpSnapshot"]: self.o.snapshotRequest(cycle, node) def interactCoupled(self, iteration): """Call the snapshot interface to copy files for coupled iterations, if requested.""" snapText = getCycleNodeStamp(self.r.p.cycle, self.r.p.timeNode) # CCCNNN if self.cs["dumpSnapshot"] and snapText in self.cs["dumpSnapshot"]: self.o.snapshotRequest(self.r.p.cycle, self.r.p.timeNode, iteration) def activateDefaultSnapshots(self): """Figure out and assign some default snapshots (BOL, MOL, EOL).""" if self.cs["runType"] == operators.RunTypes.EQUILIBRIUM: snapTimeCycleNodePairs = self._getSnapTimesEquilibrium() else: snapTimeCycleNodePairs = self._getSnapTimesNormal() snapText = ["{0:03d}{1:03d}".format(c, n) for c, n in snapTimeCycleNodePairs] # determine if there are new snapshots to add to the settings file for snapT in snapText: if snapT not in self.cs["dumpSnapshot"]: runLog.info("Adding default snapshot {0} to snapshot queue.".format(snapT)) self.cs["dumpSnapshot"] = self.cs["dumpSnapshot"] + [snapT] def _getSnapTimesEquilibrium(self): """Set BOEC, MOEC, EOEC snapshots.""" if not self.cs["eqToDatabaseOnlyWhenConverged"]: raise ValueError("Cannot create default snapshots when `eqToDatabaseOnlyWhenConverged` setting is active") return [(0, 0), (0, self.cs["burnSteps"] // 2), (0, self.cs["burnSteps"])] def _getSnapTimesNormal(self): try: curCycle = self.r.p.cycle except AttributeError: # none has no attribute getParam (no reactor for whatever reason) curCycle = 0 eolCycle = self.cs["nCycles"] - 1 molCycle = eolCycle // 2 bolCycle = 0 snapTimeCycleNodePairs = [] if bolCycle 
>= curCycle: snapTimeCycleNodePairs.append([bolCycle, 0]) if molCycle >= curCycle: snapTimeCycleNodePairs.append([molCycle, 0]) if eolCycle >= curCycle: eolCycleLastNode = len(getStepLengths(self.cs)[-1]) snapTimeCycleNodePairs.append([eolCycle, eolCycleLastNode]) return snapTimeCycleNodePairs def extractCycleNodeFromStamp(stamp): """ Returns cycle and node from a CCCNNN stamp. See Also -------- getCycleNodeStamp : the opposite """ cycle = int(stamp[:3]) node = int(stamp[3:]) return cycle, node def getCycleNodeStamp(cycle, node): """ Returns a CCCNNN stamp for this cycle and node. Useful for comparing the current cycle/node with requested snapshots in the settings See Also -------- isRequestedDetailPoint : compares a cycle,node to the dumpSnapshot list. extractCycleNodeFromStamp : does the opposite """ return "{0:03d}{1:03d}".format(cycle, node) ================================================ FILE: armi/bookkeeping/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Bookkeeping test package. This may seem a little bit over-engineered, but the jupyter notebooks that get run by the test_historyTracker are also used in the documentation system, so providing a list of related files from this package is useful. Also, these are organized like this to prevent having to import the world just to get something like a list of strings. 
""" from armi.bookkeeping.tests._constants import * # noqa: F403 ================================================ FILE: armi/bookkeeping/tests/_constants.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Plain old data for the bookkeeping tests. These are stored here so that they can be accessed from within this test package, but also re-exported by `__init__.py`, so that other things (like the documentation system) can use it without having to import the rest of ARMI. """ import os from armi.testing import TESTING_ROOT from armi.tests import TEST_ROOT # These files are needed to run the data_model ipython notebook, which is done in # test_historyTracker, and when building the docs. 
TUTORIAL_FILES = [ os.path.join(TESTING_ROOT, "reactors", "anl-afci-177", "anl-afci-177-blueprints.yaml"), os.path.join(TESTING_ROOT, "reactors", "anl-afci-177", "anl-afci-177-coreMap.yaml"), os.path.join(TESTING_ROOT, "reactors", "anl-afci-177", "anl-afci-177-fuelManagement.py"), os.path.join(TESTING_ROOT, "reactors", "anl-afci-177", "anl-afci-177.yaml"), os.path.join(TEST_ROOT, "tutorials", "data_model.ipynb"), ] ================================================ FILE: armi/bookkeeping/tests/test_historyTracker.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests for the history tracker interface. These tests actually run a jupyter notebook that is in the documentation to build a valid HDF5 file to load from as a test fixtures. Thus they take a little longer than usual. 
""" import os import shutil import numpy as np from armi import init as armi_init from armi import settings, utils from armi.reactor.flags import Flags from armi.tests import TEST_ROOT, ArmiTestHelper from armi.utils.directoryChangers import TemporaryDirectoryChanger THIS_DIR = os.path.dirname(__file__) # because tests do not run in this folder TEST_FILE = os.path.join(TEST_ROOT, "smallestTestReactor", "armiRunSmallest.yaml") class TestHistoryTracker(ArmiTestHelper): """History tracker tests that require a Reactor Model.""" @classmethod def setUpClass(cls): cls.dirChanger = TemporaryDirectoryChanger() cls.dirChanger.__enter__() # modify the input settings for our tests dbPath = os.path.join(cls.dirChanger.destination, "armiRunSmallest.h5") reloadPath = os.path.join(cls.dirChanger.destination, "armiRunSmallestReload.h5") cs = settings.Settings(TEST_FILE) newSettings = {} newSettings["db"] = True newSettings["nCycles"] = 1 newSettings["detailAssemLocationsBOL"] = ["001-001"] newSettings["loadStyle"] = "fromDB" newSettings["reloadDBName"] = reloadPath newSettings["startNode"] = 1 newSettings["verbosity"] = "error" cs = cs.modified(newSettings=newSettings) # build the ARMI operator (and Reactor) o = armi_init(fName=TEST_FILE, cs=cs) def _setFakePower(core): peakPower = 1e6 mgFluxBase = np.arange(5) for a in core: for b in a: vol = b.getVolume() fuelFlag = 10 if b.isFuel() else 1.0 b.p.power = peakPower * fuelFlag b.p.pdens = b.p.power / vol b.p.mgFlux = mgFluxBase * b.p.pdens # put some test power values on the Reactor object _setFakePower(o.r.core) # write some data to the DB dbi = o.getInterface("database") dbi.initDB(fName=dbPath) dbi.database.writeToDB(o.r) o.r.p.timeNode += 1 dbi.database.writeToDB(o.r) cls.o = o cls.r = o.r @classmethod def tearDownClass(cls): cls.dirChanger.__exit__(None, None, None) try: cls.o.getInterface("database").database.close() except FileNotFoundError: pass cls.r = None cls.o = None def test_calcMGFluence(self): """ This test confirms 
that mg flux has many groups when loaded with the history tracker. .. test:: Demonstrate that a parameter stored at differing time nodes can be recovered. :id: T_ARMI_HIST_TRACK0 :tests: R_ARMI_HIST_TRACK """ o = self.o b = o.r.core.childrenByLocator[o.r.core.spatialGrid[0, 0, 0]].getFirstBlock(Flags.FUEL) bVolume = b.getVolume() bName = b.name # duration is None in this DB hti = o.getInterface("history") timesInYears = [duration or 1.0 for duration in hti.getTimeSteps()] timeStepsToRead = [utils.getCycleNodeFromCumulativeNode(i, self.o.cs) for i in range(len(timesInYears))] hti.preloadBlockHistoryVals([bName], ["mgFlux"], timeStepsToRead) mgFluence = None for ts, years in enumerate(timesInYears): cycle, node = utils.getCycleNodeFromCumulativeNode(ts, self.o.cs) mgFlux = hti.getBlockHistoryVal(bName, "mgFlux", (cycle, node)) mgFlux /= bVolume timeInSec = years * 365 * 24 * 3600 if mgFluence is None: mgFluence = timeInSec * mgFlux else: mgFluence += timeInSec * mgFlux self.assertGreater(len(mgFluence), 1, "mgFluence should have more than 1 group") # test that unloadBlockHistoryVals() is working self.assertIsNotNone(hti._preloadedBlockHistory) hti.unloadBlockHistoryVals() self.assertIsNone(hti._preloadedBlockHistory) def test_historyParameters(self): """Retrieve various parameters from the history. .. test:: Demonstrate that various parameters stored at differing time nodes can be recovered. 
:id: T_ARMI_HIST_TRACK1 :tests: R_ARMI_HIST_TRACK """ o = self.o b = o.r.core.childrenByLocator[o.r.core.spatialGrid[0, 0, 0]].getFirstBlock(Flags.FUEL) b.getVolume() bName = b.name # duration is None in this DB hti = o.getInterface("history") timesInYears = [duration or 1.0 for duration in hti.getTimeSteps()] timeStepsToRead = [utils.getCycleNodeFromCumulativeNode(i, self.o.cs) for i in range(len(timesInYears))] hti.preloadBlockHistoryVals([bName], ["power"], timeStepsToRead) # read some parameters params = {} for param in ["height", "pdens", "power"]: params[param] = [] for ts, years in enumerate(timesInYears): cycle, node = utils.getCycleNodeFromCumulativeNode(ts, self.o.cs) params[param].append(hti.getBlockHistoryVal(bName, param, (cycle, node))) # verify the height parameter doesn't change over time self.assertGreater(params["height"][0], 0) self.assertEqual(params["height"][0], params["height"][1]) # verify the power parameter is retrievable from the history refPower = 1000000.0 self.assertEqual(o.cs["power"], refPower) self.assertAlmostEqual(params["power"][0], refPower * 10.0, delta=0.1) # verify the power density parameter is retrievable from the history refDens = 1636.4803548458785 self.assertAlmostEqual(params["pdens"][0], refDens, delta=0.001) self.assertAlmostEqual(params["pdens"][0], params["pdens"][1]) # test that unloadBlockHistoryVals() is working self.assertIsNotNone(hti._preloadedBlockHistory) hti.unloadBlockHistoryVals() self.assertIsNone(hti._preloadedBlockHistory) def test_historyReport(self): """ Test generation of history report. 
This does a swap for 5 timesteps:: | TS 0 1 2 3 4 |LOC (1,1) (2,1) (3,1) (4,1) SFP """ history = self.o.getInterface("history") history.interactBOL() history.interactEOL() testLoc = self.o.r.core.spatialGrid[0, 0, 0] testAssem = self.o.r.core.childrenByLocator[testLoc] fileName = history._getAssemHistoryFileName(testAssem) actualFilePath = os.path.join(THIS_DIR, fileName) expectedFileName = os.path.join(THIS_DIR, fileName.replace(".txt", "-ref.txt")) # copy from fast path so the file is retrievable. shutil.move(fileName, os.path.join(THIS_DIR, fileName)) self.compareFilesLineByLine(expectedFileName, actualFilePath) # test that detailAssemblyNames() is working self.assertEqual(len(history.detailAssemblyNames), 1) history.addAllDetailedAssems() self.assertEqual(len(history.detailAssemblyNames), 1) def test_getAssemHistories(self): """Get the histories for all blocks in detailed assemblies.""" history = self.o.getInterface("history") history.interactBOL() assemList = history.getDetailAssemblies() params = history.getTrackedParams() assemHistories = history.getAssemHistories(assemList) for a in assemList: for b in history.nonStationaryBlocks(a): self.assertIn(b, assemHistories) for param in params: self.assertIn(param, assemHistories[b]) def test_getBlockInAssembly(self): history = self.o.getInterface("history") aFuel = self.o.r.core.getFirstAssembly(Flags.FUEL) b = history._getBlockInAssembly(aFuel) self.assertGreater(b.p.height, 1.0) self.assertEqual(b.getType(), "fuel") with self.assertRaises(AttributeError): aShield = self.o.r.core.getFirstAssembly(Flags.SHIELD) history._getBlockInAssembly(aShield) ================================================ FILE: armi/bookkeeping/tests/test_memoryProfiler.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for memoryProfiler.""" import logging import unittest from unittest.mock import MagicMock, patch from armi import runLog from armi.bookkeeping import memoryProfiler from armi.bookkeeping.memoryProfiler import ( getCurrentMemoryUsage, getTotalJobMemory, ) from armi.reactor.tests import test_reactors from armi.tests import TEST_ROOT, mockRunLogs class TestMemoryProfiler(unittest.TestCase): def setUp(self): self.o, self.r = test_reactors.loadTestReactor( TEST_ROOT, {"debugMem": True}, inputFileName="smallestTestReactor/armiRunSmallest.yaml", ) self.memPro: memoryProfiler.MemoryProfiler = self.o.getInterface("memoryProfiler") def tearDown(self): self.o.removeInterface(self.memPro) def test_fullBreakdown(self): with mockRunLogs.BufferLog() as mock: # we should start with a clean slate self.assertEqual("", mock.getStdout()) runLog.LOG.startLog("test_fullBreakdown") runLog.LOG.setVerbosity(logging.INFO) # we should start at info level, and that should be working correctly self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO) self.memPro._printFullMemoryBreakdown(reportSize=False) # do some basic testing self.assertTrue(mock.getStdout().count("UNIQUE_INSTANCE_COUNT") > 10) self.assertIn("garbage", mock.getStdout()) def test_displayMemoryUsage(self): with mockRunLogs.BufferLog() as mock: # we should start with a clean slate self.assertEqual("", mock.getStdout()) runLog.LOG.startLog("test_displayMemUsage") runLog.LOG.setVerbosity(logging.INFO) # we should start at info level, and that should be working correctly self.assertEqual(runLog.LOG.getVerbosity(), 
logging.INFO)
            self.memPro.displayMemoryUsage(1)

            # do some basic testing
            self.assertIn("End Memory Usage Report", mock.getStdout())

    def test_printFullMemoryBreakdown(self):
        # _printFullMemoryBreakdown should emit the per-class instance/size table at INFO verbosity
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_displayMemUsage")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)

            self.memPro._printFullMemoryBreakdown(reportSize=True)

            # do some basic testing: reportSize=True should add the " MB" size column
            self.assertIn("UNIQUE_INSTANCE_COUNT", mock.getStdout())
            self.assertIn(" MB", mock.getStdout())

    def test_getReferrers(self):
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            testName = "test_getReferrers"
            runLog.LOG.startLog(testName)
            runLog.LOG.setVerbosity(logging.DEBUG)

            # grab the referrers
            self.memPro.getReferrers(self.r)
            memLog = mock.getStdout()

            # test the results; the exact referrer count is environment-dependent,
            # so only bound it loosely rather than asserting an exact number
            self.assertGreater(memLog.count("ref for"), 10)
            self.assertLess(memLog.count("ref for"), 50)
            self.assertIn(testName, memLog)
            self.assertIn("Reactor", memLog)
            self.assertIn("core", memLog)

    def test_checkForDuplicateObjectsOnArmiModel(self):
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            testName = "test_checkForDuplicateObjectsOnArmiModel"
            runLog.LOG.startLog(testName)
            runLog.LOG.setVerbosity(logging.IMPORTANT)

            # check for duplicates; duplicates found -> RuntimeError
            with self.assertRaises(RuntimeError):
                self.memPro.checkForDuplicateObjectsOnArmiModel("cs", self.r.core)

            # validate the outputs are as we expect
            self.assertIn("There are 2 unique objects stored as `.cs`", mock.getStdout())
            self.assertIn("Expected id", mock.getStdout())
            self.assertIn("Expected object", mock.getStdout())
            self.assertIn("These types of objects", mock.getStdout())
            self.assertIn("MemoryProfiler", mock.getStdout())
            self.assertIn("MainInterface", mock.getStdout())

    def test_profileMemoryUsageAction(self):
        # the constructor should simply store the description it is given
        pmua = memoryProfiler.ProfileMemoryUsageAction("timeDesc")
        self.assertEqual(pmua.timeDescription, "timeDesc")

    @patch("psutil.virtual_memory")
    @patch("armi.bookkeeping.memoryProfiler.cpu_count")
    def test_getTotalJobMemory(self, mockCpuCount, mockVMem):
        """Use an example node with 50 GB of total physical memory and 10 CPUs."""
        mockCpuCount.return_value = 10
        vMem = MagicMock()
        vMem.total = (1024**3) * 50
        mockVMem.return_value = vMem
        # (nTasks, cpusPerTask) -> expected job memory in GB
        expectedArrangement = {
            (10, 1): 50,
            (1, 10): 50,
            (2, 5): 50,
            (3, 3): 45,
            (4, 1): 20,
            (2, 4): 40,
            (5, 2): 50,
        }
        for compReq, jobMemory in expectedArrangement.items():
            # compReq[0] is nTasks and compReq[1] is cpusPerTask
            self.assertEqual(getTotalJobMemory(compReq[0], compReq[1]), jobMemory)

    @patch("armi.bookkeeping.memoryProfiler.PrintSystemMemoryUsageAction")
    @patch("armi.bookkeeping.memoryProfiler.SystemAndProcessMemoryUsage")
    def test_getCurrentMemoryUsage(self, mockSysAndProcMemUse, mockPrintSysMemUseAction):
        """Mock the memory usage across 3 different processes and that the total usage is as expected (6 MB)."""
        self._setMemUseMock(mockPrintSysMemUseAction)
        # 1 + 2 + 3 GB of mocked usage = 6 GB total, reported in MB
        self.assertAlmostEqual(getCurrentMemoryUsage(), 6 * 1024)

    @patch("armi.bookkeeping.memoryProfiler.PrintSystemMemoryUsageAction")
    @patch("armi.bookkeeping.memoryProfiler.SystemAndProcessMemoryUsage")
    @patch("psutil.virtual_memory")
    @patch("armi.bookkeeping.memoryProfiler.cpu_count")
    def test_printCurrentMemoryState(self, mockCpuCount, mockVMem, mock1, mockPrintSysMemUseAction):
        """Use an example node with 50 GB of total physical memory and 10 CPUs while using 6 GB."""
        mockCpuCount.return_value = 10
        vMem = MagicMock()
        vMem.total = (1024**3) * 50
        mockVMem.return_value = vMem
        self._setMemUseMock(mockPrintSysMemUseAction)
        with mockRunLogs.BufferLog() as mockLogs:
            # request the full node: 10 tasks x 1 CPU -> all 50 GB allocated
            self.memPro.cs = {"cpusPerTask": 1, "nTasks": 10}
            self.memPro.printCurrentMemoryState()
            stdOut = mockLogs.getStdout()
            self.assertIn("Currently using 6.0 GB of memory.", stdOut)
            self.assertIn("There is 44.0 GB of memory left.", stdOut)
            self.assertIn("There is a total allocation of 50.0 GB", stdOut)

            # Try another for funzies where we only use half the available resources on the node
            mockLogs.emptyStdout()
            self.memPro.cs = {"cpusPerTask": 5, "nTasks": 1}
            self.memPro.printCurrentMemoryState()
            stdOut = mockLogs.getStdout()
            self.assertIn("Currently using 6.0 GB of memory.", stdOut)
            self.assertIn("There is 19.0 GB of memory left.", stdOut)
            self.assertIn("There is a total allocation of 25.0 GB", stdOut)

    def test_printCurrentMemoryState_noSetting(self):
        """Test that the try/except works as it should."""
        # without the 'cpusPerTask' setting, printCurrentMemoryState should log a hint
        # rather than raising
        expectedStr = (
            "To view memory consumed, remaining available, and total allocated for a case, "
            "add the setting 'cpusPerTask' to your application."
        )
        with mockRunLogs.BufferLog() as mockLogs:
            self.memPro.printCurrentMemoryState()
            self.assertIn(expectedStr, mockLogs.getStdout())

    def _setMemUseMock(self, mockPrintSysMemUseAction):
        # Helper: make the mocked gather() report three processes using 1, 2, and
        # 3 GB (expressed in MB), for a total of 6 GB.
        class mockMemUse:
            def __init__(self, mem: float):
                self.processVirtualMemoryInMB = mem

        instance = mockPrintSysMemUseAction.return_value
        instance.gather.return_value = [
            mockMemUse(1 * 1024),
            mockMemUse(2 * 1024),
            mockMemUse(3 * 1024),
        ]


class KlassCounterTests(unittest.TestCase):
    """Tests for the per-class object counter used by the memory profiler."""

    def get_containers(self):
        # Build a small, nested fixture: a dict holding a tuple, lists, and scalars.
        # Expected counts: 2 lists, 2 dicts (outer + empty), 2 tuples (container2 +
        # the ("t1","t2") key), 7 ints, plus assorted strings/floats/None/bool.
        container1 = [1, 2, 3, 4, 5, 6, 7, 2.0]
        container2 = ("a", "b", container1, None)
        container3 = {
            "yo": container2,
            "yo1": container1,
            ("t1", "t2"): True,
            "yeah": [],
            "nope": {},
        }
        return container3

    def test_expandContainer(self):
        container = self.get_containers()

        counter = memoryProfiler.KlassCounter(False)
        counter.countObjects(container)

        self.assertEqual(counter.count, 24)
        self.assertEqual(counter[list].count, 2)
        self.assertEqual(counter[dict].count, 2)
        self.assertEqual(counter[tuple].count, 2)
        self.assertEqual(counter[int].count, 7)

    def test_countHandlesRecursion(self):
        container = self.get_containers()
        # make the inner list refer to itself so the structure is cyclic
        container1 = container["yo1"]
        container1.append(container1)

        counter = memoryProfiler.KlassCounter(False)
        counter.countObjects(container)

        # despite it now being recursive ... we get the same counts
        self.assertEqual(counter.count, 24)
        self.assertEqual(counter[list].count, 2)
        self.assertEqual(counter[dict].count, 2)
        self.assertEqual(counter[tuple].count, 2)
        self.assertEqual(counter[int].count, 7)



================================================
FILE: armi/bookkeeping/tests/test_snapshot.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Snapshots.""" import unittest from unittest.mock import patch from armi import settings from armi.bookkeeping import snapshotInterface from armi.operators.operator import Operator class MockReactorParams: def __init__(self): self.cycle = 0 self.timeNode = 1 class MockReactor: def __init__(self, cs): self.p = MockReactorParams() self.o = Operator(cs) class TestSnapshotInterface(unittest.TestCase): @classmethod def setUpClass(self): self.cs = settings.Settings() def setUp(self): self.cs.revertToDefaults() self.si = snapshotInterface.SnapshotInterface(MockReactor(self.cs), self.cs) @patch("armi.operators.operator.Operator.snapshotRequest") def test_interactEveryNode(self, mockSnapshotRequest): newSettings = {} newSettings["dumpSnapshot"] = ["000001"] self.si.cs = self.si.cs.modified(newSettings=newSettings) self.si.interactEveryNode(0, 1) self.assertTrue(mockSnapshotRequest.called) @patch("armi.operators.operator.Operator.snapshotRequest") def test_interactCoupled(self, mockSnapshotRequest): newSettings = {} newSettings["dumpSnapshot"] = ["000001"] self.si.cs = self.si.cs.modified(newSettings=newSettings) self.si.interactCoupled(2) self.assertTrue(mockSnapshotRequest.called) def test_activateDefSnapshots_30cyc2burns(self): """ Test snapshots for 30 cycles and 2 burnsteps, checking the dumpSnapshot setting. .. test:: Allow extra data to be saved from a run, at specified time nodes. :id: T_ARMI_SNAPSHOT0 :tests: R_ARMI_SNAPSHOT """ self.assertEqual([], self.cs["dumpSnapshot"]) newSettings = {} newSettings["nCycles"] = 30 newSettings["burnSteps"] = 2 newSettings["cycleLength"] = 365 self.si.cs = self.si.cs.modified(newSettings=newSettings) self.cs = self.si.cs self.si.activateDefaultSnapshots() self.assertEqual(["000000", "014000", "029002"], self.si.cs["dumpSnapshot"]) def test_activateDeftSnapshots_17cyc5surns(self): """ Test snapshots for 17 cycles and 5 burnsteps, checking the dumpSnapshot setting. .. 
test:: Allow extra data to be saved from a run, at specified time nodes. :id: T_ARMI_SNAPSHOT1 :tests: R_ARMI_SNAPSHOT """ self.assertEqual([], self.cs["dumpSnapshot"]) newSettings = {} newSettings["nCycles"] = 17 newSettings["burnSteps"] = 5 newSettings["cycleLength"] = 365 self.si.cs = self.si.cs.modified(newSettings=newSettings) self.cs = self.si.cs self.si.activateDefaultSnapshots() self.assertEqual(["000000", "008000", "016005"], self.si.cs["dumpSnapshot"]) ================================================ FILE: armi/bookkeeping/visualization/__init__.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The Visualization package contains functionality and entry points for producing files amenable to visualization of ARMI run results. This could theoretically support all sorts of visualization file formats, but for now, only VTK files are supported. VTK was selected because it has wide support from vis tools, while being a simple-enough format that quality pure-Python libraries exist to produce them. Other formats (e.g., SILO) tend to require more system-dependent binary dependencies, so optional support for them may be added later. 
""" from armi import plugins # noqa: F401 from armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint # noqa: F401 ================================================ FILE: armi/bookkeeping/visualization/dumper.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Abstract base class for visualization file dumpers.""" from abc import ABC, abstractmethod from armi.reactor import reactors class VisFileDumper(ABC): @abstractmethod def dumpState(self, r: reactors.Reactor): """Dump a single reactor state to the vis file.""" @abstractmethod def __enter__(self): """Invoke initialize when entering a context manager.""" @abstractmethod def __exit__(self, type, value, traceback): """Invoke initialize when entering a context manager.""" ================================================ FILE: armi/bookkeeping/visualization/entryPoint.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point for producing visualization files."""

import pathlib
import re
import sys

from armi import runLog
from armi.cli import entryPoint


class VisFileEntryPoint(entryPoint.EntryPoint):
    """Create visualization files from database files."""

    name = "vis-file"
    description = "Convert ARMI databases in to visualization files"

    _FORMAT_VTK = "vtk"
    _FORMAT_XDMF = "xdmf"
    # set of all formats accepted by --format
    _SUPPORTED_FORMATS = {_FORMAT_VTK, _FORMAT_XDMF}

    def __init__(self):
        entryPoint.EntryPoint.__init__(self)

    def addOptions(self):
        # register CLI options; string-valued node options are parsed in parse()
        self.parser.add_argument("h5db", help="Input database path", type=str)
        self.parser.add_argument(
            "--output-name",
            "-o",
            help="Base name for output file(s). File extensions will be added as appropriate",
            type=str,
            default=None,
        )
        self.parser.add_argument(
            "--format",
            "-f",
            help="Output format. Supported formats: `vtk` and `xdmf`",
            default="vtk",
        )
        self.parser.add_argument(
            "--nodes",
            help="An optional list of time nodes to include. Should look like `(1,0)(1,1)(1,2)`, etc",
            type=str,
            default=None,
        )
        self.parser.add_argument(
            "--max-node",
            help="An optional (cycle,timeNode) tuple to specify the latest time step that should be included",
            type=str,
            default=None,
        )
        self.parser.add_argument(
            "--min-node",
            help="An optional (cycle,timeNode) tuple to specify the earliest time step that should be included",
            type=str,
            default=None,
        )

    def parse(self, args):
        """
        Process user input.

        Strings are parsed against some regular expressions and saved back to their
        original locations in the ``self.args`` namespace for later use.
        """
        entryPoint.EntryPoint.parse(self, args)

        # matches one "(cycle,node)" pair of non-negative integers
        cycleNodePattern = r"\((\d+),(\d+)\)"

        if self.args.nodes is not None:
            # --nodes may contain any number of (c,n) pairs
            self.args.nodes = [(int(cycle), int(node)) for cycle, node in re.findall(cycleNodePattern, self.args.nodes)]

        if self.args.max_node is not None:
            # --max-node must contain exactly one (c,n) pair
            nodes = re.findall(cycleNodePattern, self.args.max_node)
            if len(nodes) != 1:
                runLog.error("Bad --max-node: `{}`. Should look like (c,n).".format(self.args.max_node))
                sys.exit(1)
            cycle, node = nodes[0]
            self.args.max_node = (int(cycle), int(node))

        if self.args.min_node is not None:
            # --min-node must contain exactly one (c,n) pair
            nodes = re.findall(cycleNodePattern, self.args.min_node)
            if len(nodes) != 1:
                runLog.error("Bad --min-node: `{}`. Should look like (c,n).".format(self.args.min_node))
                sys.exit(1)
            cycle, node = nodes[0]
            self.args.min_node = (int(cycle), int(node))

        if self.args.format not in self._SUPPORTED_FORMATS:
            runLog.error(
                "Requested format `{}` not among the supported options: {}".format(
                    self.args.format, self._SUPPORTED_FORMATS
                )
            )
            sys.exit(1)

        if self.args.output_name is None:
            # infer name from input (database file name without its extension)
            inp = pathlib.Path(self.args.h5db)
            self.args.output_name = inp.stem

    def invoke(self):
        # late imports so that we dont have to import the world to do anything
        from armi.bookkeeping.db import databaseFactory
        from armi.bookkeeping.visualization import vtk, xdmf

        # a little baroque, but easy to extend with future formats
        formatMap = {
            self._FORMAT_VTK: vtk.VtkDumper,
            self._FORMAT_XDMF: xdmf.XdmfDumper,
        }

        dumper = formatMap[self.args.format](self.args.output_name, self.args.h5db)

        nodes = self.args.nodes

        db = databaseFactory(self.args.h5db, "r")

        with db:
            dbNodes = list(db.genTimeSteps())

            # fail fast if any explicitly-requested node is missing from the db
            if nodes is not None and any(node not in dbNodes for node in nodes):
                raise RuntimeError(
                    "Some of the requested nodes are not in the source database.\nRequested: {}\nPresent: {}".format(
                        nodes, dbNodes
                    )
                )

            with dumper:
                for cycle, node in dbNodes:
                    # apply the explicit-node list and the min/max filters
                    # (tuple comparison orders by cycle first, then node)
                    if nodes is not None and (cycle, node) not in nodes:
                        continue

                    if self.args.min_node is not None and (cycle, node) < self.args.min_node:
                        continue

                    if self.args.max_node is not None and (cycle, node) > self.args.max_node:
                        continue

                    runLog.info("Creating visualization file for cycle {}, time node {}...".format(cycle, node))
                    r = db.load(cycle, node)
                    dumper.dumpState(r)



================================================
FILE: armi/bookkeeping/visualization/tests/__init__.py
================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/bookkeeping/visualization/tests/test_vis.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test report visualization.""" import unittest import numpy as np from pyevtk.vtk import VtkTetra from armi import settings from armi.bookkeeping.db import Database from armi.bookkeeping.visualization import utils, vtk, xdmf from armi.reactor import blocks, components from armi.reactor.tests import test_reactors from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestVtkMesh(unittest.TestCase): """Test the VtkMesh utility class.""" def test_testVtkMesh(self): mesh = utils.VtkMesh.empty() self.assertEqual(mesh.vertices.size, 0) self.assertEqual(mesh.vertices.shape, (0, 3)) self.assertEqual(mesh.connectivity.size, 0) self.assertEqual(mesh.offsets.size, 0) self.assertEqual(mesh.cellTypes.size, 0) verts = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.25, 0.25, 0.5]]) conn = np.array([0, 1, 2, 3]) offsets = np.array([4]) cellTypes = np.array([VtkTetra.tid]) newMesh = utils.VtkMesh(verts, conn, offsets, cellTypes) mesh.append(newMesh) mesh.append(newMesh) self.assertEqual(mesh.vertices.size, 3 * 8) self.assertEqual(mesh.offsets.size, 2) self.assertEqual(mesh.connectivity.size, 8) self.assertEqual(mesh.cellTypes.size, 2) self.assertEqual(mesh.offsets[-1], 8) self.assertEqual(mesh.connectivity[-1], 7) class TestVisDump(unittest.TestCase): """Test dumping a whole reactor and some specific block types.""" @classmethod def setUpClass(cls): caseSetting = settings.Settings() _, cls.r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") cls.hexBlock = next(cls.r.core.iterBlocks()) cls.cartesianBlock = blocks.CartesianBlock("TestCartesianBlock", caseSetting) cartesianComponent = components.HoledSquare( "duct", "UZr", Tinput=273.0, Thot=273.0, holeOD=68.0, widthOuter=12.5, mult=1.0, ) cls.cartesianBlock.add(cartesianComponent) cls.cartesianBlock.add(components.Circle("clad", "HT9", Tinput=273.0, Thot=273.0, od=68.0, mult=169.0)) def test_dumpReactorVtk(self): # This does a lot, and is hard to verify. 
at least make sure it doesn't crash with TemporaryDirectoryChanger(dumpOnException=False): dumper = vtk.VtkDumper("testVtk", inputName=None) with dumper: dumper.dumpState(self.r) def test_dumpReactorXdmf(self): # This does a lot, and is hard to verify. at least make sure it doesn't crash with TemporaryDirectoryChanger(dumpOnException=False): db = Database("testDatabase.h5", "w") with db: db.writeToDB(self.r) dumper = xdmf.XdmfDumper("testVtk", inputName="testDatabase.h5") with dumper: dumper.dumpState(self.r) def test_hexMesh(self): mesh = utils.createBlockMesh(self.hexBlock) self.assertEqual(mesh.vertices.size, 12 * 3) self.assertEqual(mesh.cellTypes[0], 16) def test_cartesianMesh(self): mesh = utils.createBlockMesh(self.cartesianBlock) self.assertEqual(mesh.vertices.size, 8 * 3) self.assertEqual(mesh.cellTypes[0], 12) ================================================ FILE: armi/bookkeeping/visualization/tests/test_xdmf.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from armi.bookkeeping.visualization import xdmf class TestXdmf(unittest.TestCase): """ Test XDMF-specific functionality. This is for testing XDMF functions that can reasonably be tested in a vacuum. The main dump methods are hard to test without resorting to checking whole files, which isn't particularly useful. Those tests can be found in test_vis. 
""" def test_dedupTimes(self): # no duplicates self.assertEqual( xdmf.XdmfDumper._dedupTimes([1.0 * t for t in range(10)]), [1.0 * t for t in range(10)], ) # ends in duplicates self.assertEqual( xdmf.XdmfDumper._dedupTimes([0.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 4.0]), [0.0, 1.0, 2.0, 2.000000002, 3.0, 4.0, 4.000000004, 4.000000008], ) # ends in unique self.assertEqual( xdmf.XdmfDumper._dedupTimes([0.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 4.0, 5.0]), [0.0, 1.0, 2.0, 2.000000002, 3.0, 4.0, 4.000000004, 4.000000008, 5.0], ) # all duplicates self.assertEqual( xdmf.XdmfDumper._dedupTimes([0.0] * 5), [0.0, 1e-09, 2e-09, 3.0000000000000004e-09, 4e-09], ) # single value self.assertEqual( xdmf.XdmfDumper._dedupTimes([1.0]), [1.0], ) # empty list self.assertEqual( xdmf.XdmfDumper._dedupTimes([]), [], ) with self.assertRaises(AssertionError): # input should be sorted xdmf.XdmfDumper._dedupTimes([float(t) for t in reversed(range(10))]) ================================================ FILE: armi/bookkeeping/visualization/utils.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility classes/functions for visualization. Most of these are derived from the VTK format, which tends to be general enough to support other formats. Most of the work goes into figuring out where the vertices should be for a given block/assembly shape. If this coupling becomes problematic, abstractions for primitive shapes should be created. 
""" import math import numpy as np from pyevtk.hl import unstructuredGridToVTK from pyevtk.vtk import VtkHexahedron, VtkQuadraticHexahedron from armi.reactor import assemblies, blocks, reactors from armi.utils import hexagon # The hex prism cell type is not very well-documented, and so is not described in # pyevtk. Digging into the header reveals that `16` does the trick. _HEX_PRISM_TID = 16 class VtkMesh: """ Container for VTK unstructured mesh data. This provides a container for the necessary data to describe a mesh to VTK (vertex locations, connectivity, offsets, cell types). It supports appending one set of mesh data onto another, handling the necessary index offsets. While the specifics are somewhat specific to the VTK format, the concept of storing a bunch of vertices and their connectivity is a relatively general one, so this may be of use to other formats as well. """ def __init__(self, vertices, connectivity, offsets, cellTypes): """ Parameters ---------- vertices : np.ndarray An Nx3 numpy array with one row per (x,y,z) vertex connectivity : np.ndarray A 1-D array containing the vertex indices belonging to each cell offsets : np.ndarray A 1-D array containing the index of the first vertex for the next cell cellTypes : np.ndarray A 1-D array containing the cell type ID for each cell """ self.vertices = vertices self.connectivity = connectivity self.offsets = offsets self.cellTypes = cellTypes @staticmethod def empty(): return VtkMesh( np.empty((0, 3), dtype=np.float64), np.array([], dtype=np.int32), np.array([], dtype=np.int32), np.array([], dtype=np.int32), ) @property def x(self): return np.array(self.vertices[:, 0]) @property def y(self): return np.array(self.vertices[:, 1]) @property def z(self): return np.array(self.vertices[:, 2]) def append(self, other): """Add more cells to the mesh.""" connectOffset = self.vertices.shape[0] offsetOffset = self.offsets[-1] if self.offsets.size > 0 else 0 self.vertices = np.vstack((self.vertices, other.vertices)) 
self.connectivity = np.append(self.connectivity, other.connectivity + connectOffset) self.offsets = np.append(self.offsets, other.offsets + offsetOffset) self.cellTypes = np.append(self.cellTypes, other.cellTypes) def write(self, path, data) -> str: """ Write this mesh and the passed data to a VTK file. Returns the base path, plus relevant extension. """ fullPath = unstructuredGridToVTK( path, self.x, self.y, self.z, connectivity=self.connectivity, offsets=self.offsets, cell_types=self.cellTypes, cellData=data, ) return fullPath def createReactorBlockMesh(r: reactors.Reactor) -> VtkMesh: mesh = VtkMesh.empty() blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block)) for b in blks: mesh.append(createBlockMesh(b)) return mesh def createReactorAssemMesh(r: reactors.Reactor) -> VtkMesh: mesh = VtkMesh.empty() assems = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly)) for a in assems: mesh.append(createAssemMesh(a)) return mesh def createBlockMesh(b: blocks.Block) -> VtkMesh: if isinstance(b, blocks.HexBlock): return _createHexBlockMesh(b) if isinstance(b, blocks.CartesianBlock): return _createCartesianBlockMesh(b) if isinstance(b, blocks.ThRZBlock): return _createTRZBlockMesh(b) else: raise TypeError( "Unsupported block type `{}`. 
Supported types are: {}".format( type(b).__name__, {t.__name__ for t in {blocks.CartesianBlock, blocks.HexBlock, blocks.ThRZBlock}}, ) ) def createAssemMesh(a: assemblies.Assembly) -> VtkMesh: # Kind of hacky, but since all blocks in an assembly are the same type, let's just # use the block mesh functions and change their z coordinates to match the size of # the whole assem 🤯 mesh = createBlockMesh(a[0]) # we should only have a single VTK mesh primitive per block assert len(mesh.cellTypes) == 1 zMin = a.spatialGrid._bounds[2][0] zMax = a.spatialGrid._bounds[2][-1] if mesh.cellTypes[0] == VtkHexahedron: mesh.vertices[0:4, 2] = zMin mesh.vertices[4:8, 2] = zMax elif mesh.cellTypes[0] == _HEX_PRISM_TID: mesh.vertices[0:6, 2] = zMin mesh.vertices[6:12, 2] = zMax elif mesh.cellTypes[0] == VtkQuadraticHexahedron.tid: # again, quadratic hexahedra are a pain mesh.vertices[0:4, 2] = zMin mesh.vertices[8:12, 2] = zMin mesh.vertices[4:8, 2] = zMax mesh.vertices[12:16, 2] = zMax return mesh def _createHexBlockMesh(b: blocks.HexBlock) -> VtkMesh: assert b.spatialLocator is not None zMin = b.p.zbottom zMax = b.p.ztop gridOffset = b.spatialLocator.getGlobalCoordinates()[:2] gridOffset = np.tile(gridOffset, (6, 1)) pitch = b.getPitch() hexVerts2d = np.array(hexagon.corners(rotation=0)) * pitch hexVerts2d += gridOffset # we need a top and bottom hex hexVerts2d = np.vstack((hexVerts2d, hexVerts2d)) # fold in z locations to get 3d coordinates hexVerts = np.hstack((hexVerts2d, np.array([[zMin] * 6 + [zMax] * 6]).transpose())) return VtkMesh( hexVerts, np.array(list(range(12))), np.array([12]), np.array([_HEX_PRISM_TID]), ) def _createCartesianBlockMesh(b: blocks.CartesianBlock) -> VtkMesh: assert b.spatialLocator is not None zMin = b.p.zbottom zMax = b.p.ztop gridOffset = b.spatialLocator.getGlobalCoordinates()[:2] gridOffset = np.tile(gridOffset, (4, 1)) pitch = b.getPitch() halfPitchX = pitch[0] * 0.5 halfPitchY = pitch[0] * 0.5 rectVerts = np.array( [ [halfPitchX, halfPitchY], 
[-halfPitchX, halfPitchY], [-halfPitchX, -halfPitchY], [halfPitchX, -halfPitchY], ] ) rectVerts += gridOffset # make top/bottom rectangles boxVerts = np.vstack((rectVerts, rectVerts)) # fold in z coordinates boxVerts = np.hstack((boxVerts, np.array([[zMin] * 4 + [zMax] * 4]).transpose())) return VtkMesh( boxVerts, np.array(list(range(8))), np.array([8]), np.array([VtkHexahedron.tid]), ) def _createTRZBlockMesh(b: blocks.ThRZBlock) -> VtkMesh: # This could be improved. rIn = b.radialInner() rOut = b.radialOuter() thIn = b.thetaInner() thOut = b.thetaOuter() zIn = b.p.zbottom zOut = b.p.ztop vertsRTZ = [ (rIn, thOut, zIn), (rIn, thIn, zIn), (rOut, thIn, zIn), (rOut, thOut, zIn), (rIn, thOut, zOut), (rIn, thIn, zOut), (rOut, thIn, zOut), (rOut, thOut, zOut), (rIn, (thIn + thOut) * 0.5, zIn), ((rIn + rOut) * 0.5, thIn, zIn), (rOut, (thIn + thOut) * 0.5, zIn), ((rIn + rOut) * 0.5, thOut, zIn), (rIn, (thIn + thOut) * 0.5, zOut), ((rIn + rOut) * 0.5, thIn, zOut), (rOut, (thIn + thOut) * 0.5, zOut), ((rIn + rOut) * 0.5, thOut, zOut), (rIn, thOut, (zIn + zOut) * 0.5), (rIn, thIn, (zIn + zOut) * 0.5), (rOut, thIn, (zIn + zOut) * 0.5), (rOut, thOut, (zIn + zOut) * 0.5), ] vertsXYZ = np.array([[r * math.cos(th), r * math.sin(th), z] for r, th, z in vertsRTZ]) return VtkMesh( vertsXYZ, np.array(list(range(20))), np.array([20]), np.array([VtkQuadraticHexahedron.tid]), ) ================================================ FILE: armi/bookkeeping/visualization/vtk.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Visualization implementation for VTK files.

Limitations
-----------
This version of the VTK file writer comes with a number of limitations and/or aspects
that can be improved upon. For instance:

* Only the Block and Assembly meshes and related parameters are exported to the VTK
  file. Adding Core data is totally doable, and will be the product of future work.
  With more considerable effort, arbitrary components may be visualizable!

* No efforts are made to de-duplicate the vertices in the mesh, so there are more
  vertices than needed. Some fancy canned algorithms probably exist to do this, and it
  wouldn't be too difficult to do here either. Also future work, but probably not super
  important unless dealing with really big meshes.
"""

from typing import Any, Dict, List, Optional, Set, Tuple

import numpy as np
from pyevtk.vtk import VtkGroup

from armi import runLog
from armi.bookkeeping.db import database
from armi.bookkeeping.visualization import dumper, utils
from armi.reactor import assemblies, blocks, composites, parameters, reactors


class VtkDumper(dumper.VisFileDumper):
    """
    Dumper for VTK data.

    This handles writing unstructured meshes and associated Block parameter data to VTK
    files. The context manager keeps track of how many files have been written (one per
    time node), and creates a group/collection file when finished.
    """

    def __init__(self, baseName: str, inputName: str):
        # inputName is accepted but unused here — presumably kept so VtkDumper and
        # XdmfDumper share a constructor signature (see entryPoint.invoke); confirm
        # before removing.
        self._baseName = baseName
        # (filePath, simTime) pairs for each time node written so far
        self._assemFiles: List[Tuple[str, float]] = []
        self._blockFiles: List[Tuple[str, float]] = []

    def dumpState(
        self,
        r: reactors.Reactor,
        includeParams: Optional[Set[str]] = None,
        excludeParams: Optional[Set[str]] = None,
    ):
        """
        Dump a reactor to a VTK file.

        Parameters
        ----------
        r : reactors.Reactor
            The reactor state to visualize
        includeParams : list of str, optional
            A list of parameter names to include in the viz file. Defaults to all
            params.
        excludeParams : list of str, optional
            A list of parameter names to exclude from the output. Defaults to no params.
        """
        cycle = r.p.cycle
        timeNode = r.p.timeNode

        # you never know... (the file-name format below only allots 3 digits each)
        assert cycle < 1000
        assert timeNode < 1000

        # We avoid using cXnY, since VisIt doesn't support .pvd files, but *does* know
        # to lump data with similar file names and integers at the end.
        blockPath = "{}_blk_{:0>3}{:0>3}".format(self._baseName, cycle, timeNode)
        assemPath = "{}_asy_{:0>3}{:0>3}".format(self._baseName, cycle, timeNode)

        # include and exclude params are mutually exclusive
        if includeParams is not None and excludeParams is not None:
            raise ValueError("includeParams and excludeParams can not both be used at the same time")

        blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))
        assems = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))

        blockMesh = utils.createReactorBlockMesh(r)
        assemMesh = utils.createReactorAssemMesh(r)

        # collect param data
        blockData = _collectObjectData(blks, includeParams, excludeParams)
        assemData = _collectObjectData(assems, includeParams, excludeParams)

        # block number densities are special, since they aren't stored as params
        blockNdens = database.collectBlockNumberDensities(blks)
        # we need to copy the number density vectors to guarantee unit stride, which
        # pyevtk requires. Kinda seems like something it could do for us, but oh well.
        blockNdens = {key: np.array(value) for key, value in blockNdens.items()}
        blockData.update(blockNdens)

        fullPath = blockMesh.write(blockPath, blockData)
        self._blockFiles.append((fullPath, r.p.time))

        fullPath = assemMesh.write(assemPath, assemData)
        self._assemFiles.append((fullPath, r.p.time))

    def __enter__(self):
        # reset the per-run file lists so a dumper instance can be reused
        self._assemFiles = []
        self._blockFiles = []

    def __exit__(self, type, value, traceback):
        # each dumpState() appends to both lists, so they must stay in lockstep
        assert len(self._assemFiles) == len(self._blockFiles)

        if len(self._assemFiles) > 1:
            # multiple files need to be wrapped up into groups. VTK does not like
            # having multiple meshes in the same group, so we write out separate
            # Collection files for them
            asyGroup = VtkGroup(f"{self._baseName}_asm")
            for path, time in self._assemFiles:
                asyGroup.addFile(filepath=path, sim_time=time)
            asyGroup.save()

            blockGroup = VtkGroup(f"{self._baseName}_blk")
            for path, time in self._blockFiles:
                blockGroup.addFile(filepath=path, sim_time=time)
            blockGroup.save()


def _collectObjectData(
    objs: List[composites.ArmiObject],
    includeParams: Optional[Set[str]] = None,
    excludeParams: Optional[Set[str]] = None,
) -> Dict[str, Any]:
    # Gather per-object parameter values into flat arrays keyed by param name,
    # skipping anything that cannot be represented as 1-D numeric data.
    allData = dict()

    for pDef in type(objs[0]).pDefs.toWriteToDB(parameters.SINCE_ANYTHING):
        if includeParams is not None and pDef.name not in includeParams:
            continue
        if excludeParams is not None and pDef.name in excludeParams:
            continue

        data = []
        for obj in objs:
            val = obj.p[pDef.name]
            data.append(val)

        data = np.array(data)

        if data.dtype.kind == "S" or data.dtype.kind == "U":
            # no string support!
            continue

        if data.dtype.kind == "O":
            # datatype is "object", usually because it's jagged, or has Nones. We are
            # willing to try handling the Nones, but jagged also isn't visualizable.
            nones = np.where([d is None for d in data])[0]

            if len(nones) == data.shape[0]:
                # all Nones, so give up
                continue

            if len(nones) == 0:
                # looks like Nones had nothing to do with it. bail
                continue

            try:
                data = database.replaceNonesWithNonsense(data, pDef.name, nones=nones)
            except (ValueError, TypeError):
                # Looks like we have some weird data. We might be able to handle it
                # with more massaging, but probably not visualizable anyhow
                continue

            if data.dtype.kind == "O":
                # Didn't work
                runLog.warning(
                    "The parameter data for `{}` could not be coerced into a native type for output; skipping.".format(
                        pDef.name
                    )
                )
                continue

        if len(data.shape) != 1:
            # We aren't interested in vector data on each block
            continue

        allData[pDef.name] = data

    return allData



================================================
FILE: armi/bookkeeping/visualization/xdmf.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for dumping XDMF files.

`XDMF <http://www.xdmf.org/index.php/Main_Page>`_ is a data interchange format that
allows for separate representation of the data itself and a description of how those
data are to be interpreted. The data description ("light" data) lives in an XML file,
while the actual data (in our case, data to be plotted), as well as the data describing
the mesh ("hard" data) can be stored in HDF5 files, binary files, or embedded directly
into the XML file. In most cases, this allows for visualizing data directly out of an
ARMI database file.

Using the ``XdmfDumper`` will produce an XML file (with an ``.xdmf`` extension)
containing the description of data, as well as an HDF5 file containing the mesh.
Together with the input database, the ``.xdmf`` file can be opened in a visualization
tool that supports XDMF.

..
note:: Paraview seems to have rather good support for XDMF, while VisIt does not. The main issue seems to be that VisIt does not properly render the general polyhedra that XDMF supports. Unfortunately, we __need__ to use this to show hexagonal geometries, since it's the only way to get a hexagonal prism without splitting up the mesh into wedges. To do that would require splitting the parameter data, which would defeat the main benefit of using XMDF in the first place (to be able to plot out of the original Database file). Cartesian and R-X-Theta geometries in VisIt seem to work fine. """ import io import math import pathlib import xml.dom.minidom import xml.etree.ElementTree as ET from typing import Dict, List, Optional, Set, Tuple import h5py import numpy as np from armi import runLog from armi.bookkeeping.db import database from armi.bookkeeping.visualization import dumper, utils from armi.reactor import assemblies, blocks, composites, reactors _VTK_TO_XDMF_CELLS = {16: 16} _POLYHEDRON = 16 _HEXAHEDRON = 9 _QUADRATIC_HEXAHEDRON = 48 # The topology of a hexagonal prism, represented as a general polyhedron. To get this in # proper XDMF, these need to be offset to the proper vertex indices in the full mesh, # and have the number of face vertices inserted into the proper locations (notice the # [0] placeholders). 
_HEX_PRISM_TOPO = np.array(
    [0]
    + list(range(6))
    + [0]
    + list(range(6, 12))
    + [0]
    + [0, 1, 7, 6]
    + [0]
    + [1, 2, 8, 7]
    + [0]
    + [2, 3, 9, 8]
    + [0]
    + [3, 4, 10, 9]
    + [0]
    + [4, 5, 11, 10]
    + [0]
    + [5, 0, 6, 11]
)

# The indices of the placeholder zeros from _HEX_PRISM_TOPO array above
_HEX_PRISM_FACE_SIZE_IDX = np.array([0, 7, 14, 19, 24, 29, 34, 39])

# The number of vertices for each face
_HEX_PRISM_FACE_SIZES = np.array([6, 6, 4, 4, 4, 4, 4, 4])


def _getAttributesFromDataset(d: h5py.Dataset) -> Dict[str, str]:
    """
    Build the XDMF ``<DataItem>`` attributes describing an HDF5 dataset.

    Raises
    ------
    KeyError
        If the dataset dtype is not a 32/64-bit int or float; callers use this to
        skip non-plottable data.
    """
    dataType = {
        np.dtype("int32"): "Int",
        np.dtype("int64"): "Int",
        np.dtype("float32"): "Float",
        np.dtype("float64"): "Float",
    }[d.dtype]

    precision = {
        np.dtype("int32"): "4",
        np.dtype("int64"): "8",
        np.dtype("float32"): "4",
        np.dtype("float64"): "8",
    }[d.dtype]

    return {
        "Dimensions": " ".join(str(i) for i in d.shape),
        "DataType": dataType,
        "Precision": precision,
        "Format": "HDF",
    }


class XdmfDumper(dumper.VisFileDumper):
    """
    VisFileDumper implementation for XDMF format.

    The general strategy of this dumper is to create a new HDF5 file that contains just
    the necessary mesh information for each dumped time step. The XML that
    describes/points to these data is stored internally as ``ElementTree`` objects until
    the end. When all time steps have been processed, these elements have time
    information added to them, and are collected into a "TemporalCollection" Grid and
    written to an ``.xdmf`` file.
    """

    def __init__(self, baseName: str, inputName: Optional[str] = None):
        """
        Parameters
        ----------
        baseName : str
            Base name for the output ``.xdmf`` and ``_mesh.h5`` files.
        inputName : str, optional
            Relative path to the input ARMI database that the XDMF light data will
            point into. If not provided, only an XDMF mesh can be created (and
            ``__enter__`` will currently refuse to proceed without it).
        """
        self._baseName = baseName
        if inputName is None:
            runLog.warning("No input database name was given, so only an XMDF mesh will be created")
        self._inputName = inputName
        # Check that the inputName is a relative path. XDMF doesn't seem to like
        # absolute paths; at least on windows with ParaView.
        # The `is not None` guard is needed because pathlib.Path(None) raises a
        # TypeError, which would defeat the warn-and-continue path above.
        if inputName is not None and pathlib.Path(inputName).is_absolute():
            raise ValueError(
                "XDMF tools tend not to like absolute paths; provide a relative path to the input database."
            )
        self._meshH5 = None
        self._inputDb = None
        self._times = []
        self._blockGrids = []
        self._assemGrids = []

    def __enter__(self):
        """
        Prepare to write states.

        The dumper keeps track of ``<Grid>`` tags that need to be written into a
        Collection at the end. This also opens an auxiliary HDF5 file for writing meshes
        at each time step.

        Raises
        ------
        ValueError
            If no input database was supplied, or its version is not 3.
        """
        self._meshH5 = h5py.File(self._baseName + "_mesh.h5", "w")
        if self._inputName is None:
            # we could handle the case where the database wasn't passed by pumping state
            # into a new h5 file, but why?
            raise ValueError("Input database needed to generate XDMF output!")
        self._inputDb = database.Database(self._inputName, "r")
        with self._inputDb as db:
            dbVersion = db.version

        if math.floor(float(dbVersion)) != 3:
            raise ValueError("XDMF output requires Database version 3. Got version `{}`".format(dbVersion))

        self._times = []
        self._blockGrids = []
        self._assemGrids = []

    def __exit__(self, type, value, traceback):
        """
        Finalize file writing.

        This writes all of the ``<Grid>`` tags into a Collection for all time steps, and
        closes the input database and mesh-bearing HDF5 file.
        """
        self._meshH5.close()
        self._meshH5 = None
        if self._inputDb is not None:
            self._inputDb.close()
            self._inputDb = None

        timeCollectionBlk = ET.Element("Grid", attrib={"GridType": "Collection", "CollectionType": "Temporal"})
        timeCollectionAsm = ET.Element("Grid", attrib={"GridType": "Collection", "CollectionType": "Temporal"})

        # make sure all times are unique. Paraview will crash if they are not
        times = self._dedupTimes(self._times)

        for aGrid, bGrid, time in zip(self._assemGrids, self._blockGrids, times):
            # NOTE: the same <Time> element instance is attached to both grids;
            # ElementTree serializes it into each parent independently.
            timeElement = ET.Element("Time", attrib={"TimeType": "Single", "Value": str(time)})
            bGrid.append(timeElement)
            timeCollectionBlk.append(bGrid)
            aGrid.append(timeElement)
            timeCollectionAsm.append(aGrid)

        for collection, typ in [
            (timeCollectionBlk, "_blk"),
            (timeCollectionAsm, "_asm"),
        ]:
            xdmf = ET.Element("Xdmf", attrib={"Version": "3.0"})
            domain = ET.Element("Domain", attrib={"Name": "Reactor"})
            domain.append(collection)
            xdmf.append(domain)

            # Write to an internal buffer so that we can print more fancy below
            tree = ET.ElementTree(element=xdmf)
            buf = io.StringIO()
            tree.write(buf, encoding="unicode")
            buf.seek(0)

            # Round-trip through minidom to do the pretty print
            dom = xml.dom.minidom.parse(buf)
            with open(self._baseName + typ + ".xdmf", "w") as f:
                f.write(dom.toprettyxml())

    @staticmethod
    def _dedupTimes(times: List[float]) -> List[float]:
        """
        Make sure that no two times are the same.

        Duplicates will be resolved by bumping each subsequent duplicate time forward by
        some epsilon, cascading following duplicates by the same amount until no
        duplicates remain. This will fail in the case where there are already times that
        are within Ndup*epsilon of each other. In such cases, this function probably
        isn't valid anyways.
        """
        assert all(a <= b for a, b in zip(times, times[1:])), "Input list must be sorted"

        # This should be used as a multiplicative epsilon, to avoid precision issues
        # with large times
        _EPS = 1.0e-9

        def mapZeroToOne(x):
            # ...except when close enough to 0, where a relative epsilon would vanish.
            # Floating-point is a pain
            return x if x > _EPS else 1.0

        dups = [0] * len(times)

        # We iterate in reverse so that each entry in dups will contain the number of
        # duplicate entries that **precede** it
        for i in reversed(range(len(times))):
            ti = times[i]
            nDup = 0
            for j in range(i - 1, -1, -1):
                if times[j] == ti:
                    nDup += 1
                else:
                    break
            dups[i] = nDup

        return [t + nPreceding * _EPS * mapZeroToOne(t) for nPreceding, t in zip(dups, times)]

    def dumpState(
        self,
        r: reactors.Reactor,
        includeParams: Optional[Set[str]] = None,
        excludeParams: Optional[Set[str]] = None,
    ):
        """Produce a ``<Grid>`` for a single timestep, as well as supporting HDF5 datasets."""
        cycle = r.p.cycle
        node = r.p.timeNode
        timeGroupName = database.getH5GroupName(cycle, node)

        # careful here! we are trying to use the database datasets as the source of hard
        # data without copying, so the order that we make the mesh needs to be the same
        # order as the data in the database. There is no guarantee that the way a loaded
        # reactor is ordered is the same way that it was ordered in the database (though
        # perhaps we should do some work to specify that better). We need to look at the
        # layout in the input database to re-order the objects.
        with self._inputDb as db:
            layout = db.getLayout(cycle, node)

        # map each object's serial number to its row index within the database
        # datasets, so reactor children can be re-sorted into database order
        snToIdx = {sn: i for i, sn in zip(layout.indexInData, layout.serialNum)}

        blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))
        blks = sorted(blks, key=lambda b: snToIdx[b.p.serialNum])

        assems = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))
        assems = sorted(assems, key=lambda a: snToIdx[a.p.serialNum])

        # build the meshes (written to the auxiliary HDF5 file) and attach the
        # parameter <Attribute> elements that point into the input database
        blockGrid = self._makeBlockMesh(r, snToIdx)
        self._collectObjectData(blks, timeGroupName, blockGrid)

        assemGrid = self._makeAssemblyMesh(r, snToIdx)
        self._collectObjectData(assems, timeGroupName, assemGrid)

        # retained until __exit__, where they are collected into temporal Collections
        self._blockGrids.append(blockGrid)
        self._assemGrids.append(assemGrid)
        self._times.append(r.p.time)

    def _collectObjectData(self, objs: List[composites.ArmiObject], timeGroupName, node: ET.Element):
        """
        Scan for things that look plottable in the input database.

        "Plottable" things are anything that have int or float data, and the same number
        of elements as there are objects.

        .. warning::
            This makes some assumptions as to the structure of the database.
        """
        if self._inputDb is None:
            # If we weren't given a database to draw data from, we will just skip this
            # for now. Most of the time, a dumper should have an input database.
            # Otherwise, this **could** extract from the reactor state.
return typeNames = {type(o).__name__ for o in objs} if len(typeNames) != 1: raise ValueError("Currently only supporting homogeneous block types") typeName = next(iter(typeNames)) dataGroupName = "/".join((timeGroupName, typeName)) with self._inputDb as db: for key, val in db.h5db[dataGroupName].items(): if val.shape != (len(objs),): continue try: dataItem = ET.Element("DataItem", attrib=_getAttributesFromDataset(val)) except KeyError: continue dataItem.text = ":".join((db.fileName, val.name)) attrib = ET.Element( "Attribute", attrib={"Name": key, "Center": "Cell", "AttributeType": "Scalar"}, ) attrib.append(dataItem) node.append(attrib) def _makeBlockMesh(self, r: reactors.Reactor, indexMap) -> ET.Element: cycle = r.p.cycle node = r.p.timeNode blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block)) blks = sorted(blks, key=lambda b: indexMap[b.p.serialNum]) groupName = "c{}n{}".format(cycle, node) # VTK stuff turns out to be pretty flexible blockMesh = utils.VtkMesh.empty() for b in blks: blockMesh.append(utils.createBlockMesh(b)) verts = blockMesh.vertices verticesInH5 = groupName + "/blk_vertices" self._meshH5[verticesInH5] = verts topoValues = np.array([], dtype=np.int32) offset = 0 for b in blks: nVerts, cellTopo = _getTopologyFromShape(b, offset) topoValues = np.append(topoValues, cellTopo) offset += nVerts topoInH5 = groupName + "/blk_topology" self._meshH5[topoInH5] = topoValues return self._makeGenericMesh("Blocks", len(blks), self._meshH5[verticesInH5], self._meshH5[topoInH5]) def _makeAssemblyMesh(self, r: reactors.Reactor, indexMap) -> ET.Element: cycle = r.p.cycle node = r.p.timeNode asys = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly)) asys = sorted(asys, key=lambda b: indexMap[b.p.serialNum]) groupName = "c{}n{}".format(cycle, node) # VTK stuff turns out to be pretty flexible assemMesh = utils.VtkMesh.empty() for assem in asys: assemMesh.append(utils.createAssemMesh(assem)) verts = 
assemMesh.vertices verticesInH5 = groupName + "/asy_vertices" self._meshH5[verticesInH5] = verts topoValues = np.array([], dtype=np.int32) offset = 0 for a in asys: nVerts, cellTopo = _getTopologyFromShape(a[0], offset) topoValues = np.append(topoValues, cellTopo) offset += nVerts topoInH5 = groupName + "/asy_topology" self._meshH5[topoInH5] = topoValues return self._makeGenericMesh("Assemblies", len(asys), self._meshH5[verticesInH5], self._meshH5[topoInH5]) @staticmethod def _makeGenericMesh(name: str, nCells: int, vertexData: h5py.Dataset, topologyData: h5py.Dataset) -> ET.Element: grid = ET.Element("Grid", attrib={"GridType": "Uniform", "Name": name}) geometry = ET.Element("Geometry", attrib={"GeometryType": "XYZ"}) geomData = ET.Element( "DataItem", attrib={ "Dimensions": "{} {}".format(*vertexData.shape), "NumberType": "Float", "Format": "HDF", }, ) geomData.text = ":".join((vertexData.file.filename, vertexData.name)) geometry.append(geomData) topology = ET.Element( "Topology", attrib={"TopologyType": "Mixed", "NumberOfElements": str(nCells)}, ) topoData = ET.Element( "DataItem", attrib={ "Dimensions": "{}".format(topologyData.size), "NumberType": "Int", "Format": "HDF", }, ) topoData.text = ":".join((topologyData.file.filename, topologyData.name)) topology.append(topoData) grid.append(geometry) grid.append(topology) return grid def _getTopologyFromShape(b: blocks.Block, offset: int) -> Tuple[int, List[int]]: """ Returns the number of vertices used to make the shape, and XDMF topology values. The size of the XDMF topology values cannot be used directly in computing the next offset because it sometimes contains vertex indices __and__ sizing information. 
""" if isinstance(b, blocks.HexBlock): # polyhedron, 8 faces prefix = [_POLYHEDRON, 8] topo = _HEX_PRISM_TOPO + offset topo[_HEX_PRISM_FACE_SIZE_IDX] = _HEX_PRISM_FACE_SIZES topo = np.append(prefix, topo) return 12, topo if isinstance(b, blocks.CartesianBlock): return ( 8, [ _HEXAHEDRON, ] + list(range(offset, offset + 8)), ) if isinstance(b, blocks.ThRZBlock): return 20, [_QUADRATIC_HEXAHEDRON] + list(range(offset, offset + 20)) else: raise TypeError("Unsupported block type `{}`".format(type(b))) ================================================ FILE: armi/cases/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Case and CaseSuite objects for running and analyzing ARMI cases. A ``Case`` is a collection of inputs that represents one particular run. Cases have special knowledge about dependencies and can perform useful operations like compare, clone, and run. A ``CaseSuite`` is a set of (often related) Cases. These are fundamental to parameter sweeps and test suites. See Also -------- armi.cli : Entry points that build Cases and/or CaseSuites and send them off to do work armi.operators : Operations that ARMI will perform on a reactor model. Generally these are made by an individual Case. 
Examples -------- Create a Case and run it:: case = Case(settings.Settings("path-to-settings.yaml")) case.run() # do something with output database Create a case suite from existing files, and run the suite:: cs = settings.Settings() # default settings suite = CaseSuite(settings.Settings()) # default settings suite.discover("my-cases*.yaml", recursive=True) suite.run() .. warning:: Suite running may not work yet if the cases have interdependencies. Create a ``burnStep`` sensitivity study from some base CS:: baseCase = Case(settings.Settings("base-settings.yaml")) # default settings suite = CaseSuite(baseCase.cs) # basically just sets armiLocation for numSteps in range(3, 11): with ForcedCreationDirectoryChanger("{}steps".format(numSteps)): case = baseCase.clone(title=baseCase.title + f"-with{numSteps}steps", settings={"burnSteps": numSteps}) suite.add(case) suite.writeInputs() Then submit the inputs to your HPC cluster. """ from armi.cases.case import Case # noqa: F401 from armi.cases.suite import CaseSuite # noqa: F401 ================================================ FILE: armi/cases/case.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The ``Case`` object is responsible for running, and executing a set of user inputs. Many entry points redirect into ``Case`` methods, such as ``clone``, ``compare``, and ``run``. 
The ``Case`` object provides an abstraction around ARMI inputs to allow for manipulation and collection of cases. See Also -------- armi.cases.suite : A collection of Cases """ import ast import cProfile import glob import io import os import pathlib import pstats import re import sys import textwrap import time import trace from typing import Dict, Optional, Sequence, Set, Union import coverage from armi import context, getPluginManager, interfaces, operators, runLog, settings from armi.bookkeeping.db import compareDatabases from armi.nucDirectory import nuclideBases from armi.physics.neutronics.settings import CONF_LOADING_FILE from armi.reactor import blueprints, reactors from armi.utils import pathTools, tabulate, textProcessors from armi.utils.customExceptions import NonexistentSetting from armi.utils.directoryChangers import ( DirectoryChanger, ForcedCreationDirectoryChanger, ) # Change from default .coverage to help with Windows dotfile issues. # Must correspond with data_file entry in `pyproject.toml`! COVERAGE_RESULTS_FILE = "coverage_results.cov" class Case: """ An ARMI Case that can be used for suite set up and post-analysis. A Case is capable of loading inputs, checking that they are valid, and initializing a reactor model. Cases can also compare against other cases and be collected into multiple :py:class:`armi.cases.suite.CaseSuite`. """ def __init__(self, cs, caseSuite=None, bp=None): """ Initialize a Case from user input. Parameters ---------- cs : Settings Settings for this Case caseSuite : CaseSuite, optional CaseSuite this particular case belongs. Passing this in allows dependency tracking across the other cases (e.g. if one case uses the output of another as input, as happens in in-use testing for reactivity coefficient snapshot testing or more complex analysis sequences). bp : Blueprints, optional :py:class:`armi.reactor.blueprints.Blueprints` object containing the assembly definitions and other information. 
If not supplied, it will be loaded from the ``cs`` as needed. """ self._startTime = time.time() self._caseSuite = caseSuite self._tasks = [] self._dependencies: Set[Case] = set() self.enabled = True # set the signal if the user passes in a blueprint object, instead of a file if bp is not None: cs.filelessBP = True # NOTE: in order to prevent slow submission times for loading massively large blueprints # (e.g. certain computer-generated input files), self.bp can be None. self.cs = cs self._bp = bp # this is used in parameter sweeps self._independentVariables = {} @property def independentVariables(self): """ Get dictionary of independent variables and their values. This unpacks independent variables from the cs object's independentVariables setting the first time it is run. This is used in parameter sweeps. See Also -------- writeInputs : writes the ``independentVariabls`` setting """ if not self._independentVariables: for indepStr in self.cs["independentVariables"]: indepName, value = ast.literal_eval(indepStr) self._independentVariables[indepName] = value return self._independentVariables def __repr__(self): return "<Case cs: {}>".format(self.cs.path) @property def bp(self): """ Blueprint object for this case. Notes ----- This property allows lazy loading. """ if self._bp is None: self._bp = blueprints.loadFromCs(self.cs, roundTrip=True) return self._bp @bp.setter def bp(self, bp): self._bp = bp @property def dependencies(self): """ Get a list of parent Case objects. Notes ----- This is performed on demand so that if someone changes the underlying Settings, the case will reflect the correct dependencies. As a result, if this is being done iteratively, you may want to cache it somehow (in a dict?). Ideally, this should not be the responsibility of the Case, but rather the suite! 
""" dependencies = set() if self._caseSuite is not None: pm = getPluginManager() if pm is not None: for pluginDependencies in pm.hook.defineCaseDependencies(case=self, suite=self._caseSuite): dependencies.update(pluginDependencies) # the ([^\/]) capture basically gets the file name portion and excludes any # directory separator dependencies.update( self.getPotentialParentFromSettingValue( self.cs["explicitRepeatShuffles"], r"^(?P<dirName>.*[\/\\])?(?P<title>[^\/\\]+)-SHUFFLES\.txt$", ) ) # ensure that a case doesn't appear to be its own dependency dependencies.update(self._dependencies) dependencies.discard(self) return dependencies def addExplicitDependency(self, case): """ Register an explicit dependency. When evaluating the ``dependency`` property, dynamic dependencies are probed using the current case settings and plugin hooks. Sometimes, it is necessary to impose dependencies that are not expressed through settings and hooks. This method stores another case as an explicit dependency, which will be included with the other, implicitly discovered, dependencies. """ if case in self._dependencies: runLog.warning("The case {} is already explicitly specified as a dependency of {}".format(case, self)) self._dependencies.add(case) def getPotentialParentFromSettingValue(self, settingValue, filePattern): """ Get a parent case based on a setting value and a pattern. This is a convenient way for a plugin to express a dependency. It uses the ``match.groupdict`` functionality to pull the directory and case name out of a specific setting value an regular expression. Parameters ---------- settingValue : str A particular setting value that might contain a reference to an input that is produced by a dependency. filePattern : str A regular expression for extracting the location and name of the dependency. If the ``settingValue`` matches the passed pattern, this function will attempt to extract the ``dirName`` and ``title`` groups to find the dependency. 
""" m = re.match(filePattern, settingValue, re.IGNORECASE) deps = self._getPotentialDependencies(**m.groupdict()) if m else set() if len(deps) > 1: raise KeyError("Found more than one case matching {}".format(settingValue)) return deps def _getPotentialDependencies(self, dirName, title): """Get a parent case based on a directory and case title.""" if dirName is None: dirName = self.directory elif not os.path.isabs(dirName): dirName = os.path.join(self.directory, dirName) def caseMatches(case): if os.path.normcase(case.title) != os.path.normcase(title): return False return os.path.normcase(os.path.abspath(case.directory)) == os.path.normcase(os.path.abspath(dirName)) return {case for case in self._caseSuite if caseMatches(case)} @property def title(self): """The case title.""" return self.cs.caseTitle @title.setter def title(self, name): self.cs.caseTitle = name @property def dbName(self): """The case output database name.""" return os.path.splitext(self.cs.path)[0] + ".h5" @property def directory(self): """The working directory of the case.""" return self.cs.inputDirectory def __eq__(self, that): """ Compares two cases to determine if they are equivalent by looking at the ``title`` and ``directory``. Notes ----- No other attributes except those stated above are used for the comparison; the above stated attributes can be considered the "primary key" for a Case object and identify it as being unique. Both of these comparisons are simple string comparisons, so a reference and an absolute path to the same case would be considered different. """ return self.title == that.title and self.directory == that.directory def __hash__(self): """Computes the hash of a Case object. This is required when __eq__ is been defined. Take the hash of the tuple of the "primary key". """ return hash((self.title, self.directory)) def setUpTaskDependence(self): """ Set the task dependence based on the :code:`dependencies`. This accounts for whether or not the dependency is enabled. 
""" if not self.enabled: return for dependency in self.dependencies: if dependency.enabled: self._tasks[0].add_parent(dependency._tasks[-1]) def run(self): """ Run an ARMI case. .. impl:: The case class allows for a generic ARMI simulation. :id: I_ARMI_CASE :implements: R_ARMI_CASE This method is responsible for "running" the ARMI simulation instigated by the inputted settings. This initializes an :py:class:`~armi.operators.operator.Operator`, a :py:class:`~armi.reactor.reactors.Reactor` and invokes :py:meth:`Operator.operate <armi.operators.operator.Operator.operate>`. It also activates supervisory things like code coverage checking, profiling, or tracing, if requested by users during debugging. Notes ----- Room for improvement: The coverage, profiling, etc. stuff can probably be moved out of here to a more elegant place (like a context manager?). """ # Start the log here so that the verbosities for the head and workers can be configured # based on the user settings for the rest of the run. runLog.LOG.startLog(self.cs.caseTitle) if context.MPI_RANK == 0: runLog.setVerbosity(self.cs["verbosity"]) else: runLog.setVerbosity(self.cs["branchVerbosity"]) # if in the settings, start the coverage and profiling cov = self._startCoverage() profiler = self._startProfiling() self.checkInputs() o = self.initializeOperator() with o: if self.cs["trace"] and context.MPI_RANK == 0: # only trace primary node. tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], trace=1) tracer.runctx("o.operate()", globals(), locals()) else: o.operate() # if in the settings, report the coverage and profiling Case._endCoverage(self.cs["coverageConfigFile"], cov) Case._endProfiling(profiler) def _startCoverage(self): """Helper to the Case.run: spin up the code coverage tooling, if the Settings file says to. 
Returns ------- coverage.Coverage Coverage object for pytest or unittest """ cov = None if self.cs["coverage"]: cov = coverage.Coverage( config_file=Case._getCoverageRcFile(userCovFile=self.cs["coverageConfigFile"], makeCopy=True), debug=["dataio"], ) if context.MPI_SIZE > 1: # interestingly, you cannot set the parallel flag in the constructor without # auto-specifying the data suffix. This should enable parallel coverage with # auto-generated data file suffixes and combinations. cov.config.parallel = True cov.start() return cov @staticmethod def _endCoverage(userCovFile, cov=None): """Helper to the Case.run(): stop and report code coverage, if the Settings file says to. Parameters ---------- userCovFile : str File path to user-supplied coverage configuration file (default setting is empty string) cov: coverage.Coverage (optional) Hopefully, a valid and non-empty set of coverage data. """ if cov is None: return cov.stop() cov.save() if context.MPI_SIZE > 1: context.MPI_COMM.barrier() # force waiting for everyone to finish if context.MPI_RANK == 0 and context.MPI_SIZE > 1: # combine all the parallel coverage data files into one and make the XML and HTML # reports for the whole run. combinedCoverage = coverage.Coverage(config_file=Case._getCoverageRcFile(userCovFile), debug=["dataio"]) combinedCoverage.config.parallel = True # combine does delete the files it merges combinedCoverage.combine() combinedCoverage.save() combinedCoverage.html_report() combinedCoverage.xml_report() @staticmethod def _getCoverageRcFile(userCovFile, makeCopy=False): """Helper to provide the coverage configuration file according to the OS. A user-supplied file will take precedence, and is not checked for a dot-filename. Notes ----- ARMI replaced the ".coveragerc" file has been replaced by "pyproject.toml". 
Parameters ---------- userCovFile : str File path to user-supplied coverage configuration file (default setting is empty string) makeCopy : bool (optional) Whether or not to copy the coverage config file to an alternate file path Returns ------- covFile : str path of pyprojec.toml file """ # User-defined file takes precedence. if userCovFile: return os.path.abspath(userCovFile) covRcDir = os.path.abspath(context.PROJECT_ROOT) return os.path.join(covRcDir, "pyproject.toml") def _startProfiling(self): """Helper to the Case.run(): start the Python profiling, if the Settings file says to. Returns ------- cProfile.Profile Standard Python profiling object """ profiler = None if self.cs["profile"]: profiler = cProfile.Profile() profiler.enable(subcalls=True, builtins=True) return profiler @staticmethod def _endProfiling(profiler=None): """Helper to the Case.run(): stop and report python profiling, if the Settings file says to. Parameters ---------- profiler: cProfile.Profile (optional) Hopefully, a valid and non-empty set of profiling data. 
""" if profiler is None: return profiler.disable() profiler.dump_stats("profiler.{:0>3}.stats".format(context.MPI_RANK)) statsStream = io.StringIO() summary = pstats.Stats(profiler, stream=statsStream).sort_stats("cumulative") summary.print_stats() if context.MPI_SIZE > 0 and context.MPI_COMM is not None: allStats = context.MPI_COMM.gather(statsStream.getvalue(), root=0) if context.MPI_RANK == 0: for rank, statsString in enumerate(allStats): # using print statements because the logger has been turned off print("=" * 100) print("{:^100}".format(" Profiler statistics for RANK={} ".format(rank))) print(statsString) print("=" * 100) else: print(statsStream.getvalue()) def initializeOperator(self, r=None): """Creates and returns an Operator.""" with DirectoryChanger(self.cs.inputDirectory, dumpOnException=False): self._initBurnChain() o = operators.factory(self.cs) if r is None: r = reactors.factory(self.cs, self.bp) o.initializeInterfaces(r) # Set this here to make sure the full duration of initialization is properly captured. # Cannot be done in reactors since the above self.bp call implicitly initializes blueprints. r.core.timeOfStart = self._startTime return o def _initBurnChain(self): """ Apply the burn chain setting to the nucDir. Notes ----- This is admittedly an odd place for this but the burn chain info must be applied sometime after user-input has been loaded (for custom burn chains) but not long after (because nucDir is framework-level and expected to be up-to-date by lots of modules). """ if not self.cs["initializeBurnChain"]: runLog.info("Skipping burn-chain initialization since `initializeBurnChain` setting is disabled.") return if not os.path.exists(self.cs["burnChainFileName"]): raise ValueError( f"The burn-chain file {self.cs['burnChainFileName']} does not exist. The " "data cannot be loaded. Fix this path or disable burn-chain initialization using " "the `initializeBurnChain` setting." 
) with open(self.cs["burnChainFileName"]) as burnChainStream: nuclideBases.imposeBurnChain(burnChainStream) def checkInputs(self): """ Checks ARMI inputs for consistency. .. impl:: Perform validity checks on case inputs. :id: I_ARMI_CASE_CHECK :implements: R_ARMI_CASE_CHECK This method checks the validity of the current settings. It relies on an :py:class:`~armi.settings.settingsValidation.Inspector` object from the :py:class:`~armi.operators.operator.Operator` to generate a list of :py:class:`~armi.settings.settingsValidation.Query` objects that represent potential issues in the settings. After gathering the queries, this method prints a table of query "statements" and "questions" to the console. If running in an interactive mode, the user then has the opportunity to address the questions posed by the queries by either addressing the potential issue or ignoring it. Returns ------- bool True if the inputs are all good, False otherwise """ runLog.header("=========== Settings Validation Checks ===========") with DirectoryChanger(self.cs.inputDirectory, dumpOnException=False): operatorClass = operators.getOperatorClassFromSettings(self.cs) inspector = operatorClass.inspector(self.cs) inspectorIssues = [query for query in inspector.queries if query] # Write out the settings validation issues that will be prompted for resolution if in an # interactive session or forced to be resolved otherwise. 
queryData = [] for i, query in enumerate(inspectorIssues, start=1): queryData.append( ( i, textwrap.fill(query.statement, width=50, break_long_words=False), textwrap.fill(query.question, width=50, break_long_words=False), ) ) if queryData and context.MPI_RANK == 0: runLog.info( tabulate.tabulate( queryData, headers=["Number", "Statement", "Question"], tableFmt="armi", ) ) if context.CURRENT_MODE == context.Mode.INTERACTIVE: # if interactive, ask user to deal with settings issues inspector.run() return not any(inspectorIssues) def clone( self, additionalFiles=None, title=None, modifiedSettings=None, writeStyle="short", ): """ Clone existing ARMI inputs to current directory with optional settings modifications. Since each case depends on multiple inputs, this is a safer way to move cases around without having to wonder if you copied all the files appropriately. Parameters ---------- additionalFiles : list (optional) additional file paths to copy to cloned case title : str (optional) title of new case modifiedSettings : dict (optional) settings to set/modify before creating the cloned case writeStyle : str (optional) Writing style for which settings get written back to the settings files (short, medium, or full). 
Raises ------ RuntimeError If the source and destination are the same """ cloneCS = self.cs.duplicate() if modifiedSettings is not None: cloneCS = cloneCS.modified(newSettings=modifiedSettings) clone = Case(cloneCS) clone.cs.path = pathTools.armiAbsPath(title or self.title) + ".yaml" if pathTools.armiAbsPath(clone.cs.path) == pathTools.armiAbsPath(self.cs.path): raise RuntimeError( "The source file and destination file are the same: {}\nCannot use armi-clone to " "modify armi settings file.".format(pathTools.armiAbsPath(clone.cs.path)) ) newSettings = copyInterfaceInputs(self.cs, clone.cs.inputDirectory) newCs = clone.cs.modified(newSettings=newSettings) clone.cs = newCs runLog.important(f"writing settings file {clone.cs.path}") clone.cs.writeToYamlFile(clone.cs.path, style=writeStyle, fromFile=self.cs.path) runLog.important(f"finished writing {clone.cs}") fromPath = lambda f: pathTools.armiAbsPath(self.cs.inputDirectory, f) fileName = self.cs[CONF_LOADING_FILE] if fileName: pathTools.copyOrWarn( CONF_LOADING_FILE, fromPath(fileName), os.path.join(clone.cs.inputDirectory, fileName), ) else: runLog.warning(f"skipping {CONF_LOADING_FILE}, there is no file specified") with open(self.cs[CONF_LOADING_FILE], "r") as f: # The root for handling YAML includes is relative to the YAML file, not the # settings file root = pathlib.Path(self.cs.inputDirectory) / pathlib.Path(self.cs[CONF_LOADING_FILE]).parent cloneRoot = pathlib.Path(clone.cs.inputDirectory) / pathlib.Path(clone.cs[CONF_LOADING_FILE]).parent for includePath, mark in textProcessors.findYamlInclusions(f, root=root): if not includePath.is_absolute(): includeSrc = root / includePath includeDest = cloneRoot / includePath else: # don't bother copying absolute files continue if not includeSrc.exists(): raise OSError("The input file file `{}` referenced at {} does not exist.".format(includeSrc, mark)) pathTools.copyOrWarn( "auxiliary input file `{}` referenced at {}".format(includeSrc, mark), includeSrc, includeDest, ) 
for fileName in additionalFiles or []: pathTools.copyOrWarn("additional file", fromPath(fileName), clone.cs.inputDirectory) return clone def compare( self, that, exclusion: Optional[Sequence[str]] = None, tolerance=0.01, timestepCompare=None, ) -> int: """ Compare the output databases from two run cases. Return number of differences. This is useful both for in-use testing and engineering analysis. """ runLog.info("Comparing the following databases:\nREF: {}\nSRC: {}".format(self.dbName, that.dbName)) diffResults = compareDatabases( self.dbName, that.dbName, tolerance=tolerance, exclusions=exclusion, timestepCompare=timestepCompare, ) code = 1 if diffResults is None else diffResults.nDiffs() sameOrDifferent = "different" if diffResults is None or diffResults.nDiffs() > 0 else "the same" runLog.important("Cases are {}.".format(sameOrDifferent)) return code def writeInputs(self, sourceDir: Optional[str] = None, writeStyle: Optional[str] = "short"): """ Write the inputs to disk. This allows input objects that have been modified in memory (e.g. for a parameter sweep or migration) to be written out as input for a forthcoming case. Parameters ---------- sourceDir : str (optional) The path to copy inputs from (if different from the cs.path). Needed in SuiteBuilder cases to find the baseline inputs from plugins (e.g. shuffleLogic) writeStyle : str (optional) Writing style for which settings get written back to the settings files (short, medium, or full). Notes ----- This will rename the ``loadingFile`` to ``title-blueprints + '.yaml'``. See Also -------- independentVariables parses/reads the independentVariables setting clone Similar to this but doesn't let you write out new/modified blueprints objects """ with ForcedCreationDirectoryChanger(self.cs.inputDirectory, dumpOnException=False): # These seemingly no-ops load the bp via properties if they are not yet initialized. 
self.bp newSettings = {} newSettings[CONF_LOADING_FILE] = self.title + "-blueprints.yaml" if self.independentVariables: newSettings["independentVariables"] = [ f"({repr(varName)}, {repr(val)})" for varName, val in self.independentVariables.items() ] with open(newSettings[CONF_LOADING_FILE], "w") as loadingFile: blueprints.Blueprints.dump(self.bp, loadingFile) # copy input files from other modules/plugins interfaceSettings = copyInterfaceInputs(self.cs, ".", sourceDir) for settingName, value in interfaceSettings.items(): newSettings[settingName] = value self.cs = self.cs.modified(newSettings=newSettings) if sourceDir: fromPath = os.path.join(sourceDir, self.title + ".yaml") else: fromPath = self.cs.path self.cs.writeToYamlFile(f"{self.title}.yaml", style=writeStyle, fromFile=fromPath) def _copyInputsHelper(fileDescription: str, sourcePath: str, destPath: str, origFile: str) -> str: """ Helper function for copyInterfaceInputs: Creates an absolute file path, and copies the file to that location. If that file path does not exist, returns the file path from the original settings file. 
Parameters ---------- fileDescription : str A file description for the copyOrWarn method sourcePath : str The absolute file path of the file to copy destPath : str The target directory to copy input files to origFile : str File path as defined in the original settings file Returns ------- destFilePath (or origFile) : str """ sourceName = pathlib.Path(sourcePath).name destFilePath = os.path.join(destPath, sourceName) try: pathTools.copyOrWarn(fileDescription, sourcePath, destFilePath) if pathlib.Path(destFilePath).exists(): # the basename gets written back to the settings file to protect against potential # future dir structure changes return os.path.basename(destFilePath) else: # keep original filepath in the settings file if file copy was unsuccessful return origFile except Exception: return origFile def copyInterfaceInputs(cs, destination: str, sourceDir: Optional[str] = None) -> Dict[str, Union[str, list]]: """ Ping active interfaces to determine which files are considered "input". This enables developers to add new inputs in a plugin-dependent/ modular way. This function should now be able to handle the updating of: - a single file (relative or absolute) - a list of files (relative or absolute) - a file entry that has a wildcard processing into multiple files. Glob is used to offer support for wildcards. - a directory and its contents If the file paths are absolute, do nothing. The case will be able to find the file. In case suites or parameter sweeps, these files often have a sourceDir associated with them that is different from the cs.inputDirectory. So, if relative or wildcard, update the file paths to be absolute in the case settings and copy the file to the destination directory. Parameters ---------- cs : Settings The source case settings to find input files destination : str The target directory to copy input files to sourceDir : str, optional The directory from which to copy files. 
Defaults to cs.inputDirectory Returns ------- dict A new settings object that contains settings for the keys and values that are either an absolute file path, a list of absolute file paths, or the original file path if absolute paths could not be resolved. Notes ----- Regarding the handling of relative file paths: In the future this could be simplified by adding a concept for a suite root directory, below which it is safe to copy files without needing to update settings that point with a relative path to files that are below it. """ activeInterfaces = interfaces.getActiveInterfaceInfo(cs) sourceDir = sourceDir or cs.inputDirectory sourceDirPath = pathlib.Path(sourceDir) assert pathlib.Path(destination).is_dir() newSettings = {} for klass, _ in activeInterfaces: interfaceFileNames = klass.specifyInputs(cs) for key, files in interfaceFileNames.items(): if not isinstance(key, settings.Setting): try: key = cs.getSetting(key) label = key.name isSetting = True except NonexistentSetting(key): runLog.debug(f"{key} is not a valid setting; continuing on anyway.") label = key isSetting = False else: isSetting = True label = key.name newFiles = [] for f in files: WILDCARD = False EMPTY = False ABSOLUTE = False if "*" in f: WILDCARD = True if not f: # beware: pathlib.path("") returns "." 
which can be bad news, so we handle empty # strings as their own category EMPTY = True path = pathlib.Path(f) if not EMPTY and path.is_absolute(): ABSOLUTE = True # Attempt to construct an absolute file path srcFullPath = os.path.join(sourceDirPath, f) destFilePath = None if WILDCARD: globFilePaths = [pathlib.Path(os.path.join(sourceDirPath, g)) for g in glob.glob(srcFullPath)] if len(globFilePaths) == 0: destFilePath = f newFiles.append(str(destFilePath)) else: for gFile in globFilePaths: destFilePath = _copyInputsHelper(label, gFile, destination, f) newFiles.append(str(destFilePath)) elif EMPTY: pass elif ABSOLUTE: if path.exists(): # Path is absolute, no settings modification or filecopy needed newFiles.append(path) else: # treat as a relative path destFilePath = _copyInputsHelper(label, srcFullPath, destination, f) newFiles.append(str(destFilePath)) if destFilePath == f: runLog.debug( f"No input files for `{label}` could be resolved with the following path: " f"`{srcFullPath}`. Will not update `{label}`." ) # Some settings are a single filename. Others are lists of files. Make # sure we are returning what the setting expects if isSetting and len(newFiles): if len(files) == 1 and not WILDCARD and key.name in cs and not isinstance(cs[key.name], list): newSettings[label] = newFiles[0] else: newSettings[label] = newFiles return newSettings ================================================ FILE: armi/cases/inputModifiers/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code that changes input files and writes them back out.

Useful for parameter sweeps.

See Also
--------
armi.reactor.converters
    Code that changes reactor objects at runtime. These often take longer to run than these but
    can be used in the middle of ARMI analyses.
"""


================================================
FILE: armi/cases/inputModifiers/inputModifiers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modifies inputs."""


class InputModifier:
    """
    Object that modifies input definitions in some well-defined way.

    .. impl:: A generic tool to modify user inputs on multiple cases.
        :id: I_ARMI_CASE_MOD1
        :implements: R_ARMI_CASE_MOD

        This class serves as an abstract base class for modifying the inputs of a case, typically
        case settings. Child classes must implement a ``__call__`` method accepting a
        :py:class:`~armi.settings.caseSettings.Settings` and
        :py:class:`~armi.reactor.blueprints.Blueprints` and return the appropriately modified
        version of these objects. The class attribute ``FAIL_IF_AFTER`` should be a tuple defining
        what, if any, modifications this should fail if performed after. For example, one should
        not adjust the smear density (a function of Cladding ID) before adjusting the Cladding ID.

    Some generic child classes are provided in this module, but it is expected that
    design-specific modifiers are built individually.
    """

    # Tuple of modifier classes that must NOT run before this one (enforced by the suite builder).
    FAIL_IF_AFTER = ()

    def __init__(self, independentVariable=None):
        """
        Constructor.

        Parameters
        ----------
        independentVariable : dict or None, optional
            Name/value pairs to associate with the independent variable being modified by this
            object. Will be analyzed and plotted against other modifiers with the same name.
        """
        if independentVariable is None:
            independentVariable = {}
        self.independentVariable = independentVariable

    def __call__(self, cs, bp):
        """Perform the desired modifications to input objects."""
        raise NotImplementedError


class SamplingInputModifier(InputModifier):
    """
    Object that modifies input definitions in some well-defined way. (This class is abstract.)

    Subclasses must implement a ``__call__`` method accepting a ``Settings``, ``Blueprints``, and
    ``SystemLayoutInput``.

    This is a modified version of the InputModifier abstract class that imposes structure for
    parameters in a design space that will be sampled by a quasi-random sampling algorithm. These
    algorithms require input modifiers to specify if the parameter is continuous or discrete and
    have the bounds specified.
    """

    def __init__(self, name: str, paramType: str, bounds: list, independentVariable=None):
        """Constructor for the Sampling input modifier.

        Parameters
        ----------
        name : str
            Name of input modifier.
        paramType : str
            specify if parameter is 'continuous' or 'discrete'
        bounds : list
            If continuous, provide floating points [a, b] specifying the inclusive bounds.
            If discrete, provide a list of potential values [a, b, c, ...]
        independentVariable : [type], optional
            Name/value pairs to associate with the independent variable being modified by this
            object. Will be analyzed and plotted against other modifiers with the same name, by
            default None
        """
        InputModifier.__init__(self, independentVariable=independentVariable)
        self.name = name
        self.paramType = paramType
        self.bounds = bounds

    def __call__(self, cs, blueprints):
        """Perform the desired modifications to input objects."""
        raise NotImplementedError


class FullCoreModifier(InputModifier):
    """
    Grow the SystemLayoutInput to from a symmetric core to a full core.

    Notes
    -----
    Besides the Core, other grids may also be of interest for expansion, like a grid that defines
    fuel management. However, the expansion of a fuel management schedule to full core is less
    trivial than just expanding the core itself. Thus, this modifier currently does not attempt to
    update fuel management grids, but an expanded implementation could do so in the future if
    needed. For now, users must expand fuel management grids to full core themself.
    """

    def __call__(self, cs, bp):
        # Only the "core" grid design is expanded (see the Notes in the class docstring).
        coreBp = bp.gridDesigns["core"]
        coreBp.expandToFull()

        return cs, bp


class SettingsModifier(InputModifier):
    """Adjust setting to specified value."""

    def __init__(self, settingName, value):
        InputModifier.__init__(self, independentVariable={settingName: value})
        self.settingName = settingName
        self.value = value

    def __call__(self, cs, bp):
        cs = cs.modified(newSettings={self.settingName: self.value})
        return cs, bp


class MultiSettingModifier(InputModifier):
    """
    Adjust multiple settings to specified values.

    Examples
    --------
    >>> inputModifiers.MultiSettingModifier({CONF_NEUTRONICS_TYPE: "both", CONF_COARSE_MESH_REBALANCE: -1})
    """

    def __init__(self, settingVals: dict):
        InputModifier.__init__(self, independentVariable=settingVals)
        self.settings = settingVals

    def __call__(self, cs, bp):
        newSettings = {}
        for name, val in self.settings.items():
            newSettings[name] = val

        cs = cs.modified(newSettings=newSettings)
        return cs, bp


class BluePrintBlockModifier(InputModifier):
    """Adjust blueprint block->component->dimension to specified value."""

    def __init__(self, block, component, dimension, value):
        InputModifier.__init__(self, independentVariable={dimension: value})
        self.block = block
        self.component = component
        self.dimension = dimension
        self.value = value

    def __call__(self, cs, bp):
        # parse block
        for blockDesign in bp.blockDesigns:
            if blockDesign.name == self.block:
                # parse component
                for componentDesign in blockDesign:
                    if componentDesign.name == self.component:
                        # set new value and stop searching; only the first match is modified
                        setattr(componentDesign, self.dimension, self.value)
                        return cs, bp

        # no matching block/component found; inputs returned unchanged
        return cs, bp


================================================
FILE: armi/cases/inputModifiers/neutronicsModifiers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modifies inputs related to neutronics controls.

Notes
-----
This may make more sense in the neutronics physics plugin.
"""

from armi.cases.inputModifiers import inputModifiers
from armi.physics.neutronics.settings import (
    CONF_EPS_EIG,
    CONF_EPS_FSAVG,
    CONF_EPS_FSPOINT,
)


class NeutronicConvergenceModifier(inputModifiers.InputModifier):
    """
    Adjust the neutronics convergence parameters ``CONF_EPS_EIG``, ``CONF_EPS_FSAVG``, and
    ``CONF_EPS_FSPOINT``.

    The supplied value is used for ``CONF_EPS_EIG``. ``CONF_EPS_FSAVG`` and ``CONF_EPS_FSPOINT``
    are set to 100 times the supplied value.

    This can be used to perform sensitivity studies on convergence criteria.
    """

    def __init__(self, value):
        inputModifiers.InputModifier.__init__(self, {self.__class__.__name__: value})
        self.value = value
        # validated here so a bad sweep value fails fast, before any case is built
        if value > 1e-2 or value <= 0.0:
            raise ValueError(
                f"Neutronic convergence modifier value must be greater than 0 and less than 1e-2 (got {value})"
            )

    def __call__(self, cs, bp):
        newSettings = {}
        newSettings[CONF_EPS_FSAVG] = self.value * 100
        newSettings[CONF_EPS_FSPOINT] = self.value * 100
        newSettings[CONF_EPS_EIG] = self.value
        cs = cs.modified(newSettings=newSettings)
        return cs, bp


class NeutronicMeshsSizeModifier(inputModifiers.InputModifier):
    """
    Adjust the neutronics mesh in all assemblies by a multiplication factor.

    This can be useful when switching between nodal and finite difference approximations, or when
    doing mesh convergence sensitivity studies.

    Attributes
    ----------
    multFactor : int
        Factor to multiply the number of axial mesh points per block by.
""" def __init__(self, value): inputModifiers.InputModifier.__init__(self, {self.__class__.__name__: value}) self.value = value def __call__(self, cs, bp): for bDesign in bp.blockDesigns: # bDesign construct requires lots of arguments, many of which have no impact. # The following can safely be defaulted to meaningless inputs: # axialIndex: a block can be reused at any axial index, modifications made # dependent on will not translate back to the input in a meaningful # fashion # axialMeshPoints: similar to above, this is specified by the assembly, and # a block can be within any section of an assembly. # height: similar to above. a block can have any height specified by an # assembly. if height-specific modifications are required, then a new # block definition should be created in the input # xsType: similar to above. a block can have any xsType specified through # the assembly definition assembly. if xsType-specific modifications are # required, then a new block definition should be created in the input # materialInput: this is the materialModifications from the assembly # definition. 
if material modifications are required on a block-specific # basis, they should be edited directly b = bDesign.construct( cs, bp, axialIndex=1, axialMeshPoints=1, height=1, xsType="A", materialInput={}, ) if not b.hasFlags(self._getBlockTypesToModify()): continue self._adjustBlock(b) for cDesign, c in zip(bDesign, b): for dimName in c.DIMENSION_NAMES: inpDim = getattr(cDesign, dimName) newDim = getattr(c.p, dimName) if isinstance(newDim, tuple): # map linked component dimension link = component._DimensionLink(newDim) newDim = str(link) if inpDim != newDim: setattr(cDesign, dimName, newDim) return cs, bp def _getBlockTypesToModify(self): """Hook method to determine blocks that should be modified.""" raise NotImplementedError def _adjustBlock(self, b): """Hook method for `__call__` template method.""" raise NotImplementedError class SmearDensityModifier(_PinTypeAssemblyModifier): """ Adjust the smeared density to the specified value. This is effectively how much of the space inside the cladding tube is occupied by fuel at fabrication. 
""" def _getBlockTypesToModify(self): """Hook method to determine blocks that should be modified.""" return flags.Flags.FUEL def _adjustBlock(self, b): """Hook method for `__call__` template method.""" pinTypeBlockConverters.adjustSmearDensity(b, self.value) class CladThicknessByODModifier(_PinTypeAssemblyModifier): """Adjust the cladding thickness by adjusting the inner diameter of all cladding components.""" FAIL_IF_AFTER = (SmearDensityModifier,) def _getBlockTypesToModify(self): """Hook method to determine blocks that should be modified.""" return "" def _adjustBlock(self, b): pinTypeBlockConverters.adjustCladThicknessByOD(b, self.value) class CladThicknessByIDModifier(_PinTypeAssemblyModifier): """Adjust the cladding thickness by adjusting the outer diameter of the cladding component.""" FAIL_IF_AFTER = (SmearDensityModifier,) def _getBlockTypesToModify(self): """Hook method to determine blocks that should be modified.""" return "" def _adjustBlock(self, b): pinTypeBlockConverters.adjustCladThicknessByID(b, self.value) ================================================ FILE: armi/cases/inputModifiers/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================ FILE: armi/cases/inputModifiers/tests/test_inputModifiers.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for input modifiers.""" import os import unittest from ruamel import yaml from armi import cases, settings from armi.cases import suiteBuilder from armi.cases.inputModifiers import ( inputModifiers, neutronicsModifiers, pinTypeInputModifiers, ) from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import ( CONF_FP_MODEL, ) from armi.physics.neutronics.settings import ( CONF_EPS_EIG, CONF_EPS_FSAVG, CONF_EPS_FSPOINT, ) from armi.reactor import blueprints from armi.reactor.tests import test_reactors from armi.utils import directoryChangers FLAGS_INPUT = """nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} MN: {burn: false, xs: true} FE: {burn: false, xs: true} SI: {burn: false, xs: true} C: {burn: false, xs: true} CR: {burn: false, xs: true} MO: {burn: false, xs: true} NI: {burn: false, xs: true} V: {burn: false, xs: true} W: {burn: false, xs: true}""" CLAD = """clad: &fuel_1_clad Tinput: 350.0 Thot: 350.0 shape: circle id: 1.0 od: 1.1 material: HT9""" CLAD_LINKED = """clad: &fuel_1_clad Tinput: 350.0 Thot: 350.0 shape: circle id: fuel.od od: 1.1 material: HT9""" BLOCKS_INPUT = """blocks: fuel 1: &fuel_1 fuel: &fuel_1_fuel Tinput: 350.0 Thot: 350.0 shape: circle id: 0.0 od: 0.5 material: UZr 
{clad} hex: &fuel_1_hex Tinput: 350.0 Thot: 350.0 shape: hexagon ip: 1.0 op: 10.0 material: HT9 fuel 2: *fuel_1 block 3: *fuel_1 # non-fuel blocks block 4: {{<<: *fuel_1}} # non-fuel blocks block 5: {{fuel: *fuel_1_fuel, clad: *fuel_1_clad, hex: *fuel_1_hex}} # non-fuel blocks""" BLOCKS_INPUT_1 = BLOCKS_INPUT.format(clad=CLAD) BLOCKS_INPUT_2 = BLOCKS_INPUT.format(clad=CLAD_LINKED) BLUEPRINT_INPUT = f""" {FLAGS_INPUT} {BLOCKS_INPUT_1} assemblies: {{}} """ BLUEPRINT_INPUT_LINKS = f""" {FLAGS_INPUT} {BLOCKS_INPUT_2} assemblies: {{}} """ CORE_INPUT = """ systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 grids: core: geom: hex symmetry: third core periodic grid contents: [0, 0]: A1 [1, 0]: A2 [1, 1]: A3 [2, -2]: A4 [2, -1]: A5 [2, 0]: A6 [2, 1]: A7 [2, 2]: A8 """ class TestsuiteBuilderIntegrations(unittest.TestCase): @classmethod def setUpClass(cls): bp = blueprints.Blueprints.load(BLUEPRINT_INPUT_LINKS + CORE_INPUT) cs = settings.Settings() bp._prepConstruction(cs) cls.baseCase = cases.Case(cs=cs, bp=bp) def test_smearDensityFail(self): builder = suiteBuilder.FullFactorialSuiteBuilder(self.baseCase) builder.addDegreeOfFreedom(pinTypeInputModifiers.SmearDensityModifier(v) for v in (0.5, 0.6)) builder.addDegreeOfFreedom(pinTypeInputModifiers.CladThicknessByIDModifier(v) for v in (0.05, 0.01)) self.assertEqual(4, len(builder)) with self.assertRaisesRegex(RuntimeError, "before .*SmearDensityModifier"): builder.buildSuite() def test_settingsModifier(self): builder = suiteBuilder.SeparateEffectsSuiteBuilder(self.baseCase) builder.addDegreeOfFreedom( inputModifiers.SettingsModifier(CONF_FP_MODEL, v) for v in ("noFissionProducts", "infinitelyDilute", "MO99") ) builder.addDegreeOfFreedom(inputModifiers.SettingsModifier("detailedAxialExpansion", v) for v in (True,)) builder.addDegreeOfFreedom( inputModifiers.SettingsModifier("buGroups", v) for v in ( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 100], [3, 5, 7, 9, 10, 20, 100], [3, 5, 10, 15, 20, 100], ) ) 
builder.addDegreeOfFreedom((inputModifiers.FullCoreModifier(),)) with directoryChangers.TemporaryDirectoryChanger(): suite = builder.buildSuite() for c in suite: c.writeInputs() self.assertTrue(os.path.exists("case-suite")) def test_bluePrintBlockModifier(self): """Test BluePrintBlockModifier with build suite naming function argument.""" case_nbr = 1 builder = suiteBuilder.FullFactorialSuiteBuilder(self.baseCase) builder.addDegreeOfFreedom( [inputModifiers.BluePrintBlockModifier("fuel 1", "clad", "od", float("{:.2f}".format(22 / 7)))] ) builder.addDegreeOfFreedom([inputModifiers.BluePrintBlockModifier("block 5", "clad", "od", 3.14159)]) def SuiteNaming(index, _case, _mods): uniquePart = "{:0>4}".format(index + case_nbr) return os.path.join( ".", "case-suite-testBPBM", uniquePart, self.baseCase.title + "-" + uniquePart, ) with directoryChangers.TemporaryDirectoryChanger(): suite = builder.buildSuite(namingFunc=SuiteNaming) suite.writeInputs() self.assertTrue(os.path.exists("case-suite-testBPBM")) yamlfile = open( f"case-suite-testBPBM/000{case_nbr}/armi-000{case_nbr}-blueprints.yaml", "r", ) bp_dict = yaml.YAML().load(yamlfile) yamlfile.close() self.assertEqual(bp_dict["blocks"]["fuel 1"]["clad"]["od"], 3.14) self.assertEqual(bp_dict["blocks"]["block 5"]["clad"]["od"], 3.14159) class TestSettingsModifiers(unittest.TestCase): def test_NeutronicConvergenceModifier(self): cs = settings.Settings() with self.assertRaises(ValueError): _ = neutronicsModifiers.NeutronicConvergenceModifier(0.0) with self.assertRaises(ValueError): _ = neutronicsModifiers.NeutronicConvergenceModifier(1e-2 + 1e-15) cs, _ = neutronicsModifiers.NeutronicConvergenceModifier(1e-2)(cs, None) self.assertAlmostEqual(cs[CONF_EPS_EIG], 1e-2) self.assertAlmostEqual(cs[CONF_EPS_FSAVG], 1.0) self.assertAlmostEqual(cs[CONF_EPS_FSPOINT], 1.0) class NeutronicsKernelOpts(inputModifiers.InputModifier): def __init__(self, neutronicsKernelOpts): inputModifiers.InputModifier.__init__(self) 
self.neutronicsKernelOpts = neutronicsKernelOpts def __call__(self, cs, bp): cs = cs.modified(self.neutronicsKernelOpts) return cs, bp class TestFullCoreModifier(unittest.TestCase): """Ensure full core conversion works.""" def test_fullCoreConversion(self): cs = settings.Settings(os.path.join(test_reactors.TEST_ROOT, "armiRun.yaml")) case = cases.Case(cs=cs) mod = inputModifiers.FullCoreModifier() self.assertEqual(case.bp.gridDesigns["core"].symmetry, "third periodic") case, case.bp = mod(case, case.bp) self.assertEqual(case.bp.gridDesigns["core"].symmetry, "full") def test_fullCoreConversionWithOrientation(self): """Tests modifying a reactor to full core that includes beginning of life orientations.""" cs = settings.Settings(os.path.join(test_reactors.TEST_ROOT, "armiRun.yaml")) case = cases.Case(cs=cs) mod = inputModifiers.FullCoreModifier() self.assertEqual(case.bp.gridDesigns["core"].symmetry, "third periodic") # Add beginning of life orientations case.bp.gridDesigns["core"].orientationBOL = {(2, 1): 30.0} # Modify to full core case, case.bp = mod(case, case.bp) # Check results self.assertEqual(case.bp.gridDesigns["core"].symmetry, "full") self.assertIn((2, 3), case.bp.gridDesigns["core"].orientationBOL) self.assertEqual(150.0, case.bp.gridDesigns["core"].orientationBOL[(2, 3)]) self.assertIn((2, 5), case.bp.gridDesigns["core"].orientationBOL) self.assertEqual(270.0, case.bp.gridDesigns["core"].orientationBOL[(2, 5)]) ================================================ FILE: armi/cases/inputModifiers/tests/test_pinTypeInputModifiers.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for input modifiers.""" import math import unittest from armi import settings from armi.cases.inputModifiers import pinTypeInputModifiers from armi.cases.inputModifiers.tests.test_inputModifiers import BLUEPRINT_INPUT from armi.reactor import blueprints class TestBlueprintModifiers(unittest.TestCase): def setUp(self): self.bp = blueprints.Blueprints.load(BLUEPRINT_INPUT) self.bp._prepConstruction(settings.Settings()) def test_AdjustSmearDensity(self): r""" Compute the smear density where clad.id is 1.0. .. math:: areaFuel = smearDensity * innerCladArea fuelOD^2 / 4 = 0.5 * cladID^2 / 4 fuelOD = \sqrt{0.5} Notes ----- The area of fuel is 0.5 * inner area of clad. 
""" bp = self.bp self.assertEqual(1.0, bp.blockDesigns["fuel 1"]["clad"].id) self.assertEqual(0.5, bp.blockDesigns["fuel 1"]["fuel"].od) self.assertEqual(0.5, bp.blockDesigns["fuel 2"]["fuel"].od) self.assertEqual(0.5, bp.blockDesigns["block 3"]["fuel"].od) self.assertEqual(0.5, bp.blockDesigns["block 4"]["fuel"].od) self.assertEqual(0.5, bp.blockDesigns["block 5"]["fuel"].od) pinTypeInputModifiers.SmearDensityModifier(0.5)(settings.Settings(), bp) self.assertEqual(math.sqrt(0.5), bp.blockDesigns["fuel 1"]["fuel"].od) self.assertEqual(math.sqrt(0.5), bp.blockDesigns["fuel 2"]["fuel"].od) self.assertEqual(math.sqrt(0.5), bp.blockDesigns["block 3"]["fuel"].od) self.assertEqual(math.sqrt(0.5), bp.blockDesigns["block 4"]["fuel"].od) self.assertEqual(0.5, bp.blockDesigns["block 5"]["fuel"].od) # unique instance def test_CladThickenessByODModifier(self): """ Adjust the clad thickness by outer diameter. .. math:: cladThickness = (clad.od - clad.id) / 2 clad.od = 2 * cladThicness - clad.id when ``clad.id = 1.0`` and ``cladThickness = 0.12``, .. math:: clad.od = 2 * 0.12 - 1.0 clad.od = 1.24 """ bp = self.bp self.assertEqual(1.1, bp.blockDesigns["fuel 1"]["clad"].od) self.assertEqual(1.1, bp.blockDesigns["fuel 2"]["clad"].od) self.assertEqual(1.1, bp.blockDesigns["block 3"]["clad"].od) self.assertEqual(1.1, bp.blockDesigns["block 4"]["clad"].od) self.assertEqual(1.1, bp.blockDesigns["block 5"]["clad"].od) pinTypeInputModifiers.CladThicknessByODModifier(0.12)(settings.Settings(), bp) self.assertEqual(1.24, bp.blockDesigns["fuel 1"]["clad"].od) self.assertEqual(1.24, bp.blockDesigns["fuel 2"]["clad"].od) self.assertEqual(1.24, bp.blockDesigns["block 3"]["clad"].od) self.assertEqual(1.24, bp.blockDesigns["block 4"]["clad"].od) self.assertEqual(1.24, bp.blockDesigns["block 5"]["clad"].od) # modifies all blocks def test_CladThickenessByIDModifier(self): """ Adjust the clad thickness by inner diameter. .. 
math:: cladThickness = (clad.od - clad.id) / 2 clad.id = cladod - 2 * cladThicness when ``clad.id = 1.1`` and ``cladThickness = 0.025``, .. math:: clad.od = 1.1 - 2 * 0.025 clad.od = 1.05 """ bp = self.bp self.assertEqual(1.0, bp.blockDesigns["fuel 1"]["clad"].id) self.assertEqual(1.0, bp.blockDesigns["fuel 2"]["clad"].id) self.assertEqual(1.0, bp.blockDesigns["block 3"]["clad"].id) self.assertEqual(1.0, bp.blockDesigns["block 4"]["clad"].id) self.assertEqual(1.0, bp.blockDesigns["block 5"]["clad"].id) pinTypeInputModifiers.CladThicknessByIDModifier(0.025)(settings.Settings(), bp) self.assertEqual(1.05, bp.blockDesigns["fuel 1"]["clad"].id) self.assertEqual(1.05, bp.blockDesigns["fuel 2"]["clad"].id) self.assertEqual(1.05, bp.blockDesigns["block 3"]["clad"].id) self.assertEqual(1.05, bp.blockDesigns["block 4"]["clad"].id) self.assertEqual(1.05, bp.blockDesigns["block 5"]["clad"].id) # modifies all blocks ================================================ FILE: armi/cases/suite.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" The ``CaseSuite`` object is responsible for running, and executing a set of user inputs. Many entry points redirect into ``CaseSuite`` methods, such as ``clone``, ``compare``, and ``submit``. Used in conjunction with the :py:class:`~armi.cases.case.Case` object, ``CaseSuite`` can be used to collect a series of cases and submit them to a cluster for execution. 
Furthermore, a ``CaseSuite`` can be used to gather executed cases for post-analysis. ``CaseSuite``\ s should allow ``Cases`` to be added from totally separate directories. This is useful for plugin-informed testing as well as other things. See Also -------- armi.cases.case : An individual item of a case suite. """ import os import traceback from typing import Optional, Sequence from armi import runLog, settings from armi.cases import case as armicase from armi.utils import directoryChangers, tabulate class CaseSuite: """ A CaseSuite is a collection of possibly related Case objects. .. impl:: CaseSuite allows for one case to start after another completes. :id: I_ARMI_CASE_SUITE :implements: R_ARMI_CASE_SUITE The CaseSuite object allows multiple, often related, :py:class:`~armi.cases.case.Case` objects to be run sequentially. A CaseSuite is intended to be both a pre-processing or a post-processing tool to facilitate case generation and analysis. Under most circumstances one may wish to subclass a CaseSuite to meet the needs of a specific calculation. A CaseSuite is a collection that is keyed off Case titles. """ def __init__(self, cs): self._cases = list() self.cs = cs def add(self, case): """ Add a Case object to the CaseSuite. 
        Case objects within a CaseSuite must have unique ``title`` attributes; a ValueError will be
        raised on an attempt to add a duplicate.
        """
        # Duplicate detection uses Case equality; keep the first match for the error message.
        existing = next((c for c in self if case == c), None)
        if existing is not None:
            raise ValueError(
                "CaseSuite already contains case with title `{}`\nFirst case: {}\nSecond case: {}".format(
                    case.title, existing, case
                )
            )
        self._cases.append(case)
        # Back-reference so the case knows which suite owns it.
        case._caseSuite = self

    def remove(self, case):
        """Remove a case from a suite."""
        self._cases.remove(case)
        # Clear the ownership back-reference set by add().
        case._caseSuite = None

    def __iter__(self):
        # Iterate the cases in insertion order.
        return iter(self._cases)

    def __len__(self):
        return len(self._cases)

    def discover(
        self,
        rootDir=None,
        patterns=None,
        ignorePatterns=None,
        recursive=True,
        skipInspection=False,
    ):
        """
        Finds case objects by searching for a pattern of file paths, and adds them to the suite.

        This searches for Settings input files and loads them to create Case objects.

        Parameters
        ----------
        rootDir : str, optional
            root directory to search for settings files; defaults to the current working directory
        patterns : list of str, optional
            file pattern to use to filter file names; defaults to ``["*.yaml"]``
        ignorePatterns : list of str, optional
            file patterns to exclude matching file names
        recursive : bool, optional
            if True, recursively search for settings files
        skipInspection : bool, optional
            if True, skip running the check inputs
        """
        csFiles = settings.recursivelyLoadSettingsFiles(
            rootDir or os.path.abspath(os.getcwd()),
            patterns or ["*.yaml"],
            recursive=recursive,
            ignorePatterns=ignorePatterns,
            handleInvalids=False,
        )
        for cs in csFiles:
            case = armicase.Case(cs=cs, caseSuite=self)
            if not skipInspection:
                case.checkInputs()
            # add() enforces title uniqueness across the discovered cases.
            self.add(case)

    def echoConfiguration(self):
        """
        Print information about this suite to the run log.

        Notes
        -----
        Some of these printouts won't make sense for all users, and may make sense to be delegated
        to the plugins/app.
""" for setting in self.cs.environmentSettings: runLog.important("{}: {}".format(self.cs.getSetting(setting).label, self.cs[setting])) runLog.important("Test inputs will be taken from test case results when they have finished") runLog.important( tabulate.tabulate( [ ( c.title, "T" if c.enabled else "F", ",".join(d.title for d in c.dependencies), ) for c in self ], headers=["Title", "Enabled", "Dependencies"], tableFmt="armi", ) ) def clone(self, oldRoot=None, writeStyle="short"): """ Clone a CaseSuite to a new place. Creates a clone for each case within a CaseSuite. If ``oldRoot`` is not specified, then each case clone is made in a directory with the title of the case. If ``oldRoot`` is specified, then a relative path from ``oldRoot`` will be used to determine a new relative path to the current directory ``oldRoot``. Parameters ---------- oldRoot : str (optional) root directory of original case suite used to help filter when a suite contains one or more cases with the same case title. writeStyle : str (optional) Writing style for which settings get written back to the settings files (short, medium, or full). Notes ----- By design, a CaseSuite has no location dependence; this allows any set of cases to compose a CaseSuite. The thought is that the post-analysis capabilities without restricting a root directory could be beneficial. For example, this allows one to perform analysis on cases analyzed by Person A and Person B, even if the analyses were performed in completely different locations. As a consequence, when you want to clone, we need to infer a "root" of the original cases to attempt to mirror whatever existing directory structure there may have been. 
""" clone = CaseSuite(self.cs.duplicate()) modifiedSettings = {ss.name: ss.value for ss in self.cs.values() if ss.offDefault} for case in self: if oldRoot: newDir = os.path.dirname(os.path.relpath(case.cs.path, oldRoot)) else: newDir = case.title with directoryChangers.ForcedCreationDirectoryChanger(newDir, dumpOnException=False): clone.add(case.clone(modifiedSettings=modifiedSettings, writeStyle=writeStyle)) return clone def run(self): """ Run each case, one after the other. Warning ------- Suite running may not work yet if the cases have interdependencies. We typically run on a HPC but are still working on a platform independent way of handling HPCs. """ for ci, case in enumerate(self): runLog.important(f"Running case {ci + 1}/{len(self)}: {case}") with directoryChangers.DirectoryChanger(case.directory): try: case.run() except Exception: # allow all errors and continue to next run runLog.error(f"{case} failed during execution.") traceback.print_exc() def compare( self, that, exclusion: Optional[Sequence[str]] = None, weights=None, tolerance=0.01, timestepCompare=None, ) -> int: """ Compare one case suite with another. Returns ------- The number of problem differences encountered. """ runLog.important("Comparing case suites.") nIssues = 0 refTitles = set(c.title for c in self) cmpTitles = set(c.title for c in that) suiteHasMissingFiles = False tableResults = {} for caseTitle in refTitles.union(cmpTitles): refCase = next((c for c in self if c.title == caseTitle), None) cmpCase = next((c for c in that if c.title == caseTitle), None) caseStatus = [] for case in (refCase, cmpCase): status = "Found" if case is None or not os.path.exists(case.dbName): status = "Missing" caseStatus.append(status) refFile, userFile = caseStatus if any(stat != "Found" for stat in caseStatus): # Case was not run, or failed to produce a database. # In either case, this is an issue. 
# It could possibly be a new test, but there is no way to tell this # versus a reference file being missing so when a new test is made # it will be an issue. After the first push with the new tests the files # will be copied over and future tests will be fine. caseIssues = 1 suiteHasMissingFiles = False else: caseIssues = refCase.compare( cmpCase, exclusion=exclusion, tolerance=tolerance, timestepCompare=timestepCompare, ) nIssues += caseIssues tableResults[caseTitle] = (userFile, refFile, caseIssues) self.writeTable(tableResults) if suiteHasMissingFiles: runLog.warning((UNMISSABLE_FAILURE.format(", ".join(t for t in refTitles - cmpTitles)))) return nIssues def writeInputs(self, writeStyle="short"): """ Write inputs for all cases in the suite. writeStyle : str (optional) Writing style for which settings get written back to the settings files (short, medium, or full). See Also -------- clone Similar to this but doesn't let you write out new geometry or blueprints objects. """ for case in self: case.writeInputs(sourceDir=self.cs.inputDirectory, writeStyle=writeStyle) @staticmethod def writeTable(tableResults): """Write a table summarizing the test differences.""" fmt = "psql" print( ( tabulate.tabulate( [["Integration test directory: {}".format(os.getcwd())]], ["SUMMARIZED INTEGRATION TEST DIFFERENCES:"], tableFmt=fmt, ) ) ) header = ["Test", "User File", "Reference File", "# Problem Diff Lines"] totalDiffs = 0 data = [] for testName in sorted(tableResults.keys()): userFile, refFile, caseIssues = tableResults[testName] data.append((testName, userFile, refFile, caseIssues)) totalDiffs += caseIssues print(tabulate.tabulate(data, header, tableFmt=fmt)) print(tabulate.tabulate([["Total number of differences: {}".format(totalDiffs)]], tableFmt=fmt)) UNMISSABLE_FAILURE = ''' !! THESE TESTS HAVE UNEXPECTED ABSENT RESULTS !! 
uuuuuuu uu$$$$$$$$$$$uu uu$$$$$$$$$$$$$$$$$uu u$$$$$$$$$$$$$$$$$$$$$u u$$$$$$$$$$$$$$$$$$$$$$$u u$$$$$$$$$$$$$$$$$$$$$$$$$u u$$$$$$$$$$$$$$$$$$$$$$$$$u u$$$$$$" "$$$" "$$$$$$u "$$$$" u$u $$$$" $$$u u$u u$$$ $$$u u$$$u u$$$ "$$$$uu$$$ $$$uu$$$$" "$$$$$$$" "$$$$$$$" u$$$$$$$u$$$$$$$u u$"$"$"$"$"$"$u uuu $$u$ $ $ $ $u$$ uuu u$$$$ $$$$$u$u$u$$$ u$$$$ $$$$$uu "$$$$$$$$$" uu$$$$$$ u$$$$$$$$$$$uu """"" uuuu$$$$$$$$$$ $$$$"""$$$$$$$$$$uuu uu$$$$$$$$$"""$$$" """ ""$$$$$$$$$$$uu ""$""" uuuu ""$$$$$$$$$$uuu u$$$uuu$$$$$$$$$uu ""$$$$$$$$$$$uuu$$$ $$$$$$$$$$"""" ""$$$$$$$$$$$" "$$$$$" ""$$$$"" $$$" $$$$" Comparison suite is missing the following case titles: {} ''' ================================================ FILE: armi/cases/suiteBuilder.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Contains classes that build case suites from perturbing inputs. The general use case is to create a :py:class:`~SuiteBuilder` with a base :py:class:`~armi.cases.case.Case`, use :py:meth:`~SuiteBuilder.addDegreeOfFreedom` to adjust inputs according to the supplied arguments, and finally use ``.buildSuite`` to generate inputs. The case suite can then be discovered, submitted, and analyzed using the standard ``CaseSuite`` objects. This module contains a variety of ``InputModifier`` objects as well, which are examples of how you can modify inputs for parameter sweeping. 
Power-users will generally make their own ``Modifier``\ s that are design-specific. """ import copy import os import random from typing import List from armi.cases import suite def getInputModifiers(cls): return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in getInputModifiers(s)] class SuiteBuilder: """ Class for constructing a CaseSuite from combinations of modifications on base inputs. .. impl:: A generic tool to modify user inputs on multiple cases. :id: I_ARMI_CASE_MOD0 :implements: R_ARMI_CASE_MOD This class provides the capability to create a :py:class:`~armi.cases.suite.CaseSuite` based on programmatic perturbations/modifications to case settings. It works by being constructed with a base or nominal :py:class:`~armi.cases.case.Case` object. Children classes then append the ``self.modifierSets`` member. Each entry in ``self.modifierSets`` is a :py:class:`~armi.cases.inputModifiers.inputModifiers.InputModifier` representing a case to add to the suite by specifying modifications to the settings of the base case. :py:meth:`SuiteBuilder.buildSuite` is then invoked, returning an instance of the :py:class:`~armi.cases.suite.CaseSuite` containing all the cases with modified settings. Attributes ---------- baseCase : armi.cases.case.Case A Case object to perturb modifierSets : list(tuple(InputModifier)) Contains a list of tuples of ``InputModifier`` instances. A single case is constructed by running a series (the tuple) of InputModifiers on the case. Notes ----- This is public such that someone could pop an item out of the list if it is known to not work, or be unnecessary. """ def __init__(self, baseCase): self.baseCase = baseCase self.modifierSets = [] from armi.cases.inputModifiers import inputModifiers # use an instance variable instead of global lookup. this could allow someone to add their own # modifiers, and also prevents it memory usage / discovery from simply loading the module. 
        # Map modifier class name -> class for every known InputModifier subclass (recursive).
        self._modifierLookup = {k.__name__: k for k in getInputModifiers(inputModifiers.InputModifier)}

    def __len__(self):
        # One entry in modifierSets corresponds to one case in the eventual suite.
        return len(self.modifierSets)

    def __repr__(self):
        return "<SuiteBuilder len:{} baseCase:{}>".format(len(self), self.baseCase)

    def addDegreeOfFreedom(self, inputModifiers):
        """
        Add a degree of freedom to the SweepBuilder.

        The exact application of this is dependent on a subclass.

        Parameters
        ----------
        inputModifiers : list(callable(Settings, Blueprints, SystemLayoutInput))
            A list of callable objects with the signature
            ``(Settings, Blueprints, SystemLayoutInput)``. When these objects are called they
            should perturb the settings or blueprints by some amount determined by their
            construction.
        """
        raise NotImplementedError

    def addModifierSet(self, inputModifierSet: List):
        """
        Add a single input modifier set to the suite.

        Used to add modifications that are not necessarily another degree of freedom.
        """
        self.modifierSets.append(inputModifierSet)

    def buildSuite(self, namingFunc=None):
        """
        Builds a ``CaseSuite`` based on the modifierSets contained in the SuiteBuilder.

        For each sequence of modifications, this creates a new ``Case`` from the ``baseCase``, and
        runs the sequence of modifications on the new ``Case``'s inputs. The modified ``Case`` is
        then added to a ``CaseSuite``. The resulting ``CaseSuite`` is returned.

        Parameters
        ----------
        namingFunc : callable(index, case, tuple(InputModifier)), (optional)
            Function used to name each case. It is supplied with the index (int), the case (Case),
            and a tuple of InputModifiers used to edit the case. This should be enough information
            for someone to derive a meaningful name.

            The function should return a string specifying the path of the ``Settings``, this
            allows the user to specify the directories where each case will be run.

            If not supplied the path will be ``./case-suite/<0000>/<title>-<0000>``, where
            ``<0000>`` is the four-digit case index, and ``<title>`` is the ``baseCase.title``.

        Raises
        ------
        RuntimeError
            When order of modifications is deemed to be invalid.

        Returns
        -------
        caseSuite : CaseSuite
            Derived from the ``baseCase`` and modifications.
        """
        caseSuite = suite.CaseSuite(self.baseCase.cs)

        if namingFunc is None:
            # Default layout: ./case-suite/<0000>/<title>-<0000>
            def namingFunc(index, _case, _mods):
                uniquePart = "{:0>4}".format(index)
                return os.path.join(
                    ".",
                    "case-suite",
                    uniquePart,
                    self.baseCase.title + "-" + uniquePart,
                )

        for index, modList in enumerate(self.modifierSets):
            # Deep-copy so each case's settings/blueprints are independent of the base case.
            case = copy.deepcopy(self.baseCase)
            previousMods = []
            case.bp._prepConstruction(case.cs)
            for mod in modList:
                # it may seem late to figure this out, but since we are doing it now, someone could
                # filter these conditions out before the buildSuite. optionally, we could have a
                # flag for "skipInvalidModficationCombos=False"
                shouldHaveBeenBefore = [fail for fail in getattr(mod, "FAIL_IF_AFTER", ()) if fail in previousMods]

                if any(shouldHaveBeenBefore):
                    raise RuntimeError(
                        "{} must occur before {}".format(mod, ",".join(repr(m) for m in shouldHaveBeenBefore))
                    )

                # Record the class (not the instance) so FAIL_IF_AFTER checks match by type.
                previousMods.append(type(mod))
                case.cs, case.bp = mod(case.cs, case.bp)
                case.independentVariables.update(mod.independentVariable)

            case.cs.path = namingFunc(index, case, modList)
            caseSuite.add(case)
        return caseSuite


class FullFactorialSuiteBuilder(SuiteBuilder):
    """Builds a suite that has every combination of each modifier."""

    def __init__(self, baseCase):
        SuiteBuilder.__init__(self, baseCase)
        # initialize with empty tuple to trick cross-product to always work
        self.modifierSets.append(())

    def addDegreeOfFreedom(self, inputModifiers):
        """
        Add a degree of freedom to the SuiteBuilder.

        Creates the Cartesian product of the ``inputModifiers`` supplied and those already applied.
For example:: class SettingModifier(InputModifier): def __init__(self, settingName, value): self.settingName = settingName self.value = value def __call__(self, cs, bp): cs = cs.modified(newSettings={self.settingName: self.value}) return cs, bp builder = FullFactorialSuiteBuilder(someCase) builder.addDegreeOfFreedom(SettingModifier("settingName1", value) for value in (1, 2)) builder.addDegreeOfFreedom(SettingModifier("settingName2", value) for value in (3, 4, 5)) would result in 6 cases: +-------+------------------+------------------+ | Index | ``settingName1`` | ``settingName2`` | +=======+==================+==================+ | 0 | 1 | 3 | +-------+------------------+------------------+ | 1 | 2 | 3 | +-------+------------------+------------------+ | 2 | 1 | 4 | +-------+------------------+------------------+ | 3 | 2 | 4 | +-------+------------------+------------------+ | 4 | 1 | 5 | +-------+------------------+------------------+ | 5 | 2 | 5 | +-------+------------------+------------------+ See Also -------- SuiteBuilder.addDegreeOfFreedom """ # Cartesian product. Append a new modifier to the end of a chain of previously defined. new = [ existingModSet + (newModifier,) for newModifier in inputModifiers for existingModSet in self.modifierSets ] del self.modifierSets[:] self.modifierSets.extend(new) class FullFactorialSuiteBuilderNoisy(FullFactorialSuiteBuilder): """ Adds a bit of noise to each independent variable to avoid duplicates. This can be useful in some statistical postprocessors. .. warning:: Use with caution. This is part of ongoing research. 
""" def __init__(self, baseCase, noiseFraction): FullFactorialSuiteBuilder.__init__(self, baseCase) self.noiseFraction = noiseFraction def addDegreeOfFreedom(self, inputModifiers): new = [] for newMod in inputModifiers: for existingModSet in self.modifierSets: existingModSetCopy = copy.deepcopy(existingModSet) for mod in existingModSetCopy: self._perturb(mod) newModCopy = copy.deepcopy(newMod) self._perturb(newModCopy) new.append(existingModSetCopy + (newModCopy,)) del self.modifierSets[:] self.modifierSets.extend(new) def _perturb(self, mod): indeps = {} for key, val in mod.independentVariable.items(): # perturb values by 10% randomly newVal = val + val * self.noiseFraction * (2 * random.random() - 1) indeps[key] = newVal mod.independentVariable = indeps class SeparateEffectsSuiteBuilder(SuiteBuilder): """Varies each degree of freedom in isolation.""" def addDegreeOfFreedom(self, inputModifiers): """ Add a degree of freedom to the SuiteBuilder. Adds a case for each modifier supplied. For example:: class SettingModifier(InputModifier): def __init__(self, settingName, value): self.settingName = settingName self.value = value def __call__(self, cs, bp): cs = cs.modified(newSettings={self.settignName: self.value}) return cs, bp builder = SeparateEffectsSuiteBuilder(someCase) builder.addDegreeOfFreedom(SettingModifier("settingName1", value) for value in (1, 2)) builder.addDegreeOfFreedom(SettingModifier("settingName2", value) for value in (3, 4, 5)) would result in 5 cases: +-------+------------------+------------------+ | Index | ``settingName1`` | ``settingName2`` | +=======+==================+==================+ | 0 | 1 | default | +-------+------------------+------------------+ | 1 | 2 | default | +-------+------------------+------------------+ | 2 | default | 3 | +-------+------------------+------------------+ | 3 | default | 4 | +-------+------------------+------------------+ | 4 | default | 5 | +-------+------------------+------------------+ See Also -------- 
SuiteBuilder.addDegreeOfFreedom """ self.modifierSets.extend((modifier,) for modifier in inputModifiers) ================================================ FILE: armi/cases/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/cases/tests/test_cases.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit tests for Case and CaseSuite objects.""" import copy import cProfile import logging import os import platform import unittest import h5py from armi import cases, context, getApp, interfaces, plugins, runLog, settings from armi.bookkeeping.db.databaseInterface import DatabaseInterface from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC from armi.reactor import blueprints from armi.reactor.tests import test_reactors from armi.testing import TESTING_ROOT from armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs from armi.utils import directoryChangers BLUEPRINT_INPUT = """ nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} MN: {burn: false, xs: true} FE: {burn: false, xs: true} SI: {burn: false, xs: true} C: {burn: false, xs: true} CR: {burn: false, xs: true} MO: {burn: false, xs: true} NI: {burn: false, xs: true} blocks: fuel 1: &fuel_1 fuel: &fuel_1_fuel Tinput: 350.0 Thot: 350.0 shape: circle id: 0.0 od: 0.5 material: UZr clad: &fuel_1_clad Tinput: 350.0 Thot: 350.0 shape: circle id: 1.0 od: 1.1 material: SS316 fuel 2: *fuel_1 block 3: *fuel_1 # non-fuel blocks block 4: {<<: *fuel_1} # non-fuel blocks block 5: {fuel: *fuel_1_fuel, clad: *fuel_1_clad} # non-fuel blocks assemblies: {} systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 grids: core: geom: hex symmetry: third core periodic grid contents: [0, 0]: A1 [1, 0]: A2 [1, 1]: A3 """ class TestArmiCase(unittest.TestCase): """Class to tests armi.cases.Case methods.""" def test_independentVariables(self): """Ensure that independentVariables added to a case move with it.""" bp = blueprints.Blueprints.load(BLUEPRINT_INPUT) cs = settings.Settings(ARMI_RUN_PATH) cs = cs.modified(newSettings={"verbosity": "important"}) baseCase = cases.Case(cs, bp=bp) with directoryChangers.TemporaryDirectoryChanger(): vals = {"cladThickness": 1, "control strat": "good", "enrich": 0.9} case = baseCase.clone() case._independentVariables = vals case.writeInputs() newCs = 
settings.Settings(fName=case.title + ".yaml") newCase = cases.Case(newCs) for name, val in vals.items(): self.assertEqual(newCase.independentVariables[name], val) def test_setUpTaskDependence(self): case = cases.Case(settings.Settings()) case.enabled = False case.setUpTaskDependence() case.enabled = True case.setUpTaskDependence() self.assertTrue(case.enabled) self.assertEqual(len(case._tasks), 0) self.assertEqual(len(case.dependencies), 0) def test_getCoverageRcFile(self): case = cases.Case(settings.Settings()) covRcDir = os.path.abspath(context.PROJECT_ROOT) # Don't actually copy the file, just check the file paths match covRcFile = case._getCoverageRcFile(userCovFile="", makeCopy=False) self.assertEqual(covRcFile, os.path.join(covRcDir, "pyproject.toml")) userFile = "UserCovRc" covRcFile = case._getCoverageRcFile(userCovFile=userFile, makeCopy=False) self.assertEqual(covRcFile, os.path.abspath(userFile)) def test_startCoverage(self): with directoryChangers.TemporaryDirectoryChanger(): cs = settings.Settings(ARMI_RUN_PATH) # Test the null case cs = cs.modified(newSettings={"coverage": False}) case = cases.Case(cs) cov = case._startCoverage() self.assertIsNone(cov) # NOTE: We can't test coverage=True, because it breaks coverage on CI def test_endCoverage(self): with directoryChangers.TemporaryDirectoryChanger(): cs = settings.Settings(ARMI_RUN_PATH) cs = cs.modified(newSettings={"coverage": False}) case = cases.Case(cs) # NOTE: We can't test coverage=True, because it breaks coverage on CI outFile = "coverage_results.cov" prof = case._startCoverage() self.assertFalse(os.path.exists(outFile)) case._endCoverage(userCovFile="", cov=prof) self.assertFalse(os.path.exists(outFile)) @unittest.skipUnless(context.MPI_RANK == 0, "test only on root node") def test_startProfiling(self): with directoryChangers.TemporaryDirectoryChanger(): cs = settings.Settings(ARMI_RUN_PATH) # Test the null case cs = cs.modified(newSettings={"profile": False}) case = cases.Case(cs) prof = 
case._startProfiling() self.assertIsNone(prof) # Test when we start coverage correctly cs = cs.modified(newSettings={"profile": True}) case = cases.Case(cs) prof = case._startProfiling() self.assertTrue(isinstance(prof, cProfile.Profile)) @unittest.skipUnless(context.MPI_RANK == 0, "test only on root node") def test_endProfiling(self): with directoryChangers.TemporaryDirectoryChanger(): cs = settings.Settings(ARMI_RUN_PATH) cs = cs.modified(newSettings={"profile": True}) case = cases.Case(cs) # run the profiler prof = case._startProfiling() case._endProfiling(prof) self.assertTrue(isinstance(prof, cProfile.Profile)) def test_run(self): """ Test running a case. .. test:: There is a generic mechanism to allow simulation runs. :id: T_ARMI_CASE :tests: R_ARMI_CASE .. test:: Test case settings object is created, settings can be edited, and case can run. :id: T_ARMI_SETTING :tests: R_ARMI_SETTING """ with directoryChangers.TemporaryDirectoryChanger(): cs = settings.Settings(ARMI_RUN_PATH) newSettings = { "branchVerbosity": "important", "coverage": False, "nCycles": 2, "profile": False, "trace": False, "verbosity": "important", } cs = cs.modified(newSettings=newSettings) case = cases.Case(cs) with mockRunLogs.BufferLog() as mock: # start with a clean slate self.assertEqual("", mock.getStdout()) runLog.LOG.startLog("test_run") runLog.LOG.setVerbosity(logging.INFO) case.run() stdOut = mock.getStdout() self.assertIn("Triggering BOL Event", stdOut) self.assertIn("xsGroups", stdOut) self.assertIn("Completed EveryNode - timestep: cycle 0, node 0, year 0.00 Event", stdOut) def test_clone(self): testTitle = "CLONE_TEST" # test the short write style with directoryChangers.TemporaryDirectoryChanger(): cs = settings.Settings(ARMI_RUN_PATH) case = cases.Case(cs) shortCase = case.clone( additionalFiles=["ISOAA"], title=testTitle, modifiedSettings={"verbosity": "important"}, ) # Check additional files made it self.assertTrue(os.path.exists("ISOAA")) # Check title change made it 
clonedYaml = testTitle + ".yaml" self.assertTrue(os.path.exists(clonedYaml)) self.assertTrue(shortCase.title, testTitle) # Check on some expected settings # Availability factor is in the original settings file but since it is a # default value, gets removed for the write-out txt = open(clonedYaml, "r").read() self.assertNotIn("availabilityFactor", txt) self.assertIn("verbosity: important", txt) # test the medium write style with directoryChangers.TemporaryDirectoryChanger(): cs = settings.Settings(ARMI_RUN_PATH) case = cases.Case(cs) case.clone(writeStyle="medium") clonedYaml = "armiRun.yaml" self.assertTrue(os.path.exists(clonedYaml)) # Availability factor is in the original settings file and it is a default # value. While "short" (default writing style) removes, "medium" should not txt = open(clonedYaml, "r").read() self.assertIn("availabilityFactor", txt) class TestCaseSuiteDependencies(unittest.TestCase): """CaseSuite tests.""" def setUp(self): self.suite = cases.CaseSuite(settings.Settings()) bp = blueprints.Blueprints.load(BLUEPRINT_INPUT) self.c1 = cases.Case(cs=settings.Settings(), bp=bp) self.c1.cs.path = "c1.yaml" self.suite.add(self.c1) self.c2 = cases.Case(cs=settings.Settings(), bp=bp) self.c2.cs.path = "c2.yaml" self.suite.add(self.c2) def test_clone(self): """If you pass an invalid path, the clone can't happen, but it won't do any damage either.""" with self.assertRaises(RuntimeError): _clone = self.suite.clone("test_clone") def test_checkInputs(self): """ Test the checkInputs() method on a couple of cases. .. test:: Check the ARMI inputs for consistency and validity. 
:id: T_ARMI_CASE_CHECK :tests: R_ARMI_CASE_CHECK """ self.c1.checkInputs() self.c2.checkInputs() def test_dependenciesWithObscurePaths(self): """Test directory dependence for strangely-written file paths (escape characters).""" checks = [ ("c1.yaml", "c2.yaml", "c1.h5", True), (r"\\case\1\c1.yaml", r"\\case\2\c2.yaml", "c1.h5", False), (r"\\case\1\c1.yaml", r"\\case\2\c2.yaml", r"..\1\c1.h5", False), ] if platform.system() == "Windows": # windows-specific case insensitivity checks.extend( [ ("c1.yaml", "c2.yaml", "C1.H5", True), ( r"\\cas\es\1\c1.yaml", r"\\cas\es\2\c2.yaml", r"..\..\1\c1.h5", True, ), ( r"c1.yaml", r"c2.yaml", r".\c1.h5", True, ), ( r"\\cas\es\1\c1.yaml", r"\\cas\es\2\c2.yaml", r"../..\1\c1.h5", True, ), ( r"\\cas\es\1\c1.yaml", r"\\cas\es\2\c2.yaml", r"../../1\c1.h5", True, ), ( r"\\cas\es\1\c1.yaml", r"\\cas\es\2\c2.yaml", r"..\../1\c1.h5", True, ), ( r"\\cas\es\1\c1.yaml", r"\\cas\es\2\c2.yaml", r"\\cas\es\1\c1.h5", True, ), # below False because getcwd() != \\case\es\2 ( r"..\..\1\c1.yaml", r"\\cas\es\2\c2.yaml", r"\\cas\es\1\c1.h5", False, ), ( r"\\cas\es\1\c1.yaml", r"\\cas\es\2\c2.yaml", r"..\..\2\c1.h5", False, ), ] ) for p1, p2, dbPath, isIn in checks: self.c1.cs.path = p1 self.c2.cs.path = p2 newSettings = {} newSettings["loadStyle"] = "fromDB" newSettings["reloadDBName"] = dbPath self.c2.cs = self.c2.cs.modified(newSettings=newSettings) # note that case.dependencies is a property and # will actually reflect these changes self.assertEqual( isIn, self.c1 in self.c2.dependencies, "where p1: {} p2: {} dbPath: {}".format(p1, p2, dbPath), ) def test_dependencyFromDBName(self): # no effect -> need to specify loadStyle, 'fromDB' newSettings = {"reloadDBName": "c1.h5"} self.c2.cs = self.c2.cs.modified(newSettings=newSettings) self.assertEqual(0, len(self.c2.dependencies)) newSettings = {"loadStyle": "fromDB"} self.c2.cs = self.c2.cs.modified(newSettings=newSettings) self.assertIn(self.c1, self.c2.dependencies) # the .h5 extension is optional 
newSettings = {"reloadDBName": "c1"} self.c2.cs = self.c2.cs.modified(newSettings=newSettings) self.assertIn(self.c1, self.c2.dependencies) def test_dependencyFromExplictRepeatShuffles(self): self.assertEqual(0, len(self.c2.dependencies)) newSettings = {"explicitRepeatShuffles": "c1-SHUFFLES.txt"} self.c2.cs = self.c2.cs.modified(newSettings=newSettings) self.assertIn(self.c1, self.c2.dependencies) def test_explicitDependency(self): """ Test dependencies for case suites. .. test:: Dependence allows for one case to start after the completion of another. :id: T_ARMI_CASE_SUITE :tests: R_ARMI_CASE_SUITE """ self.c1.addExplicitDependency(self.c2) self.assertIn(self.c2, self.c1.dependencies) def test_titleSetterGetter(self): self.assertEqual(self.c1.title, "c1") self.c1.title = "new_bob" self.assertEqual(self.c1.title, "new_bob") class TestCaseSuiteComparison(unittest.TestCase): """CaseSuite.compare() tests.""" def setUp(self): self.td = directoryChangers.TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_compareNoDiffs(self): """As a baseline, this test should always reveal zero diffs.""" # build two super-simple H5 files for testing o, r = test_reactors.loadTestReactor( TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}, inputFileName="smallestTestReactor/armiRunSmallest.yaml", ) suites = [] for _i in range(2): # Build the cases suite = cases.CaseSuite(settings.Settings()) bp = blueprints.Blueprints.load(BLUEPRINT_INPUT) c1 = cases.Case(cs=settings.Settings(), bp=bp) c1.cs.path = "c1.yaml" suite.add(c1) c2 = cases.Case(cs=settings.Settings(), bp=bp) c2.cs.path = "c2.yaml" suite.add(c2) suites.append(suite) # create two DBs, identical but for file names tmpDir = os.getcwd() dbs = [] for i in range(1, 3): # create the tests DB dbi = DatabaseInterface(r, o.cs) dbi.initDB(fName=f"{tmpDir}/c{i}.h5") db = dbi.database # validate the file exists, and force it to be readable again b = 
h5py.File(db._fullPath, "r") self.assertEqual(list(b.keys()), ["inputs"]) self.assertEqual(sorted(b["inputs"].keys()), ["blueprints", "settings"]) b.close() # append to lists dbs.append(db) # do a comparison that should have no diffs diff = c1.compare(c2) self.assertEqual(diff, 0) diff = suites[0].compare(suites[1]) self.assertEqual(diff, 0) diff = suites[1].compare(suites[0]) self.assertEqual(diff, 0) class TestExtraInputWriting(unittest.TestCase): """Make sure extra inputs from interfaces are written.""" def test_writeInput(self): fName = os.path.join(TEST_ROOT, "armiRun.yaml") cs = settings.Settings(fName) baseCase = cases.Case(cs) with directoryChangers.TemporaryDirectoryChanger(): case = baseCase.clone() case.writeInputs() self.assertTrue(os.path.exists(cs[CONF_SHUFFLE_LOGIC])) # Availability factor is in the original settings file but since it is a default value, # gets removed for the write-out txt = open("armiRun.yaml", "r").read() self.assertNotIn("availabilityFactor", txt) self.assertIn("armiRun-blueprints.yaml", txt) with directoryChangers.TemporaryDirectoryChanger(): case = baseCase.clone(writeStyle="medium") case.writeInputs(writeStyle="medium") # Availability factor is in the original settings file and it is a default value. While # "short" (default writing style) removes, "medium" should not txt = open("armiRun.yaml", "r").read() self.assertIn("availabilityFactor", txt) class MultiFilesInterfaces(interfaces.Interface): """ A little test interface that adds a setting that we need to test copyInterfaceInputs with multiple files. 
""" name = "MultiFilesInterfaces" @staticmethod def specifyInputs(cs): settingName = "multipleFilesSetting" return {settingName: cs[settingName]} class TestPluginWithDuplicateSetting(plugins.ArmiPlugin): @staticmethod @plugins.HOOKIMPL def defineSettings(): """Define a duplicate setting.""" return [ settings.setting.Setting( "power", default=123, label="power", description="duplicate power", ) ] class TestPluginCopyInterfaceFiles(plugins.ArmiPlugin): @staticmethod @plugins.HOOKIMPL def defineSettings(): """Define settings for the plugin.""" return [ settings.setting.Setting( "multipleFilesSetting", default=[], label="multiple files", description="testing stuff", ) ] @staticmethod @plugins.HOOKIMPL def exposeInterfaces(cs): """A plugin is mostly just a vehicle to add Interfaces to an Application.""" return [ interfaces.InterfaceInfo( interfaces.STACK_ORDER.PREPROCESSING, MultiFilesInterfaces, {"enabled": True}, ) ] class TestCopyInterfaceInputs(unittest.TestCase): """Ensure file path is found and updated properly.""" def setUp(self): """ Manipulate the standard App. We can't just configure our own, since the pytest environment bleeds between tests. 
""" self._backupApp = copy.deepcopy(getApp()) def tearDown(self): """Restore the App to its original state.""" import armi armi._app = self._backupApp context.APP_NAME = "armi" def test_copyInputsHelper(self): """Test the helper function for copyInterfaceInputs.""" testSetting = CONF_SHUFFLE_LOGIC cs = settings.Settings(ARMI_RUN_PATH) shuffleFile = cs[testSetting] # test it passes sourceFullPath = os.path.join(TEST_ROOT, shuffleFile) # ensure we are not in TEST_ROOT with directoryChangers.TemporaryDirectoryChanger() as newDir: destFilePath = cases.case._copyInputsHelper( testSetting, sourcePath=sourceFullPath, destPath=newDir.destination, origFile=shuffleFile, ) newFilePath = os.path.join(newDir.destination, shuffleFile) self.assertTrue(os.path.exists(newFilePath)) self.assertEqual(destFilePath, os.path.basename(newFilePath)) # test with bad file path, should return original file ensure we are not in TEST_ROOT with directoryChangers.TemporaryDirectoryChanger() as newDir: destFilePath = cases.case._copyInputsHelper( testSetting, sourcePath=sourceFullPath, destPath="fakeDest", origFile=shuffleFile, ) self.assertFalse(os.path.exists(destFilePath)) self.assertEqual(destFilePath, shuffleFile) def test_copyInterfaceInputsSingleFile(self): testSetting = CONF_SHUFFLE_LOGIC cs = settings.Settings(ARMI_RUN_PATH) shuffleFile = cs[testSetting] # ensure we are not in TEST_ROOT with directoryChangers.TemporaryDirectoryChanger() as newDir: newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination) newFilePath = os.path.join(newDir.destination, shuffleFile) self.assertTrue(os.path.exists(newFilePath)) self.assertEqual(newSettings[testSetting], os.path.basename(newFilePath)) def test_copyInterfaceInputsNonFilePath(self): testSetting = CONF_SHUFFLE_LOGIC cs = settings.Settings(ARMI_RUN_PATH) fakeShuffle = "fakeFile.py" cs = cs.modified(newSettings={testSetting: fakeShuffle}) # ensure we are not in TEST_ROOT with directoryChangers.TemporaryDirectoryChanger() as 
newDir: newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination) self.assertFalse(os.path.exists(newSettings[testSetting])) self.assertEqual(newSettings[testSetting], fakeShuffle) def test_copyInterfaceInputs_emptyFilePath(self): testSetting = CONF_SHUFFLE_LOGIC cs = settings.Settings(ARMI_RUN_PATH) fakeShuffle = "" cs = cs.modified(newSettings={testSetting: fakeShuffle}) # ensure we are not in TEST_ROOT with directoryChangers.TemporaryDirectoryChanger() as newDir: newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination) with self.assertRaises(KeyError): # shouldn't process this setting as anything to worry about, so it won't be added to the dict _shuffleLogic = newSettings[testSetting] def test_failOnDuplicateSetting(self): """That that if a plugin attempts to add a duplicate setting, it raises an error.""" # register the new Plugin app = getApp() app.pluginManager.register(TestPluginWithDuplicateSetting) with self.assertRaises(ValueError): _ = settings.Settings(ARMI_RUN_PATH) def test_copyInterfaceInputsMultipleFiles(self): # register the new Plugin app = getApp() app.pluginManager.register(TestPluginCopyInterfaceFiles) pluginPath = "armi.cases.tests.test_cases.TestPluginCopyInterfaceFiles" settingFiles = [str(os.path.join(TESTING_ROOT, "resources", "COMPXS.ascii")), "ISOAA"] testName = "test_copyInterfaceInputs_multipleFiles" testSetting = "multipleFilesSetting" cs = settings.Settings(ARMI_RUN_PATH) cs = cs.modified( caseTitle=testName, newSettings={testName: [pluginPath]}, ) cs = cs.modified(newSettings={testSetting: settingFiles}) # ensure we are not in TEST_ROOT with directoryChangers.TemporaryDirectoryChanger() as newDir: newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination) newFilePaths = [os.path.join(newDir.destination, f) for f in settingFiles] for newFilePath in newFilePaths: self.assertTrue(os.path.exists(newFilePath)) self.assertEqual([str(s) for s in newSettings[testSetting]], 
[str(s) for s in settingFiles]) def test_copyInterfaceInputsOneFile(self): # register the new Plugin app = getApp() app.pluginManager.register(TestPluginCopyInterfaceFiles) pluginPath = "armi.cases.tests.test_cases.TestPluginCopyInterfaceFiles" settingFiles = [str(os.path.join(TESTING_ROOT, "resources", "COMPXS.ascii"))] testName = "test_copyInterfaceInputsOneFile" testSetting = "multipleFilesSetting" cs = settings.Settings(ARMI_RUN_PATH) cs = cs.modified( caseTitle=testName, newSettings={testName: [pluginPath]}, ) cs = cs.modified(newSettings={testSetting: settingFiles}) # ensure we are not in TEST_ROOT with directoryChangers.TemporaryDirectoryChanger() as newDir: newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination) newFilePaths = [os.path.join(newDir.destination, f) for f in settingFiles] for newFilePath in newFilePaths: self.assertTrue(os.path.exists(newFilePath)) self.assertEqual([str(s) for s in newSettings[testSetting]], [str(s) for s in settingFiles]) def test_copyInterfaceInputsWildcardFile(self): testSetting = CONF_SHUFFLE_LOGIC cs = settings.Settings(ARMI_RUN_PATH) # Use something that isn't the shuffle logic file in the case settings wcFile = "ISO*" cs = cs.modified(newSettings={testSetting: wcFile}) # ensure we are not in TEST_ROOT with directoryChangers.TemporaryDirectoryChanger() as newDir: newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination) newFilePath = [os.path.join(newDir.destination, "ISOAA")] self.assertTrue(os.path.exists(newFilePath[0])) self.assertEqual(newSettings[testSetting], [os.path.basename(newFilePath[0])]) # Check on a file that doesn't exist (so globFilePaths len is 0) wcFile = "fakeFile*" cs = cs.modified(newSettings={testSetting: wcFile}) with directoryChangers.TemporaryDirectoryChanger() as newDir: newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination) self.assertFalse(os.path.exists(newSettings[testSetting][0])) 
            self.assertEqual(newSettings[testSetting], [wcFile])

    def test_copyInterfaceInputsRelPath(self):
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        shuffleFile = cs[testSetting]
        relFile = "../tests/" + shuffleFile
        cs = cs.modified(newSettings={testSetting: relFile})

        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            newFilePath = os.path.join(newDir.destination, shuffleFile)
            self.assertTrue(os.path.exists(newFilePath))
            self.assertEqual(newSettings[testSetting], os.path.basename(newFilePath))

    def test_copyInterfaceInputsAbsPath(self):
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        shuffleFile = cs[testSetting]
        absFile = os.path.dirname(os.path.abspath(ARMI_RUN_PATH))
        absFile = str(os.path.join(absFile, os.path.basename(shuffleFile)))
        cs = cs.modified(newSettings={testSetting: absFile})

        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            # file exists
            self.assertTrue(os.path.exists(newSettings[testSetting]))
            # but not copied to this dir
            self.assertFalse(os.path.exists(os.path.basename(newSettings[testSetting])))
            self.assertEqual(str(newSettings[testSetting]), absFile)


================================================
FILE: armi/cases/tests/test_suiteBuilder.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the SuiteBuilder."""

import os
import unittest

from armi import cases, settings
from armi.cases.inputModifiers.inputModifiers import InputModifier
from armi.cases.suiteBuilder import FullFactorialSuiteBuilder, SeparateEffectsSuiteBuilder

THIS_DIR = os.path.dirname(os.path.abspath(__file__))
AFCI_PATH = os.path.join(THIS_DIR, "..", "..", "testing", "reactors", "anl-afci-177", "anl-afci-177.yaml")


class SettingModifier(InputModifier):
    # simple modifier that overwrites one setting with a fixed value
    def __init__(self, settingName, value):
        self.settingName = settingName
        self.value = value

    def __call__(self, cs, bp):
        cs = cs.modified(newSettings={self.settingName: self.value})
        return cs, bp


class TestFullFactorialSuiteBuilder(unittest.TestCase):
    """Class to test FullFactorialSuiteBuilder."""

    def test_buildSuite(self):
        """Initialize a full factorial suite of cases.

        .. test:: A generic mechanism to allow users to modify user inputs in cases.
            :id: T_ARMI_CASE_MOD1
            :tests: R_ARMI_CASE_MOD
        """
        cs = settings.Settings(AFCI_PATH)
        case = cases.Case(cs)
        builder = FullFactorialSuiteBuilder(case)

        builder.addDegreeOfFreedom(SettingModifier("settingName1", value) for value in (1, 2))
        builder.addDegreeOfFreedom(SettingModifier("settingName2", value) for value in (3, 4, 5))

        # 2 x 3 factorial -> 6 modifier sets, first degree of freedom varies fastest
        self.assertEqual(builder.modifierSets[0][0].value, 1)
        self.assertEqual(builder.modifierSets[0][1].value, 3)

        self.assertEqual(builder.modifierSets[1][0].value, 2)
        self.assertEqual(builder.modifierSets[1][1].value, 3)

        self.assertEqual(builder.modifierSets[2][0].value, 1)
        self.assertEqual(builder.modifierSets[2][1].value, 4)

        self.assertEqual(builder.modifierSets[3][0].value, 2)
        self.assertEqual(builder.modifierSets[3][1].value, 4)

        self.assertEqual(builder.modifierSets[4][0].value, 1)
        self.assertEqual(builder.modifierSets[4][1].value, 5)

        self.assertEqual(builder.modifierSets[5][0].value, 2)
        self.assertEqual(builder.modifierSets[5][1].value, 5)

        self.assertEqual(len(builder.modifierSets), 6)


class TestSeparateEffectsBuilder(unittest.TestCase):
    """Class to test separate effects builder."""

    def test_buildSuite(self):
        """Initialize a full factorial suite of cases.

        .. test:: A generic mechanism to allow users to modify user inputs in cases.
            :id: T_ARMI_CASE_MOD2
            :tests: R_ARMI_CASE_MOD
        """
        cs = settings.Settings(AFCI_PATH)
        case = cases.Case(cs)
        builder = SeparateEffectsSuiteBuilder(case)

        builder.addDegreeOfFreedom(SettingModifier("settingName1", value) for value in (1, 2))
        builder.addDegreeOfFreedom(SettingModifier("settingName2", value) for value in (3, 4, 5))

        # one modifier per set: 2 + 3 -> 5 single-modifier sets
        self.assertEqual(builder.modifierSets[0][0].value, 1)
        self.assertEqual(builder.modifierSets[0][0].settingName, "settingName1")

        self.assertEqual(builder.modifierSets[1][0].value, 2)
        self.assertEqual(builder.modifierSets[1][0].settingName, "settingName1")

        self.assertEqual(builder.modifierSets[2][0].value, 3)
        self.assertEqual(builder.modifierSets[2][0].settingName, "settingName2")

        self.assertEqual(builder.modifierSets[3][0].value, 4)
        self.assertEqual(builder.modifierSets[3][0].settingName, "settingName2")

        self.assertEqual(builder.modifierSets[4][0].value, 5)
        self.assertEqual(builder.modifierSets[4][0].settingName, "settingName2")

        self.assertEqual(len(builder.modifierSets), 5)


================================================
FILE: armi/cli/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package provides various operations users can ask ARMI to do with their inputs.

An Entry Point might run a simulation, migrate inputs, build a suite of related inputs and submit
them in a parameter sweep, validate inputs, open the GUI, run a test suite, or other similar
things.

There are built-in entry points, and additional ones may be specified by custom plugins. The full
:doc:`docs for entry points are here </developer/entrypoints>`.

See Also
--------
armi.cases : Individual collections of tasks that may run one or more entry points. These allow
    one entry point to create a sequence of events that may call one or more additional entry
    points. For example, the ``submitSuite`` entry point builds a case suite with many related
    cases that will all call the ``run`` entry point from a HPC cluster.

armi.operators : Operations that ARMI will perform on a reactor model. These may be created by
    ``Case`` objects created by certain entry points (e.g. ``run``).

armi : Fundamental entry point that calls this package.
"""

# importing each module causes any EntryPoints defined in the module that
# are decorated with @armi.command to be added to the collection of registered
# classes
import argparse
import re
import textwrap
from typing import Optional

from armi import context, meta, plugins, runLog


class EntryPointsPlugin(plugins.ArmiPlugin):
    @staticmethod
    @plugins.HOOKIMPL
    def defineEntryPoints():
        from armi.cli import (
            checkInputs,
            # testing
            cleanTemps,
            clone,
            compareCases,
            gridGui,
            migrateInputs,
            modify,
            reportsEntryPoint,
            run,
            runSuite,
        )

        entryPoints = []
        entryPoints.append(checkInputs.CheckInputEntryPoint)
        entryPoints.append(checkInputs.ExpandBlueprints)
        entryPoints.append(clone.CloneArmiRunCommandBatch)
        entryPoints.append(clone.CloneArmiRunCommandInteractive)
        entryPoints.append(clone.CloneSuiteCommand)
        entryPoints.append(compareCases.CompareCases)
        entryPoints.append(compareCases.CompareSuites)
        entryPoints.append(migrateInputs.MigrateInputs)
        entryPoints.append(modify.ModifyCaseSettingsCommand)
        entryPoints.append(run.RunEntryPoint)
        entryPoints.append(runSuite.RunSuiteCommand)
        entryPoints.append(gridGui.GridGuiEntryPoint)

        # testing
        entryPoints.append(cleanTemps.CleanTemps)
        entryPoints.append(reportsEntryPoint.ReportsEntryPoint)

        return entryPoints
class ArmiParser(argparse.ArgumentParser):
    """Subclass of default ArgumentParser to better handle application splash text."""

    def print_help(self, file=None):
        # show the app splash banner before the generated help text
        splash()
        argparse.ArgumentParser.print_help(self, file)


class ArmiCLI:
    """
    ARMI CLI -- The main entry point into ARMI. There are various commands available. To get
    help for the individual commands, run again with `<command> --help`. Typically, the CLI
    implements functions that already exist within ARMI.

    .. impl:: The basic ARMI CLI, for running a simulation.
        :id: I_ARMI_CLI_CS
        :implements: R_ARMI_CLI_CS

        Provides a basic command-line interface (CLI) for running an ARMI simulation. Available
        commands can be listed with ``-l``. Information on individual commands can be obtained
        by running with ``<command> --help``.
    """

    def __init__(self):
        from armi import getPluginManager

        self._entryPoints = dict()
        for pluginEntryPoints in getPluginManager().hook.defineEntryPoints():
            for entryPoint in pluginEntryPoints:
                # two plugins may not register the same command name
                if entryPoint.name in self._entryPoints:
                    raise KeyError(
                        "Duplicate entry points defined for `{}`: {} and {}".format(
                            entryPoint.name,
                            self._entryPoints[entryPoint.name],
                            entryPoint,
                        )
                    )
                self._entryPoints[entryPoint.name] = entryPoint

        parser = ArmiParser(
            prog=context.APP_NAME,
            description=self.__doc__.split(".. impl")[0],
            usage="%(prog)s [-h] [-l | command [args]]",
        )

        group = parser.add_mutually_exclusive_group()
        group.add_argument("-v", "--version", action="store_true", help="display the version")
        group.add_argument("-l", "--list-commands", action="store_true", help="list commands")
        group.add_argument("command", nargs="?", default="help", help=argparse.SUPPRESS)

        parser.add_argument("args", nargs=argparse.REMAINDER, help=argparse.SUPPRESS)

        self.parser = parser

    @staticmethod
    def showVersion():
        """Print the App name and version on the command line."""
        from armi import getApp

        prog = context.APP_NAME
        app = getApp()
        if app is None or prog == "armi":
            print("{0} {1}".format(prog, meta.__version__))
        else:
            print("{0} {1}".format(prog, app.version))

    def listCommands(self):
        """List commands with a short description."""
        splash()
        indent = 22
        initial_indent = " "
        subsequent_indent = initial_indent + " " * indent
        wrapper = textwrap.TextWrapper(initial_indent=initial_indent, subsequent_indent=subsequent_indent, width=79)
        sub = re.compile(r"\s+").sub

        # given a string, condense white space into a single space
        condense = lambda s: sub(" ", s.strip())
        commands = self._entryPoints.values()
        formatter = "{name:<{width}}{desc}".format
        print("\ncommands:")
        for cmd in sorted(commands, key=lambda cmd: cmd.name):
            """Each command can optionally define a class attribute `description` as
            documentation. If description is not defined (default=None since it should inherit
            from EntryPoint), then the docstring is used. If the docstring is also None, then
            fall back to an empty string."""
            desc = condense(cmd.description or cmd.__doc__ or "")
            print(wrapper.fill(formatter(width=indent, name=cmd.name, desc=desc)))

    def run(self) -> Optional[int]:
        args = self.parser.parse_args()

        if args.list_commands:
            self.listCommands()
            return 0
        elif args.version:
            ArmiCLI.showVersion()
            return 0
        elif args.command == "help":
            self.parser.print_help()
            return 0

        return self.executeCommand(args.command, args.args)

    def executeCommand(self, command, args) -> Optional[int]:
        """Execute `command` with arguments `args`, return optional exit code."""
        command = command.lower()
        if command not in self._entryPoints:
            print('Unrecognized command "{}". Valid commands are listed below.'.format(command))
            self.listCommands()
            return 1

        commandClass = self._entryPoints[command]

        cmd = commandClass()

        if cmd.splash:
            splash()

        # parse the arguments... command can have their own
        cmd.parse(args)

        if cmd.args.batch:
            context.Mode.setMode(context.Mode.BATCH)
        elif cmd.mode is not None:
            context.Mode.setMode(cmd.mode)

        # do whatever there is to be done!
        return cmd.invoke()


def splash():
    """Emit a the active App's splash text to the runLog for the primary node."""
    from armi import getApp

    app = getApp()
    assert app is not None
    if context.MPI_RANK == 0:
        runLog.raw(app.splashText)


================================================
FILE: armi/cli/checkInputs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """Entry point into ARMI to check inputs of a case or a whole folder of cases.""" import pathlib import sys from armi import runLog from armi.cli.entryPoint import EntryPoint from armi.utils.textProcessors import resolveMarkupInclusions class ExpandBlueprints(EntryPoint): """ Perform expansion of !include directives in a blueprint file. This is useful for testing inputs that make heavy use of !include directives. """ name = "expand-bp" splash = False def addOptions(self): self.parser.add_argument("blueprints", type=str, help="Path to root blueprints file") def invoke(self): p = pathlib.Path(self.args.blueprints) if not p.exists(): runLog.error("Blueprints file `{}` does not exist".format(str(p))) return 1 stream = resolveMarkupInclusions(p) sys.stdout.write(stream.read()) return None class CheckInputEntryPoint(EntryPoint): """ Check ARMI inputs for errors, inconsistencies, and the ability to initialize a reactor. Also has functionality to generate a summary report of the input design. This can be run on multiple cases and creates a table detailing the results of the input check. 
""" name = "check-input" def addOptions(self): self.parser.add_argument( "--recursive", "-r", action="store_true", default=False, help="Recursively check directory structure for valid settings files", ) self.parser.add_argument( "--skip-checks", "-C", action="store_true", default=False, help="Skip checking inputs (might be useful if you only want to generate a report).", ) self.parser.add_argument( "patterns", type=str, nargs="*", default=["*.yaml"], help="File names or patterns", ) def invoke(self): from armi import cases from armi.utils import tabulate suite = cases.CaseSuite(self.cs) suite.discover(patterns=self.args.patterns, recursive=self.args.recursive) table = [] # tuples (case, hasIssues, hasErrors) for case in suite: hasIssues = "UNKNOWN" if not self.args.skip_checks: hasIssues = "PASSED" if case.checkInputs() else "HAS ISSUES" canStart = "UNKNOWN" table.append((case.cs.path, case.title, canStart, hasIssues)) runLog.important( tabulate.tabulate( table, headers=["case", "can start", "input is self consistent"], tableFmt="armi", ) ) if any(t[3] == "HAS ISSUES" for t in table): runLog.error("The case is not self consistent") if any(t[2] == "FAILED" for t in table): runLog.error("The case can not start") ================================================ FILE: armi/cli/cleanTemps.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from armi import context
from armi.cli.entryPoint import EntryPoint


class CleanTemps(EntryPoint):
    """
    Delete all temp directories created by any ARMI run.

    Useful for occasionally cleaning temporary dirs from crashed runs.

    .. warning:: This will break any ongoing runs.
    """

    name = "clean-temps"

    def invoke(self):
        # delegate to the context module, which owns ARMI's fast-path temp dirs
        context.cleanFastPathAfterSimulation()



================================================
FILE: armi/cli/clone.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from armi.cli.entryPoint import EntryPoint


class CloneArmiRunCommandBatch(EntryPoint):
    """
    Clone existing ARMI settings input, and associated files, to the current directory and modify
    it according to the supplied settings (on the command line).
    """

    name = "clone-batch"
    settingsArgument = "required"

    def addOptions(self):
        self.parser.add_argument(
            "--additional-files",
            nargs="*",
            default=[],
            help="Additional files from the source directory to copy into the target directory",
        )
        self.parser.add_argument(
            "--settingsWriteStyle",
            type=str,
            default="short",
            help="Writing style for which settings get written back to the settings files.",
            choices=["short", "medium", "full"],
        )
        # somehow running `armi clone-batch -h` on the command line requires this to
        # not be first?
        # Expose every known setting as a (hidden) CLI override option.
        for settingName in self.cs.keys():
            self.createOptionFromSetting(settingName, suppressHelp=True)

    def invoke(self):
        # get the case title.
        from armi import cases

        # Clone the case (settings + associated inputs) into the current directory.
        inputCase = cases.Case(cs=self.cs)
        inputCase.clone(
            additionalFiles=self.args.additional_files,
            writeStyle=self.args.settingsWriteStyle,
        )


class CloneArmiRunCommandInteractive(CloneArmiRunCommandBatch):
    """
    Interactively clone existing ARMI settings input, and associated files, to the current
    directory and modify it according to the supplied settings (on the command line).
    """

    # Same behavior as clone-batch; only the command name differs.
    name = "clone"
    settingsArgument = "required"


class CloneSuiteCommand(EntryPoint):
    """Clone existing ARMI cases as a new suite."""

    name = "clone-suite"

    def addOptions(self):
        # expose environment-related settings (e.g. paths) as CLI options
        for settingName in self.cs.environmentSettings:
            self.createOptionFromSetting(settingName)
        self.parser.add_argument(
            "--directory",
            "-d",
            type=str,
            default=os.getcwd(),
            help="Root directory to search for cases",
        )
        self.parser.add_argument(
            "patterns",
            nargs="*",
            type=str,
            default=["*.yaml"],
            help="Pattern to use while searching for ARMI settings files.",
        )
        self.parser.add_argument(
            "--ignore",
            "-i",
            nargs="+",
            type=str,
            default=[],
            help="Pattern to search for inputs to ignore.",
        )
        self.parser.add_argument(
            "--list",
            "-l",
            action="store_true",
            default=False,
            help="Just list the settings files found, don't actually submit them.",
        )
        self.parser.add_argument(
            "--settingsWriteStyle",
            type=str,
            default="short",
            help="Writing style for which settings get written back to the settings files.",
            choices=["short", "medium", "full"],
        )

    def invoke(self):
        from armi import cases

        # discover cases under --directory matching the given patterns...
        suite = cases.CaseSuite(self.cs)
        suite.discover(
            patterns=self.args.patterns,
            rootDir=self.args.directory,
            ignorePatterns=self.args.ignore,
        )
        # ...and clone the whole suite into the current working directory
        suite.clone(oldRoot=self.args.directory, writeStyle=self.args.settingsWriteStyle)



================================================
FILE: armi/cli/compareCases.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys

from armi import runLog
from armi.cli.entryPoint import EntryPoint

# Params that are well-known to vary from run to run. In the future we should probably
# derive this from a parameter category so that it is extensible
DEFAULT_EXCLUSIONS = (
    "^.*/minutesSinceStart$",
    "^.*/maxProcessMemoryInMB$",
    "^.*/minProcessMemoryInMB$",
)

# Parameters that under normal circumstances would be the same, but may not be
# faithfully represented by an old database format.
CONVERTED_EXCLUSIONS = DEFAULT_EXCLUSIONS + (
    "^.*/serialNum$",
    "^.*/temperatureInC$",
    "^.*/volume$",
    "^.*/layout/temperatures$",
)


class CompareCases(EntryPoint):
    """Compare the databases from two ARMI cases."""

    name = "compare"

    def _addComparisonOptions(self):
        # options shared between `compare` and `compare-suites`
        parser = self.parser
        parser.add_argument(
            "--tolerance",
            default=0.01,
            action="store",
            type=float,
            help=(
                "If a test database entry differs by more than this percent "
                "from the reference database, then it will be marked "
                "as a difference between the two databases."
            ),
        )
        parser.add_argument(
            "--weights",
            nargs="*",
            action="store",
            help="Period separated key/value pairs for database table weights",
        )
        parser.add_argument(
            "--exclude",
            default=CONVERTED_EXCLUSIONS,
            action="store",
            nargs="+",
            help=("Patterns for parameters to ignore in comparisons"),
        )
        parser.add_argument(
            "--timestepCompare",
            default=None,
            action="store",
            nargs="+",
            help=(
                "List of timesteps to compare. Note that any timestep not listed will "
                "not be compared. Format the cycle and node separated by a period. E.g. "
                "0.0 0.1 1.2 3.3 will compare c0n0, c0n1, c1n2, c3n3 and skip all others"
            ),
        )

    def addOptions(self):
        self._addComparisonOptions()
        parser = self.parser
        parser.add_argument(
            "refDB",
            type=str,
            help="The database to be used as the reference, baseline case.",
        )
        parser.add_argument(
            "cmpDB",
            type=str,
            help="The database to be used as the comparison, evaluated case.",
        )
        parser.add_argument("--output", "-o", type=str, default="", help="Output file name.")

    def parse(self, args):
        EntryPoint.parse(self, args)
        if self.args.timestepCompare:
            # "cycle.node" strings -> (cycle, node) int tuples
            self.args.timestepCompare = list(tuple(map(int, step.split("."))) for step in self.args.timestepCompare)
        if self.args.weights:
            # "table.weight" strings -> {table: weight} mapping
            self.args.weights = dict(w.split(".") for w in self.args.weights)

    def invoke(self):
        from armi.bookkeeping.db import compareDatabases

        diffs = compareDatabases(
            self.args.refDB,
            self.args.cmpDB,
            tolerance=self.args.tolerance,
            exclusions=self.args.exclude,
            timestepCompare=self.args.timestepCompare,
        )
        # exit code is the number of diffs (0 == matched within tolerance)
        return diffs.nDiffs()


class CompareSuites(CompareCases):
    """Do a case-by-case comparison between two CaseSuites."""

    name = "compare-suites"

    def addOptions(self):
        self._addComparisonOptions()
        self.parser.add_argument(
            "reference",
            type=str,
            help="The root directory of the reference, or baseline, suite.",
        )
        self.parser.add_argument(
            "comparison",
            type=str,
            help="The root directory of the comparison, or evaluated, suite.",
        )
        self.parser.add_argument(
            "--patterns",
            "-p",
            nargs="*",
            type=str,
            default=["*.yaml"],
            help="Pattern to use while searching for ARMI settings files.",
        )
        self.parser.add_argument(
            "--additional_comparisons",
            nargs="*",
            type=str,
            default=[],
            help="Pattern tests that were not run but should appear in table.",
        )
        self.parser.add_argument(
            "--ignore",
            "-i",
            nargs="*",
            type=str,
            default=[],
            help="Pattern to search for inputs to ignore.",
        )
        self.parser.add_argument(
            "--skip-inspection",
            "-I",
            action="store_true",
            default=False,
            help="Skip inspection. By default, setting files are checked for integrity and consistency. These "
            "checks result in needing to manually resolve a number of differences. Using this option will "
            "suppress the inspection step.",
        )

    def invoke(self):
        from armi import cases

        # Both suite roots must exist before we attempt discovery.
        if not os.path.exists(self.args.reference):
            runLog.error("Could not find reference directory {}".format(self.args.reference))
            sys.exit(1)

        if not os.path.exists(self.args.comparison):
            runLog.error("Could not find comparison directory {}".format(self.args.comparison))
            sys.exit(1))

        refSuite = cases.CaseSuite(self.cs)

        # contains all tests that user had access to
        allTests = []
        for pat in self.args.patterns + self.args.additional_comparisons:
            allTests.append(pat)
        refSuite.discover(
            rootDir=self.args.reference,
            patterns=allTests,
            ignorePatterns=self.args.ignore,
            skipInspection=self.args.skip_inspection,
        )

        cmpSuite = cases.CaseSuite(self.cs)
        cmpSuite.discover(
            rootDir=self.args.comparison,
            patterns=self.args.patterns,
            ignorePatterns=self.args.ignore,
            skipInspection=self.args.skip_inspection,
        )

        # NOTE(review): keyword is `exclusion=` here but `exclusions=` is passed to
        # compareDatabases in CompareCases.invoke -- confirm CaseSuite.compare's
        # expected keyword name.
        nIssues = refSuite.compare(
            cmpSuite,
            weights=self.args.weights,
            tolerance=self.args.tolerance,
            exclusion=self.args.exclude,
            timestepCompare=self.args.timestepCompare,
        )

        # non-zero exit status when any case pair differed
        if nIssues > 0:
            sys.exit(1)



================================================
FILE: armi/cli/database.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point into ARMI for manipulating output databases.""" import os import pathlib from armi import context, runLog from armi.cli.entryPoint import EntryPoint from armi.utils.textProcessors import resolveMarkupInclusions class ExtractInputs(EntryPoint): """ Recover input files from a database file. This can come in handy when input files need to be hand-migrated to facilitate loading or migration of the database file itself, or when attempting to re-run a slightly-modified version of a case. """ name = "extract-inputs" mode = context.Mode.BATCH def addOptions(self): self.parser.add_argument("h5db", help="Path to input database", type=str) self.parser.add_argument( "--output-base", "-o", help="Base name for extracted inputs. If not provided, base name is implied from the database name.", type=str, default=None, ) def parse_args(self, args): EntryPoint.parse_args(self, args) if self.args.output_base is None: self.args.output_base = os.path.splitext(self.args.h5db)[0] def invoke(self): from armi.bookkeeping.db.database import Database db = Database(self.args.h5db, "r") with db: settings, bp = db.readInputsFromDB() settingsPath = self.args.output_base + "_settings.yaml" bpPath = self.args.output_base + "_blueprints.yaml" bail = False for path in [settingsPath, bpPath]: if os.path.exists(settingsPath): runLog.error("`{}` already exists. Aborting.".format(path)) bail = True if bail: return for path, data, inp in [ (settingsPath, settings, "settings"), (bpPath, bp, "blueprints"), ]: if path is None: continue runLog.info("Writing {} to `{}`".format(inp, path)) if isinstance(data, bytes): data = data.decode() with open(path, "w") as f: f.write(data) class InjectInputs(EntryPoint): """ Insert new inputs into a database file, overwriting any existing inputs. This is useful for performing hand migrations of inputs to facilitate database migrations. 
""" name = "inject-inputs" mode = context.Mode.BATCH def addOptions(self): self.parser.add_argument("h5db", help="Path to affected database", type=str) self.parser.add_argument("--blueprints", help="Path to blueprints file", type=str, default=None) self.parser.add_argument("--settings", help="Path to settings file", type=str, default=None) def invoke(self): from armi.bookkeeping.db.database import Database if all(li is None for li in [self.args.blueprints, self.args.settings]): runLog.error("No settings, blueprints, or geometry files specified; nothing to do.") return bp = None settings = None if self.args.blueprints is not None: bp = resolveMarkupInclusions(pathlib.Path(self.args.blueprints)).read() if self.args.settings is not None: settings = resolveMarkupInclusions(pathlib.Path(self.args.settings)).read() db = Database(self.args.h5db, "a") with db: # Not calling writeInputsToDb, since it makes too many assumptions about where the # inputs are coming from, and which ones we want to write. Instead, we assume that we # know where to store them, and do it ourselves. for data, key in [ (bp, "blueprints"), (settings, "settings"), ]: if data is not None: dSetName = "inputs/" + key if dSetName in db.h5db: del db.h5db[dSetName] db.h5db[dSetName] = data ================================================ FILE: armi/cli/entryPoint.py ================================================ """ EntryPoint base classes. See :doc:`/developer/entrypoints`. """ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import Optional, Union

from armi import context, runLog, settings


class _EntryPointEnforcer(type):
    """
    Simple metaclass used for the EntryPoint abstract base class to enforce class attributes.
    """

    def __new__(mcs, name, bases, attrs):
        # every concrete entry point must advertise its CLI command name
        if "name" not in attrs:
            raise AttributeError("Subclasses of EntryPoint must define a `name` class attribute.")

        # basic input validation. Will throw a KeyError if argument is incorrect
        clsSettings = {"optional": "optional", "required": "required", None: None}[attrs.get("settingsArgument", None)]
        attrs["settingsArgument"] = clsSettings

        return type.__new__(mcs, name, bases, attrs)


class EntryPoint(metaclass=_EntryPointEnforcer):
    """
    Generic command line entry point.

    A valid subclass must provide at least a ``name`` class attribute, and may also specify the
    other class attributes described below.

    .. impl:: Generic CLI base class for developers to use.
        :id: I_ARMI_CLI_GEN
        :implements: R_ARMI_CLI_GEN

        Provides a base class for plugin developers to use in creating application-specific CLIs.
        Valid subclasses must at least provide a ``name`` class attribute. Optional class
        attributes that a subclass may provide include ``description``, a string describing the
        command's actions, ``splash``, a boolean specifying whether to display a splash screen
        upon execution, and ``settingsArgument``. If ``settingsArgument`` is specified as
        ``required``, then a settings file is a required positional argument. If
        ``settingsArgument`` is set to ``optional``, then a settings file is an optional
        positional argument. If None is specified for the ``settingsArgument``, then no settings
        file argument is added.
    """

    #: The <command-name> that is used to call the command from the command line
    name: Optional[str] = None

    description: Optional[str] = None
    """A string summarizing the command's actions.

    This is the summary that is printed when you run `python -m armi --list-commands` or
    `python -m armi <command-name> --help`. If not provided, the docstring of the decorated
    class will be used instead. In general, the docstring is probably sufficient but this
    argument allows you to provide a short description of the command while retaining a long
    and detailed docstring."""

    settingsArgument: Union[str, None] = None
    """
    One of {'optional', 'required', None}, or unspecified.

    Specifies whether a settings file argument is to be added to the command's argument parser.
    If settingsArgument == 'required', then a settings file is a required positional argument.
    If settingsArgument == 'optional', then it is an optional positional argument. Finally, if
    settingsArgument is None, then no settings file argument is added."""

    splash = True
    """
    Whether running the entry point should produce a splash text upon executing.

    Setting this to ``False`` is useful for utility commands that produce standard output that
    would be needlessly cluttered by the splash text.
    """

    #: One of {armi.Mode.BATCH, armi.Mode.INTERACTIVE, armi.Mode.GUI}, optional.
    #: Specifies the ARMI mode in which the command is run. Default is armi.Mode.BATCH.
    mode: Optional[int] = None

    def __init__(self):
        # Double-check the metaclass enforcement (covers dynamic subclassing too).
        if self.name is None:
            raise AttributeError("Subclasses of EntryPoint must define a `name` class attribute")

        self.cs = self._initSettings()
        self.parser = argparse.ArgumentParser(
            prog="{} {}".format(context.APP_NAME, self.name),
            description=self.description or self.__doc__,
        )
        if self.settingsArgument is not None:
            if self.settingsArgument not in ["required", "optional"]:
                raise AttributeError(
                    "Subclasses of EntryPoint must specify if the a case settings file is `required` or `optional`"
                )
            if self.settingsArgument == "optional":
                self.parser.add_argument(
                    "settings_file",
                    nargs="?",
                    action=loadSettings(self.cs),
                    help="path to the settings file to load.",
                )
            elif self.settingsArgument == "required":
                self.parser.add_argument(
                    "settings_file",
                    action=loadSettings(self.cs),
                    help="path to the settings file to load.",
                )

        # optional arguments
        self.parser.add_argument(
            "--caseTitle",
            type=str,
            nargs=None,
            action=setCaseTitle(self.cs),
            help="update the case title of the run.",
        )
        self.parser.add_argument(
            "--batch",
            action="store_true",
            default=False,
            help="Run in batch mode even on TTY, silencing all queries.",
        )
        # verbosity settings are useful for nearly every entry point
        self.createOptionFromSetting("verbosity", "-v")
        self.createOptionFromSetting("branchVerbosity", "-V")

        self.args = argparse.Namespace()
        # names of settings explicitly overridden on the command line
        self.settingsProvidedOnCommandLine = []

    @staticmethod
    def _initSettings():
        """
        Initialize settings for this entry point.

        Settings given on command line will update this data structure. Override to provide
        specific settings in the entry point.
        """
        return settings.Settings()

    def addOptions(self):
        """
        Add additional command line options.

        Values of options added to ``self.parser`` will be available on ``self.args``. Values
        added with ``createOptionFromSetting`` will override the setting values in the settings
        input file.

        See Also
        --------
        createOptionFromSetting : A method often called from here to create CLI options from
            application settings.
        argparse.ArgumentParser.add_argument : Often called from here using
            ``self.parser.add_argument`` to add custom argparse arguments.
        """

    def parse_args(self, args):
        self.parser.parse_args(args, namespace=self.args)
        # apply (possibly CLI-overridden) verbosity immediately
        runLog.setVerbosity(self.cs["verbosity"])

    def parse(self, args):
        """Parse the command line arguments, with the command specific arguments."""
        self.addOptions()
        self.parse_args(args)

    def invoke(self) -> Optional[int]:
        """
        Body of the entry point.

        This is an abstract method, and must be overridden in sub-classes.

        Returns
        -------
        exitcode : int or None
            Implementations should return an exit code, or ``None``, which is interpreted the
            same as zero (successful completion).
        """
        raise NotImplementedError("Subclasses of EntryPoint must override the .invoke() method")

    def createOptionFromSetting(self, settingName: str, additionalAlias: str = None, suppressHelp: bool = False):
        """
        Create a CLI option from an ARMI setting.

        This will override whatever is in the settings file.

        Parameters
        ----------
        settingName : str
            the setting name
        additionalAlias : str
            additional alias for the command line option, be careful and make sure they are all
            distinct!
        suppressHelp : bool
            option to suppress the help message when using the command line :code:`--help`
            function. This is particularly beneficial when many options are being added as they
            can clutter the :code:`--help` to be almost unusable.
""" settingsInstance = self.cs.getSetting(settingName) if settings.isBoolSetting(settingsInstance): helpMessage = argparse.SUPPRESS if suppressHelp else settingsInstance.description self._createToggleFromSetting(settingName, helpMessage, additionalAlias) else: choices = None if suppressHelp: helpMessage = argparse.SUPPRESS else: helpMessage = settingsInstance.description.replace("%", "%%") aliases = ["--" + settingName] if additionalAlias is not None: aliases.append(additionalAlias) isListType = settingsInstance.underlyingType is list try: self.parser.add_argument( *aliases, type=str, # types are properly converted by _SetSettingAction nargs="*" if isListType else None, action=setSetting(self), default=settingsInstance.default, choices=choices, help=helpMessage, ) # Capture an argument error here to prevent errors when duplicate options are attempting # to be added. This may also be captured by exploring the parser's `_actions` list as well # but this avoid accessing a private attribute. except argparse.ArgumentError: pass def _createToggleFromSetting(self, settingName, helpMessage, additionalAlias=None): aliases = ["--" + settingName] if additionalAlias is not None: aliases.append(additionalAlias) group = self.parser.add_mutually_exclusive_group() group.add_argument(*aliases, action=storeBool(True, self), help=helpMessage) # not really sure what to do about the help message here. Don't # want to suppress it since it won't show up at all, but can't # exactly "negate" the text automatically. Ideas? 
if helpMessage is not argparse.SUPPRESS: helpMessage = "" group.add_argument( "--no-" + settingName, action=storeBool(False, self), dest=settingName, help=helpMessage, ) # ^^ overwrites settingName with False def storeBool(boolDefault, ep): class _StoreBoolAction(argparse.Action): def __init__(self, option_strings, dest, help=None): super(_StoreBoolAction, self).__init__( option_strings=option_strings, dest=dest, nargs=0, const=boolDefault, default=False, required=False, help=help, ) def __call__(self, parser, namespace, values, option_string=None): ep.cs[self.dest] = self.const ep.settingsProvidedOnCommandLine.append(self.dest) ep.cs.failOnLoad() return _StoreBoolAction def setSetting(ep): class _SetSettingAction(argparse.Action): """This class loads the command line supplied setting values into the :py:data:`armi.settings.cs`. """ def __call__(self, parser, namespace, values, option_string=None): # correctly converts type ep.cs[self.dest] = values ep.settingsProvidedOnCommandLine.append(self.dest) ep.cs.failOnLoad() return _SetSettingAction # Q: Why does this require special treatment? Why not treat it like the other # case settings and use setSetting action? # A: Because caseTitle is no longer an actual cs setting. It's a instance attr. def setCaseTitle(cs): class _SetCaseTitleAction(argparse.Action): """This class sets the case title to the supplied value of the :py:data:`armi.settings.cs`. """ def __call__(self, parser, namespace, value, option_string=None): cs.caseTitle = value return _SetCaseTitleAction # Careful, this is used by physicalProgramming def loadSettings(cs): class LoadSettingsAction(argparse.Action): """This class loads the command line supplied settings file into the :py:data:`armi.settings.cs`. """ def __call__(self, parser, namespace, values, option_string=None): # since this is a positional argument, it can be called with values is # None (i.e. 
            # default)
            if values is not None:
                cs.loadFromInputFile(values)

    return LoadSettingsAction



================================================
FILE: armi/cli/gridGui.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Grid editor GUI entry point.

CLI entry point to spin up the GridEditor GUI.
"""

from armi.cli import entryPoint


class GridGuiEntryPoint(entryPoint.EntryPoint):
    """Load the grid editor GUI."""

    name = "grids"

    def addOptions(self):
        self.parser.add_argument(
            "blueprints",
            nargs="?",
            type=str,
            default=None,
            help="Optional path to a blueprint file to open",
        )

    def invoke(self):
        # Import late since wxpython is kind of big and only needed when actually
        # invoking the entry point
        try:
            import wx

            from armi.utils import gridEditor
        except ImportError:
            raise RuntimeError(
                "wxPython is not installed in this "
                "environment, but is required for the Grid GUI. wxPython is not "
                "installed during the default ARMI installation process. Refer to "
                "installation instructions to install extras like wxPython."
            )

        # Build the wx application and embed the grid editor control in a frame.
        app = wx.App()
        frame = wx.Frame(None, wx.ID_ANY, title="Grid Editor", size=(1000, 1000))
        gui = gridEditor.GridBlueprintControl(frame)
        frame.Show()
        if self.args.blueprints is not None:
            gui.loadFile(self.args.blueprints)
        # blocks until the GUI window is closed
        app.MainLoop()



================================================
FILE: armi/cli/migrateInputs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point into ARMI to migrate inputs to the latest version of ARMI."""

import os

from armi.cli.entryPoint import EntryPoint
from armi.migration import ACTIVE_MIGRATIONS, base
from armi.utils import directoryChangers


class MigrateInputs(EntryPoint):
    """Migrate ARMI Inputs and/or outputs to Latest ARMI Code Base."""

    name = "migrate-inputs"

    def addOptions(self):
        self.parser.add_argument(
            "--settings-path",
            "--cs",
            help="Migrate a case settings file to be compatible with the latest ARMI code base",
            type=str,
        )
        self.parser.add_argument(
            "--database-path",
            "--db",
            help="Migrate a database file to be compatible with the latest ARMI code base",
            type=str,
        )

    def invoke(self):
        """Run the entry point."""
        if self.args.settings_path:
            # run from within the settings file's directory so relative paths resolve
            path, _fname = os.path.split(self.args.settings_path)
            with directoryChangers.DirectoryChanger(path, dumpOnException=False):
                self._migrate(self.args.settings_path, self.args.database_path)
        else:
            self._migrate(self.args.settings_path, self.args.database_path)

    @staticmethod
    def _migrate(settingsPath, dbPath):
        """
        Run all
        migrations.

        Notes
        -----
        Some migrations change the paths so we update them one by one.
        """
        for migrationI in ACTIVE_MIGRATIONS:
            if issubclass(migrationI, (base.SettingsMigration, base.BlueprintsMigration)) and settingsPath:
                mig = migrationI(path=settingsPath)
                mig.apply()
                if issubclass(migrationI, base.SettingsMigration):
                    # don't update on blueprints migration paths, that's not settings!
                    settingsPath = mig.path
            elif issubclass(migrationI, base.DatabaseMigration) and dbPath:
                mig = migrationI(path=dbPath)
                mig.apply()
                # a database migration may relocate/rename the database file
                dbPath = mig.path



================================================
FILE: armi/cli/modify.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Search through a directory tree and modify ARMI settings in existing input file(s).

All valid settings may be used as keyword arguments.
"""

from armi import operators, runLog, settings
from armi.cli.entryPoint import EntryPoint


class ModifyCaseSettingsCommand(EntryPoint):
    """
    Search through a directory tree and modify ARMI settings in existing input file(s).

    All valid settings may be used as keyword arguments. Run the entry point like this::

        $ python -m armi modify --nTasks=3 *.yaml
    """

    name = "modify"

    def addOptions(self):
        self.parser.add_argument(
            "--list-setting-files",
            "-l",
            action="store_true",
            help=("Just list the settings files found and the proposed changes to make. Don't actually modify them."),
        )
        self.parser.add_argument(
            "--skip-inspection",
            "-I",
            action="store_true",
            default=False,
            help="Skip inspection. By default, setting files are checked for integrity and consistency. These "
            "checks result in needing to manually resolve a number of differences. Using this option will "
            "suppress the inspection step.",
        )
        self.parser.add_argument(
            "--rootDir",
            type=str,
            default=".",
            help="A root directory in which to search for settings files, e.g., armi/tests.",
        )
        self.parser.add_argument(
            "--settingsWriteStyle",
            type=str,
            default="short",
            help="Writing style for which settings get written back to the settings files.",
            choices=["short", "medium", "full"],
        )
        self.parser.add_argument(
            "patterns",
            type=str,
            nargs="*",
            default=["*.yaml"],
            help="Pattern(s) to use to find match file names (e.g. *.yaml)",
        )
        # expose every known setting as a (hidden) CLI override option
        for settingName in self.cs.keys():
            self.createOptionFromSetting(settingName, suppressHelp=True)

    def invoke(self):
        """Apply the command-line-provided setting overrides to every discovered settings file."""
        csInstances = settings.recursivelyLoadSettingsFiles(self.args.rootDir, self.args.patterns)
        messages = ("found", "listing") if self.args.list_setting_files else ("writing", "modifying")
        for cs in csInstances:
            runLog.important("{} settings file {}".format(messages[0], cs.path))
            for settingName in self.settingsProvidedOnCommandLine:
                if cs[settingName] != self.cs[settingName]:
                    runLog.info(
                        " changing `{}` from : {}\n {} to -> {}".format(
                            settingName,
                            cs[settingName],
                            " " * (2 + len(settingName)),
                            self.cs[settingName],
                        )
                    )
                    cs[settingName] = self.cs[settingName]

            # if we are only listing setting files, don't write them; it is OK that we modified them in memory
            if not self.args.skip_inspection:
                inspector = operators.getOperatorClassFromSettings(cs).inspector(cs)
                inspector.run()
            if not self.args.list_setting_files:
                cs.writeToYamlFile(cs.path, style=self.args.settingsWriteStyle)

        runLog.important("Finished {} {} settings files.".format(messages[1], len(csInstances)))



================================================
FILE:
armi/cli/reportsEntryPoint.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from armi.cli import entryPoint class ReportsEntryPoint(entryPoint.EntryPoint): """ Placeholder for an ARMI reports entry point. Subclass this if you want to parse the ARMI DB or Reactor data model to build your reports. """ name = "report" settingsArgument = "optional" def __init__(self): entryPoint.EntryPoint.__init__(self) def invoke(self): pass ================================================ FILE: armi/cli/run.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Run an ARMI case."""

from armi.cli.entryPoint import EntryPoint


class RunEntryPoint(EntryPoint):
    """Run an ARMI case."""

    # CLI sub-command name (``armi run``); a settings file argument is mandatory.
    name = "run"
    settingsArgument = "required"

    def invoke(self):
        # ``cases`` is imported at call time (function scope) rather than at
        # module import time; it is only needed when a run is actually invoked.
        from armi import cases

        # Build a Case from the parsed settings and execute it.
        inputCase = cases.Case(cs=self.cs)
        inputCase.run()


================================================ FILE: armi/cli/runSuite.py ================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run multiple ARMI cases one after the other on the local machine."""

import os

from armi import cases
from armi.cli.run import RunEntryPoint
from armi.utils import directoryChangers


class RunSuiteCommand(RunEntryPoint):
    """
    Recursively run all the cases in a suite one after the other on the local machine.

    Invoke with ``mpirun`` or ``mpiexec`` to activate parallelism within each individual case.
    """

    name = "run-suite"

    def addOptions(self):
        # Inherit the base ``run`` options (settings file, etc.) before adding suite options.
        RunEntryPoint.addOptions(self)
        # Positional glob pattern(s) for locating settings files to include in the suite.
        self.parser.add_argument(
            "patterns",
            nargs="*",
            type=str,
            default=["*.yaml"],
            help="Pattern to use while searching for ARMI settings files.",
        )
        self.parser.add_argument(
            "--ignore",
            "-i",
            nargs="+",
            type=str,
            default=[],
            help="Pattern to search for inputs to ignore.",
        )
        # Dry-run flag: discover and echo the suite configuration without running it.
        self.parser.add_argument(
            "--list",
            "-l",
            action="store_true",
            default=False,
            help="Just list the settings files found, don't actually run them.",
        )
        self.parser.add_argument(
            "--suiteDir",
            type=str,
            default=os.getcwd(),
            help=("The path containing the case suite to run. 
Default current working directory."), ) def invoke(self): with directoryChangers.DirectoryChanger(self.args.suiteDir, dumpOnException=False): suite = cases.CaseSuite(self.cs) suite.discover(patterns=self.args.patterns, ignorePatterns=self.args.ignore) if self.args.list: suite.echoConfiguration() else: suite.run() ================================================ FILE: armi/cli/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/cli/tests/test_runEntryPoint.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test for run cli entry point.""" import logging import os import sys import unittest from shutil import copyfile from armi import runLog from armi.__main__ import main from armi.bookkeeping.db.databaseInterface import DatabaseInterface from armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint from armi.cli.checkInputs import CheckInputEntryPoint, ExpandBlueprints from armi.cli.clone import CloneArmiRunCommandBatch, CloneSuiteCommand from armi.cli.compareCases import CompareCases, CompareSuites from armi.cli.database import ExtractInputs, InjectInputs from armi.cli.entryPoint import EntryPoint from armi.cli.migrateInputs import MigrateInputs from armi.cli.modify import ModifyCaseSettingsCommand from armi.cli.reportsEntryPoint import ReportsEntryPoint from armi.cli.run import RunEntryPoint from armi.cli.runSuite import RunSuiteCommand from armi.physics.neutronics.diffIsotxs import CompareIsotxsLibraries from armi.testing import loadTestReactor from armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs from armi.utils.directoryChangers import TemporaryDirectoryChanger from armi.utils.dynamicImporter import getEntireFamilyTree def buildTestDB(fileName, numNodes=1, numCycles=1): """This function builds a (super) simple test DB. Notes ----- This needs to be run inside a temp directory. Parameters ---------- fileName : str The file name (not path) we want for the ARMI test DB. numNodes : int, optional The number of nodes we want in the DB, default 1. numCycles : int, optional The number of cycles we want in the DB, default 1. Returns ------- str Database file name. 
    """
    o, r = loadTestReactor(
        TEST_ROOT,
        inputFileName="smallestTestReactor/armiRunSmallest.yaml",
    )

    # create the tests DB
    dbi = DatabaseInterface(r, o.cs)
    dbi.initDB(fName=f"{fileName}.h5")
    db = dbi.database

    # populate the db with something; write one snapshot per (node, cycle) pair
    # (abs() guards against negative counts being passed in)
    r.p.cycle = 0
    for node in range(abs(numNodes)):
        for cycle in range(abs(numCycles)):
            r.p.timeNode = node
            r.p.cycle = cycle
            r.p.cycleLength = 100
            db.writeToDB(r)

    db.close()
    return f"{fileName}.h5"


class TestInitializationEntryPoints(unittest.TestCase):
    def test_entryPointInitialization(self):
        """Tests the initialization of all subclasses of `EntryPoint`.

        .. test:: Test initialization of many basic CLIs.
            :id: T_ARMI_CLI_GEN0
            :tests: R_ARMI_CLI_GEN
        """
        entryPoints = getEntireFamilyTree(EntryPoint)

        # Comparing to a minimum number of entry points, in case more are added.
        self.assertGreater(len(entryPoints), 15)

        for e in entryPoints:
            entryPoint = e()
            entryPoint.addOptions()
            settingsArg = None
            # Entry points that declare a settings argument must expose a
            # "settings_file" destination on their argparse parser.
            if entryPoint.settingsArgument is not None:
                for a in entryPoint.parser._actions:
                    if "settings_file" in a.dest:
                        settingsArg = a
                        break
                self.assertIsNotNone(
                    settingsArg,
                    msg=(
                        f"A settings file argument was expected for {entryPoint}, "
                        "but does not exist. This is a error in the EntryPoint "
                        "implementation."
                    ),
                )


class TestCheckInputEntryPoint(unittest.TestCase):
    def test_checkInputEntryPointBasics(self):
        # Parse a fake settings path with the -C (skip checks) flag and verify
        # the parsed argument namespace.
        ci = CheckInputEntryPoint()
        ci.addOptions()
        ci.parse_args(["/path/to/fake.yaml", "-C"])

        self.assertEqual(ci.name, "check-input")
        self.assertEqual(ci.args.patterns, ["/path/to/fake.yaml"])
        self.assertEqual(ci.args.skip_checks, True)

    def test_checkInputEntryPointInvoke(self):
        """Test the "check inputs" entry point.

        .. test:: A working CLI child class, to validate inputs. 
:id: T_ARMI_CLI_GEN1 :tests: R_ARMI_CLI_GEN """ ci = CheckInputEntryPoint() ci.addOptions() ci.parse_args([ARMI_RUN_PATH]) with mockRunLogs.BufferLog() as mock: runLog.LOG.startLog("test_checkInputEntryPointInvoke") runLog.LOG.setVerbosity(logging.INFO) self.assertEqual("", mock.getStdout()) ci.invoke() self.assertIn(ARMI_RUN_PATH, mock.getStdout()) self.assertIn("input is self consistent", mock.getStdout()) class TestCloneArmiRunCommandBatch(unittest.TestCase): def test_cloneArmiRunCommandBatchBasics(self): ca = CloneArmiRunCommandBatch() ca.addOptions() ca.parse_args( [ ARMI_RUN_PATH, "--additional-files", "test", "--settingsWriteStyle", "full", ] ) self.assertEqual(ca.name, "clone-batch") self.assertEqual(ca.settingsArgument, "required") self.assertEqual(ca.args.additional_files, ["test"]) self.assertEqual(ca.args.settingsWriteStyle, "full") def test_cloneArmiRunCommandBatchInvokeShort(self): # Test short write style ca = CloneArmiRunCommandBatch() ca.addOptions() ca.parse_args([ARMI_RUN_PATH]) with TemporaryDirectoryChanger(): ca.invoke() self.assertEqual(ca.settingsArgument, "required") self.assertEqual(ca.args.settingsWriteStyle, "short") clonedYaml = "armiRun.yaml" self.assertTrue(os.path.exists(clonedYaml)) # validate a setting that has a default value was removed txt = open(clonedYaml, "r").read() self.assertNotIn("availabilityFactor", txt) def test_cloneArmiRunCommandBatchInvokeMedium(self): """Test the "clone armi run" batch entry point, on medium detail. .. test:: A working CLI child class, to clone a run. 
:id: T_ARMI_CLI_GEN2 :tests: R_ARMI_CLI_GEN """ # Test medium write style ca = CloneArmiRunCommandBatch() ca.addOptions() ca.parse_args([ARMI_RUN_PATH, "--settingsWriteStyle", "medium"]) with TemporaryDirectoryChanger(): ca.invoke() self.assertEqual(ca.settingsArgument, "required") self.assertEqual(ca.args.settingsWriteStyle, "medium") clonedYaml = "armiRun.yaml" self.assertTrue(os.path.exists(clonedYaml)) # validate a setting that has a default value is still there txt = open(clonedYaml, "r").read() self.assertIn("availabilityFactor", txt) class TestCloneSuiteCommand(unittest.TestCase): def test_cloneSuiteCommandBasics(self): cs = CloneSuiteCommand() cs.addOptions() cs.parse_args(["-d", "test", "--settingsWriteStyle", "medium"]) self.assertEqual(cs.name, "clone-suite") self.assertEqual(cs.args.directory, "test") self.assertEqual(cs.args.settingsWriteStyle, "medium") class TestCompareCases(unittest.TestCase): def test_compareCasesBasics(self): with TemporaryDirectoryChanger(): cc = CompareCases() cc.addOptions() cc.parse_args(["/path/to/fake1.h5", "/path/to/fake2.h5"]) self.assertEqual(cc.name, "compare") self.assertIsNone(cc.args.timestepCompare) self.assertIsNone(cc.args.weights) with self.assertRaises(ValueError): # The "fake" files do exist, so this should fail. 
cc.invoke() class TestCompareSuites(unittest.TestCase): def test_compareSuitesBasics(self): with TemporaryDirectoryChanger(): cs = CompareSuites() cs.addOptions() cs.parse_args(["/path/to/fake1.h5", "/path/to/fake2.h5", "-I"]) self.assertEqual(cs.name, "compare-suites") self.assertEqual(cs.args.reference, "/path/to/fake1.h5") self.assertTrue(cs.args.skip_inspection) self.assertIsNone(cs.args.weights) class TestExpandBlueprints(unittest.TestCase): def test_expandBlueprintsBasics(self): ebp = ExpandBlueprints() ebp.addOptions() ebp.parse_args(["/path/to/fake.yaml"]) self.assertEqual(ebp.name, "expand-bp") self.assertEqual(ebp.args.blueprints, "/path/to/fake.yaml") # Since the file is fake, invoke() should exit early. with mockRunLogs.BufferLog() as mock: runLog.LOG.startLog("test_expandBlueprintsBasics") runLog.LOG.setVerbosity(logging.INFO) self.assertEqual("", mock.getStdout()) ebp.invoke() self.assertIn("does not exist", mock.getStdout()) class TestExtractInputs(unittest.TestCase): def test_extractInputsBasics(self): with TemporaryDirectoryChanger() as newDir: # build test DB o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") dbi = DatabaseInterface(r, o.cs) dbPath = os.path.join(newDir.destination, f"{self._testMethodName}.h5") dbi.initDB(fName=dbPath) db = dbi.database db.writeToDB(r) # init the CLI ei = ExtractInputs() ei.addOptions() ei.parse_args([dbPath]) # test the CLI initialization self.assertEqual(ei.name, "extract-inputs") self.assertEqual(ei.args.output_base, dbPath[:-3]) # run the CLI on a test DB, verify it worked via logging with mockRunLogs.BufferLog() as mock: runLog.LOG.startLog("test_extractInputsBasics") runLog.LOG.setVerbosity(logging.INFO) self.assertEqual("", mock.getStdout()) ei.invoke() self.assertIn("Writing settings to", mock.getStdout()) self.assertIn("Writing blueprints to", mock.getStdout()) db.close() class TestInjectInputs(unittest.TestCase): def test_injectInputsBasics(self): ii = InjectInputs() 
ii.addOptions() ii.parse_args(["/path/to/fake.h5"]) self.assertEqual(ii.name, "inject-inputs") self.assertIsNone(ii.args.blueprints) def test_injectInputsInvokeIgnore(self): ii = InjectInputs() ii.addOptions() ii.parse_args(["/path/to/fake.h5"]) with mockRunLogs.BufferLog() as mock: runLog.LOG.startLog("test_injectInputsInvokeIgnore") runLog.LOG.setVerbosity(logging.INFO) self.assertEqual("", mock.getStdout()) ii.invoke() self.assertIn("No settings", mock.getStdout()) def test_injectInputsInvokeNoData(self): with TemporaryDirectoryChanger(): # init CLI ii = InjectInputs() ii.addOptions() bp = os.path.join(TEST_ROOT, "refSmallReactor.yaml") ii.parse_args(["/path/to/fake.h5", "--blueprints", bp]) # invoke and check log with self.assertRaises(FileNotFoundError): # The "fake.h5" doesn't exist, so this should fail. ii.invoke() class TestMigrateInputs(unittest.TestCase): def test_migrateInputsBasics(self): mi = MigrateInputs() mi.addOptions() mi.parse_args(["--settings-path", "cs_path"]) self.assertEqual(mi.name, "migrate-inputs") self.assertEqual(mi.args.settings_path, "cs_path") class TestModifyCaseSettingsCommand(unittest.TestCase): def test_modifyCaseSettingsCommandBasics(self): mcs = ModifyCaseSettingsCommand() mcs.addOptions() mcs.parse_args(["--rootDir", "/path/to/", "--settingsWriteStyle", "medium", "fake.yaml"]) self.assertEqual(mcs.name, "modify") self.assertEqual(mcs.args.rootDir, "/path/to/") self.assertEqual(mcs.args.settingsWriteStyle, "medium") self.assertEqual(mcs.args.patterns, ["fake.yaml"]) def test_modifyCaseSettingsCommandInvoke(self): mcs = ModifyCaseSettingsCommand() mcs.addOptions() with TemporaryDirectoryChanger(): # copy over settings files for fileName in [ "armiRun.yaml", "refSmallReactor.yaml", "refSmallReactorShuffleLogic.py", ]: copyfile(os.path.join(TEST_ROOT, fileName), fileName) # pass in --nTasks=333 mcs.parse_args(["--nTasks=333", "--rootDir", ".", "armiRun.yaml"]) # invoke the CLI mcs.invoke() # validate the change to nTasks was made 
            txt = open("armiRun.yaml", "r").read()
            self.assertIn("nTasks: 333", txt)


class MockFakeReportsEntryPoint(ReportsEntryPoint):
    """Minimal concrete ReportsEntryPoint subclass used to exercise the base class."""

    name = "MockFakeReport"

    def invoke(self):
        # Return a sentinel value so tests can confirm invoke() was dispatched.
        return "mock fake"


class TestReportsEntryPoint(unittest.TestCase):
    def test_cleanArgs(self):
        # The mock subclass should be constructible and its invoke() callable.
        rep = MockFakeReportsEntryPoint()
        result = rep.invoke()
        self.assertEqual(result, "mock fake")


class TestCompareIsotxsLibsEntryPoint(unittest.TestCase):
    def test_compareIsotxsLibsBasics(self):
        com = CompareIsotxsLibraries()
        com.addOptions()
        com.parse_args(["--fluxFile", "/path/to/fluxfile.txt", "reference", "comparisonFiles"])

        self.assertEqual(com.name, "diff-isotxs")
        self.assertIsNone(com.settingsArgument)

        with self.assertRaises(FileNotFoundError):
            # The provided files don't exist, so this should fail.
            com.invoke()


class TestRunEntryPoint(unittest.TestCase):
    def test_runEntryPointBasics(self):
        rep = RunEntryPoint()
        rep.addOptions()
        rep.parse_args([ARMI_RUN_PATH])

        self.assertEqual(rep.name, "run")
        self.assertEqual(rep.settingsArgument, "required")

    def test_runCommandHelp(self):
        """Ensure main entry point with no args completes."""
        with self.assertRaises(SystemExit) as excinfo:
            # have to override the pytest args
            sys.argv = [""]
            main()
        # a clean (help/usage) exit is expected
        self.assertEqual(excinfo.exception.code, 0)

    def test_executeCommand(self):
        """Use executeCommand to call run.

        But we expect it to fail because we provide a fictional settings YAML. 
""" with self.assertRaises(SystemExit) as excinfo: # override the pytest args sys.argv = ["run", "path/to/fake.yaml"] main() self.assertEqual(excinfo.exception.code, 1) class TestRunSuiteCommand(unittest.TestCase): def test_runSuiteCommandBasics(self): rs = RunSuiteCommand() rs.addOptions() rs.parse_args(["/path/to/fake.yaml", "-l"]) self.assertEqual(rs.name, "run-suite") self.assertIsNone(rs.settingsArgument) # test the invoke method with mockRunLogs.BufferLog() as mock: runLog.LOG.startLog("test_runSuiteCommandBasics") runLog.LOG.setVerbosity(logging.INFO) self.assertEqual("", mock.getStdout()) rs.invoke() self.assertIn("Finding potential settings files", mock.getStdout()) self.assertIn("Checking for valid settings", mock.getStdout()) self.assertIn("Primary Log Verbosity", mock.getStdout()) class TestVisFileEntryPointCommand(unittest.TestCase): def test_visFileEntryPointBasics(self): with TemporaryDirectoryChanger() as newDir: # build test DB self.o, self.r = loadTestReactor( TEST_ROOT, customSettings={"reloadDBName": "reloadingDB.h5"}, inputFileName="smallestTestReactor/armiRunSmallest.yaml", ) self.dbi = DatabaseInterface(self.r, self.o.cs) dbPath = os.path.join(newDir.destination, f"{self._testMethodName}.h5") self.dbi.initDB(fName=dbPath) self.db = self.dbi.database self.db.writeToDB(self.r) # create Viz entry point vf = VisFileEntryPoint() vf.addOptions() vf.parse_args([dbPath]) self.assertEqual(vf.name, "vis-file") self.assertIsNone(vf.settingsArgument) # test the invoke method with mockRunLogs.BufferLog() as mock: runLog.LOG.startLog("test_visFileEntryPointBasics") runLog.LOG.setVerbosity(logging.INFO) self.assertEqual("", mock.getStdout()) vf.invoke() desired = "Creating visualization file for cycle 0, time node 0..." 
self.assertIn(desired, mock.getStdout()) # test the parse method (using the same DB to save time) vf = VisFileEntryPoint() vf.parse([dbPath]) self.assertIsNone(vf.args.nodes) self.assertIsNone(vf.args.min_node) self.assertIsNone(vf.args.max_node) self.assertEqual(vf.args.output_name, "test_visFileEntryPointBasics") self.db.close() ================================================ FILE: armi/cli/tests/test_runSuite.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test for runsuite cli entry point.""" import io import sys import unittest from unittest.mock import patch from armi import meta from armi.cli import ArmiCLI class TestRunSuiteSuite(unittest.TestCase): def test_listCommand(self): """Ensure run-suite entry point is registered. .. test:: The ARMI CLI can be correctly initialized. :id: T_ARMI_CLI_CS0 :tests: R_ARMI_CLI_CS """ acli = ArmiCLI() origout = sys.stdout try: out = io.StringIO() sys.stdout = out acli.listCommands() finally: sys.stdout = origout self.assertIn("run-suite", out.getvalue()) def test_showVersion(self): """Test the ArmiCLI.showVersion method. .. test:: The ARMI CLI's basic "--version" functionality works. 
:id: T_ARMI_CLI_CS1 :tests: R_ARMI_CLI_CS """ origout = sys.stdout try: out = io.StringIO() sys.stdout = out ArmiCLI.showVersion() finally: sys.stdout = origout self.assertIn("armi", out.getvalue()) self.assertIn(meta.__version__, out.getvalue()) @patch("armi.cli.ArmiCLI.executeCommand") def test_run(self, mockExeCmd): """Test the ArmiCLI.run method. .. test:: The ARMI CLI's import run() method works. :id: T_ARMI_CLI_CS2 :tests: R_ARMI_CLI_CS """ correct = 0 acli = ArmiCLI() mockExeCmd.return_value = correct ret = acli.run() self.assertEqual(ret, correct) ================================================ FILE: armi/conftest.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Per-directory pytest plugin configuration used only during development/testing. This is a used to manipulate the environment under which pytest runs the unit tests. This can act as a one-stop-shop for manipulating the sys.path, or the ARMI App used to run the tests. Tests must be invoked via pytest for this to have any affect, for example:: $ pytest -n 6 armi """ import os import matplotlib from armi import apps, configure, context from armi.settings import caseSettings from armi.tests import TEST_ROOT def pytest_sessionstart(session): print("Initializing generic ARMI Framework application") configure(apps.App()) bootstrapArmiTestEnv() def bootstrapArmiTestEnv(): """ Perform ARMI config appropriate for running unit tests. .. 
tip:: This can be imported and run from other ARMI applications for test support. """ from armi.nucDirectory import nuclideBases cs = caseSettings.Settings() context.Mode.setMode(context.Mode.BATCH) # Need to init burnChain. (See Reactor._initBurnChain) with open(cs["burnChainFileName"]) as burnChainStream: nuclideBases.imposeBurnChain(burnChainStream) # turn on a non-interactive mpl backend to minimize errors related to initializing Tcl in parallel tests matplotlib.use("agg") # Set and create a test-specific FAST_PATH for parallel unit testing. Not all unit tests have operators, and # operators are usually responsible for making FAST_PATH, so we make it here. It will be deleted by the atexit hook. context.activateLocalFastPath() if not os.path.exists(context.getFastPath()): os.makedirs(context.getFastPath()) # some tests need to find the TEST_ROOT via an env variable when they're filling in templates with ``$ARMITESTBASE`` # in them or opening input files use the variable in an `!include` tag. Thus we provide it here. os.environ["ARMITESTBASE"] = TEST_ROOT ================================================ FILE: armi/context.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module containing global constants that reflect the executing context of ARMI. 
ARMI's global state information: operating system information, environment data, user data, memory parallelism,
temporary storage locations, and the operational mode (interactive, gui, or batch).
"""

import datetime
import enum
import gc
import getpass
import os
import sys
import time
from logging import DEBUG

# h5py needs to be imported here, so that the disconnectAllHdfDBs() call that gets bound to atexit
# below doesn't lead to a segfault on python exit.
#
# Minimal code to reproduce the issue:
#
# >>> import atexit
#
# >>> def willSegFault():
# >>>     import h5py
#
# >>> atexit.register(willSegFault)
import h5py  # noqa: F401

# Flags tracking whether/where the reactor blueprints system has been imported.
BLUEPRINTS_IMPORTED = False
BLUEPRINTS_IMPORT_CONTEXT = ""

# App name is used when spawning new tasks that should invoke a specific ARMI application. Sometimes
# these tasks only use ARMI functionality, so running `python -m armi` is fine. Other times, the
# task is specific to an application, requiring something like: `python -m myArmiApp`
APP_NAME = "armi"


class Mode(enum.Enum):
    """
    Mode represents different run types possible in ARMI.

    The modes can be Batch, Interactive, or GUI. Mode is generally auto-detected based on your terminal. It can also be
    set in various CLI entry points. Each entry point has a ``--batch`` command line argument that can force Batch mode.
    """

    BATCH = 1
    INTERACTIVE = 2
    GUI = 4

    @classmethod
    def setMode(cls, mode):
        """Set the run mode of the current ARMI case."""
        # Rebinds the module-level CURRENT_MODE; mode must be a Mode member.
        global CURRENT_MODE
        assert isinstance(mode, cls), "Invalid mode {}".format(mode)
        CURRENT_MODE = mode


# Filesystem anchors for the installed armi package, project, resources, and docs.
ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(ROOT, "..")
RES = os.path.join(ROOT, "resources")
DOC = os.path.abspath(os.path.join(PROJECT_ROOT, "doc"))
USER = getpass.getuser()
START_TIME = time.ctime()

# Set batch mode if not a TTY, which means you're on a cluster writing to a stdout file. In this
# mode you cannot respond to prompts. 
(This does not work reliably for both Windows and Linux so an # os-specific solution is applied.) IS_WINDOWS = ("win" in sys.platform) and ("darwin" not in sys.platform) isatty = sys.stdout.isatty() if IS_WINDOWS else sys.stdin.isatty() CURRENT_MODE = Mode.INTERACTIVE if isatty else Mode.BATCH Mode.setMode(CURRENT_MODE) MPI_COMM = None # MPI_RANK represents the index of the CPU that is running. # 0 is typically the primary CPU, while 1+ are typically workers. MPI_RANK = 0 # MPI_SIZE is the total number of CPUs. MPI_SIZE = 1 LOCAL = "local" MPI_NODENAME = LOCAL MPI_NODENAMES = [LOCAL] try: # Check for MPI from mpi4py import MPI from mpi4py.util import pkl5 MPI_COMM = pkl5.Intracomm(MPI.COMM_WORLD) MPI_RANK = MPI_COMM.Get_rank() MPI_SIZE = MPI_COMM.Get_size() MPI_NODENAME = MPI.Get_processor_name() MPI_NODENAMES = MPI_COMM.allgather(MPI_NODENAME) except ImportError: # stick with defaults pass except RuntimeError: # likely from MPI not being on system, this is OK for many ARMI invocations # Note this exception was introduced upon upgrading to mpi4py 4.1.1 pass if sys.platform.startswith("win"): # trying a Windows approach APP_DATA = os.path.join(os.environ["APPDATA"], "armi") APP_DATA = APP_DATA.replace("/", "\\") else: # non-Windows: /tmp/ if possible, if not home if os.access("/tmp/", os.W_OK): APP_DATA = "/tmp/.armi" else: APP_DATA = os.path.expanduser("~/.armi") if MPI_NODENAMES.index(MPI_NODENAME) == MPI_RANK: if not os.path.isdir(APP_DATA): try: os.makedirs(APP_DATA) os.chmod(APP_DATA, 0o0777) except OSError: pass if not os.path.isdir(APP_DATA): raise OSError("Directory doesn't exist {0}".format(APP_DATA)) if MPI_COMM is not None: # Make sure app data exists before workers proceed. MPI_COMM.barrier() MPI_DISTRIBUTABLE = MPI_SIZE > 1 _FAST_PATH = os.path.join(os.getcwd()) """ A directory available for high-performance I/O. .. warning:: This is not a constant and can change at runtime. 
"""

_FAST_PATH_IS_TEMPORARY = False
"""Flag indicating whether or not the FAST_PATH should be cleaned up on exit."""


def activateLocalFastPath() -> None:
    """
    Specify a local temp directory to be the fast path.

    ``FAST_PATH`` is often a local hard drive on a cluster node. It should be a high-performance scratch space.
    Different processors on the same node should have different fast paths.

    Notes
    -----
    This path will be obliterated when the job ends.

    This path is set at import time, so if a series of unit tests come through that instantiate one operator after
    the other, the path will already exist the second time. The directory is created in the Operator constructor.
    """
    global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA

    # Try to fix pathing issues in Windows.
    if os.name == "nt":
        APP_DATA = APP_DATA.replace("/", "\\")

    # Per-process unique directory: rank + xdist worker id + timestamp keeps
    # parallel processes and repeated runs from colliding.
    _FAST_PATH = os.path.join(
        APP_DATA,
        "{}{}-{}".format(
            MPI_RANK,
            os.environ.get("PYTEST_XDIST_WORKER", ""),  # for parallel unit testing,
            datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"),
        ),
    )

    _FAST_PATH_IS_TEMPORARY = True


def getFastPath() -> str:
    """
    Callable to get the current FAST_PATH.

    Notes
    -----
    This exists because it's dangerous to use ``FAST_PATH`` directly, as it can change between import and runtime.
    """
    return _FAST_PATH


def cleanFastPathAfterSimulation():
    """
    Clean up temporary files after a run.

    Some Windows HPC systems send a SIGBREAK signal when the user cancels a job, which is NOT handled by ``atexit``.
    Notably, SIGBREAK does not exist outside Windows. For the SIGBREAK signal to work with a Windows HPC, the
    ``TaskCancelGracePeriod`` option must be configured to be non- zero. This sets the period between SIGBREAK and
    SIGTERM/SIGINT. To do cleanups in this case, we must use the ``signal`` module. Actually, even then it does not
    work because MS ``mpiexec`` does not pass signals through. 
    """
    from armi import runLog
    from armi.utils.pathTools import cleanPath

    # Close any open HDF5 handles before removing the directory that backs them.
    disconnectAllHdfDBs()
    # Only chatter about cleanup when verbosity is at DEBUG or lower.
    printMsg = runLog.getVerbosity() <= DEBUG
    if _FAST_PATH_IS_TEMPORARY and os.path.exists(_FAST_PATH):
        if printMsg:
            print(
                "Cleaning up temporary files in: {}".format(_FAST_PATH),
                file=sys.stdout,
            )
        try:
            cleanPath(_FAST_PATH, mpiRank=MPI_RANK)
        except Exception as error:
            # Best-effort cleanup: report the failure on both streams but never raise.
            for outputStream in (sys.stderr, sys.stdout):
                if printMsg:
                    print(
                        "Failed to delete temporary files in: {}\n error: {}".format(_FAST_PATH, error),
                        file=outputStream,
                    )


def disconnectAllHdfDBs() -> None:
    """
    Forcibly disconnect all instances of HdfDB objects.

    Notes
    -----
    This is a hack to help ARMI exit gracefully when the garbage collector and h5py have issues destroying objects.
    The root cause for why this was having issues was never identified. It appears that when several HDF5 files are
    open in the same run (e.g. when calling ``armi.init()`` multiple times from a post-processing script), when these
    h5py File objects were closed, the garbage collector would raise an exception related to the repr'ing the object.

    We get around this by using the garbage collector to manually disconnect all open HdfDBs.
    """
    from armi.bookkeeping.db import Database

    # Walk every object the garbage collector tracks and close each live Database.
    h5dbs = [db for db in gc.get_objects() if isinstance(db, Database)]
    for db in h5dbs:
        db.close()


================================================ FILE: armi/interfaces.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """ Interfaces are objects of code that interact with ARMI. They read information off the state, perform calculations (or\ run external codes), and then store the results back in the state. Learn all about interfaces in :doc:`/developer/guide` See Also -------- armi.operators : Schedule calls to various interfaces armi.plugins : Register various interfaces """ import copy from typing import Dict, List, NamedTuple, Tuple, Union import numpy as np from numpy.linalg import norm from armi import getPluginManagerOrFail, runLog, settings, utils from armi.reactor import parameters from armi.utils import textProcessors class STACK_ORDER: # noqa: N801 """ Constants that help determine the order of modules in the interface stack. Each module defines an ``ORDER`` constant that specifies where in this order it should be placed in the Interface Stack. .. impl:: Define an ordered list of interfaces. :id: I_ARMI_OPERATOR_INTERFACES0 :implements: R_ARMI_OPERATOR_INTERFACES At each time node during a simulation, an ordered collection of Interfaces are run (referred to as the interface stack). But ARMI does not force the order upon the analyst. Instead, each Interface registers where in that ordered list it belongs by giving itself an order number (which can be an integer or a decimal). This class defines a set of constants which can be imported and used by Interface developers to define that Interface's position in the stack. The constants defined are given names, based on common stack orderings in the ARMI ecosystem. But in the end, these are just constant values, and the names they are given are merely suggestions. 
See Also -------- armi.operators.operator.Operator.createInterfaces armi.physics.neutronics.globalFlux.globalFluxInterface.ORDER """ BEFORE = -0.1 AFTER = 0.1 PREPROCESSING = 1.0 FUEL_MANAGEMENT = PREPROCESSING + 1 DEPLETION = FUEL_MANAGEMENT + 1 FUEL_PERFORMANCE = DEPLETION + 1 CROSS_SECTIONS = FUEL_PERFORMANCE + 1 CRITICAL_CONTROL = CROSS_SECTIONS + 1 FLUX = CRITICAL_CONTROL + 1 THERMAL_HYDRAULICS = FLUX + 1 REACTIVITY_COEFFS = THERMAL_HYDRAULICS + 1 TRANSIENT = REACTIVITY_COEFFS + 1 BOOKKEEPING = TRANSIENT + 1 POSTPROCESSING = BOOKKEEPING + 1 class TightCoupler: """ Data structure that defines tight coupling attributes that are implemented within an Interface and called upon when ``interactAllCoupled`` is called. .. impl:: The TightCoupler defines the convergence criteria for physics coupling. :id: I_ARMI_OPERATOR_PHYSICS0 :implements: R_ARMI_OPERATOR_PHYSICS During a simulation, the developers of an ARMI application frequently want to iterate on some physical calculation until that calculation has converged to within some small tolerance. This is typically done to solve the nonlinear dependence of different physical properties of the reactor, like fuel performance. However, what parameter is being tightly coupled is configurable by the developer. This class provides a way to calculate if a single parameter has converged based on some convergence tolerance. The user provides the parameter, tolerance, and a maximum number of iterations to define a basic convergence calculation. If in the ``isConverged`` method the parameter has not converged, the number of iterations is incremented, and this class will wait, presuming another iteration is forthcoming. Parameters ---------- param : str The name of a parameter defined in the ARMI Reactor model. tolerance : float Defines the allowable error between the current and previous parameter values to determine if the selected coupling parameter has converged. 
    maxIters : int
        Maximum number of tight coupling iterations allowed
    """

    # Value types accepted by storePreviousIterationValue/isConverged.
    _SUPPORTED_TYPES = [float, int, list, np.ndarray]

    def __init__(self, param, tolerance, maxIters):
        self.parameter = param
        self.tolerance = tolerance
        self.maxIters = maxIters
        # Count of consecutive non-converged isConverged() calls since the last reset.
        self._numIters = 0
        # Value from the previous coupling iteration; must be set before isConverged() is called.
        self._previousIterationValue = None
        # Most recent convergence measure; infinite until the first comparison is made.
        self.eps = np.inf

    def __repr__(self):
        return (
            f"<{self.__class__.__name__}, Parameter: {self.parameter}, Convergence Criteria: "
            + f"{self.tolerance}, Maximum Coupled Iterations: {self.maxIters}>"
        )

    def storePreviousIterationValue(self, val: _SUPPORTED_TYPES):
        """
        Stores the previous iteration value of the given parameter.

        Parameters
        ----------
        val : _SUPPORTED_TYPES
            the value to store. Is commonly equal to interface.getTightCouplingValue()

        Raises
        ------
        TypeError
            Checks the type of the val against ``_SUPPORTED_TYPES`` before storing. If invalid, a TypeError is
            raised.
        """
        # NOTE(review): exact type check intentionally rejects subclasses (e.g. bool, numpy
        # scalar types); switching to isinstance() would change accepted inputs — confirm
        # before relaxing.
        if type(val) not in self._SUPPORTED_TYPES:
            raise TypeError(
                f"{val} supplied has type {type(val)} which is not supported in {self}. "
                f"Supported types: {self._SUPPORTED_TYPES}"
            )
        self._previousIterationValue = val

    def isConverged(self, val: _SUPPORTED_TYPES) -> bool:
        """
        Return boolean indicating if the convergence criteria between the current and previous iteration values are
        met.

        Parameters
        ----------
        val : _SUPPORTED_TYPES
            The most recent value for computing convergence criteria. Is commonly equal to
            interface.getTightCouplingValue()

        Returns
        -------
        boolean
            True (False) interface is (not) converged

        Notes
        -----
        - On convergence, this class is automatically reset to its initial condition to avoid retaining or holding a
          stale state. Calling this method will increment a counter that when exceeded will clear the state. A
          warning will be reported if the state is cleared prior to the convergence criteria being met.
        - For computing convergence of arrays, only up to 2D is allowed. 3D arrays would arise from considering
          component level parameters. However, converging on component level parameters is not supported at this
          time.

        Raises
        ------
        ValueError
            If the previous iteration value has not been assigned. The ``storePreviousIterationValue`` method must
            be called first.
        RuntimeError
            Only support calculating norms for up to 2D arrays.
        """
        if self._previousIterationValue is None:
            raise ValueError(
                f"Cannot check convergence of {self} with no previous iteration value set. Set using "
                "`storePreviousIterationValue` first."
            )

        previous = self._previousIterationValue

        # calculate convergence of val and previous
        if isinstance(val, (int, float)):
            # Scalars: simple absolute difference.
            self.eps = abs(val - previous)
        else:
            dim = self.getListDimension(val)
            if dim == 1:  # 1D array: L2 norm of the element-wise difference
                self.eps = norm(np.subtract(val, previous), ord=2)
            elif dim == 2:  # 2D array: infinity norm over per-row L2 norms
                epsVec = []
                for old, new in zip(previous, val):
                    epsVec.append(norm(np.subtract(old, new), ord=2))
                self.eps = norm(epsVec, ord=np.inf)
            else:
                raise RuntimeError("Currently only support up to 2D arrays for calculating convergence of arrays.")

        # Check if convergence is satisfied. If so, or if reached max number of iters, then reset
        # the number of iterations
        converged = self.eps < self.tolerance
        if converged:
            self._numIters = 0
        else:
            self._numIters += 1
            if self._numIters == self.maxIters:
                runLog.warning(
                    f"Maximum number of iterations for {self.parameter} reached without convergence! Prescribed "
                    f"convergence criteria is {self.tolerance}."
                )
                self._numIters = 0

        return converged

    @staticmethod
    def getListDimension(listToCheck: list, dim: int = 1) -> int:
        """Return the dimension of a python list.

        Parameters
        ----------
        listToCheck: list
            the supplied python list to have its dimension returned
        dim: int, optional
            the dimension of the list

        Returns
        -------
        dim, int
            the dimension of the list. Typically 1, 2, or 3 but can be arbitrary order, N.
        """
        # Only the first element is inspected (break after one iteration): assumes a
        # rectangular, homogeneous nesting structure.
        for v in listToCheck:
            if isinstance(v, list):
                dim += 1
                dim = TightCoupler.getListDimension(v, dim)
            break
        return dim


class Interface:
    """
    The eponymous Interface between the ARMI reactor data model and the Plugins.

    .. impl:: The interface shall allow code execution at important operational points in time.
        :id: I_ARMI_INTERFACE
        :implements: R_ARMI_INTERFACE

        The Interface class defines a number methods with names like ``interact***``. These methods are called in
        order at each time node. This allows for an individual Plugin defining multiple interfaces to insert code at
        the start or end of a particular time node or cycle during reactor simulation. In this fashion, the Plugins
        and thus the Operator control when their code is run.

        The end goal of all this work is to allow the Plugins to carefully tune when and how they interact with the
        reactor data model.

    Interface instances are gathered into an interface stack in
    :py:meth:`armi.operators.operator.Operator.createInterfaces`.
    """

    # list containing interfaceClass
    @classmethod
    def getDependencies(cls, cs):
        return []

    @classmethod
    def getInputFiles(cls, cs):
        """Return a MergeableDict containing files that should be considered "input"."""
        return utils.MergeableDict()

    name: Union[str, None] = None
    """
    The name of the interface. This is undefined for the base class, and must be overridden by any concrete class
    that extends this one.
    """

    purpose = None
    """
    The action performed by an Interface. This is not required to be defined by implementations of Interface, but is
    used to form categories of interfaces.
    """

    class Distribute:
        """Enum-like return flag for behavior on interface broadcasting with MPI."""

        DUPLICATE = 1
        NEW = 2
        SKIP = 4

    def __init__(self, r, cs):
        """
        Construct an interface.

        The ``r`` and ``cs`` arguments are required, but may be ``None``, where appropriate for the specific
        ``Interface`` implementation.
        Parameters
        ----------
        r : Reactor
            A reactor to attach to
        cs : Settings
            Settings object to use

        Raises
        ------
        RuntimeError
            Interfaces derived from Interface must define their name
        """
        if self.name is None:
            raise RuntimeError(
                "Interfaces derived from Interface must define their name ({}).".format(type(self).__name__)
            )
        self._enabled = True
        self.reverseAtEOL = False
        self._bolForce = False  # override disabled flag in interactBOL if true.
        self.cs = cs
        self.r = r
        # Operator is reached through the reactor, when one is attached.
        self.o = r.o if r else None
        # May be None if tight coupling is disabled or not configured for this purpose.
        self.coupler = _setTightCouplerByInterfaceFunction(self, cs)

    def __repr__(self):
        return "<Interface {0}>".format(self.name)

    def _checkSettings(self):
        """Raises an exception if interface settings requirements are not met."""
        pass

    def nameContains(self, name):
        """Return True if ``name`` is a substring of this interface's name."""
        return name in str(self.name)

    def distributable(self):
        """
        Return true if this can be MPI broadcast.

        Notes
        -----
        Cases where this isn't possible include the database interface, where the SQL driver cannot be distributed.
        """
        return self.Distribute.DUPLICATE

    def preDistributeState(self):
        """
        Prepare for distribute state by returning all non-distributable attributes.

        Examples
        --------
        >>> return {"neutronsPerFission", self.neutronsPerFission}
        """
        return {}

    def postDistributeState(self, toRestore):
        """Restore non-distributable attributes after a distributeState."""
        pass

    def attachReactor(self, o, r):
        """
        Set this interfaces' reactor to the reactor passed in and sets default settings.

        Parameters
        ----------
        o : Operator
            The operator to attach; its case settings are adopted as well.
        r : Reactor object
            The reactor to attach

        Notes
        -----
        This runs on all worker nodes as well as the primary.
        """
        self.r = r
        self.cs = o.cs
        self.o = o

    def detachReactor(self):
        """Delete the callbacks to reactor or operator. Useful when pickling, MPI sending, etc. to save memory."""
        self.o = None
        self.r = None
        self.cs = None

    def duplicate(self):
        """
        Duplicate this interface without duplicating some of the large attributes (like the entire reactor).
        Notes
        -----
        By default, detail points are either during the requested snapshots, if any exist, or all cycles and nodes if
        none exist. This is useful for peripheral interfaces (CR Worth, perturbation theory, transients) that may or
        may not be requested during a standard run.

        If both cycle and node are None, this returns True

        Parameters
        ----------
        cycle : int
            The cycle number (or None to only consider node)
        node : int
            The timenode (BOC, MOC, EOC, etc.).

        Returns
        -------
        bool
            Whether or not this is a detail point.
        """
        from armi.bookkeeping import snapshotInterface  # avoid cyclic import

        if cycle is None and node is None:
            return True

        # No snapshots requested: every point counts as a detail point.
        if not self.cs["dumpSnapshot"]:
            return True

        for cnStamp in self.cs["dumpSnapshot"]:
            ci, ni = snapshotInterface.extractCycleNodeFromStamp(cnStamp)
            if cycle is None and ni == node:
                # case where only node counts (like in equilibrium cases)
                return True
            if ci == cycle and ni == node:
                return True

        return False

    def workerOperate(self, _cmd):
        """
        Receive an MPI command and do MPI work on worker nodes.

        Returns
        -------
        bool
            True if this interface handled the incoming command. False otherwise.
        """
        return False

    def enabled(self, flag=None):
        """
        Mechanism to allow interfaces to be attached but not running at the interaction points.

        Must be implemented on the individual interface level hooks. If given no arguments, returns status of
        enabled. If arguments, sets enabled to that flag. (True or False)

        Notes
        -----
        These ``return`` statements are inconsistent, but not wrong.
        """
        if flag is None:
            return self._enabled
        elif isinstance(flag, bool):
            self._enabled = flag
        else:
            raise ValueError("Non-bool passed to assign {}.enable().".format(self))

    def bolForce(self, flag=None):
        """
        Run interactBOL even if this interface is disabled.

        Parameters
        ----------
        flag : boolean, optional
            Will set the bolForce flag to this boolean

        Returns
        -------
        bool
            true if should run at BOL. No return if you pass an input.

        Notes
        -----
        These ``return`` statements are inconsistent, but not wrong.
        """
        if flag is None:
            return self._bolForce
        self._bolForce = flag

    def writeInput(self, inName):
        """Write input file(s)."""
        raise NotImplementedError()

    def readOutput(self, outName):
        """Read output file(s)."""
        raise NotImplementedError()

    @staticmethod
    def specifyInputs(cs) -> Dict[Union[str, settings.Setting], List[str]]:
        """
        Return a collection of file names that are considered input files.

        This is a static method (i.e. is not called on a particular instance of the class), since it should not
        require an Interface to actually be constructed. This would require constructing a reactor object, which is
        expensive.

        The files returned by an implementation should be those that one would want copied to a target location when
        cloning a Case or CaseSuite. These can be absolute paths, relative paths, or glob patterns that will be
        interpolated relative to the input directory. Absolute paths will not be copied anywhere.

        The returned dictionary will enable the source Settings object to be updated to the new file location. While
        the dictionary keys are recommended to be Setting objects, the name of the setting as a string, e.g.,
        "shuffleLogic", is still interpreted. If the string name does not point to a valid setting then this will
        lead to a failure.

        Note
        ----
        This existed before the advent of ARMI plugins. Perhaps it can be better served as a plugin hook. Potential
        future work.

        See Also
        --------
        armi.cases.Case.clone() : Main user of this interface.

        Parameters
        ----------
        cs : Settings
            The case settings for a particular Case
        """
        return {}

    def updatePhysicsCouplingControl(self):
        """Adjusts physics coupling settings depending on current state of run."""
        pass


class InputWriter:
    """Use to write input files of external codes."""

    def __init__(self, r=None, externalCodeInterface=None, cs=None):
        self.externalCodeInterface = externalCodeInterface
        self.eci = externalCodeInterface  # short alias for the external code interface
        self.r = r
        self.cs = cs

    def getInterface(self, name):
        """Get another interface by name."""
        if self.externalCodeInterface:
            return self.externalCodeInterface.getInterface(name)
        return None

    def write(self, fName):
        """Write the input file."""
        raise NotImplementedError


class OutputReader:
    """
    A generic representation of a particular module's output.

    Attributes
    ----------
    success : bool
        False by default, set to True if the run is considered to have completed without error.

    Notes
    -----
    Should ideally not require r, eci, and fname arguments and would rather just have an apply(reactor) method.
    """

    def __init__(self, r=None, externalCodeInterface=None, fName=None, cs=None):
        self.externalCodeInterface = externalCodeInterface
        self.eci = self.externalCodeInterface  # short alias for the external code interface
        self.r = r
        self.cs = cs
        # Wrap the output file in a TextProcessor only when a file name was given.
        if fName:
            self.output = textProcessors.TextProcessor(fName)
        else:
            self.output = None
        self.fName = fName
        self.success = False

    def getInterface(self, name):
        """Get another interface by name."""
        if self.externalCodeInterface:
            return self.externalCodeInterface.getInterface(name)
        return None

    def read(self, fileName):
        """Read the output file."""
        raise NotImplementedError

    def apply(self, reactor):
        """
        Apply the output back to a reactor state.

        This provides a generic interface for the output data of anything to be applied to a reactor state. The
        application could involve reading text or binary output or simply parameters to appropriate values in some
        other data structure.
""" raise NotImplementedError() def _setTightCouplerByInterfaceFunction(interfaceClass, cs): """ Return an instance of a ``TightCoupler`` class or ``None``. Parameters ---------- interfaceClass : Interface Interface class that a ``TightCoupler`` object will be added to. cs : Settings Case settings that are parsed to determine if tight coupling is enabled globally and if both a target parameter and convergence criteria defined. """ # No tight coupling if there is no purpose for the Interface defined. if interfaceClass.purpose is None: return None if not cs["tightCoupling"] or (interfaceClass.purpose not in cs["tightCouplingSettings"]): return None parameter = cs["tightCouplingSettings"][interfaceClass.purpose]["parameter"] tolerance = cs["tightCouplingSettings"][interfaceClass.purpose]["convergence"] maxIters = cs["tightCouplingMaxNumIters"] return TightCoupler(parameter, tolerance, maxIters) def getActiveInterfaceInfo(cs): """ Return a list containing information for all of the Interface classes that are present. This creates a list of tuples, each containing an Interface subclass and appropriate kwargs for adding them to an Operator stack, given case settings. There should be entries for all Interface classes that are returned from implementations of the describeInterfaces() function in modules present in the passed list of packages. The list is sorted by the ORDER specified by the module in which the specific Interfaces are described. 
Parameters ---------- cs : Settings The case settings that activate relevant Interfaces """ interfaceInfo = [] for info in getPluginManagerOrFail().hook.exposeInterfaces(cs=cs): interfaceInfo += info interfaceInfo = [(iInfo.interfaceCls, iInfo.kwargs) for iInfo in sorted(interfaceInfo, key=lambda x: x.order)] return interfaceInfo def isInterfaceActive(klass, cs): """Return True if the Interface klass is active.""" return any(issubclass(k, klass) for k, _kwargs in getActiveInterfaceInfo(cs)) class InterfaceInfo(NamedTuple): """ Data structure with interface info. Notes ----- If kwargs is an empty dictionary, defaults from ``armi.operators.operator.Operator.addInterface`` will be applied. See Also -------- armi.operators.operator.Operator.createInterfaces : where these ultimately activate various interfaces. """ order: int interfaceCls: Interface kwargs: dict ================================================ FILE: armi/matProps/__init__.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The package armi.matProps is a material library capable of representing and computing material properties. The matProps package allows users to define materials in a custom YAML format. The format is simple, extensible, and easy to use. Each material has a list of "properties" (like density, specific heat, vapor pressure, etc). 
Each of those properties can be an arbitrary function of multiple independent variables, or a look up table of one or
more variables. Each of these properties can define their own set of references, to allow for trustworthy modeling.

A major idea in matProps is that we separate out materials as "data", rather than representing them directly in
Python as "code". This package does not include any material data files. The unit tests in this package have many
example YAML files, and ARMI comes packaged with more real world examples at: ``armi/resources/materials/``. The user
may create their own data files to use with ``matProps`` in a directory, and pass in that path via
``armi.matProps.loadAll(path)``.

Loading Data
============
In your Python code, you can load a full set of matProps materials into memory with just one or two lines of code.
You just have to provide a path to a directory filled with correctly-formatted YAML files:

.. code-block:: python

    import armi.matProps

    pathToMaterialYAMLs = "path/to/materialDir/"
    armi.matProps.loadSafe(pathToMaterialYAMLs)

If you do not specify a directory for the YAML files, there is a default location in your virtual environment you can
store the data files (in a package named ``material_data``):

.. code-block:: python

    import armi.matProps

    armi.matProps.loadSafe()

Adding a Property
=================
matProps comes with a large set of common material properties. But it is quite easy to add another material property
to your simulation, if you need to.

.. code-block:: python

    from armi.matProps.prop import defProp

    defProp("fuzz", "fuzziness", "1/m^2")
    defProp("goo", "gooiness", "m^2/s")
    defProp("squish", "squishiness", "1/Pa")

    armi.matProps.loadSafe("path/to/hilarious/materials/")

A Note on Design
================
At the high-level, the ``matProps`` API exposes the functions in this file (``loadAll``, ``loadSafe``,
``getMaterials``, etc).

And these functions all work off three global data collections: ``armi.matProps.loadedRootDirs``,
``armi.matProps.materials``, and ``armi.matProps.prop.properties``.

It is worth noting that this design centers around global data. This could have a more object-oriented approach where
the functions below and these three data sets are all stored in a class, e.g. via a ``MaterialLibrary`` class. This
would be more Pythonic, and allow for multiple collections of materials, say for testing. So far, no one has ever
needed multiple collections of materials from matProps, because a single scientific model generally only needs one
source of truth for what materials are.
"""

import os
import sysconfig
import warnings
from glob import glob

from armi.matProps.material import Material

# Module-level registry of loaded material directories and Material objects, keyed by name.
loadedRootDirs = []
materials = {}


def getPaths(rootDir: str) -> list:
    """Get the paths of all the YAML files in a given directory."""
    if not os.path.exists(rootDir):
        raise FileNotFoundError(f"Directory {rootDir} not found")
    elif not os.path.isdir(rootDir):
        raise NotADirectoryError(f"Input path {rootDir} is not a directory")

    # Recursively collect both .yaml and .yml files under rootDir.
    patterns = ["*.yaml", "*.yml"]
    matFiles = []
    for pattern in patterns:
        matFiles.extend(glob(os.path.join(rootDir, "**", pattern), recursive=True))

    return matFiles


def addMaterial(yamlPath: str, mat):
    """
    Adds Material object instance to matProps.materials dict.

    Parameters
    ----------
    yamlPath: str
        Yaml file path whose information is being parsed.
    mat: Material
        Material object whose data will be saved.

    Raises
    ------
    KeyError
        If a material with the same name was already loaded.
    """
    global materials
    if mat.name in materials:
        msg = f"A material with the name `{mat.name}` as defined in ({yamlPath}) already exists."
        raise KeyError(msg)

    materials[mat.name] = mat
    mat.save()


def loadAll(rootDir: str = None) -> None:
    """
    Loads all material files from a particular directory.

    If a materials directory is not provided, this function will attempt to find materials in the default location in
    the virtual environment.

    Parameters
    ----------
    rootDir: str
        Directory whose YAML files will be loaded into matProps. The default is the materials_data location in the
        venv.

    Notes
    -----
    Hidden in here is a default directory which you can load your YAML files from. Inside your Python virtual
    environment, you can create a data directory named "materials_data", and store all your matProps formatted YAML
    files. This is optional, of course, you can just explicitly pass a directory path into this method.
    """
    global loadedRootDirs
    if rootDir is None:
        # Fall back to the conventional venv data directory.
        rootDir = os.path.join(sysconfig.getPaths()["purelib"], "materials_data")
        if not os.path.exists(rootDir):
            raise OSError(f"No material directory provided, and default not found: {rootDir}")

    paths = getPaths(rootDir)
    for yamlPath in paths:
        mat = Material()
        try:
            mat.loadFile(yamlPath)
        except Exception as exc:
            # Re-raise with the offending file path so the bad YAML is easy to find.
            msg = f"Failed to load `{yamlPath}`."
            raise RuntimeError(msg) from exc
        addMaterial(yamlPath, mat)

    loadedRootDirs.append(rootDir)


def clear() -> None:
    """Clears all loaded materials in matProps."""
    global materials
    global loadedRootDirs
    loadedRootDirs.clear()
    materials.clear()


def loadSafe(rootDir: str = None) -> None:
    """
    Safely load a single directory of matProps materials.

    Loading a materials directory via this function will first clear out any other materials that are loaded into
    matProps. If a materials directory is not provided, this function will attempt to find materials in the default
    location in the virtual environment. This is meant to be a helpful tool for testing.

    Parameters
    ----------
    rootDir: str
        Directory whose yaml files will be loaded into matProps. The default is the materials_data location in the
        venv.

    See Also
    --------
    loadAll : More flexible way to load materials into matProps.
""" clear() loadAll(rootDir) def getHashes() -> dict: """Calls Material.hash() for each Material object in materials.""" global materials hashes = {} for material in materials.values(): hashes[material.name] = material.hash() return hashes def getMaterial(name: str) -> Material: """ Returns a material object with the given name from matProps.materials. Parameters ---------- name: str Name of material whose data user wishes to retrieve. Returns ------- Material Material object returned from matProps.materials. """ global materials try: return materials[name] except KeyError: msg = f"No material named `{name}` was loaded within loaded data." raise KeyError(msg) from None def loadMaterial(yamlPath: str, saveMaterial: bool = False) -> Material: """ Loads an individual material file. Parameters ---------- yamlPath: str Path to YAML file that will be parsed into this object instance. saveMaterial: bool If True, Material object instance will be saved into matProps.materials. Returns ------- Material Material object whose data is parsed from material file provided by yamlPath. """ mat = Material() mat.loadFile(yamlPath) if saveMaterial: addMaterial(yamlPath, mat) else: msg = f"Loading material {mat} {mat.hash()}" try: # If possible, keep matProps free of ARMI imports from armi import runLog runLog.info(msg) except ImportError: print(msg) return mat def loadedMaterials() -> list: """ Returns all the Material objects that have been loaded into matProps.materials. Returns ------- list of Material Loaded Material objects """ global materials mats = [] for mat in materials.values(): mats.append(mat) return mats def getLoadedRootDirs() -> list: """ Returns a list of all of the loaded root directories. 
Returns ------- list of str Loaded root directories """ global loadedRootDirs return loadedRootDirs def load_all(rootDir: str = None) -> None: """Pass-through to temporarily support an old API.""" warnings.warn("Please use matProps.loadAll, not matProps.load_all.", DeprecationWarning) loadAll(rootDir) def load_safe(rootDir: str = None) -> None: """Pass-through to temporarily support an old API.""" warnings.warn("Please use matProps.loadSafe, not matProps.load_safe.", DeprecationWarning) loadSafe(rootDir) def get_material(name: str) -> Material: """Pass-through to temporarily support an old API.""" warnings.warn("Please use matProps.getMaterial, not matProps.get_material.", DeprecationWarning) return getMaterial(name) def load_material(yamlPath: str, saveMaterial: bool = False) -> Material: """Pass-through to temporarily support an old API.""" warnings.warn("Please use matProps.loadMaterial, not matProps.load_material.", DeprecationWarning) return loadMaterial(yamlPath, saveMaterial) def loaded_materials() -> list: """Pass-through to temporarily support an old API.""" warnings.warn("Please use matProps.loadedMaterials, not matProps.loaded_materials.", DeprecationWarning) return loadedMaterials() def get_loaded_root_dirs() -> list: """Pass-through to temporarily support an old API.""" warnings.warn("Please use matProps.getLoadedRootDirs, not matProps.get_loaded_root_dirs.", DeprecationWarning) return getLoadedRootDirs() ================================================ FILE: armi/matProps/constituent.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic material composition."""


class Constituent:
    """Makeup of the Material.composition."""

    def __init__(self, name: str, minValue: float, maxValue: float, isBalance: bool):
        """
        Constructor for Constituent object.

        Parameters
        ----------
        name: str
            Name of constituent element
        minValue: float
            Minimum value of constituent
        maxValue: float
            Maximum value of constituent
        isBalance: bool
            Boolean used to denote if constituent is balance element (True) or not (False).

        Raises
        ------
        ValueError
            If the min value is negative, the max value is below the min, or the max exceeds 100.0.
        """
        self.name = name
        """Name of the constituent"""

        self.minValue = minValue
        """Min value of the constituent"""

        self.maxValue = maxValue
        """Max value of the constituent"""

        self.isBalance = isBalance
        """Flag for indicating if the constituent is intended to be the balance of the composition"""

        # Composition values are percentages, so they must lie in [0, 100] with min <= max.
        if self.minValue < 0.0:
            msg = f"Constituent {self.name} has a negative minimum composition value."
            raise ValueError(msg)
        elif self.maxValue < self.minValue:
            msg = f"Constituent {self.name} has an invalid maximum composition value. (max < min)"
            raise ValueError(msg)
        elif self.maxValue > 100.0:
            msg = f"Constituent {self.name} has an invalid maximum composition value. (max > 100.0)"
            raise ValueError(msg)

    def __repr__(self):
        """Provides string representation of Constituent object."""
        msg = f"<Constituent {self.name} min: {self.minValue} max: {self.maxValue}"
        if self.isBalance:
            msg += " computed based on balance"
        msg += ">"
        return msg

    @staticmethod
    def parseComposition(node):
        """
        Method which parses "composition" node from yaml file and returns container of Constituent objects.

        Returns list of Constituent objects. Each element is constructed from a map element in the
        "composition node".

        Parameters
        ----------
        node: dict
            YAML object representing composition node.

        Returns
        -------
        list : Constituent
            List of Constituent objects representing elements of Material.
        """
        composition = []
        # NOTE(review): elementSet is populated but never read in this method — confirm whether
        # it was intended for duplicate-element detection.
        elementSet = set()
        balanceName = ""
        # The balance element's bounds start at 100% and are narrowed by the other constituents.
        balanceMin = 100.0
        balanceMax = 100.0
        sumMin = 0.0
        sumMax = 0.0
        numBalance = 0
        for element, nodeContent in node.items():
            if element == "references":
                continue

            elementSet.add(element)
            if nodeContent == "balance":
                balanceName = element
                numBalance += 1
            elif type(nodeContent) is str or len(nodeContent) != 2:
                msg = (
                    f"Composition values must be either a tuple of min/max values, or `balance`, but got: {nodeContent}"
                )
                raise TypeError(msg)
            else:
                constituentMin = nodeContent[0]
                constituentMax = nodeContent[1]
                sumMin += constituentMin
                sumMax += constituentMax
                part = Constituent(element, constituentMin, constituentMax, False)
                composition.append(part)

        if numBalance != 1:
            msg = (
                f"Composition node must have exactly one balance element. Composition node has {numBalance} balance "
                "elements instead."
            )
            raise ValueError(msg)

        if balanceName:
            if sumMin > 100.0:
                raise ValueError("Composition has a minimum composition summation greater than 100.0")
            # The balance min is 100 minus the most the others can take (floored at 0);
            # the balance max is 100 minus the least the others must take.
            if sumMax >= 100.0:
                balanceMin = 0.0
            else:
                balanceMin -= sumMax
            balanceMax -= sumMin
            balance = Constituent(balanceName, balanceMin, balanceMax, True)
            composition.append(balance)

        return composition



================================================
FILE: armi/matProps/function.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generic class for a function to be defined in a YAML.""" class Function: """ An base class for computing material Properties. The word "function" here is used in the mathematical sense, to describe a generic mathematical curve. The various Function types are read in from YAML, and interpreted at run time. The sub-classes of Function have specific requirements on the YAML format. """ # This is the list of all nodes that are associated with functions in the YAML input file. Any node named something # not in this list is assumed to be an independent variable for the function. This list needs to remain updated if # any child class adds a new YAML node. FUNCTION_NODES = { "type", # All equations have this to define the child class type "tabulated data", # Optional for all equations, required for table functions "equation", # Used by SymbolicFunction for the equation definition "functions", # Used by PiecewiseFunction to define the child functions "reference temperature", # Optional for all equations } def __init__(self, mat, prop): """ Constructor for base Function class. 
Parameters ---------- mat: Material Material object with which this Function is associated prop: Property Property that is represented by this Function """ self.material = mat """A pointer back to the parent Material for this Function.""" self.property = prop """The Property this Function represents.""" self.independentVars: dict = {} # Keys are the independent variables, values are a tuple of the min/max bounds self.tableData = None """A TableFunction containing verification data for this specific function. Note that for actual TableFunction instances, the tableData property is NULL.""" self._referenceTemperature: float = -274.0 """Reference temperature. Initialized be less than absolute zero in degrees Celsius""" self._references = [] """Reference data""" def clear(self): self.tableData = None @staticmethod def isTable(): """Returns True if a subclass of TableFunction, otherwise False.""" return False def getReferenceTemperature(self): """ Returns the reference temperature, in Celcius, if it is defined. Returns ------- float Reference temperature, in Celcius """ # If this statement below is true, either the reference temperature was not provided in the material YAML file # or was a non-physical value. if self._referenceTemperature < -273.15: raise ValueError("Reference temperature is undefined or set to less than absolute zero.") return self._referenceTemperature def getIndependentVariables(self): """ Returns the independent variables that are required for this function. Returns ------- list list of independent variable strings """ return list(self.independentVars.keys()) def getMinBound(self, var) -> float: """ Returns the minimum bound for the requested variable. Returns ------- float Minimum valid value """ return self.independentVars[var][0] def getMaxBound(self, var) -> float: """ Returns the minimum bound for the requested variable. 
Returns ------- float Maximum valid value """ return self.independentVars[var][1] @property def references(self) -> list: return self._references def calc(self, point: dict = None, **kwargs): """ Calculate the quantity of a specific Property. The user must provide a "point" dictionary, or kwargs, but not both or neither. Parameters ---------- point: dict dictionary of independent variable/value pairs kwargs: dictionary of independent variable/value pairs, same purpose but to allow a nicer API. Returns ------- float property evaluation """ # This method should take in one dictionary or a set of kwargs, but not both if point is not None and kwargs: raise ValueError("Please provide either a single dictionary or a set of kwargs, but not both.") elif point is None and not kwargs: raise ValueError("Please provide at least one input to this method.") # select the inputs provided if point: data = point else: data = kwargs # input sanity checking if not self.independentVars.keys() <= data.keys(): raise KeyError( f"Specified point {data} does contain the correct independent variables: {self.independentVars}" ) elif not self.inRange(data): raise ValueError(f"Requested calculation point, {data} is not in the valid range of the function") return self._calcSpecific(data) def inRange(self, point: dict) -> bool: """ Determine if a point is within range of the function. Parameters ---------- point: dict dictionary of independent variable/value pairs Returns ------- bool True if the point is in the valid range, False otherwise. """ for var, bounds in self.independentVars.items(): if point[var] < bounds[0] or point[var] > bounds[1]: return False return True def __repr__(self): """Provides string representation of Function object.""" return f"<{self.__class__.__name__}>" @staticmethod def _factory(mat, node, prop): """ Parsing a property node and using that information to construct a Function object. 
This method is responsible for searching for the assigning the Function object to the appropriate child class instance. Parameters ---------- mat: Material Material object which is associated with the returned Function object node: dict YAML object representing root level node of material yaml file being parsed prop: Property Property object that is being populated on the Material Returns ------- Function Function pointer parsed from the specified property. """ from armi.matProps.piecewiseFunction import PiecewiseFunction from armi.matProps.symbolicFunction import SymbolicFunction from armi.matProps.tableFunction1D import TableFunction1D from armi.matProps.tableFunction2D import TableFunction2D funTypes = { "symbolic": SymbolicFunction, "table": TableFunction1D, "two dimensional table": TableFunction2D, "piecewise": PiecewiseFunction, } funcNode = node["function"] funcType = str(funcNode["type"]) func = funTypes[funcType](mat, prop) func._parse(node) return func def _setBounds(self, node: dict, var: str): """ Validate and set the min and max bounds for a variable. Parameters ---------- node: dict dictionary that contains min and max values. var: str name of the variable """ if "min" not in node or "max" not in node: raise KeyError( f"The independent variable node, {var}, is not formatted correctly: {node}. If this node is not " "intended to be an independent variable, please ensure that the Function.FUNCTION_NODES set is updated " "properly." ) minVal = float(node["min"]) maxVal = float(node["max"]) if maxVal < minVal: raise ValueError(f"Maximum bound {maxVal} cannot be less than the minimum bound {minVal}") self.independentVars[var] = (minVal, maxVal) def _parse(self, node): """ Method used to parse property node and fill in appropriate Function data members. 
Parameters ---------- node YAML containing object to be parsed """ from armi.matProps.reference import Reference from armi.matProps.tableFunction1D import TableFunction1D from armi.matProps.tableFunction2D import TableFunction2D funcNode = node["function"] refTempNode = funcNode.get("reference temperature", None) if refTempNode is not None: self._referenceTemperature = float(refTempNode) funcType = str(funcNode["type"]) references = node.get("references", []) for ref in references: self._references.append(Reference._factory(ref)) tabulatedNode = node.get("tabulated data", None) if tabulatedNode: if funcType == "two dimensional table": self.tableData = TableFunction2D(self.material, self.property) else: self.tableData = TableFunction1D(self.material, self.property) if self.isTable(): self._parseSpecific(node) self.tableData = self else: self.tableData._parseSpecific(node) elif self.isTable(): raise KeyError("Missing node `tabulated data`") for var in funcNode: if var not in self.FUNCTION_NODES: self._setBounds(funcNode[var], var) if not self.isTable(): self._parseSpecific(node) def _parseSpecific(self, node): """ Abstract method that is used to parse information specific to Function child classes. Parameters ---------- node YAML containing object information to parse and fill in Function """ raise NotImplementedError() def _calcSpecific(self, point: dict) -> float: """ Private method that contains the analytic expression used to return a property value. Parameters ---------- point : dict dictionary of independent variable/value pairs Returns ------- float property evaluation at specified independent variable point """ raise NotImplementedError() ================================================ FILE: armi/matProps/interpolationFunctions.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Some basic interpolation routines.""" import math def findIndex(val: float, x: list) -> int: """ Find the location of the provided value in the provided collection. Parameters ---------- val: float Value whose index is needed in x x: list List of numbers Returns ------- int Integer containing index wherein x[i] <= Tc <= x[i+1] """ if val < x[0]: raise ValueError(f"Value {val} out of bounds: {x}") for ii in range(len(x) - 1): Tc1 = x[ii] Tc2 = x[ii + 1] if val >= Tc1 and val <= Tc2: return ii raise ValueError(f"Value {val} out of bounds: {x}") def linearLinear(Tc: float, x: list, y: list) -> float: """ Find the approximate value on a XY table assuming a linear-linear curve. Parameters ---------- Tc: float Independent variable at which an interpolation value is desired. x: list List of independent variable values y: list List of dependent variable values Returns ------- float Float containing final interpolation value based on a linear-linear interpolation. """ ii: int = findIndex(Tc, x) Tc1: float = x[ii] Tc2: float = x[ii + 1] return (Tc - Tc1) / (Tc2 - Tc1) * (y[ii + 1] - y[ii]) + y[ii] def logLinear(Tc: float, x: list, y: list) -> float: """ Find the approximate value on a XY table assuming a log-linear curve. Parameters ---------- Tc: float Independent variable at which an interpolation value is desired. x: list List of independent variable values y: list List of dependent variable values Returns ------- float Float containing final interpolation value based on a log-linear interpolation. 
""" ii: int = findIndex(Tc, x) Tc1: float = math.log10(x[ii]) Tc2: float = math.log10(x[ii + 1]) return (math.log10(Tc) - Tc1) / (Tc2 - Tc1) * (y[ii + 1] - y[ii]) + y[ii] ================================================ FILE: armi/matProps/material.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """How matProps defines a material class.""" import hashlib from pathlib import Path from ruamel.yaml import YAML from armi.matProps import prop from armi.matProps.constituent import Constituent from armi.matProps.function import Function from armi.matProps.materialType import MaterialType class Material: """ The Material class is a generic container for all Material types, whether they contain ASME properties, fluid properties, or steel properties. It may be necessary to have multiple Material definitions for a single material containing different phases. 
""" validFileFormatVersions = [3.0, "TESTS"] def __init__(self): """Constructor for Material class.""" self._saved = False """Boolean denoting whether or not Material object is saved in materials dict.""" self.materialType = None """Enum represting type for the Material object""" self.composition = [] """List of Constituent objects representing composition of Material.""" self.name = None """Name of Material object.""" self._sha1 = None """SHA1 value of parsed material file.""" def __repr__(self): """Provides string representation for Material class.""" return f"<Material {self.name} {str(self.materialType)}>" def hash(self) -> str: """Returns the SHA1 hash value of a Material instance.""" return self._sha1 def saved(self) -> bool: """ Returns a bool value indicating whether the Material has been stored internally in the matProps.materials map via matProps.addMaterial(). """ return self._saved def save(self): """Sets Material._saved flag to True.""" self._saved = True @staticmethod def dataCheckMaterialFile(filePath, rootNode): """ This is a partial data check of the material data file. Checks the first level of data keywords and also check that the file format is a valid version. Parameters ---------- filePath: str Path containing name of YAML file whose file format and property nodes are checked. rootNode: dict Root YAML node of file parsed from filePath. 
""" file_format = Material.getNode(rootNode, "file format") if file_format not in Material.validFileFormatVersions: msg = f"Invalid file format version `{file_format}` used in: {filePath}" raise ValueError(msg) for propName in rootNode: if propName in {"composition", "material type", "file format"}: continue if not prop.contains(propName): msg = f"Invalid property node `{propName}` found in: {filePath}" raise KeyError(msg) @staticmethod def getValidFileFormatVersions(): """Get a vector of strings with all of the valid file format versions.""" return Material.validFileFormatVersions @staticmethod def getNode(node: dict, subnodeName: str): """ Searches a node for a child element and returns it. Parameters ---------- node: dict Parent level node from which a child element is searched. subnodeName: str Name of the child element that is queried from node. """ if subnodeName not in node: msg = f"Missing YAML node `{subnodeName}`" raise KeyError(msg) return node[subnodeName] def loadNode(self, node: dict): """ Loads YAML and parses information to fill in Material data members including all relevant Function objects. Parameters ---------- node: dict Material definition, like a dict that is loaded from a YAML file. """ self.materialType = MaterialType.fromString(self.getNode(node, "material type")) self.composition = Constituent.parseComposition(self.getNode(node, "composition")) for p in prop.properties: if p.name and p.name in node: setattr(self, p.symbol, Function._factory(self, node[p.name], p)) else: # Any property not in the input file will be set to None. setattr(self, p.symbol, None) def loadFile(self, filePath: str): """ Loads yaml file and parses information to fill in Material data members including all relevant Function objects. Parameters ---------- filePath: str Path containing name of YAML file to parse. 
""" # load the file path y = YAML(pure=True) node = y.load(Path(filePath)) # grab the material name from the file name n = Path(filePath).name if n.lower().endswith(".yaml"): n = n[:-5] elif n.lower().endswith(".yml"): n = n[:-4] self.name = n # Generate SHA1 value and set data member sha1 = hashlib.sha1() with open(filePath, "rb") as materialFile: sha1.update(materialFile.read()) self._sha1 = sha1.hexdigest() self.dataCheckMaterialFile(filePath, node) self.loadNode(node) ================================================ FILE: armi/matProps/materialType.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Some definition of material types: fluid, fuel, metal, etc.""" class MaterialType: """ A container for the methods used to differentiate between the types of materials. The MaterialType class is used to determine whether the material contain ASME, fluid, fuel, or metal properties. It may also be used for the phase of the material. """ """Dictionary mapping material type strings to enum values.""" types = { "Fuel": 1, "Metal": 2, "Fluid": 4, "Ceramic": 8, "ASME2015": 16, "ASME2017": 32, "ASME2019": 64, } def __init__(self, value: int = 0): """ Constructor for MaterialType class. Parameters ---------- value: int Integer enum value denoting material type. 
""" self._value: int = value """Enum value representing type of material.""" @staticmethod def fromString(name: str) -> "MaterialType": """ Provides MaterialType object from a user provided string. Parameters ---------- name: str String from which a MaterialType object will be derived. Returns ------- MaterialType """ value: int = MaterialType.types.get(name, 0) if value == 0: msg = f"Invalid material type `{name}`, valid names are: {list(MaterialType.types.keys())}" raise KeyError(msg) return MaterialType(value) def __repr__(self): """Provides string representation of MaterialType instance.""" name = "None" for typ, val in self.types.items(): if val == self._value: name = typ break return f"<MaterialType {name}>" def __eq__(self, other) -> bool: """ Support for "==" comparison operator. Parameters ---------- other: MaterialType or int RHS object that is compared to MaterialType instance. Returns ------- bool True if objects ._value data members are equivalent, False otherwise. """ if type(other) is int: return self._value == other elif type(other) is MaterialType: return self._value == other._value else: raise TypeError(f"Cannot compare MaterialType to type {type(other)}") ================================================ FILE: armi/matProps/piecewiseFunction.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A piecewise-defined function for used in material YAML files. 
Each piece can be of any other type that matProps supports. """ import math from armi.matProps.function import Function class PiecewiseFunction(Function): """ A piecewise function is composed of many other subfunctions, any of which can be any subclass of the Function type, including ``PiecewiseFunction``. The PiecewiseFunction uses the ``Function.inRange`` method to determine which sub-function should be used for computing the quantity. An example with the YAML format is:: function: <var1>: min: <min1> max: <max1> <var2>: min: <min2> max: <max2> type: piecewise functions: - function: <var1>: min: <local min1> max: <local max1> <var2>: min: <local min2> max: <local max2> type: ... tabulated data: *alias # it is suggested that the same table is used for the entire range - function: <var1>: min: <local min1> max: <local max1> <var2>: min: <local min2> max: <local max2> type: ... tabulated data: *alias # it is suggested that the same table is used for the entire range """ def __init__(self, mat, prop): """ Constructor for PiecewiseFunction object. Parameters ---------- mat: Material Material object with which this PiecewiseFunction is associated prop: Property Property that is represented by this PiecewiseFunction """ super().__init__(mat, prop) self.functions = [] """List of Function objects used to compose PiecewiseFunction object.""" def __repr__(self): """Provides string representation of PiecewiseFunction object.""" msg = "<PiecewiseFunction " for subFunc in self.functions: msg += str(subFunc) msg += ">" return msg def clear(self) -> None: for fun in self.functions: del fun self.functions.clear() def _parseSpecific(self, node): """ Parses nodes that are specific to PiecewiseFunction objects. Parameters ---------- node : dict Dictionary containing the node whose values will be parsed to fill object. 
""" def checkOverlap(func1, func2): """Checks if the valid range for two functions overlaps on all dimensions.""" for var in self.independentVars: min1, max1 = func1.independentVars[var] min2, max2 = func2.independentVars[var] if math.isclose(max1, min2) or math.isclose(min1, max2): # This handles floating point comparison. Adjoining regions is allowed. return False if max1 < min2 or min1 > max2: # overlap on this dimension, so no overlap overall return False # Overlap on all dimensions return True for subFunctionDef in node["function"]["functions"]: func = self._factory(self.material, subFunctionDef, self.property) self.functions.append(func) # Ensure bounds have same variables in parent and child functions. for subFunc in self.functions: for var in self.independentVars: if var not in subFunc.independentVars: raise KeyError( "Piecewise child function must have same variables for valid range as main function." ) # Check for overlapping regions for i, func1 in enumerate(self.functions): for func2 in self.functions[i + 1 :]: if checkOverlap(func1, func2): raise ValueError(f"Piecewise child functions overlap: {func1}, {func2}") def _calcSpecific(self, point: dict) -> float: """ Private method that contains the analytic expression used to return a property value. Parameters ---------- point: dict dictionary of independent variable/value pairs Returns ------- float property evaluation at specified independent variable point """ for subFunc in self.functions: if subFunc.inRange(point): return subFunc.calc(point) raise ValueError("PiecewiseFunction error, could not evaluate") ================================================ FILE: armi/matProps/point.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A single data point in a YAML file.""" class Point: """A single data point in a YAML file.""" def __init__(self, var1, var2, val): """ Constructor for Point class. Parameters ---------- var1: float Independent variable 1 var2: float If provided, independent variable 2 val: float Dependent variable value for property """ self.variable1 = var1 """Value of first independent variable.""" self.variable2 = var2 """Value of second independent variable.""" self.value = val """Value of Property dependent value""" def __repr__(self): """Provides string representation of Point object.""" return f"<Point {self.variable1}, {self.variable2} -> {self.value}>" ================================================ FILE: armi/matProps/prop.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ In the parlance of matProps, a material 'Property' is a physical characteristic of the material that can be described mathematically. For instance, density, specific heat, specific gravity, coefficient of linear expansion, etc. 
""" properties = set() PROPERTIES = { "alpha_d": ("thermal diffusivity", "m^2/s", r"(\alpha_d)"), "alpha_inst": ("instantaneous coefficient of thermal expansion", r"(1/^\circ{}C)", r"(\alpha_{inst})"), "alpha_mean": ("mean coefficient of thermal expansion", r"(1/^\circ{}C)", r"(\alpha_{mean})"), "c_p": ("specific heat capacity", r"U(J/(kg\dot{}^\circ{}C))U"), "dH_fus": ("enthalpy of fusion", "J/kg", r"(\Delta H_{f})"), "dH_vap": ("latent heat of vaporization", "J/kg", r"(\Delta H_{v})"), "dl_l": ("linear expansion", "unitless", r"\Delta l_{percent}"), "dV": ("volumetric expansion", r"m^3/(^\circ{}C)", r"\Delta V"), "E": ("Young's modulus", "Pa"), "Elong": ("elongation", "%", r"\epsilon"), "eps_iso": ("strain from isochronous stress-strain curve", "unitless"), "eps_t": ("design fatigue strain range", "unitless"), "f": ("factor f from ASME.III.5 Fig. HBB-T-1432-2", "unitless"), "G": ("electrical conductance", r"U(1/(\Omega\dot m))U"), "gamma": ("surface tension", r"(N\dot m)", r"(\gamma)"), "H": ("enthalpy", "J/kg"), "H_calc_T": ("temperature from enthalpy", r"(^\circ{}C)", r"(^\circ{}C)"), "HBW": ("Brinell Hardness", "BHN"), "k": ("thermal conductivity", r"U(W/(m\dot{}^\circ{}C))U"), "K_IC": ("fracture toughness", r"MPa\dot\sqrt(m)", r"K_{IC}"), "kappa": ("isothermal compressibility", r"(1/Pa)", r"(\kappa)"), "Kv_prime": ("factor Kv' from ASME.III.5 Fig. 
HBB-T-1432-3", "unitless", r"K_{v}^{'}"), "mu_d": ("dynamic viscosity", r"(Pa\dot{}s)", r"(\mu_d)"), "mu_k": ("kinematic viscosity", "m^2/s", r"(\mu_k)"), "nu": ("Poisson's ratio", "unitless", r"(\nu)"), "nu_g": ("vapor specific volume", "m^3/kg", r"\nu"), "P_sat": ("vapor pressure", r"(Pa)", "P_{sat}"), "rho": ("density", "kg/m^3", r"(\rho)"), "S": ("shear modulus", "Pa"), "Sa": ("allowable stress", "Pa"), "SaFat": ("design fatigue stress", "Pa"), "Sm": ("design stress", "Pa"), "Smt": ("service reference stress", "Pa"), "So": ("design reference stress", "Pa"), "Sr": ("stress to rupture", "Pa"), "St": ("time dependent design stress", "Pa"), "Su": ("tensile strength", "Pa"), "Sy": ("yield strength", "Pa"), "T_boil": ("boiling temperature", r"(^\circ{}C)", r"(T_{boil})"), "T_liq": ("liquidus temperature", r"(^\circ{}C)", r"(T_{liq})"), "T_melt": ("melting temperature", r"(^\circ{}C)", r"(T_{melt})"), "T_sol": ("solidus temperature", r"(^\circ{}C)", r"(T_{sol})"), "tMaxSr": ("allowable time to rupture", "s"), "tMaxSt": ("allowable time to allowable stress", "s"), "TSRF": ("tensile strength reduction factor", "unitless"), "v_sound": ("speed of sound", "m/s", r"(v_{sound})"), "WSRF": ("weld strength reduction factor", "unitless"), "YSRF": ("yield strength reduction factor", "unitless"), } class Property: """A Property of a material. Most properties are computed as temperature-dependent functions.""" def __init__(self, name: str, symbol: str, units: str, tex: str = None): """ Constructor for Property class. Parameters ---------- name: str Name of the property. symbol: str Symbol of the property. units: str String representing the units of the property. tex: str (optional) TeX symbol used to represent the property. Defaults to symbol. 
""" self.name: str = name """Name of the Property, used to retrieve the property from the data file""" self.symbol: str = symbol """Symbol of the property, same as the module-level attribute and Material attribute""" self.units: str = units """Units of the Property""" self.TeX: str = tex if tex is not None else symbol """math-style TeX symbol""" def __repr__(self): """Provides string representation of Property instance.""" return f"<Property {self.name}, {self.symbol}, in {self.units}>" def contains(name: str): """ Checks to see if a string representing a desired property is in the global properties list. Parameters ---------- name: str Name of the property whose value is searched for in global properties list. Returns ------- bool True if name is in properties, False otherwise. """ global properties return any(name == p.name for p in properties) def defProp(symbol: str, name: str, units: str, tex: str = None): """ Method which constructs and adds Property objects to global properties object. Parameters ---------- name: str Name of the property. symbol: str Symbol of the property. units: str String representing the units of the property. tex: str (optional) TeX symbol used to represent the property. Defaults to symbol. """ global properties if contains(name): raise KeyError(f"Property already defined: {name}") if tex is None: tex = symbol p = Property(name, symbol, units, tex) properties.add(p) def initialize(): """Construct the global list of default properties in matProps.""" for symbol, vals in PROPERTIES.items(): name = vals[0] units = vals[1] tex = vals[2] if len(vals) > 2 else None defProp(symbol, name, units, tex) initialize() ================================================ FILE: armi/matProps/reference.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """All data in the material YAMLs need to have a reference for the information source.""" UNDEFINED_REF_DATA = "NONE" class Reference: """ A container for the source of the material's data. The Reference class is used to manage the material data's source information and have methods to extract the data for generating the reference section of documentation. """ def __init__(self): self._ref = "" """Entire reference in a single string""" self._type = "" """Type of document (open literature|export controlled|test|your company name)""" def __repr__(self): if not self._ref: return UNDEFINED_REF_DATA elif not self._type: return self._ref else: return f"{self._ref} ({self._type})" @staticmethod def _factory(node): """ Sets Reference data from a given reference node. Parameters ---------- node: dict Dictionary representing a child element from the "references" node. Returns ------- Reference Reference object with data parsed from node. """ reference = Reference() refNode = node["ref"] if refNode: reference._ref = str(refNode) typeNode = node["type"] if typeNode: reference._type = str(typeNode) return reference def getRef(self): """Accessor which returns _ref value.""" return self._ref def getType(self): """Accessor which returns _type value.""" return self._type ================================================ FILE: armi/matProps/symbolicFunction.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A generic symbolic function support for curves in a material YAML file."""

# Import math so that it is available for the eval statement
import math
from copy import copy

from sympy import symbols
from sympy.parsing import parse_expr
from sympy.utilities.lambdify import lambdastr

from armi.matProps.function import Function


class SymbolicFunction(Function):
    """
    A symbolic function.

    A functional form defined in the YAML file is parsed. An example with the YAML format is::

        function:
            <var1>:
                min: <min1>
                max: <max1>
            <var2>:
                min: <min2>
                max: <max2>
            ...
            type: symbolic
            equation: <functional form>
    """

    def __init__(self, mat, prop):
        """
        Constructor for SymbolicFunction object.

        Parameters
        ----------
        mat: Material
            Material object with which this SymbolicFunction is associated
        prop: Property
            Property that is represented by this SymbolicFunction
        """
        super().__init__(mat, prop)
        # Callable built by eval()-ing the lambdified string; None until _parseSpecific() runs.
        self.eqn = None
        # Lambdified source string; retained so eqn can be rebuilt after unpickling (see __setstate__).
        self.sympyStr = None

    def _parseSpecific(self, node):
        """
        Parses nodes that are specific to Symbolic Function object.

        Parameters
        ----------
        node: dict
            Dictionary containing the node whose values will be parsed to fill object.

        Raises
        ------
        ValueError
            If the "equation" entry cannot be parsed/lambdified, or does not evaluate to a number
            at the maximum bound.
        """
        eqn = str(node["function"]["equation"])
        try:
            symbolList = []
            for var in self.independentVars:
                symbolList.append(symbols(var))
            sympyEqn = parse_expr(eqn, evaluate=False)
            self.sympyStr = lambdastr(symbolList, sympyEqn)
            # NOTE(review): eval() executes text ultimately derived from the YAML "equation"
            # entry, so material data files must come from a trusted source.
            self.eqn = eval(self.sympyStr)

            # Try evaluating the function at the maximum bound. This should result in a number if the equation is
            # properly formatted. Bad equations will throw an error either in the `lambdastr` `eval` or this
            # `float( )` line. This is important to catch poor equations now before they cause problems
            # intermittently later (only when calc is called for that equation).
            point = []
            for var in self.independentVars:
                point.append(self.getMaxBound(var))
            float(self.eqn(*point))
        except Exception as e:
            raise ValueError(
                f"Equation provided could not be interpreted:"
                f" {eqn}, {getattr(self, 'sympyStr', 'Symbolic string not created yet.')}"
            ) from e

    def _calcSpecific(self, point: dict) -> float:
        """
        Returns an evaluation for a symbolic function.

        Parameters
        ----------
        point: dict
            dictionary of independent variable/value pairs

        Raises
        ------
        ValueError
            If the evaluation yields a complex number or NaN (the function is undefined there).
        """
        result = self.eqn(*[point[var] for var in self.independentVars])
        if isinstance(result, complex):
            raise ValueError(f"Function is undefined at {point}. Evaluates to complex number: {result}")
        if math.isnan(result):
            raise ValueError(f"Function is undefined at {point}. Evaluates to not a number.")
        return float(result)

    def __repr__(self):
        """Provides string representation of SymbolicFunction object."""
        return f"<SymbolicFunction {self.sympyStr}>"

    def __getstate__(self):
        # The lambda stored in eqn cannot be pickled; drop it and rebuild from sympyStr on load.
        d = copy(self.__dict__)
        d["eqn"] = None
        return d

    def __setstate__(self, s):
        self.__dict__ = s
        # Recreate the callable from the stored lambdified string.
        self.eqn = eval(self.sympyStr)


================================================
FILE: armi/matProps/tableFunction.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation for a simple table to replace analytic curves in the YAML data files.""" from armi.matProps.function import Function class TableFunction(Function): """An abstract TableFunction; the base class for other table lookup methods.""" @staticmethod def isTable(): return True def _setBounds(self, node: dict): """ Validate and set the min and max bounds for a variable. Parameters ---------- node: dict dictionary that contains min and max values. """ raise NotImplementedError() ================================================ FILE: armi/matProps/tableFunction1D.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple implementation for a one dimensional table to replace analytic curves in the YAML data files.""" from armi.matProps.interpolationFunctions import linearLinear from armi.matProps.tableFunction import TableFunction class TableFunction1D(TableFunction): """ A one dimensional table function, containing pairs of data. An example with the YAML format is:: function: <var>: 0 type: table tabulated data: - [0.0, 0.0] - [50, 1e99] - [100, 2e-99] - [150, 100] """ def __init__( self, mat, prop, ): """ Constructor for TableFunction1D object. 
Parameters ---------- mat: Material Material object with which this TableFunction1D is associated prop: Property Property that is represented by this TableFunction1D """ super().__init__(mat, prop) self._var1s = [] """List of independent variable values for TableFunction1D object.""" self._values = [] """List of property values for TableFunction1D object.""" def __repr__(self): """Provides string representation of TableFunction1D object.""" return "<TableFunction1D>" def _setBounds(self, node: dict, var: str): """ Validate and set the min and max bounds for a variable. Parameters ---------- node: dict dictionary that contains min and max values. var: str name of the variable """ self.independentVars[var] = (float(min(self._var1s)), float(max(self._var1s))) def _parseSpecific(self, prop): """ Parses a temperature dependent table function. Parameters ---------- prop: dict Node containing tabulated data that needs to be parsed. """ tabulated_data = prop["tabulated data"] for val in tabulated_data: self._var1s.append(float(val[0])) self._values.append(float(val[1])) def _calcSpecific(self, point: dict) -> float: """ Performs a linear interpolation on tabular data. Parameters ---------- point: dict dictionary of independent variable/value pairs """ var = list(self.independentVars.keys())[0] if var in point: return linearLinear(point[var], self._var1s, self._values) raise ValueError(f"Specified point does contain the correct independent variables: {self.independentVars}") ================================================ FILE: armi/matProps/tableFunction2D.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple implementation for a 2D table to replace analytic curves in the YAML data files.""" import copy from armi.matProps.interpolationFunctions import findIndex, logLinear from armi.matProps.tableFunction import TableFunction class TableFunction2D(TableFunction): """ A 2 dimensional table function. The input format, below, is permitted to have null values in it, which if used during the calculation/interpolation will throw a ValueError. The YAML format demonstrating the two dimensional tabulated data is:: function: <var1>: 0 <var2>: 1 type: two dimensional table tabulated data: - [null, [ 375., 400., 425., 450., 475., 500., 525., 550., 575., 600., 625., 650.]] - [1., [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]] - [10., [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]] - [300., [ 1., 1., 1., 1., 1., 1., 1., 1., .97, .91, .87, .84]] - [30000., [ 1., 1., 1., 1., .93, .88, .83, .80, .75, null, null, null]] - [300000.,[ 1., 1., 1., .89, .83, .79, .74, .70, .66, null, null, null]] """ def __init__(self, mat, prop): """ Constructor for TableFunction2D object. 
Parameters ---------- mat: Material Material object with which this TableFunction2D is associated prop: Property Property that is represented by this TableFunction2D """ super().__init__(mat, prop) self._rowValues = [] """List containing all of the time or cycle values for TableFunction2D object.""" self._columnValues = [] """List containing all of the temperature values for TableFunction2D object.""" self._data = [] """List containing all of the property values in TableFunction2D object.""" def __repr__(self): """Provides string representation of TableFunction2D object.""" return "<TableFunction2D>" def _setBounds(self, node: int, var: str): """ Validate and set the min and max bounds for a variable. Parameters ---------- node: int This number is zero for columns, and one for rows. var: str name of the variable Notes ----- The method declaration here does not match the one in the super class Function. The type of the "node" arguement should be dict, but it is int. This is a surprising and acquard asymmetry. """ if node == 0: cache = None if self.independentVars: # Need to re-arrange order. cache = copy.deepcopy(self.independentVars) self.independentVars = {} self.independentVars[var] = ( float(min(self._columnValues)), float(max(self._columnValues)), ) if cache: self.independentVars[list(cache.keys())[0]] = list(cache.values())[0] elif node == 1: self.independentVars[var] = (float(min(self._rowValues)), float(max(self._rowValues))) else: raise ValueError(f"The node value must be 0 or 1, but was: {node}") def _parseSpecific(self, prop): """ Parses a 2D table function. Parameters ---------- prop: dict Node containing tabulated data that needs to be parsed. 
""" tabulatedData = prop["tabulated data"] skippedFirst = False for rowNode in tabulatedData: if not skippedFirst: for cValNode in rowNode[1]: self._columnValues.append(float(cValNode)) self._data.append([]) skippedFirst = True continue currentRowVal = float(rowNode[0]) self._rowValues.append(currentRowVal) var1DependentData = rowNode[1] for cIndex in range(len(self._columnValues)): value = var1DependentData[cIndex] self._data[cIndex].append(None if value in ("null", None) else float(value)) def _calcSpecific(self, point: dict) -> float: """ Performs 2D interpolation on tabular data. Parameters ---------- point: dict dictionary of independent variable/value pairs """ columnVar = list(self.independentVars.keys())[0] rowVar = list(self.independentVars.keys())[1] if columnVar in point and rowVar in point: columnVal = point[columnVar] rowVal = point[rowVar] else: raise ValueError(f"Specified point does contain the correct independent variables: {self.independentVars}") cIndex = findIndex(columnVal, self._columnValues) rVal0 = logLinear(rowVal, self._rowValues, self._data[cIndex]) rVal1 = logLinear(rowVal, self._rowValues, self._data[cIndex + 1]) cVal0 = self._columnValues[cIndex] cVal1 = self._columnValues[cIndex + 1] return (columnVal - cVal0) / (cVal1 - cVal0) * (rVal1 - rVal0) + rVal0 ================================================ FILE: armi/matProps/tests/__init__.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic testing tools for the matProps package."""

import math
import unittest

from armi.matProps.material import Material


class MatPropsFunTestBase(unittest.TestCase):
    """Base class that provides some common functionality for testing matProps Functions."""

    def setUp(self):
        # Derive a short test name (dropping the "test_" prefix) from the unittest id; used as a material name.
        self.testName = self.id().split(".")[-1]
        searchStr = "test_"
        if self.testName.startswith(searchStr):
            self.testName = self.testName[len(searchStr) :]

    @staticmethod
    def polynomialEvaluation(powerMap, value):
        """
        Perform a polynomial evaluation at a specified value.

        Parameters
        ----------
        powerMap : dict
            Dictionary mapping power to its corresponding coefficient.
        value: float
            Independent variable to evaluate the polynomial at.

        Returns
        -------
        float
            The polynomial evaluation
        """
        return sum(coefficient * pow(value, power) for power, coefficient in powerMap.items())

    @staticmethod
    def powerLawEvaluation(coefficients, value):
        """
        Perform a power law evaluation at a specified value.

        A missing "intercept" defaults to 0.0 and a missing "outer multiplier" to 1.0,
        mirroring the defaults used by matProps.
        """
        intercept = coefficients.get("intercept", 0.0)
        outerMultiplier = coefficients.get("outer multiplier", 1.0)
        innerAdder = coefficients["inner adder"]
        exponent = coefficients["exponent"]
        return intercept + outerMultiplier * (value + innerAdder) ** exponent

    @staticmethod
    def hyperbolicEvaluation(coefficients, value):
        """Perform a hyperbolic function evaluation at a specified value."""
        intercept = coefficients["intercept"]
        outerMultiplier = coefficients["outer multiplier"]
        innerAdder = coefficients["inner adder"]
        innerDenominator = coefficients["inner denominator"]
        return intercept + outerMultiplier * math.tanh((value + innerAdder) / innerDenominator)

    @staticmethod
    def createEqnPoly(coefficients):
        """Creates a symbolic polynomial function from a dictionary of powers."""
        eqn = ""
        for power, value in coefficients.items():
            if not eqn:
                # Make sure we don't have a leading + sign
                eqn += f"{value}*T**{power}"
            else:
                eqn += f" + {value}*T**{power}"
        return eqn

    @staticmethod
    def createEqnPower(coefficients):
        """Creates a symbolic power law function from a dictionary of constants."""
        eqn = f"{coefficients.get('intercept', '')}"
        if "outer multiplier" in coefficients:
            eqn += f" + {coefficients['outer multiplier']}*"
        else:
            eqn += " +"
        eqn += f"(T + {coefficients['inner adder']})**{coefficients['exponent']}"
        return eqn

    @staticmethod
    def createEqnHyper(coefficients):
        """Creates a symbolic hyperbolic function from a dictionary of constants."""
        return (
            f"{coefficients['intercept']} + "
            f"{coefficients['outer multiplier']}*"
            f"{coefficients['hyperbolic function']}("
            f"(T+{coefficients['inner adder']})/{coefficients['inner denominator']})"
        )

    def _createFunctionWithoutTable(self, data=None):
        """
        Helper function designed to create a basic viable yaml file without tabulated data in the function.

        Parameters
        ----------
        data : dict
            A dictionary containing user specified function child nodes.

        Returns
        -------
        Material
            Material loaded from the generated data.
        """
        funcBody = {"T": {"min": -100.0, "max": 500.0}}
        funcBody.update(data or {})
        materialData = {
            "file format": "TESTS",
            "composition": {"Fe": "balance"},
            "material type": "Metal",
            "density": {"function": funcBody},
        }
        mat = Material()
        mat.loadNode(materialData)
        return mat

    def _createFunction(self, data=None, tableData=None, minT=-100.0, maxT=500.0):
        """
        Helper function designed to create a basic viable yaml file.

        Parameters
        ----------
        data : dict
            A dictionary containing user specified function child nodes.
        tableData : dict
            Table data to include in the function definition
        minT : float
            Float containing the minimum T variable value for the function.
        maxT : float
            Float containing the maximum T variable value for the function.

        Returns
        -------
        Material
            Material loaded from the generated data.
        """
        funcBody = {"T": {"min": minT, "max": maxT}}
        funcBody.update(data or {})
        materialData = {
            "file format": "TESTS",
            "composition": {"Fe": "balance"},
            "material type": "Metal",
            "density": {"function": funcBody, "tabulated data": tableData or {}},
        }
        mat = Material()
        mat.loadNode(materialData)
        return mat

    def belowMinimumCheck(self, yamlData, tableData=None):
        """Check if a ValueError is thrown if attempting to evaluate below the min value of a given T variable."""
        mat = self._createFunction(yamlData, tableData)
        func = mat.rho
        with self.assertRaises(ValueError):
            func.calc({"T": func.getMinBound("T") - 0.01})

    def aboveMaximumCheck(self, yamlData, tableData=None):
        """Checks if a ValueError is thrown if attempting to evaluate above the max value of the T variable."""
        mat = self._createFunction(yamlData, tableData)
        func = mat.rho
        with self.assertRaises(ValueError):
            func.calc({"T": func.getMaxBound("T") + 0.01})


================================================
FILE: armi/matProps/tests/invalidTestFiles/badFileFormat.YAML
================================================
file format: INVALID
material type: Fluid
composition:
  a: balance
density:
  function:
    T:
      min: 100.0
      max: 200.0
    type: symbolic
    equation: 1.0


================================================
FILE: armi/matProps/tests/invalidTestFiles/badProperty.yaml
================================================
file format: TESTS
material type: Metal
composition:
  Na: 1.0
bad_prop: whatever


================================================
FILE: armi/matProps/tests/invalidTestFiles/duplicateComposition.yaml
================================================
file format: TESTS
material type: Fluid
composition:
  a: [15, 30]
  b: [10, 15]
  b: [11, 16]
  c: balance
density:
  function:
    T:
      min: 100.0
      max: 200.0
    type: symbolic
    equation: 1.0


================================================
FILE: armi/matProps/tests/testDir1/a.yaml
================================================
file format: TESTS
material type: Fluid
composition:
  a: balance
references: - ref: ACME II.2017, Table 3 pg 182 refType: open literature density: function: T: min: 100.0 max: 200.0 type: symbolic equation: 1.0 ================================================ FILE: armi/matProps/tests/testDir1/b.yaml ================================================ file format: TESTS material type: Fluid composition: b: balance density: function: T: min: 100.0 max: 200.0 type: symbolic equation: 2.0 ================================================ FILE: armi/matProps/tests/testDir2/c.yml ================================================ file format: TESTS material type: Fluid composition: c: balance density: function: T: min: 100.0 max: 200.0 type: symbolic equation: 3.0 ================================================ FILE: armi/matProps/tests/testDir2/d.yaml ================================================ file format: TESTS material type: Fluid composition: d: balance density: function: T: min: 100.0 max: 200.0 type: symbolic equation: 4.0 ================================================ FILE: armi/matProps/tests/testDir3/a.yaml ================================================ file format: TESTS material type: Fluid composition: a: balance density: function: T: min: 100.0 max: 200.0 type: symbolic equation: 6.0 ================================================ FILE: armi/matProps/tests/testDir3/e.yaml ================================================ file format: TESTS material type: Fluid composition: e: balance density: function: T: min: 100.0 max: 200.0 type: symbolic equation: 5.0 ================================================ FILE: armi/matProps/tests/testDir4/sampleProperty.yaml ================================================ file format: TESTS material type: Fluid composition: a: balance density: function: T: min: 101.0 max: 501.0 type: symbolic equation: 1.0 specific heat capacity: function: T: min: 102.0 max: 502.0 type: symbolic equation: 2.0 thermal conductivity: function: T: min: 103.0 max: 503.0 type: symbolic equation: 3.0 
thermal diffusivity: function: T: min: 104.0 max: 504.0 type: symbolic equation: 4.0 dynamic viscosity: function: T: min: 105.0 max: 505.0 type: symbolic equation: 5.0 kinematic viscosity: function: T: min: 106.0 max: 506.0 type: symbolic equation: 6.0 melting temperature: function: T: min: 107.0 max: 507.0 type: symbolic equation: 7.0 boiling temperature: function: T: min: 108.0 max: 508.0 type: symbolic equation: 8.0 latent heat of vaporization: function: T: min: 109.0 max: 509.0 type: symbolic equation: 9.0 enthalpy of fusion: function: T: min: 110.0 max: 510.0 type: symbolic equation: 10.0 surface tension: function: T: min: 111.0 max: 511.0 type: symbolic equation: 11.0 vapor pressure: function: T: min: 112.0 max: 512.0 type: symbolic equation: 12.0 isothermal compressibility: function: T: min: 113.0 max: 513.0 type: symbolic equation: 13.0 mean coefficient of thermal expansion: function: T: min: 114.0 max: 514.0 type: symbolic equation: 14.0 instantaneous coefficient of thermal expansion: function: T: min: 115.0 max: 515.0 type: symbolic equation: 15.0 Young's modulus: function: T: min: 116.0 max: 516.0 type: symbolic equation: 16.0 Poisson's ratio: function: T: min: 117.0 max: 517.0 type: symbolic equation: 17.0 yield strength: function: T: min: 118.0 max: 518.0 type: symbolic equation: 18.0 tensile strength: function: T: min: 119.0 max: 519.0 type: symbolic equation: 19.0 design stress: function: T: min: 120.0 max: 520.0 type: symbolic equation: 20.0 design reference stress: function: T: min: 121.0 max: 521.0 type: symbolic equation: 21.0 allowable stress: function: T: min: 122.0 max: 522.0 type: symbolic equation: 22.0 time dependent design stress: function: T: min: 123.0 max: 523.0 type: symbolic equation: 23.0 service reference stress: function: T: min: 124.0 max: 524.0 type: symbolic equation: 24.0 stress to rupture: function: T: min: 125.0 max: 525.0 type: symbolic equation: 25.0 tensile strength reduction factor: function: T: min: 126.0 max: 526.0 
type: symbolic equation: 26.0 yield strength reduction factor: function: T: min: 127.0 max: 527.0 type: symbolic equation: 27.0 weld strength reduction factor: function: T: min: 127.0 max: 527.0 type: symbolic equation: 28.0 allowable time to rupture: function: T: min: 128.0 max: 528.0 type: symbolic equation: 29.0 allowable time to allowable stress: function: T: min: 129.0 max: 529.0 type: symbolic equation: 30.0 design fatigue strain range: function: T: min: 130.0 max: 530.0 type: symbolic equation: 31.0 strain from isochronous stress-strain curve: function: T: min: 130.0 max: 530.0 type: symbolic equation: 32.0 design fatigue stress: function: T: min: 131.0 max: 531.0 type: symbolic equation: 33.0 linear expansion: function: T: min: 132.0 max: 532.0 type: symbolic equation: 34.0 vapor specific volume: function: T: min: 133.0 max: 533.0 type: symbolic equation: 35.0 speed of sound: function: T: min: 134.0 max: 534.0 type: symbolic equation: 36.0 solidus temperature: function: T: min: 135.0 max: 535.0 type: symbolic equation: 37.0 liquidus temperature: function: T: min: 136.0 max: 536.0 type: symbolic equation: 38.0 volumetric expansion: function: T: min: 137.0 max: 537.0 type: symbolic equation: 39.0 enthalpy: function: T: min: 138.0 max: 538.0 type: symbolic equation: 40.0 temperature from enthalpy: function: T: min: 139.0 max: 539.0 type: symbolic equation: 41.0 fracture toughness: function: T: min: 140.0 max: 540.0 type: symbolic equation: 42.0 Brinell Hardness: function: T: min: 141.0 max: 541.0 type: symbolic equation: 43.0 factor f from ASME.III.5 Fig. HBB-T-1432-2: function: T: min: 141.0 max: 541.0 type: symbolic equation: 44.0 factor Kv' from ASME.III.5 Fig. 
HBB-T-1432-3: function: T: min: 141.0 max: 541.0 type: symbolic equation: 45.0 shear modulus: function: T: min: 141.0 max: 541.0 type: symbolic equation: 46.0 elongation: function: T: min: 141.0 max: 541.0 type: symbolic equation: 47.0 ================================================ FILE: armi/matProps/tests/testMaterialsData/materialA.yaml ================================================ file format: TESTS material type: Fluid composition: a: balance density: function: T: min: 201.0 max: 601.0 type: symbolic equation: 101.0*T + 500 ================================================ FILE: armi/matProps/tests/testMaterialsData/materialB.yaml ================================================ file format: TESTS material type: Fluid composition: b: balance specific heat capacity: function: T: min: 202.0 max: 602.0 type: symbolic equation: 102.0 ================================================ FILE: armi/matProps/tests/testMaterialsData/materialsSubDir/materialC.yaml ================================================ file format: TESTS material type: Fluid composition: c: balance thermal conductivity: function: T: min: 103.0 max: 503.0 type: symbolic equation: 3.0 ================================================ FILE: armi/matProps/tests/testMaterialsData/materialsSubDir/materialD.yaml ================================================ file format: TESTS material type: Fluid composition: d: balance thermal diffusivity: function: T: min: 204.0 max: 604.0 type: symbolic equation: 104.0 ================================================ FILE: armi/matProps/tests/test_1DSymbolicFunction.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple examples to verify constant, polynomial, hyperbolic, and power law functional forms.""" import numpy as np from armi.matProps.tests import MatPropsFunTestBase class Test1DSymbolicFunction(MatPropsFunTestBase): """Test 1D symbolic functions.""" @classmethod def setUpClass(cls): super().setUpClass() cls.basePolynomialMap = {0: 5, 1: 2, 2: -3, 3: 4, 4: -5, 5: 6, 6: -7, 7: 8} cls.basePolynomialData = { "type": "symbolic", "equation": cls.createEqnPoly(cls.basePolynomialMap), } cls.basePowerLawTerms = { "exponent": 2.0, "inner adder": 125.0, "outer multiplier": 3.4, "intercept": -2.5, } cls.basePowerLawData = { "type": "symbolic", "equation": cls.createEqnPower(cls.basePowerLawTerms), } cls.baseHyperbolicTerms = { "hyperbolic function": "tanh", "intercept": 5, "outer multiplier": 2, "inner denominator": 4, "inner adder": 1, } cls.baseHyperbolicData = { "type": "symbolic", "equation": cls.createEqnHyper(cls.baseHyperbolicTerms), } cls.baseConstantData = {"type": "symbolic", "equation": "9123.5"} def test_polynomialEqnIntInt(self): """ Evaluates a PolynomialFunction that has 8 power values that are all integers. Ensure that the override methods PolynomialFunction._parseSpecific() and PolynomialFunction._calcSpecific() are functioning appropriately. A minimal input with a defined polynomial function is provided. The polynomial is comprised of all integer coefficients and powers to ensure that matProps can properly handle integer inputs. 
The function is evaluated at several values in the valid range and compared to a lambda expression inside the test method to make sure their results are equivalent. """ # these polynomials have up to 8 powers/terms (including 0) mat = self._createFunction(self.basePolynomialData) mat.name = self.testName self.assertEqual(str(mat), f"<Material {self.testName} <MaterialType Metal>>") # test using input dict for calc self.assertAlmostEqual(mat.rho.calc({"T": 0}), self.polynomialEvaluation(self.basePolynomialMap, 0)) self.assertAlmostEqual(mat.rho.calc({"T": 50}), self.polynomialEvaluation(self.basePolynomialMap, 50)) self.assertAlmostEqual(mat.rho.calc({"T": 100}), self.polynomialEvaluation(self.basePolynomialMap, 100)) # test using kwargs for calc self.assertAlmostEqual(mat.rho.calc(T=0), self.polynomialEvaluation(self.basePolynomialMap, 0)) self.assertAlmostEqual(mat.rho.calc(T=50), self.polynomialEvaluation(self.basePolynomialMap, 50)) self.assertAlmostEqual(mat.rho.calc(T=100), self.polynomialEvaluation(self.basePolynomialMap, 100)) def test_polynomialEqnFloatInt(self): """Evaluates a PolynomialFunction with floating point coefficients and integer point power terms.""" coefficientsMap = {0: -2.523536, 1: 5.374489, 2: 4.897134} data = {"type": "symbolic", "equation": self.createEqnPoly(coefficientsMap)} mat = self._createFunction(data) func = mat.rho # test using input dict for calc self.assertAlmostEqual(func.calc({"T": -100.0}), self.polynomialEvaluation(coefficientsMap, -100.0)) self.assertAlmostEqual(func.calc({"T": 0.0}), self.polynomialEvaluation(coefficientsMap, 0.0)) self.assertAlmostEqual(func.calc({"T": 100.0}), self.polynomialEvaluation(coefficientsMap, 100.0)) self.assertAlmostEqual(func.calc({"T": 500.0}), self.polynomialEvaluation(coefficientsMap, 500.0)) # test using kwargs for calc self.assertAlmostEqual(func.calc(T=-100.0), self.polynomialEvaluation(coefficientsMap, -100.0)) self.assertAlmostEqual(func.calc(T=0.0), 
self.polynomialEvaluation(coefficientsMap, 0.0)) self.assertAlmostEqual(func.calc(T=100.0), self.polynomialEvaluation(coefficientsMap, 100.0)) self.assertAlmostEqual(func.calc(T=500.0), self.polynomialEvaluation(coefficientsMap, 500.0)) def test_polynomialEqnFloatFloat(self): """Evaluates a PolynomialFunction with floating point coefficients and floating point power terms.""" coefficientsMap = {0.5: -2.5, 2.5: 5.389, 1.5: 4.375} data = {"type": "symbolic", "equation": self.createEqnPoly(coefficientsMap)} mat = self._createFunction(data, minT=0.0) mat.name = self.testName self.assertEqual(str(mat), f"<Material {self.testName} <MaterialType Metal>>") func = mat.rho self.assertAlmostEqual(func.calc({"T": 0.0}), self.polynomialEvaluation(coefficientsMap, 0.0)) self.assertAlmostEqual(func.calc({"T": 100.0}), self.polynomialEvaluation(coefficientsMap, 100.0)) self.assertAlmostEqual(func.calc({"T": 500.0}), self.polynomialEvaluation(coefficientsMap, 500.0)) def test_polynomialDiffFloatTypes(self): """Evaluates a PolynomialFunction with floating point coefficients power terms, checking exact values.""" coefficientsMap = {0.5: -2.5, 2.5: 5.389, 1.5: 4.375} data = {"type": "symbolic", "equation": self.createEqnPoly(coefficientsMap)} mat = self._createFunction(data, minT=0.0) self.assertAlmostEqual(mat.rho.calc({"T": np.float64(0.0)}), 0.0) self.assertAlmostEqual(mat.rho.calc({"T": np.float64(100.0)}), 543250.0) self.assertAlmostEqual(mat.rho.calc({"T": np.float64(500.0)}), 30174283.91217429) def test_symbolicEqnError(self): """Ensure symbolic equations fail correctly when given empty or nonsense inputs.""" # Leave out equation node dataNoCoeff = {"type": "symbolic"} with self.assertRaises(KeyError): self._createFunction(dataNoCoeff) # Provide invalid equation node. 
dataBadCoeff = {"type": "symbolic", "equation": "NOT AN EQUATION"} with self.assertRaises(ValueError): self._createFunction(dataBadCoeff) def test_powerEqn(self): """Evaluates a PowerLaw with floating point coefficients and exponents.""" mat = self._createFunction(self.basePowerLawData) func = mat.rho self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(self.basePowerLawTerms, 0)) self.assertAlmostEqual(func.calc({"T": 12.5}), self.powerLawEvaluation(self.basePowerLawTerms, 12.5)) self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(self.basePowerLawTerms, 25)) self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(self.basePowerLawTerms, 50)) self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(self.basePowerLawTerms, 75)) self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(self.basePowerLawTerms, 100)) def test_powerEqnAllInt(self): """Evaluates a PowerLaw with integer coefficients and exponents.""" coefficients = { "exponent": 2, "inner adder": 125, "outer multiplier": 3, "intercept": -2, } powerLawDataInt = { "type": "symbolic", "equation": self.createEqnPower(coefficients), } mat = self._createFunction(powerLawDataInt) func = mat.rho self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0)) self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25)) self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50)) self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75)) self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100)) def test_powerEqnFloatInt(self): """Evaluates a PowerLaw with a mixture of integer and floating point coefficients and exponents.""" coefficients = { "exponent": 2.5, "inner adder": 125, "outer multiplier": 3.14159, "intercept": -2, } powerLawDataInt = { "type": "symbolic", "equation": self.createEqnPower(coefficients), } 
mat = self._createFunction(powerLawDataInt) func = mat.rho self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0)) self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25)) self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50)) self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75)) self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100)) def test_powerEqnNoInter(self): """Evaluates a PowerLaw with no intercept term.""" coefficients = {"exponent": 2.0, "inner adder": 125.0, "outer multiplier": 3.4} data = {"type": "symbolic", "equation": self.createEqnPower(coefficients)} mat = self._createFunction(data) # Intercept in self.powerLawEvaluation is 0.0 to reflect default value in matProps self.assertAlmostEqual(mat.rho.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0)) self.assertAlmostEqual(mat.rho.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25)) self.assertAlmostEqual(mat.rho.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50)) self.assertAlmostEqual(mat.rho.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75)) self.assertAlmostEqual(mat.rho.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100)) def test_powerEqnNoOuter(self): """Evaluates a PowerLaw with no outer multiplier term.""" coefficients = {"exponent": 2.0, "inner adder": 125.0, "intercept": -2.5} data = {"type": "symbolic", "equation": self.createEqnPower(coefficients)} mat = self._createFunction(data) func = mat.rho # Outer multiplier in self.powerLawEvaluation is 1.0 to reflect default value in matProps self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0)) self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25)) self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50)) self.assertAlmostEqual(func.calc({"T": 75}), 
self.powerLawEvaluation(coefficients, 75)) self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100)) def test_powerEqnNoOuterInter(self): """Evaluates a PowerLaw with no outer multiplier or intercept term.""" coefficients = {"exponent": 2.0, "inner adder": 125.0} data = {"type": "symbolic", "equation": self.createEqnPower(coefficients)} mat = self._createFunction(data) func = mat.rho self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0)) self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25)) self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50)) self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75)) self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100)) def test_constantsEval(self): """Evaluates a PowerLaw for integer and floating point values.""" mat = self._createFunction(self.baseConstantData) func = mat.rho self.assertAlmostEqual(func.calc({"T": 0}), 9123.5) self.assertAlmostEqual(func.calc({"T": 12.5}), 9123.5) self.assertAlmostEqual(func.calc({"T": 50}), 9123.5) self.assertAlmostEqual(func.calc({"T": 100}), 9123.5) def test_hyperbolicEqnEval(self): """Evaluates a HyperbolicFunction for integer and floating point values.""" mat = self._createFunction(self.baseHyperbolicData) # test using input dict for calc self.assertAlmostEqual(mat.rho.calc({"T": 0}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 0)) self.assertAlmostEqual(mat.rho.calc({"T": 12.5}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 12.5)) self.assertAlmostEqual(mat.rho.calc({"T": 50}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 50)) self.assertAlmostEqual(mat.rho.calc({"T": 100}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 100)) # test using kwargs for calc self.assertAlmostEqual(mat.rho.calc(T=0), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 0)) 
self.assertAlmostEqual(mat.rho.calc(T=12.5), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 12.5)) self.assertAlmostEqual(mat.rho.calc(T=50), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 50)) self.assertAlmostEqual(mat.rho.calc(T=100), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 100)) def test_hyperbolicEqnEval2(self): """Evaluates a HyperbolicFunction for a different set of floating point values.""" coefficients = { "hyperbolic function": "tanh", "intercept": 3.829e8, "outer multiplier": -4.672e8, "inner denominator": 216.66, "inner adder": -613.52, } data = {"type": "symbolic", "equation": self.createEqnHyper(coefficients)} mat = self._createFunction(data) func = mat.rho expectedValue = self.hyperbolicEvaluation(coefficients, 500) self.assertAlmostEqual(func.calc({"T": 500}), expectedValue, delta=expectedValue * 1e-5) ================================================ FILE: armi/matProps/tests/test_composition.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Basic tests of the Composition class.""" import os import unittest from ruamel.yaml.constructor import DuplicateKeyError import armi.matProps from armi.matProps.material import Material class TestComposition(unittest.TestCase): def setUp(self): self.testName = self.id().split(".")[-1] searchStr = "test_" if self.testName.startswith(searchStr): self.testName = self.testName[len(searchStr) :] def _createFunction(self, compMap=None): compValue = {} if compMap is not None: compValue = compMap materialMap = { "file format": "TESTS", "composition": compValue, "material type": "Metal", "density": { "function": { "T": {"min": 100.0, "max": 200.0}, "type": "symbolic", "equation": 1.0, } }, } mat = Material() mat.loadNode(materialMap) return mat def test_compositionMissing(self): materialMap = { "file format": "TESTS", "material type": "Metal", "density": "whatever", } mat = Material() with self.assertRaisesRegex(KeyError, "Missing YAML node `composition`"): mat.loadNode(materialMap) def test_compositionInvTuple(self): # Invalid doesn't have two elements badCompMap = {"Fe": [1.0]} with self.assertRaisesRegex( TypeError, "Composition values must be either a tuple of min/max values, or `balance`", ): self._createFunction(badCompMap) def test_compositionInvStr(self): badCompMap = {"a": [0.5, 0.5], "b": "remainder"} with self.assertRaisesRegex( TypeError, "Composition values must be either a tuple of min/max values, or `balance`", ): self._createFunction(badCompMap) def test_compositionMissBalance(self): compMap = {"a": [0.25, 0.26], "b": [0.3, 0.31], "c": [0.45, 0.46]} with self.assertRaisesRegex(ValueError, "exactly one balance element"): self._createFunction(compMap) def test_compositionBalanceNum(self): compMap = {"a": [15.0, 15.1], "b": "balance", "c": "balance"} with self.assertRaisesRegex(ValueError, "exactly one balance element"): self._createFunction(compMap) def test_compositionBalance(self): compMap = {"a": [15.0, 20.0], "b": [30.0, 35.0], "c": "balance"} mat = 
self._createFunction(compMap) mat.name = self.testName self.assertEqual(str(mat), f"<Material {self.testName} <MaterialType Metal>>") c_minValue, c_maxValue = None, None sumMin, sumMax = 0.0, 0.0 for compElement in mat.composition: if compElement.name != "c": self.assertFalse(compElement.isBalance) compValue = compMap.get(compElement.name) self.assertIsNotNone(compValue) self.assertAlmostEqual(compElement.minValue, compValue[0]) self.assertAlmostEqual(compElement.maxValue, compValue[1]) sumMin += compElement.minValue sumMax += compElement.maxValue else: self.assertTrue(compElement.isBalance) c_minValue = compElement.minValue c_maxValue = compElement.maxValue self.assertAlmostEqual(c_minValue, 100.0 - sumMax) self.assertAlmostEqual(c_maxValue, 100.0 - sumMin) def test_compositionBalance2(self): compMap = { "a": [10.0, 15.0], "b": [20.1, 35.1], "c": [30.2, 50.2], "d": "balance", } mat = self._createFunction(compMap) mat.name = self.testName self.assertEqual(str(mat), f"<Material {self.testName} <MaterialType Metal>>") sumMin = 0.0 d_minValue, d_maxValue = None, None for compElement in mat.composition: if compElement.name != "d": self.assertFalse(compElement.isBalance) compValue = compMap.get(compElement.name) self.assertIsNotNone(compValue) self.assertAlmostEqual(compElement.minValue, compValue[0]) self.assertAlmostEqual(compElement.maxValue, compValue[1]) sumMin += compElement.minValue else: self.assertTrue(compElement.isBalance) d_minValue = compElement.minValue d_maxValue = compElement.maxValue self.assertAlmostEqual(d_minValue, 0.0) self.assertAlmostEqual(d_maxValue, 100.0 - sumMin) def test_compositionMinValue(self): compMap = {"a": [-1.0, 20.0], "b": "balance"} with self.assertRaisesRegex(ValueError, "negative minimum"): self._createFunction(compMap) def test_compositionMaxValue(self): compMap = {"a": [15.0, 14.9], "b": "balance"} with self.assertRaisesRegex(ValueError, "max < min"): self._createFunction(compMap) def test_compositionMaxValue2(self): compMap = 
{"a": [15.0, 100.1], "b": "balance"} with self.assertRaisesRegex(ValueError, "max > 100.0"): self._createFunction(compMap) def test_compositionMinSum(self): compMap = { "a": [30.0, 30.1], "b": [40.1, 40.2], "c": [50.2, 50.3], "d": "balance", } with self.assertRaisesRegex(ValueError, "minimum composition summation greater than 100.0"): self._createFunction(compMap) def test_compositionDuplicate(self): duplicateTestFile = os.path.join( os.path.dirname(os.path.realpath(__file__)), "invalidTestFiles", "duplicateComposition.yaml", ) with self.assertRaises(DuplicateKeyError): armi.matProps.loadMaterial(duplicateTestFile) ================================================ FILE: armi/matProps/tests/test_constituent.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Basic tests of the Constituent class.""" import unittest from armi.matProps.constituent import Constituent class TestConstituent(unittest.TestCase): def test_errorHandling(self): c = Constituent("Fe", 10.0, 25.0, False) self.assertEqual(str(c), "<Constituent Fe min: 10.0 max: 25.0>") c = Constituent("Fe", 0.0, 99.0, True) self.assertEqual(str(c), "<Constituent Fe min: 0.0 max: 99.0 computed based on balance>") with self.assertRaises(ValueError): Constituent("Fe", -10.0, 25.0, False) with self.assertRaises(ValueError): Constituent("Fe", 50.0, 101.0, False) with self.assertRaises(ValueError): Constituent("Fe", 50.0, 1.0, False) def test_parseComposition(self): # test we fail correctly when providing invalid inputs with self.assertRaises(ValueError): Constituent.parseComposition({}) with self.assertRaises(ValueError): node = {"Fe": (0.1, 0.25)} Constituent.parseComposition(node) # a simple Iron-only material node = {"Fe": "balance"} c = Constituent.parseComposition(node) self.assertEqual(len(c), 1) self.assertEqual(c[0].maxValue, 100.0) self.assertTrue(c[0].isBalance) # a hypothetical steel-like material node = {"C": (0.0, 10.0), "Cr": (0.0, 1.0), "Fe": "balance"} c = Constituent.parseComposition(node) self.assertEqual(len(c), 3) self.assertEqual(c[0].maxValue, 10.0) self.assertFalse(c[0].isBalance) self.assertEqual(c[2].maxValue, 100.0) self.assertTrue(c[2].isBalance) ================================================ FILE: armi/matProps/tests/test_functions.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the Function class.""" from armi.matProps.material import Material from armi.matProps.tests import MatPropsFunTestBase class TestFunctions(MatPropsFunTestBase): """Class which encapsulates the unit tests data and methods to test the matProps Function class.""" @classmethod def setUpClass(cls): super().setUpClass() cls.baseConstantData = {"type": "symbolic", "equation": "9123.5"} def test_getReferences(self): mat = self._createFunction(self.baseConstantData) mat.rho._references = ["1", "2"] self.assertEqual(mat.rho.references[0], "1") self.assertEqual(mat.rho.references[1], "2") def test_datafilesVarVals(self): """ Test to make sure that parsing variable values return the expected values when parsing "max" and "min" nodes for the T variable. 
""" mat = self._createFunction(self.baseConstantData) mat.name = self.testName self.assertEqual(str(mat), f"<Material {self.testName} <MaterialType Metal>>") density = mat.rho self.assertEqual(density.getMinBound("T"), -100.0) self.assertEqual(density.getMaxBound("T"), 500.0) def test_datafilesMaxVar(self): """Test that makes sure a ValueError is thrown if the max of a variable is less than the min.""" with self.assertRaises(ValueError): self._createFunction(self.baseConstantData, maxT=-101.0) def test_datafilesInvType(self): """Test that makes sure a KeyError is thrown if an unsupported function type is provided.""" data = {"type": "fake function"} with self.assertRaisesRegex(KeyError, "fake function"): self._createFunction(data) def test_refTempEval(self): """Test that a function with a reference temperature correctly parses and returns the expected value.""" testData = self.baseConstantData.copy() testData.update({"reference temperature": 200.0}) mat = self._createFunction(testData) func = mat.rho self.assertAlmostEqual(func.getReferenceTemperature(), 200.0) def test_refTempMissing(self): """Test that a ValueError is thrown when accessing a reference temperature value that is not provided.""" mat = self._createFunction(self.baseConstantData) func = mat.rho with self.assertRaisesRegex(ValueError, "Reference temperature is undefined"): func.getReferenceTemperature() def test_refTempInvalid(self): """Test to make sure that a ValueError is thrown if the provided reference temperature value is invalid.""" testData = self.baseConstantData.copy() testData.update({"reference temperature": -273.25}) mat = self._createFunction(testData) func = mat.rho with self.assertRaisesRegex(ValueError, "Reference temperature is undefined"): func.getReferenceTemperature() def test_independentVars(self): mat = self._createFunction(self.baseConstantData) fun = mat.rho self.assertEqual(len(fun.independentVars), 1) self.assertEqual(fun.getIndependentVariables(), ["T"]) 
self.assertEqual(fun.getMinBound("T"), -100) self.assertEqual(fun.getMaxBound("T"), 500) with self.assertRaises(KeyError): fun.getMinBound("X") with self.assertRaises(KeyError): fun.getMaxBound("Y") def test_calcEdgeCases(self): mat = self._createFunction(self.baseConstantData) fun = mat.rho with self.assertRaises(ValueError): fun.calc({"T": 200}, T=300) with self.assertRaises(ValueError): fun.calc() with self.assertRaises(KeyError): fun.calc({"Z": 200}) # whoops, I forgot to declare a "max" value materialData = { "file format": "TESTS", "composition": {"Fe": "balance"}, "material type": "Metal", "density": {"function": {"T": {"min": 1.0}, "type": "symbolic", "equation": 1.0}}, } mat = Material() with self.assertRaises(KeyError): mat.loadNode(materialData) def test_references(self): materialData = { "file format": "TESTS", "composition": {"Fe": "balance"}, "material type": "Metal", "density": { "function": { "T": {"min": 1.0, "max": 10.0}, "type": "symbolic", "equation": 1.0, }, "references": [{"ref": "things", "type": "open literature"}], }, } mat = Material() mat.loadNode(materialData) self.assertEqual(len(mat.rho.references), 1) self.assertEqual(mat.rho.references[0].getRef(), "things") def test_tabulatedData(self): tableData = [ [300, 25], [400, 26.28], [500, 26.26], [600, 25.89], [700, 25.19], [800, 25.10], [900, 26.32], ] materialData = { "file format": "TESTS", "composition": {"Fe": "balance"}, "material type": "Metal", "density": { "function": { "T": {"min": 1.0, "max": 10.0}, "type": "symbolic", "equation": 1.0, }, "tabulated data": tableData, }, } mat = Material() mat.loadNode(materialData) self.assertEqual(len(mat.rho.references), 0) self.assertEqual(len(mat.rho.tableData._values), 7) ================================================ FILE: armi/matProps/tests/test_hashing.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file 
except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Program that runs tests for the TestHashValues class.""" import os import unittest import armi.matProps class TestHashValues(unittest.TestCase): """Testing the material hashing logic.""" @classmethod def setUpClass(cls): cls.testDir = os.path.dirname(__file__) def test_hash(self): testFileA = os.path.join(self.testDir, "testDir1", "a.yaml") testFileB = os.path.join(self.testDir, "testMaterialsData", "materialB.yaml") matA = armi.matProps.loadMaterial(testFileA, False) matB = armi.matProps.loadMaterial(testFileB, False) hA = matA.hash() hB = matB.hash() # NOTE: We cannot check exact hashes, because of OS differences self.assertEqual(len(hA), 40) self.assertEqual(len(hB), 40) self.assertNotEqual(hA, hB) ================================================ FILE: armi/matProps/tests/test_interpolationFunctions.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Program that runs all of the tests contained in the TestInterpolationFunctions class.""" import unittest import numpy as np from scipy import interpolate from armi.matProps.interpolationFunctions import findIndex, linearLinear, logLinear class TestInterpolationFunctions(unittest.TestCase): """Class which creates tests for the matProps InterpolationFunctions files.""" def test_findIndex(self): x = [2, 4, 6, 8] self.assertEqual(findIndex(2, x), 0) self.assertEqual(findIndex(3, x), 0) self.assertEqual(findIndex(3.14, x), 0) self.assertEqual(findIndex(4, x), 0) # NOTE: This is 0, not 1. self.assertEqual(findIndex(4.001, x), 1) self.assertEqual(findIndex(6, x), 1) # NOTE: This is 1, not 2. self.assertEqual(findIndex(6.2, x), 2) with self.assertRaises(ValueError): findIndex(-9, x) with self.assertRaises(ValueError): findIndex(9, x) def test_linearLinear(self): """ Test which validates the values returned from the linear-linear interpolation method. Uses numpy linspace function to generate values at which interpolation will be performed. """ x = np.arange(10) y = [1.0 + xx + xx**2 for xx in range(10)] f = interpolate.interp1d(x, y, bounds_error=False) for nn in np.linspace(0, 9, 20): self.assertTrue(np.allclose(f(nn), linearLinear(nn, x.tolist(), y))) def test_linearLinearInterpolation(self): """ Duplicate test validating that the correct values are returned from a linear-linear interpolation. Differs from test_linearLinear by constructing interpolation points using standard lists instead of numpy linspace. 
""" x = [0.0, 1.0] y = [1.0, 2.0] for xx, yy in [(0.0, 1.0), (0.5, 1.5), (1.0, 2.0)]: self.assertAlmostEqual(yy, linearLinear(xx, x, y)) def test_linearLinearExtrapolation(self): """Check to make sure a ValueError is thrown if attempting an interpolation outside the function domain.""" x = [0.0, 1.0] y = [1.0, 2.0] with self.assertRaisesRegex(ValueError, "out of bounds"): linearLinear(-2.0, x, y) def test_logLinear(self): """Test that validates the values returned from the log-linear interpolation function.""" x = np.arange(1.0, 11.0) y = -42.0 + x + x**-2 n_vals = np.interp(np.log10(np.linspace(1, 10, 20)), np.log10(x), y) m_vals = [logLinear(nn, x, y) for nn in np.linspace(1, 10, 20)] self.assertTrue( np.allclose(n_vals, m_vals), f"np: {n_vals}\nmatProps:{np.array(m_vals)}", ) def test_logLinearExtrapolation(self): """A ValueError should be thrown if performing a log-linear interpolation outside the function domain.""" x = np.arange(1.0, 11.0) y = -42.0 + x + x**-2 with self.assertRaisesRegex(ValueError, "out of bounds"): logLinear(0.5, x, y) ================================================ FILE: armi/matProps/tests/test_material.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Program that runs all of the tests in the TestMapPropsMaterial class.""" import os import unittest import armi.matProps from armi.matProps.material import Material from armi.matProps.materialType import MaterialType THIS_DIR = os.path.dirname(__file__) class TestMapPropsMaterial(unittest.TestCase): """Class which tests the functionality of the matProps Material class.""" @staticmethod def _createFunction(materialType): """ Helper function used to construct a minimum viable YAML file for tests. Parameters ---------- fileName String containing name of yaml file being written materialType String containing the "material type" node value """ testNode = { "file format": "TESTS", "composition": {"Fe": "balance"}, "material type": materialType, "density": { "function": { "T": { "min": 100.0, "max": 200.0, }, "type": "symbolic", "equation": 1.0, } }, } mat = Material() mat.loadNode(testNode) return mat def test_getValidFileFormatVersions(self): versions = armi.matProps.Material.getValidFileFormatVersions() self.assertGreater(len(versions), 1) for version in versions: if type(version) is not float: self.assertEqual(version, "TESTS") def test_loadFile(self): mat = armi.matProps.Material() self.assertEqual(str(mat), "<Material None None>") fPath = os.path.join(THIS_DIR, "testMaterialsData", "materialA.yaml") self.assertEqual(len(sorted(armi.matProps.materials.keys())), 0) mat.loadFile(fPath) self.assertEqual(len(sorted(armi.matProps.materials.keys())), 0) def test_datafilesType(self): materialTypeNames = [ "Fuel", "Metal", "Fluid", "Ceramic", "ASME2015", "ASME2017", "ASME2019", ] for matTypeName in materialTypeNames: parseType = self._createFunction(matTypeName).materialType typeIdx = MaterialType.types[matTypeName] expectedType = MaterialType(typeIdx) self.assertEqual(parseType, expectedType) def test_invalidFileFormat(self): fPath = os.path.join(THIS_DIR, "invalidTestFiles", "badFileFormat.YAML") mat = armi.matProps.Material() with self.assertRaises(ValueError): 
mat.loadFile(fPath) def test_datafilesInvType(self): with self.assertRaisesRegex(KeyError, "Invalid material type"): self._createFunction("Solid") def test_saveLogic(self): mat = self._createFunction("Metal") self.assertFalse(mat.saved()) mat.save() self.assertTrue(mat.saved()) ================================================ FILE: armi/matProps/tests/test_materialType.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the MaterialType class.""" import unittest from armi.matProps.materialType import MaterialType class TestMaterialType(unittest.TestCase): def test_fromString(self): mt = MaterialType.fromString("Fuel") self.assertEqual(mt._value, 1) mt = MaterialType.fromString("Metal") self.assertEqual(mt._value, 2) mt = MaterialType.fromString("Fluid") self.assertEqual(mt._value, 4) def test_repr(self): mt = MaterialType.fromString("Fuel") self.assertEqual(str(mt), "<MaterialType Fuel>") mt = MaterialType.fromString("Metal") self.assertEqual(str(mt), "<MaterialType Metal>") mt = MaterialType.fromString("Fluid") self.assertEqual(str(mt), "<MaterialType Fluid>") def test_equality(self): mt1 = MaterialType(1) mt11 = MaterialType(1) mt4 = MaterialType(4) self.assertTrue(mt1 == mt1) self.assertTrue(mt1 == mt11) self.assertFalse(mt1 == mt4) self.assertFalse(mt11 == mt4) self.assertTrue(mt1 == 1) self.assertTrue(mt11 == 1) self.assertFalse(mt1 == 4) self.assertFalse(mt11 == 4) with 
self.assertRaises(TypeError): self.assertTrue(mt1 == "1") ================================================ FILE: armi/matProps/tests/test_parsing.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test YAML parsers for all files in the matProps data directory to ensure that there are no parsing errors.""" import os import tempfile import unittest from os import path import armi.matProps class TestParsing(unittest.TestCase): """Class which tests the parsing and material library loading functions of matProps.""" @property def dirname(self): """Provide the directory where this file is located.""" return path.dirname(path.realpath(__file__)) @classmethod def setUpClass(cls): cls.dummyDataPath = path.join(path.dirname(path.realpath(__file__)), "testMaterialsData") cls.dummyMatFiles = {} for root, _, files in os.walk(cls.dummyDataPath): for fileName in files: if fileName.lower().endswith((".yaml", ".yml")): cls.dummyMatFiles[fileName] = os.path.join(root, fileName) armi.matProps.clear() def tearDown(self): armi.matProps.clear() def test_datafilesMatOwner(self): for matFile, matPath in self.dummyMatFiles.items(): matNam = path.splitext(matFile)[0] # the default behavior is loadMaterial(matPath, false) m = armi.matProps.loadMaterial(matPath) self.assertIsNotNone(m) with self.assertRaisesRegex(KeyError, f"No material named `{matNam}` was loaded within loaded data."): armi.matProps.getMaterial(matNam) m = 
armi.matProps.loadMaterial(self.dummyMatFiles[matFile], False) self.assertIsNotNone(m) with self.assertRaisesRegex(KeyError, f"No material named `{matNam}` was loaded within loaded data."): armi.matProps.getMaterial(matNam) # test the pass-through function load_material, instead of the preferred loadMaterial m = armi.matProps.load_material(self.dummyMatFiles[matFile], True) self.assertIsNotNone(m) m = armi.matProps.getMaterial(matNam) self.assertIsNotNone(m) def test_multiDataLoadingLoadingAll(self): armi.matProps.loadAll(self.dummyDataPath) self.assertEqual(len(self.dummyMatFiles), len(armi.matProps.loadedMaterials())) armi.matProps.clear() self.assertEqual(0, len(armi.matProps.loadedMaterials())) def test_loadSafe(self): armi.matProps.clear() self.assertEqual(0, len(armi.matProps.loadedMaterials())) # verify that it is safe to call loadSafe() multiple times in a row for _ in range(3): armi.matProps.loadSafe(self.dummyDataPath) self.assertEqual(len(self.dummyMatFiles), len(armi.matProps.loadedMaterials())) # verify the correct behavior if a bad directory is provided badDir = "does_not_exist_2924" with self.assertRaisesRegex(FileNotFoundError, f"Directory {badDir} not found"): # test with the pass through "load_safe", instead of the preferred loadSafe armi.matProps.load_safe(badDir) def test_dataLoadingPrioSameDir(self): armi.matProps.loadAll(self.dummyDataPath) with self.assertRaises(KeyError): armi.matProps.loadAll(self.dummyDataPath) # bonus test of getHashes hashes = armi.matProps.getHashes() self.assertGreater(len(hashes), 3) for h in hashes: self.assertGreater(len(h), 8) self.assertIsInstance(h, str) def test_datafilesBadPath(self): badDir = "nopity-nopers-missing" with self.assertRaisesRegex(FileNotFoundError, f"Directory {badDir} not found"): armi.matProps.loadAll(badDir) with self.assertRaisesRegex(NotADirectoryError, "Input path"): armi.matProps.loadAll(path.abspath(__file__)) with tempfile.TemporaryDirectory() as tmpDirName: 
armi.matProps.loadAll(tmpDirName) def test_multiDataLoadingMultidir(self): """Tests loading multiple data directories. Load all files present in the following subdirectories of the matProps repository: tests/testDir1 and tests/testDir2. """ dir1 = path.join(self.dirname, "testDir1") dir2 = path.join(self.dirname, "testDir2") # Load the two directories armi.matProps.loadAll(dir1) armi.matProps.loadAll(dir2) # Check that the two directories are in loaded materials loadList = armi.matProps.get_loaded_root_dirs() self.assertTrue(dir1 in loadList) self.assertTrue(dir2 in loadList) self.assertTrue(len(loadList) == 2) # Create list of file names in two directories. They are unique fileSet = set() for fileName in os.listdir(dir1): fileSet.add(path.splitext(fileName)[0]) for fileName in os.listdir(dir2): fileSet.add(path.splitext(fileName)[0]) materialSet = set() for material in armi.matProps.loadedMaterials(): materialSet.add(material.name) self.assertTrue(fileSet == materialSet) def test_dataLoadingPrioDiffDir(self): """ Tests that an error is raised for loading a material twice different directories. Attempts to load all files present in the following subdirectories of the matProps repository: tests/testDir1 and tests/testDir3. Though that includes some duplicates that should raise an error. """ dir1 = path.join(self.dirname, "testDir1") dir3 = path.join(self.dirname, "testDir3") armi.matProps.loadAll(dir1) with self.assertRaisesRegex(KeyError, "already exists"): armi.matProps.loadAll(dir3) matA = armi.matProps.getMaterial("a") density = matA.rho # Will evaluate to 1.0 if we have the data loaded from testDir1/a.yaml. # If we load from testDir3/a.yaml it will have a different value self.assertAlmostEqual(density.calc({"T": 150.0}), 1.0) self.assertAlmostEqual(density.calc(T=150.0), 1.0) def test_datafilesGetMat(self): """ Test a material retrieved by getMaterial(name) is the same as another material with the same name. Also tests trying to access an unknown material. 
""" # test the deprecated "load_all", that is just a pass-through for "loadAll" armi.matProps.load_all(self.dummyDataPath) # test with the pass-through loaded_materials instead of the preferred loadedMaterials for mat in armi.matProps.loaded_materials(): self.assertEqual(mat, armi.matProps.getMaterial(mat.name)) with self.assertRaisesRegex(KeyError, "No material named `Fahrvergnugen` was loaded"): # test with the pass-through get_material instead of the preferred getMaterial armi.matProps.get_material("Fahrvergnugen") ================================================ FILE: armi/matProps/tests/test_performance.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test rough matProps performance timing.""" import copy import os import pickle import timeit import unittest import armi.matProps # NOTE: This is a sketchy magic number for testing that are heavily machine dependent. _LIMIT_SECONDS = 15 class TestPerformance(unittest.TestCase): """ The tests in this class are an early warning of matProps performance changes. It tests common operations that are done with matProps to ensure their execution time remains in the correct ballpark. 
""" def test_load(self): """Tests the speed of loading a set of material files.""" armi.matProps.clear() testFiles = os.path.join(os.path.dirname(__file__), "testMaterialsData") t = timeit.timeit(lambda: (armi.matProps.loadAll(testFiles), armi.matProps.clear()), number=10) self.assertLess(t, _LIMIT_SECONDS, msg="matProps material loading takes too long to execute.") def test_pickle(self): """Tests the speed of pickling a set of material files. Pickling is important for multiprocessing.""" armi.matProps.clear() # This directory's material has many properties so it is more representative for pickle size. testFiles = os.path.join(os.path.dirname(__file__), "testDir4") armi.matProps.loadAll(testFiles) mat = armi.matProps.getMaterial("sampleProperty") t = timeit.timeit(lambda: pickle.loads(pickle.dumps(mat)), number=100) self.assertLess(t, _LIMIT_SECONDS, msg="matProps material pickling takes too long to execute.") def test_calc(self): """Tests the speed of calculating a property value.""" armi.matProps.clear() testFiles = os.path.join(os.path.dirname(__file__), "testMaterialsData") armi.matProps.loadAll(testFiles) # This material's density is a linear function. mat = armi.matProps.getMaterial("materialA") prop = mat.rho t = timeit.timeit(lambda: prop.calc({"T": 300}), number=10000) self.assertLess(t, _LIMIT_SECONDS, msg="matProps material calculation takes too long to execute.") def test_deepcopy(self): """ Tests the speed of deepcopying a material. Copying is important for copying other objects that may be referencing a matProps material. """ armi.matProps.clear() # This directory's material has many properties so it is more representative for copy size. 
testFiles = os.path.join(os.path.dirname(__file__), "testDir4") armi.matProps.loadAll(testFiles) mat = armi.matProps.getMaterial("sampleProperty") t = timeit.timeit(lambda: copy.deepcopy(mat), number=100) self.assertLess(t, _LIMIT_SECONDS, msg="matProps material copying takes too long to execute.") ================================================ FILE: armi/matProps/tests/test_piecewiseFunction.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests related to piecewise functions.""" from armi.matProps.material import Material from armi.matProps.tests import MatPropsFunTestBase class TestPiecewiseFunction(MatPropsFunTestBase): """Tests related to piecewise functions.""" @classmethod def setUpClass(cls): super().setUpClass() cls.basePiecewiseData = { "type": "piecewise", "T": { "min": 0, "max": 100, }, "functions": [ { "function": { "T": {"min": 0, "max": 25.4}, "type": "symbolic", "equation": "10", }, "tabulated data": None, }, { "function": { "T": {"min": 25.4, "max": 50}, "type": "symbolic", "equation": "99", }, "tabulated data": None, }, { "function": { "T": {"min": 50, "max": 100}, "type": "symbolic", "equation": "-99", }, "tabulated data": None, }, ], } def test_piecewiseEqnEval(self): """Tests the parsing of a PiecewiseFunction and make sure it evaluates at the appropriate sub function.""" mat = self._createFunction(self.basePiecewiseData) func = mat.rho self.assertIn("PiecewiseFunction", str(func)) self.assertAlmostEqual(func.calc({"T": 0}), 10) self.assertAlmostEqual(func.calc({"T": 25.4}), 10) self.assertAlmostEqual(func.calc({"T": 25.41}), 99) self.assertAlmostEqual(func.calc({"T": 50}), 99) self.assertAlmostEqual(func.calc({"T": 50.1}), -99) self.assertAlmostEqual(func.calc({"T": 100}), -99) func.clear() with self.assertRaises(ValueError): func.calc({"T": 0}) def test_piecewiseEqnGap(self): """Test that PiecewiseFunction evaluates correctly with gaps.""" data = { "type": "piecewise", "functions": [ { "function": { "T": {"min": 0, "max": 20}, "type": "symbolic", "equation": "10", }, "tabulated data": None, }, { "function": { "T": {"min": 30, "max": 50}, "type": "symbolic", "equation": "99", }, "tabulated data": None, }, { "function": { "T": {"min": 50, "max": 100}, "type": "symbolic", "equation": "-99", }, "tabulated data": None, }, ], } mat = self._createFunction(data) func = mat.rho with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": 
-1.0}) with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": 25.0}) with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": 101.0}) self.assertAlmostEqual(func.calc(T=0), 10) self.assertAlmostEqual(func.calc(T=10), 10) self.assertAlmostEqual(func.calc(T=20), 10) self.assertAlmostEqual(func.calc(T=30), 99) self.assertAlmostEqual(func.calc(T=40), 99) self.assertAlmostEqual(func.calc(T=50), 99) self.assertAlmostEqual(func.calc(T=75), -99) self.assertAlmostEqual(func.calc(T=100), -99) def test_piecewiseEqnPoly(self): """Test that makes a PiecewiseFunction composed of multiple PolynomialFunctions.""" poly1CoMap = {0: -2.5, 1: 5, 2: 4} poly2CoMap = {0: 3.5, 1: 3, 2: -2, 3: 1} poly3CoMap = {0: 4.5, 1: -2, 2: 3, 3: -2, 4: 1} data = { "type": "piecewise", "functions": [ { "function": { "T": {"min": -100, "max": 100}, "type": "symbolic", "equation": self.createEqnPoly(poly1CoMap), }, "tabulated data": None, }, { "function": { "T": {"min": 100, "max": 300}, "type": "symbolic", "equation": self.createEqnPoly(poly2CoMap), }, "tabulated data": None, }, { "function": { "T": {"min": 300, "max": 500}, "type": "symbolic", "equation": self.createEqnPoly(poly3CoMap), }, "tabulated data": None, }, ], } mat = self._createFunction(data) func = mat.rho self.assertAlmostEqual(func.calc({"T": -100.0}), self.polynomialEvaluation(poly1CoMap, -100.0)) self.assertAlmostEqual(func.calc({"T": 0.0}), self.polynomialEvaluation(poly1CoMap, 0.0)) self.assertAlmostEqual(func.calc({"T": 100.0}), self.polynomialEvaluation(poly1CoMap, 100.0)) self.assertAlmostEqual(func.calc({"T": 200.0}), self.polynomialEvaluation(poly2CoMap, 200.0)) self.assertAlmostEqual(func.calc({"T": 300.0}), self.polynomialEvaluation(poly2CoMap, 300.0)) self.assertAlmostEqual(func.calc({"T": 400.0}), self.polynomialEvaluation(poly3CoMap, 400.0)) self.assertAlmostEqual(func.calc({"T": 500.0}), self.polynomialEvaluation(poly3CoMap, 
500.0)) def test_piecewiseEqnPolyTable(self): """Test that makes a PiecewiseFunction composed of a mixture of polynomial and table functions.""" poly1CoMap = {0: 3.5, 1: 3, 2: -2, 3: 1} poly2CoMap = {0: 4.5, 1: -2, 2: 3, 3: -2, 4: 1} data = { "type": "piecewise", "functions": [ { "function": { "T": 0, "type": "table", }, "tabulated data": [[-100.0, -50.0], [0.0, 0.0], [100.0, 50.0]], }, { "function": { "T": {"min": 100, "max": 300}, "type": "symbolic", "equation": self.createEqnPoly(poly1CoMap), }, "tabulated data": None, }, { "function": { "T": {"min": 300, "max": 500}, "type": "symbolic", "equation": self.createEqnPoly(poly2CoMap), }, "tabulated data": None, }, ], } mat = self._createFunction(data) func = mat.rho self.assertAlmostEqual(func.calc({"T": -100.0}), -50.0) self.assertAlmostEqual(func.calc({"T": -50.0}), -25.0) self.assertAlmostEqual(func.calc({"T": 0.0}), 0.0) self.assertAlmostEqual(func.calc({"T": 50.0}), 25.0) self.assertAlmostEqual(func.calc({"T": 100.0}), 50.0) self.assertAlmostEqual(func.calc({"T": 200.0}), self.polynomialEvaluation(poly1CoMap, 200.0)) self.assertAlmostEqual(func.calc({"T": 300.0}), self.polynomialEvaluation(poly1CoMap, 300.0)) self.assertAlmostEqual(func.calc({"T": 400.0}), self.polynomialEvaluation(poly2CoMap, 400.0)) self.assertAlmostEqual(func.calc({"T": 500.0}), self.polynomialEvaluation(poly2CoMap, 500.0)) def test_inputCheckPiecewiseMinTemp(self): """Test to make sure an error is thrown when attempting to evaluate below the minimum valid range.""" self.belowMinimumCheck(self.basePiecewiseData) def test_inputCheckPiecewiseMaxTemp(self): """Test to make sure an error is thrown when attempting to evaluate above the maximum valid range.""" self.aboveMaximumCheck(self.basePiecewiseData) def _createFunction2D(self, data=None): """ Helper function designed to create a basic viable yaml file for a two dimensional function. Parameters ---------- data : dict A dictionary containing user specified function child nodes. 
""" funcBody = {"T": {"min": -100, "max": 100}, "t": {"min": -100, "max": 100}} funcBody.update(data or {}) materialData = { "file format": "TESTS", "composition": {"Fe": "balance"}, "material type": "Metal", "density": {"function": funcBody, "tabulated data": {}}, } mat = Material() mat.loadNode(materialData) return mat def test_piecewiseEqn2d(self): """Test that PiecewiseFunction evaluates correctly with multiple dimensions.""" data = { "type": "piecewise", "functions": [ { "function": { "T": {"min": 0, "max": 20}, "t": {"min": 0, "max": 20}, "type": "symbolic", "equation": "10", }, "tabulated data": None, }, { "function": { "T": {"min": 30, "max": 40}, "t": {"min": 0, "max": 20}, "type": "symbolic", "equation": "99", }, "tabulated data": None, }, { "function": { "T": {"min": 0, "max": 20}, "t": {"min": 30, "max": 40}, "type": "symbolic", "equation": "20", }, "tabulated data": None, }, { "function": { "T": {"min": 30, "max": 40}, "t": {"min": 30, "max": 40}, "type": "symbolic", "equation": "199", }, "tabulated data": None, }, ], } mat = self._createFunction2D(data) func = mat.rho # Below var 1 with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": -1.0, "t": 10}) # Middle gap var 1 with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": 25.0, "t": 10}) # Above var 1 with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": 45.0, "t": 10}) # Below var 2 with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": 10, "t": -1}) # Middle gap var 2 with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": 10, "t": 25}) # Above var 2 with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"): func.calc({"T": 10, "t": 45}) self.assertAlmostEqual(func.calc(T=10, t=10), 10) self.assertAlmostEqual(func.calc(T=10, 
t=35), 20) self.assertAlmostEqual(func.calc(T=35, t=10), 99) self.assertAlmostEqual(func.calc(T=35, t=35), 199) def test_piecewiseEqnOverlap(self): """Test that PiecewiseFunction fails to load with overlapping regions.""" data = { "type": "piecewise", "functions": [ { "function": { "T": {"min": 0, "max": 20}, "t": {"min": 0, "max": 20}, "type": "symbolic", "equation": "10", }, "tabulated data": None, }, { "function": { "T": {"min": 10, "max": 40}, "t": {"min": 0, "max": 20}, "type": "symbolic", "equation": "99", }, "tabulated data": None, }, { "function": { "T": {"min": 0, "max": 20}, "t": {"min": 30, "max": 40}, "type": "symbolic", "equation": "20", }, "tabulated data": None, }, { "function": { "T": {"min": 30, "max": 40}, "t": {"min": 30, "max": 40}, "type": "symbolic", "equation": "199", }, "tabulated data": None, }, ], } with self.assertRaisesRegex(ValueError, "Piecewise child functions overlap"): self._createFunction2D(data) def test_piecewiseEqnDiffVars(self): """Test that PiecewiseFunction fails to load when child functions use different variables.""" data = { "type": "piecewise", "functions": [ { "function": { "T": {"min": 0, "max": 20}, "t": {"min": 0, "max": 20}, "type": "symbolic", "equation": "10", }, "tabulated data": None, }, { "function": { "T": {"min": 30, "max": 40}, "t": {"min": 0, "max": 20}, "type": "symbolic", "equation": "99", }, "tabulated data": None, }, { "function": { "R": {"min": 0, "max": 20}, "t": {"min": 30, "max": 40}, "type": "symbolic", "equation": "20", }, "tabulated data": None, }, { "function": { "T": {"min": 30, "max": 40}, "t": {"min": 30, "max": 40}, "type": "symbolic", "equation": "199", }, "tabulated data": None, }, ], } with self.assertRaisesRegex(KeyError, "Piecewise child function must have same variables"): self._createFunction2D(data) ================================================ FILE: armi/matProps/tests/test_point.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed 
under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Program that runs all of the tests for the Point class. Notes ----- This file is used to verify the matProps stand-alone wheel installation. As such, it needs to remain small. Do not add any tests to this file with explicit file IO: no temporary directories, and no test YAML files. """ import unittest from armi.matProps.point import Point class TestPoint(unittest.TestCase): """Unit tests for the matProps Point class.""" def test_string(self): """Test string representation of Point.""" p = Point(1, 2, 3) self.assertEqual(str(p), "<Point 1, 2 -> 3>") ================================================ FILE: armi/matProps/tests/test_property.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Program that runs all of the tests contained in PropertyTests class.""" import os import unittest from os import path from armi.matProps import loadMaterial from armi.matProps.prop import defProp, properties class PropertyTests(unittest.TestCase): """Class which contains tests for the matProps Property class.""" @classmethod def setUpClass(cls): # Properties allowed for based on SDID. cls.allowedPropertiesList = [ "density", "specific heat capacity", "thermal conductivity", "thermal diffusivity", "dynamic viscosity", "kinematic viscosity", "melting temperature", "boiling temperature", "surface tension", "vapor pressure", "electrical conductance", "isothermal compressibility", "mean coefficient of thermal expansion", "instantaneous coefficient of thermal expansion", "Young's modulus", "shear modulus", "elongation", "Poisson's ratio", "yield strength", "tensile strength", "design stress", "design reference stress", "allowable stress", "time dependent design stress", "service reference stress", "stress to rupture", "tensile strength reduction factor", "yield strength reduction factor", "weld strength reduction factor", "allowable time to rupture", "allowable time to allowable stress", "design fatigue strain range", "strain from isochronous stress-strain curve", "design fatigue stress", "linear expansion", "vapor specific volume", "speed of sound", "solidus temperature", "liquidus temperature", "volumetric expansion", "enthalpy", "temperature from enthalpy", "enthalpy of fusion", "latent heat of vaporization", "fracture toughness", "Brinell Hardness", "factor f from ASME.III.5 Fig. HBB-T-1432-2", "factor Kv' from ASME.III.5 Fig. 
HBB-T-1432-3", ] def test_propertiesUnique(self): """Ensure the Property.name and Property.symbol are all unique inside the matProps.properties container.""" num = len(properties) self.assertEqual(num, len({p.name for p in properties})) self.assertEqual(num, len({p.symbol for p in properties})) def test_propertiesNames(self): """Ensure that we have the correct set of Properties in matProps.""" propertySet = {p.name for p in properties} allowedPropertiesSet = set(self.allowedPropertiesList) self.assertEqual(propertySet, allowedPropertiesSet) def test_propertiesInvName(self): """Ensure loadNode fails correctly when provided when provided an unknown property.""" tempFileName = os.path.join(os.path.dirname(__file__), "invalidTestFiles", "badProperty.yaml") with self.assertRaisesRegex(KeyError, "Invalid property node"): loadMaterial(tempFileName) def test_propertiesDefinitions(self): """ Check a logic branch in the Function.factory method which initializes armi.matProps.Function objects to be null. armi.matProps.Function objects only get set to a non-null object if the appropriate property node is provided in the YAML file. A test YAML file with only the density property provided. It checks to make sure that the Material.rho object corresponding with density is not a null object and performs an evaluation. A check is then performed on the Material.k object. This object, which corresponds to the thermal conductivity property, should be null as it is not defined in the test YAML file. """ # Only the density property exists for the material below. It is a constant function yamlFilePath = path.join(path.dirname(path.realpath(__file__)), "testDir1", "a.yaml") mat = loadMaterial(yamlFilePath) # Name of density function is rho for materials self.assertIsNotNone(mat.rho) self.assertAlmostEqual(mat.rho.calc({"T": 150.0}), 1.0) # k corresponds to thermal conductivity which is not provided in test file. 
self.assertIsNone(mat.k) def test_spotCheckAllPropsDict(self): """Spot check every property at least once, using a dictionary of input values.""" pathToTestYaml = path.join(path.dirname(path.realpath(__file__)), "testDir4") testMat = loadMaterial(path.join(pathToTestYaml, "sampleProperty.yaml")) self.assertAlmostEqual(testMat.rho.calc({"T": 300.0}), 1.0) self.assertAlmostEqual(testMat.c_p.calc({"T": 300.0}), 2.0) self.assertAlmostEqual(testMat.k.calc({"T": 300.0}), 3.0) self.assertAlmostEqual(testMat.alpha_d.calc({"T": 300.0}), 4.0) self.assertAlmostEqual(testMat.mu_d.calc({"T": 300.0}), 5.0) self.assertAlmostEqual(testMat.mu_k.calc({"T": 300.0}), 6.0) self.assertAlmostEqual(testMat.T_melt.calc({"T": 300.0}), 7.0) self.assertAlmostEqual(testMat.T_boil.calc({"T": 300.0}), 8.0) self.assertAlmostEqual(testMat.dH_vap.calc({"T": 300.0}), 9.0) self.assertAlmostEqual(testMat.dH_fus.calc({"T": 300.0}), 10.0) self.assertAlmostEqual(testMat.gamma.calc({"T": 300.0}), 11.0) self.assertAlmostEqual(testMat.P_sat.calc({"T": 300.0}), 12.0) self.assertAlmostEqual(testMat.kappa.calc({"T": 300.0}), 13.0) self.assertAlmostEqual(testMat.alpha_mean.calc({"T": 300.0}), 14.0) self.assertAlmostEqual(testMat.alpha_inst.calc({"T": 300.0}), 15.0) self.assertAlmostEqual(testMat.E.calc({"T": 300.0}), 16.0) self.assertAlmostEqual(testMat.nu.calc({"T": 300.0}), 17.0) self.assertAlmostEqual(testMat.Sy.calc({"T": 300.0}), 18.0) self.assertAlmostEqual(testMat.Su.calc({"T": 300.0}), 19.0) self.assertAlmostEqual(testMat.Sm.calc({"T": 300.0}), 20.0) self.assertAlmostEqual(testMat.So.calc({"T": 300.0}), 21.0) self.assertAlmostEqual(testMat.Sa.calc({"T": 300.0}), 22.0) self.assertAlmostEqual(testMat.St.calc({"T": 300.0}), 23.0) self.assertAlmostEqual(testMat.Smt.calc({"T": 300.0}), 24.0) self.assertAlmostEqual(testMat.Sr.calc({"T": 300.0}), 25.0) self.assertAlmostEqual(testMat.TSRF.calc({"T": 300.0}), 26.0) self.assertAlmostEqual(testMat.YSRF.calc({"T": 300.0}), 27.0) 
self.assertAlmostEqual(testMat.WSRF.calc({"T": 300.0}), 28.0) self.assertAlmostEqual(testMat.tMaxSr.calc({"T": 300.0}), 29.0) self.assertAlmostEqual(testMat.tMaxSt.calc({"T": 300.0}), 30.0) self.assertAlmostEqual(testMat.eps_t.calc({"T": 300.0}), 31.0) self.assertAlmostEqual(testMat.eps_iso.calc({"T": 300.0}), 32.0) self.assertAlmostEqual(testMat.SaFat.calc({"T": 300.0}), 33.0) self.assertAlmostEqual(testMat.dl_l.calc({"T": 300.0}), 34.0) self.assertAlmostEqual(testMat.nu_g.calc({"T": 300.0}), 35.0) self.assertAlmostEqual(testMat.v_sound.calc({"T": 300.0}), 36.0) self.assertAlmostEqual(testMat.T_sol.calc({"T": 300.0}), 37.0) self.assertAlmostEqual(testMat.T_liq.calc({"T": 300.0}), 38.0) self.assertAlmostEqual(testMat.dV.calc({"T": 300.0}), 39.0) self.assertAlmostEqual(testMat.H.calc({"T": 300.0}), 40.0) self.assertAlmostEqual(testMat.H_calc_T.calc({"T": 300.0}), 41.0) self.assertAlmostEqual(testMat.K_IC.calc({"T": 300.0}), 42.0) self.assertAlmostEqual(testMat.HBW.calc({"T": 300.0}), 43.0) self.assertAlmostEqual(testMat.f.calc({"T": 300.0}), 44.0) self.assertAlmostEqual(testMat.Kv_prime.calc({"T": 300.0}), 45.0) self.assertAlmostEqual(testMat.S.calc({"T": 300.0}), 46.0) self.assertAlmostEqual(testMat.Elong.calc({"T": 300.0}), 47.0) def test_spotCheckAllPropsKwargs(self): """Spot check every property at least once, using kwargs.""" pathToTestYaml = path.join(path.dirname(path.realpath(__file__)), "testDir4") testMat = loadMaterial(path.join(pathToTestYaml, "sampleProperty.yaml")) self.assertAlmostEqual(testMat.rho.calc(T=300.0), 1.0) self.assertAlmostEqual(testMat.c_p.calc(T=300.0), 2.0) self.assertAlmostEqual(testMat.k.calc(T=300.0), 3.0) self.assertAlmostEqual(testMat.alpha_d.calc(T=300.0), 4.0) self.assertAlmostEqual(testMat.mu_d.calc(T=300.0), 5.0) self.assertAlmostEqual(testMat.mu_k.calc(T=300.0), 6.0) self.assertAlmostEqual(testMat.T_melt.calc(T=300.0), 7.0) self.assertAlmostEqual(testMat.T_boil.calc(T=300.0), 8.0) 
self.assertAlmostEqual(testMat.dH_vap.calc(T=300.0), 9.0) self.assertAlmostEqual(testMat.dH_fus.calc(T=300.0), 10.0) self.assertAlmostEqual(testMat.gamma.calc(T=300.0), 11.0) self.assertAlmostEqual(testMat.P_sat.calc(T=300.0), 12.0) self.assertAlmostEqual(testMat.kappa.calc(T=300.0), 13.0) self.assertAlmostEqual(testMat.alpha_mean.calc(T=300.0), 14.0) self.assertAlmostEqual(testMat.alpha_inst.calc(T=300.0), 15.0) self.assertAlmostEqual(testMat.E.calc(T=300.0), 16.0) self.assertAlmostEqual(testMat.nu.calc(T=300.0), 17.0) self.assertAlmostEqual(testMat.Sy.calc(T=300.0), 18.0) self.assertAlmostEqual(testMat.Su.calc(T=300.0), 19.0) self.assertAlmostEqual(testMat.Sm.calc(T=300.0), 20.0) self.assertAlmostEqual(testMat.So.calc(T=300.0), 21.0) self.assertAlmostEqual(testMat.Sa.calc(T=300.0), 22.0) self.assertAlmostEqual(testMat.St.calc(T=300.0), 23.0) self.assertAlmostEqual(testMat.Smt.calc(T=300.0), 24.0) self.assertAlmostEqual(testMat.Sr.calc(T=300.0), 25.0) self.assertAlmostEqual(testMat.TSRF.calc(T=300.0), 26.0) self.assertAlmostEqual(testMat.YSRF.calc(T=300.0), 27.0) self.assertAlmostEqual(testMat.WSRF.calc(T=300.0), 28.0) self.assertAlmostEqual(testMat.tMaxSr.calc(T=300.0), 29.0) self.assertAlmostEqual(testMat.tMaxSt.calc(T=300.0), 30.0) self.assertAlmostEqual(testMat.eps_t.calc(T=300.0), 31.0) self.assertAlmostEqual(testMat.eps_iso.calc(T=300.0), 32.0) self.assertAlmostEqual(testMat.SaFat.calc(T=300.0), 33.0) self.assertAlmostEqual(testMat.dl_l.calc(T=300.0), 34.0) self.assertAlmostEqual(testMat.nu_g.calc(T=300.0), 35.0) self.assertAlmostEqual(testMat.v_sound.calc(T=300.0), 36.0) self.assertAlmostEqual(testMat.T_sol.calc(T=300.0), 37.0) self.assertAlmostEqual(testMat.T_liq.calc(T=300.0), 38.0) self.assertAlmostEqual(testMat.dV.calc(T=300.0), 39.0) self.assertAlmostEqual(testMat.H.calc(T=300.0), 40.0) self.assertAlmostEqual(testMat.H_calc_T.calc(T=300.0), 41.0) self.assertAlmostEqual(testMat.K_IC.calc(T=300.0), 42.0) self.assertAlmostEqual(testMat.HBW.calc(T=300.0), 
43.0) self.assertAlmostEqual(testMat.f.calc(T=300.0), 44.0) self.assertAlmostEqual(testMat.Kv_prime.calc(T=300.0), 45.0) self.assertAlmostEqual(testMat.S.calc(T=300.0), 46.0) self.assertAlmostEqual(testMat.Elong.calc(T=300.0), 47.0) def test_defPropDup(self): with self.assertRaises(KeyError): defProp("rho", "density", "kg/m^3", "rho") ================================================ FILE: armi/matProps/tests/test_references.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test the Reference.""" import unittest from armi.matProps.reference import Reference class TestReference(unittest.TestCase): """Unit tests for Reference.""" def test_str(self): ref = Reference() ref._ref = "REF123" ref._type = "TYPE321" self.assertEqual(str(ref), "REF123 (TYPE321)") def test_getRef(self): ref = Reference() ref._ref = "REF234" self.assertEqual(ref.getRef(), "REF234") def test_getType(self): ref = Reference() ref._type = "TYPE789" self.assertEqual(ref.getType(), "TYPE789") def test_factory(self): node = {"ref": "REF234", "type": "TYPE789"} ref = Reference._factory(node) self.assertEqual(str(ref), "REF234 (TYPE789)") self.assertEqual(ref.getRef(), "REF234") self.assertEqual(ref.getType(), "TYPE789") ================================================ FILE: armi/matProps/tests/test_symbolicFunction.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit tests for the symbolic function class.""" import copy import math import pickle import unittest import numpy as np from armi.matProps.material import Material class TestSymbolicFunction(unittest.TestCase): """Unit tests for the symbolic function class.""" def setUp(self): self.yaml = { "file format": "TESTS", "material type": "Metal", "composition": {"a": "balance"}, "density": { "function": { "type": "symbolic", "X": {"min": -10, "max": 500.0}, "Y": {"min": 1.0, "max": 20.0}, "Z": {"min": -30.0, "max": -10.0}, "equation": 1.0, } }, } def loadMaterial(self, num=1): """Loads the material file based on `self.yaml` and returns the material object.""" mat = Material() mat.loadNode(self.yaml) return mat def functionTest(self, func, num=1): """ Takes a function as input to compare against matProps material output. It is assumed that `self.yaml` has been updated to match the provided evaluation function. """ mat = self.loadMaterial(num=num) prop = mat.rho for x in np.linspace(prop.getMinBound("X"), prop.getMaxBound("X"), 20): for y in np.linspace(prop.getMinBound("Y"), prop.getMaxBound("Y"), 20): for z in np.linspace(prop.getMinBound("Z"), prop.getMaxBound("Z"), 20): received = prop.calc({"X": x, "Y": y, "Z": z}) expected = func(x, y, z) self.assertAlmostEqual( received, expected, msg=( f"Material property evaluation does not match for: {prop.sympyStr} at ({x}, {y}, {z}).\n" f" Received: {received}, Expected: {expected}" ), delta=abs( expected / 1e8 ), # very large numbers can have floating point differences at low decimal count ) def setEqnField(self, eqn): self.yaml["density"]["function"]["equation"] = eqn def test_symbolicMult(self): """ Test multiplication operator for symbolic equations. Four combinations of spacing and the operator are tested for multiplying a variable and a constant as well as multiplying two variables. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. 
""" func = lambda x, y, z: x * 20 self.setEqnField("X * 20") self.functionTest(func, 1) self.setEqnField("X*20") self.functionTest(func, 2) self.setEqnField("X* 20") self.functionTest(func, 3) self.setEqnField("X *20") self.functionTest(func, 4) func = lambda x, y, z: x * y self.setEqnField("X * Y") self.functionTest(func, 5) self.setEqnField("X*Y") self.functionTest(func, 6) self.setEqnField("X*Y") self.functionTest(func, 7) self.setEqnField("X *Y") self.functionTest(func, 8) def test_symbolicExponent(self): """ Test exponent operator for symbolic equations. Four combinations of spacing and the operator are tested for raising a variable by a constant as well as raising a constant by a constant. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: x**3 self.setEqnField("X ** 3") self.functionTest(func, 1) self.setEqnField("X**3") self.functionTest(func, 2) self.setEqnField("X** 3") self.functionTest(func, 3) self.setEqnField("X **3") self.functionTest(func, 4) func = lambda x, y, z: 1.1**y self.setEqnField("1.1 ** Y") self.functionTest(func, 5) self.setEqnField("1.1**Y") self.functionTest(func, 6) self.setEqnField("1.1** Y") self.functionTest(func, 7) self.setEqnField("1.1 **Y") self.functionTest(func, 8) def test_symbolicDiv(self): """ Test division operator for symbolic equations. The four combinations of spacing and the operator are tested for dividing a variable and a constant as well as dividing two variables. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. 
""" func = lambda x, y, z: x / 3 self.setEqnField("X / 3") self.functionTest(func, 1) self.setEqnField("X/3") self.functionTest(func, 2) self.setEqnField("X/ 3") self.functionTest(func, 3) self.setEqnField("X /3") self.functionTest(func, 4) func = lambda x, y, z: x / y self.setEqnField("X / Y") self.functionTest(func, 5) self.setEqnField("X/Y") self.functionTest(func, 6) self.setEqnField("X/ Y") self.functionTest(func, 7) self.setEqnField("X /Y") self.functionTest(func, 8) def test_symbolicAdd(self): """ Test addition operator for symbolic equations. Four combinations of spacing and the operator are tested for adding a variable and a constant as well as adding two variables. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: x + 3 self.setEqnField("X + 3") self.functionTest(func, 1) self.setEqnField("X+3") self.functionTest(func, 2) self.setEqnField("X+ 3") self.functionTest(func, 3) self.setEqnField("X +3") self.functionTest(func, 4) func = lambda x, y, z: x + y self.setEqnField("X + Y") self.functionTest(func, 5) self.setEqnField("X+Y") self.functionTest(func, 6) self.setEqnField("X+ Y") self.functionTest(func, 7) self.setEqnField("X +Y") self.functionTest(func, 8) def test_symbolicSub(self): """ Test subtraction operator for symbolic equations. Four combinations of spacing and the operator are tested for subtracting a variable and a constant as well as subtracting two variables. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. 
""" func = lambda x, y, z: x - 3 self.setEqnField("X - 3") self.functionTest(func, 1) self.setEqnField("X-3") self.functionTest(func, 2) self.setEqnField("X- 3") self.functionTest(func, 3) self.setEqnField("X -3") self.functionTest(func, 4) func = lambda x, y, z: x - z self.setEqnField("X - Z") self.functionTest(func, 5) self.setEqnField("X-Z") self.functionTest(func, 6) self.setEqnField("X- Z") self.functionTest(func, 7) self.setEqnField("X -Z") self.functionTest(func, 8) def test_symbolicParens(self): """ Test the grouping operator for symbolic equations. Various combinations of grouping is tested with spacing on a simple addition operation. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: x + 3 self.setEqnField("(X + 3)") self.functionTest(func, 1) self.setEqnField("(X) + 3") self.functionTest(func, 2) self.setEqnField("X + (3)") self.functionTest(func, 3) self.setEqnField("(X) + (3)") self.functionTest(func, 4) self.setEqnField("(X ) + 3") self.functionTest(func, 5) self.setEqnField("( X) + 3") self.functionTest(func, 6) self.setEqnField("( X ) + 3") self.functionTest(func, 7) self.setEqnField("( X + 3)") self.functionTest(func, 8) self.setEqnField("(X + 3 )") self.functionTest(func, 9) def test_symbolicSine(self): """ Test sine operator for symbolic equations. Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: math.sin(x) self.setEqnField("sin(X)") self.functionTest(func, 1) self.setEqnField("sin (X)") self.functionTest(func, 2) self.setEqnField("sin( X)") self.functionTest(func, 3) self.setEqnField("sin(X )") self.functionTest(func, 4) def test_symbolicCosine(self): """ Test cosine operator for symbolic equations. Four combinations of spacing and the operator are tested. 
For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: math.cos(x) self.setEqnField("cos(X)") self.functionTest(func, 1) self.setEqnField("cos (X)") self.functionTest(func, 2) self.setEqnField("cos( X)") self.functionTest(func, 3) self.setEqnField("cos(X )") self.functionTest(func, 4) def test_symbolicTan(self): """ Test tangent operator for symbolic equations. Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: math.tan(x) self.setEqnField("tan(X)") self.functionTest(func, 1) self.setEqnField("tan (X)") self.functionTest(func, 2) self.setEqnField("tan( X)") self.functionTest(func, 3) self.setEqnField("tan(X )") self.functionTest(func, 4) def test_symbolicSinh(self): """ Test hyperbolic sine operator for symbolic equations. Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: math.sinh(x) self.setEqnField("sinh(X)") self.functionTest(func, 1) self.setEqnField("sinh (X)") self.functionTest(func, 2) self.setEqnField("sinh( X)") self.functionTest(func, 3) self.setEqnField("sinh(X )") self.functionTest(func, 4) def test_symbolicCosh(self): """ Test hyperbolic cosine operator for symbolic equations. Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. 
""" func = lambda x, y, z: math.cosh(x) self.setEqnField("cosh(X)") self.functionTest(func, 1) self.setEqnField("cosh (X)") self.functionTest(func, 2) self.setEqnField("cosh( X)") self.functionTest(func, 3) self.setEqnField("cosh(X )") self.functionTest(func, 4) def test_symbolicTanh(self): """ Test hyperbolic tangent operator for symbolic equations. Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: math.tanh(x) self.setEqnField("tanh(X)") self.functionTest(func, 1) self.setEqnField("tanh (X)") self.functionTest(func, 2) self.setEqnField("tanh( X)") self.functionTest(func, 3) self.setEqnField("tanh(X )") self.functionTest(func, 4) def test_symbolicNatLog(self): """ Test natural logarithm operator for symbolic equations. Both log and ln variations of the function name are tested. Four combinations of spacing and the operator are tested for each function name. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: math.log(y) self.setEqnField("ln(Y)") self.functionTest(func, 1) self.setEqnField("ln (Y)") self.functionTest(func, 2) self.setEqnField("ln( Y)") self.functionTest(func, 3) self.setEqnField("ln(Y )") self.functionTest(func, 4) self.setEqnField("log(Y)") self.functionTest(func, 5) self.setEqnField("log (Y)") self.functionTest(func, 6) self.setEqnField("log( Y)") self.functionTest(func, 7) self.setEqnField("log(Y )") self.functionTest(func, 8) def test_symbolicLog10(self): """ Test base ten logarithm operator for symbolic equations. Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. 
""" func = lambda x, y, z: math.log10(y) self.setEqnField("log10(Y)") self.functionTest(func, 1) self.setEqnField("log10 (Y)") self.functionTest(func, 2) self.setEqnField("log10( Y)") self.functionTest(func, 3) self.setEqnField("log10(Y )") self.functionTest(func, 4) def test_symbolicExp(self): """ Test exponential operator for symbolic equations. Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: math.exp(y) self.setEqnField("exp(Y)") self.functionTest(func, 1) self.setEqnField("exp (Y)") self.functionTest(func, 2) self.setEqnField("exp( Y)") self.functionTest(func, 3) self.setEqnField("exp(Y )") self.functionTest(func, 4) def test_symbolicComposition(self): """ Test composition of functions for symbolic equations. Four different functions are tested that are composites of other functions. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ # Multiple functions on one side of multiplication/divide func = lambda x, y, z: x / (math.exp(y) + z) self.setEqnField("X / (exp(Y) + Z)") self.functionTest(func, 1) # Multiple functions inside trig function func = lambda x, y, z: x * math.sin(z**y) self.setEqnField("X * sin(Z**Y)") self.functionTest(func, 2) # Multiple functions inside hyperbolic function func = lambda x, y, z: math.tanh((x + 30) ** math.cos(y) + z * 0.2) self.setEqnField("tanh((X+30) ** cos(Y) + Z*0.2)") self.functionTest(func, 3) # Many sets of nested parentheses func = lambda x, y, z: ((x / (y * z + 1.0)) + 2.5) * 10.2 self.setEqnField("((X / (Y*Z + 1.0)) + 2.5)*10.2") self.functionTest(func, 4) def test_symbolicOrdop(self): """ Test order of operations for symbolic equations. Five different equations are evaluated that test different components of order precedence. 
For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ # multiplication and division before addition and subtraction func = lambda x, y, z: (x * y) + z self.setEqnField("X * Y + Z") self.functionTest(func, 1) func = lambda x, y, z: x + (y * z) self.setEqnField("X + Y * Z") self.functionTest(func, 2) # Left to right for same precedence operators func = lambda x, y, z: (x * y) / z self.setEqnField("X * Y / Z") self.functionTest(func, 3) # Exponents before multiplication/division func = lambda x, y, z: ((x + 30) ** 1.1) * (y**2) self.setEqnField("(X+30) ** 1.1 * Y ** 2") self.functionTest(func, 4) # Parentheses before exponents func = lambda x, y, z: (x + 30) ** (y / 2) - z self.setEqnField("(X+30) ** (Y/2) - Z") self.functionTest(func, 5) def test_symbolicWhitespace(self): """ Test excess whitespace is ignored for symbolic equations. Two different equations are evaluated with varying amounts of whitespace introduced to ensure they produce the same results. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range of the property. """ func = lambda x, y, z: x + y + z self.setEqnField(" X + Y + Z") self.functionTest(func, 1) self.setEqnField(" X + Y + Z") self.functionTest(func, 2) self.setEqnField("X + Y + Z") self.functionTest(func, 3) self.setEqnField("X + Y + Z") self.functionTest(func, 4) self.setEqnField(" X + Y + Z") self.functionTest(func, 5) func = lambda x, y, z: math.sin(x) * y + z self.setEqnField("sin (X) * Y + Z") self.functionTest(func, 6) self.setEqnField(" sin( X ) * Y + Z") self.functionTest(func, 7) self.setEqnField("sin(X ) * Y + Z") self.functionTest(func, 8) def test_symbolicIntFloat(self): """ Test handling of integers and floats for symbolic equations. 
Multiple equations are tested that verify that when integers are used in equations they do not result in integer multiplication and division in Python and are instead treated as floating point numbers. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: x / 2.0 + 3.0 self.setEqnField("X / 2 + 3") self.functionTest(func, 1) self.setEqnField("X / 2.0 + 3.0") self.functionTest(func, 2) func = lambda x, y, z: (x + 30) ** (4.0 / 3.0) self.setEqnField("(X + 30) ** (4/3)") self.functionTest(func, 3) self.setEqnField("(X + 30) ** (4.0/3.0)") self.functionTest(func, 4) def test_symbolicBadParens(self): """ Test unbalanced parentheses results in errors for symbolic equations. Multiple equations are tested that verify that various combinations of unbalanced parentheses are detected and result in an error when parsing the input. Additionally, an expression with extraneous but balanced parentheses is tested for correctness. For that input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ with self.assertRaises(ValueError): self.setEqnField("(X + Y") self.loadMaterial(num=1) with self.assertRaises(ValueError): self.setEqnField("((X) + Y") self.loadMaterial(num=2) with self.assertRaises(ValueError): self.setEqnField("(X) + Y)") self.loadMaterial(num=3) with self.assertRaises(ValueError): self.setEqnField("exp(X") self.loadMaterial(num=4) with self.assertRaises(ValueError): self.setEqnField("exp X") self.loadMaterial(num=5) with self.assertRaises(ValueError): self.setEqnField("(((((X + Y)))) + (Z)))") self.loadMaterial(num=6) # Test extraneous parentheses as well func = lambda x, y, z: x + y + z self.setEqnField("(((((X + Y)))) + (Z))") self.functionTest(func, 7) def test_symbolicUndefined(self): """ Test that undefined functions results in errors for symbolic equations. 
A logarithmic function is evaluated at two points in the valid range to show that the material input is parsed correctly. The function is then evaluated at a value that results in a negative expression inside the logarithm which is undefined. """ self.setEqnField("ln(X)") mat = self.loadMaterial(num=1) prop = mat.rho self.assertAlmostEqual(prop.calc({"X": 3, "Y": 3, "Z": -20}), math.log(3)) self.assertAlmostEqual(prop.calc({"X": 100, "Y": 3, "Z": -20}), math.log(100)) with self.assertRaises(ValueError): prop.calc({"X": -5, "Y": 3, "Z": -20}) def test_symbolicCaps(self): """ Test bad capitalization results in errors for symbolic equations. Multiple equations are tested that verify that various combinations of capitalization are detected and result in an error when parsing the inputs. """ with self.assertRaises(ValueError): self.setEqnField("x + Y") self.loadMaterial(num=1) with self.assertRaises(ValueError): self.setEqnField("TAN(X) + Y") self.loadMaterial(num=2) with self.assertRaises(ValueError): self.setEqnField("Tan(X) + Y") self.loadMaterial(num=3) with self.assertRaises(ValueError): self.setEqnField("eXP(X) + Y") self.loadMaterial(num=4) def test_symbolicImpmult(self): """ Test implicit multiplication results in errors for symbolic equations. Multiple equations are tested that verify that various combinations of implicit multiplication are detected and result in an error when parsing the inputs. 
""" with self.assertRaises(ValueError): self.setEqnField("2 X") self.loadMaterial(num=1) with self.assertRaises(ValueError): self.setEqnField("X 2") self.loadMaterial(num=2) with self.assertRaises(ValueError): self.setEqnField("2X") self.loadMaterial(num=3) with self.assertRaises(ValueError): self.setEqnField("2(X)") self.loadMaterial(num=4) with self.assertRaises(ValueError): self.setEqnField("X(2)") self.loadMaterial(num=5) with self.assertRaises(ValueError): self.setEqnField("X (2)") self.loadMaterial(num=6) with self.assertRaises(ValueError): self.setEqnField("2 sin(X)") self.loadMaterial(num=7) def test_symbolicVarVar(self): """ Test repeat variables for symbolic equations. Multiple equations are tested that verify that various combinations of repeat variable usage evaluate correctly. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. """ func = lambda x, y, z: x * x + y / x + z * x self.setEqnField("X * X + Y / X + Z * X") self.functionTest(func, 1) func = lambda x, y, z: math.tan(x * y) + math.cos(x * y) + math.exp(z * y) self.setEqnField("tan(X * Y) + cos(X * Y) + exp(Z * Y)") self.functionTest(func, 2) def test_symbolicScientific(self): """ Test scientific notation for symbolic equations. Multiple equations are tested that verify that various combinations of both upper and lower case scientific notation evaluate correctly. For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid range. 
""" # Test upper case E func = lambda x, y, z: 3e5 / x self.setEqnField("3E5 / X") self.functionTest(func, 1) func = lambda x, y, z: 1.23e-3 * x self.setEqnField("1.23E-3 * X") self.functionTest(func, 2) # Test lower case e func = lambda x, y, z: 3e5 / x self.setEqnField("3e5 / X") self.functionTest(func, 3) func = lambda x, y, z: 1.23e-3 * x self.setEqnField("1.23e-3 * X") self.functionTest(func, 4) def test_symbolicExamples(self): """Test a handful of complicated symbolic equations.""" # example 1 func = lambda x, y, z: 10**5 * ( (-540 / (1 + math.exp(-0.02 * (x - 220))) + 520) + (-120 / (1 + math.exp(-0.02 * (x - 122))) + 92) ) self.setEqnField("10**5*((-540/(1+exp(-0.02*(X-220)))+ 520)+ (-120/(1+exp(-0.02*(X-122)))+ 92))") self.functionTest(func, 1) # example 2 func = lambda x, y, z: 222.0 + 225.2 * (1 - (x + 273.15) / 2500) + 512.2 * (1 - (x + 273.15) / 2502) ** 0.5 self.setEqnField("222.0 + 225.2 * (1 - (X + 273.15) / 2500) + 512.2 * (1 - (X + 273.15) / 2502) ** 0.5") self.functionTest(func, 2) # example 3 func = lambda x, y, z: (2.2e11 - 7.2e6 * x - 4.2e2 * x**2) * (y / (4.2 - 2.2 * y)) self.setEqnField("(2.2E11 - 7.2E6 * X - 4.2E2 * X**2) * (Y / (4.2 - 2.2 * Y))") self.functionTest(func, 3) def test_symbolicBadparse(self): """Test incorrect expressions results in errors for symbolic equations.""" # Not a math equation self.setEqnField("Not an equation") with self.assertRaises(ValueError): self.loadMaterial(num=1) # Unknown variable self.setEqnField("X + Y + W") with self.assertRaises(ValueError): self.loadMaterial(num=2) # Missing an operator self.setEqnField("X Y") with self.assertRaises(ValueError): self.loadMaterial(num=3) # Missing equation field del self.yaml["density"]["function"]["equation"] with self.assertRaises(KeyError): self.loadMaterial(num=4) def test_pickleSymbolicFunction(self): """Downstream usages might need to pickle a material. 
Ensure symbolic expression can be pickled.""" self.setEqnField("X + Y") mat = self.loadMaterial() stream = pickle.dumps(mat) mat2 = pickle.loads(stream) self.assertEqual(mat.rho.getMinBound("X"), mat2.rho.getMinBound("X")) self.assertEqual( mat.rho.calc({"X": 0.0, "Y": 10, "Z": -10}), mat2.rho.calc({"X": 0.0, "Y": 10, "Z": -10}), ) self.assertEqual( mat.rho.calc({"X": 300.0, "Y": 15, "Z": -10}), mat2.rho.calc({"X": 300.0, "Y": 15, "Z": -10}), ) def test_numpyEvals(self): """Test that numpy floats and integers work in evaluations same as integers and floats.""" self.setEqnField("X * 2.0") mat = self.loadMaterial() func = lambda x: x * 2 self.assertAlmostEqual(mat.rho.calc(X=np.float64(10), Y=5.0, Z=-10.0), func(10)) self.assertAlmostEqual(mat.rho.calc(X=np.int64(10), Y=5.0, Z=-10.0), func(10)) def test_largeExponentials(self): """Test that exponentials don't overflow.""" # If sympy is allowed to simplify this expression it will try to evaluate e^-1400 which will overflow. The # remainder of the values are chosen just to get a reasonable magnitude expression based on the min/max bounds # for X/Y. 
self.setEqnField("exp(-1400.0 + 2.6*(X*0.1+30*Y))") mat = self.loadMaterial() func = lambda x, y: math.exp(-1400 + 2.6 * (x * 0.1 + 30 * y)) self.assertAlmostEqual(mat.rho.calc(X=300, Y=5.0, Z=-10.0), func(300, 5)) def test_symbolicOutofbounds(self): """Test evaluation outside of bounds results in ValueError for symbolic equations.""" mat = self.loadMaterial() prop = mat.rho mins = [prop.getMinBound(var) for var in ["X", "Y", "Z"]] maxs = [prop.getMaxBound(var) for var in ["X", "Y", "Z"]] for i in range(3): minsEdited = copy.copy(mins) maxsEdited = copy.copy(maxs) minsEdited[i] -= 0.1 maxsEdited[i] += 0.1 with self.assertRaises(ValueError): prop.calc({"X": minsEdited[0], "Y": minsEdited[1], "Z": minsEdited[2]}) with self.assertRaises(ValueError): prop.calc({"X": maxsEdited[0], "Y": maxsEdited[1], "Z": maxsEdited[2]}) class TestBrokenSymbolicFunctions(unittest.TestCase): def test_complexNumbers(self): yaml = { "file format": "TESTS", "material type": "Metal", "composition": {"a": "balance"}, "density": { "function": { "type": "symbolic", "X": {"min": -10, "max": 500.0}, "Y": {"min": 1.0, "max": 20.0}, "Z": {"min": -30.0, "max": -10.0}, "equation": 1.0, } }, } mat = Material() mat.loadNode(yaml) # stomp all over the equation, to force it to return a complex number mat.rho.eqn = eval("lambda x, y, z: 1.0 + 2.0j") with self.assertRaises(ValueError): mat.rho._calcSpecific({"X": 1, "Y": 2, "Z": -20}) def test_isNan(self): yaml = { "file format": "TESTS", "material type": "Metal", "composition": {"a": "balance"}, "density": { "function": { "type": "symbolic", "X": {"min": -10, "max": 500.0}, "Y": {"min": 1.0, "max": 20.0}, "Z": {"min": -30.0, "max": -10.0}, "equation": 1.0, } }, } mat = Material() mat.loadNode(yaml) # stomp all over the equation, to force it to return a complex number mat.rho.eqn = eval("lambda x, y, z: math.nan") with self.assertRaises(ValueError): mat.rho._calcSpecific({"X": 1, "Y": 2, "Z": -20}) ================================================ FILE: 
armi/matProps/tests/test_tableFunctions.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests 1D and 2D table Functions.""" import numpy as np from armi.matProps.tableFunction2D import TableFunction2D from armi.matProps.tests import MatPropsFunTestBase class TestTableFunctions(MatPropsFunTestBase): """Tests 1D and 2D table Functions.""" @classmethod def setUpClass(cls): super().setUpClass() cls.baseOneDimTableData = {"type": "table", "T": 0} cls.baseOneDimTable = [[0.0, 5.0], [100.0, 105.0]] cls.baseTwoDimTableData = { "type": "two dimensional table", "T": 0, "t": 1, } cls.baseTwoDimTable = [ [None, [2.0, 200.0, 632.4555]], [1.0, [10.0, 208.0, 640.4555]], [100.0, [110.0, 308.0, 740.4555]], [316.2278, [135, 333, 765.4555]], ] def test_interpolation1Dtable(self): """Test interpolation for a two-point one-dimensional table.""" mat = self._createFunction(self.baseOneDimTableData, self.baseOneDimTable) mat.name = self.testName self.assertEqual(str(mat), f"<Material {self.testName} <MaterialType Metal>>") func = mat.rho self.assertIn("TableFunction1D", str(func)) for index in range(9): val = float(index) * 12.5 self.assertAlmostEqual(func.calc({"T": np.float64(val)}), 5.0 + val) self.assertAlmostEqual(func.calc({"T": val}), 5.0 + val) # directly check error is correctly raised if the variable is unknown with self.assertRaises(ValueError): func._calcSpecific({"X": 1}) def 
test_interpolation1DtableMissnode(self): """Test to make sure a KeyError is thrown if 'tabulated data' node is absent.""" with self.assertRaisesRegex(KeyError, "tabulated data"): self._createFunctionWithoutTable(self.baseOneDimTableData) def test_interpolation1Dtable2(self): """Test interpolation for a many-point one-dimensional table.""" data = {"type": "table", "T": {"min": 900, "max": 250}} tableData = [ [250, 25.68], [300, 25.97], [400, 26.28], [500, 26.26], [600, 25.89], [700, 25.19], [759.7, 24.61], [800, 25.10], [900, 26.32], ] mat = self._createFunction(data, tableData) mat.name = self.testName self.assertEqual(str(mat), f"<Material {self.testName} <MaterialType Metal>>") self.assertAlmostEqual(mat.rho.calc(T=250), 25.68) self.assertAlmostEqual(mat.rho.calc(T=275), 25.825) self.assertAlmostEqual(mat.rho.calc(T=500), 26.26) self.assertAlmostEqual(mat.rho.calc(T=512.5), 26.21375) self.assertAlmostEqual(mat.rho.calc(T=729.7), 24.9014572864322) self.assertAlmostEqual(mat.rho.calc(T=759.7), 24.61) with self.assertRaises(ValueError): mat.rho.calc(T=999) # bonus test of method to clear table data self.assertIsNotNone(mat.rho.tableData) mat.rho.clear() self.assertIsNone(mat.rho.tableData) def test_interpolation1DtableInt(self): """Test interpolation for one-dimensional tables with all integer values.""" tableData = [ [250, 5], [300, 6], [400, 7], [500, 8], [600, 9], [700, 10], [800, 11], [900, 12], ] mat = self._createFunction(self.baseOneDimTableData, tableData, minT=250, maxT=900) mat.name = self.testName self.assertEqual(str(mat), f"<Material {self.testName} <MaterialType Metal>>") self.assertAlmostEqual(mat.rho.calc(T=275), 5.5) self.assertAlmostEqual(mat.rho.calc(T=312.5), 6.125) def test_interpolationTable2D(self): """Test that evaluates TableFunction2D for different combinations of integer and floating values.""" mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable) mat.name = self.testName self.assertEqual(str(mat), f"<Material 
{self.testName} <MaterialType Metal>>") func = mat.rho self.assertIn("TableFunction2D", str(func)) self.assertAlmostEqual(func.calc({"T": 2, "t": 1}), 10) self.assertAlmostEqual(func.calc({"T": 2, "t": 100.0}), 110) self.assertAlmostEqual(func.calc({"T": 200, "t": 1}), 208) self.assertAlmostEqual(func.calc({"T": 200, "t": 100}), 308) self.assertAlmostEqual(func.calc({"T": 100, "t": 1}), 108) self.assertAlmostEqual(func.calc({"T": 100, "t": 100}), 208) self.assertAlmostEqual(func.calc({"T": 2, "t": 10}), 60) self.assertAlmostEqual(func.calc({"T": 100, "t": 10}), 158) self.assertAlmostEqual(func.calc({"T": 2, "t": 316.2278}), 135) self.assertAlmostEqual(func.calc({"T": 632.4555, "t": 1}), 640.4555) self.assertAlmostEqual(func.calc({"T": 200, "t": 316.2278}), 333) self.assertAlmostEqual(func.calc({"T": 632.4555, "t": 100}), 740.4555) self.assertAlmostEqual(func.calc({"T": 632.4555, "t": 316.2278}), 765.4555) self.assertAlmostEqual(func.calc({"T": 200, "t": 177.828}), 320.500006) self.assertAlmostEqual(func.calc({"T": 355.6559, "t": 100}), 463.6559) self.assertAlmostEqual(func.calc({"T": 355.6559, "t": 177.828}), 476.155906) def test_interpolationTable2DMissNode(self): """Test to make sure TableFunction2D throws a KeyError if 'tabulated data' node is absent.""" with self.assertRaisesRegex(KeyError, "tabulated data"): self._createFunctionWithoutTable(self.baseTwoDimTableData) def test_inputCheckTable2Doutbounds(self): """Ensure a ValueError is thrown when evaluating out of the valid bounds.""" mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable) func = mat.rho with self.assertRaises(ValueError): func.calc({"T": 1.99, "t": 1.0}) with self.assertRaises(ValueError): func.calc({"T": 632.4655, "t": 1.0}) with self.assertRaises(ValueError): func.calc({"T": 2.0, "t": 0.99}) with self.assertRaises(ValueError): func.calc({"T": 2.0, "t": 316.2378}) def test_inputCheckTableMinVar(self): """Test to make sure an error is raised when attempting to evaluate below 
the valid range.""" self.belowMinimumCheck(self.baseOneDimTableData, self.baseOneDimTable) def test_inputCheckTableMaxVar(self): """Test to make sure an error is raised when attempting to evaluate above the valid range.""" self.aboveMaximumCheck(self.baseOneDimTableData, self.baseOneDimTable) def test_inputCheckTable2DMinVar1(self): """Test to make sure an error is raised when attempting to evaluate below the valid range.""" mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable) func = mat.rho with self.assertRaises(ValueError): func.calc({"T": 1, "t": 50}) def test_inputCheckTable2DMaxVar1(self): """Test to make sure an error is raised when attempting to evaluate above the valid range.""" mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable) func = mat.rho with self.assertRaises(ValueError): func.calc({"T": 650, "t": 50}) def test_inputCheckTable2DMinVar2(self): """Ensure an ValueError is raised when evaluating below the valid range.""" mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable) func = mat.rho with self.assertRaises(ValueError): func.calc({"T": 1, "t": 0}) def test_table2DsetBounds(self): mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable) fun = mat.rho # staring values self.assertEqual(fun.independentVars["T"], (2.0, 632.4555)) self.assertEqual(fun.independentVars["t"], (1.0, 316.2278)) # calling _setBounds will wipe out the "t" variable, but not update "T" fun._columnValues = [123, 987] fun._setBounds(0, "T") self.assertEqual(fun.independentVars["T"], (2.0, 632.4555)) with self.assertRaises(KeyError): fun.independentVars["t"] # Here we update "T" with new column values fun._columnValues = [123, 987] fun._setBounds(0, "X") self.assertEqual(fun.independentVars["X"], (123.0, 987.0)) # Here we update the new variable "X" with new row values fun._rowValues = [11, 99] fun._setBounds(1, "X") self.assertEqual(fun.independentVars["T"], (2.0, 632.4555)) 
self.assertEqual(fun.independentVars["X"], (11.0, 99.0)) with self.assertRaises(KeyError): fun.independentVars["t"] # Bad inputs with self.assertRaises(ValueError): fun._setBounds(2, "X") def test_inputCheckTable2DMaxVar2(self): """Ensure an ValueError is raised when evaluating above the valid range.""" mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable) func = mat.rho with self.assertRaises(ValueError): func.calc({"T": 1, "t": 1000}) def test_calcSpec2dEdgeCase(self): f = TableFunction2D("mat", "prop") f.independentVars = {"T": (250.0, 800.0), "t": (1, 3)} # This should fail correctly when given a bad input param with self.assertRaises(ValueError): f._calcSpecific({"Pa": 1.0}) ================================================ FILE: armi/materials/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The material package defines compositions and material-specific properties. Properties in scope include temperature dependent thermo/mechanical properties (like heat capacity, linear expansion coefficients, viscosity, density), and material-specific nuclear properties that can't exist at the nuclide level alone (like :py:mod:`thermal scattering laws <armi.nucDirectory.thermalScattering>`). As the fundamental macroscopic building blocks of any physical object, these are highly important to reactor analysis. 
This module handles the dynamic importing of all the materials defined here at the framework level as well as in all the attached plugins. It is expected that most teams will have special material definitions that they will want to define. It may also make sense in the future to support user-input materials that are not hard-coded into the app. The base class for all materials is in :py:mod:`armi.materials.material`. """ import importlib import inspect import pkgutil from typing import List from armi.materials.material import Material # This will frequently be updated by the CONF_MATERIAL_NAMESPACE_ORDER setting # during reactor construction (see armi.reactor.reactors.factory). _MATERIAL_NAMESPACE_ORDER = ["armi.materials"] def setMaterialNamespaceOrder(order): """ Set the material namespace order at the Python interpreter, global level. .. impl:: Material collections are defined with an order of precedence in the case of duplicates. :id: I_ARMI_MAT_ORDER :implements: R_ARMI_MAT_ORDER An ARMI application will need materials. Materials can be imported from any code the application has access to, like plugin packages. This leads to the situation where one ARMI application will want to import multiple collections of materials. To handle this, ARMI keeps a list of material namespaces. This is an ordered list of importable packages that ARMI can search for a particular material by name. This automatic exploration of an importable package saves the user the tedium have having to import or include hundreds of materials manually somehow. But it comes with a caveat; the list is ordered. If two different namespaces in the list include a material with the same name, the first one found in the list is chosen, i.e. earlier namespaces in the list have precedence. """ global _MATERIAL_NAMESPACE_ORDER _MATERIAL_NAMESPACE_ORDER = order def importMaterialsIntoModuleNamespace(path, name, namespace, updateSource=None): """ Import all Material subclasses into the top subpackage. 
This allows devs to use ``from armi.materials import HT9`` This can be used in plugins for similar purposes. .. warning:: Do not directly import materials from this namespace in code. Use the full module import instead. This is just for material resolution. This will be replaced with a more formal material registry in the future. Parameters ---------- path : str Path to package/module being imported name : str module name namespace : dict The namespace updateSource : str, optional Change DATA_SOURCE on import to a different string. Useful for saying where plugin materials are coming from. """ for _modImporter, modname, _ispkg in pkgutil.walk_packages(path=path, prefix=name + "."): if "test" not in modname: mod = importlib.import_module(modname) for item, obj in mod.__dict__.items(): try: if issubclass(obj, Material): namespace[item] = obj if updateSource: obj.DATA_SOURCE = updateSource except TypeError: # some non-class local pass importMaterialsIntoModuleNamespace(__path__, __name__, globals()) def iterAllMaterialClassesInNamespace(namespace): """ Iterate over all Material subclasses found in a namespace. Notes ----- Useful for testing. """ for obj in namespace.__dict__.values(): if inspect.isclass(obj): if issubclass(obj, Material): yield obj def resolveMaterialClassByName(name: str, namespaceOrder: List[str] = None): """ Find the first material class that matches a name in an ordered namespace. Names can either be fully resolved class paths (e.g. ``armi.materials.uZr:UZr``) or simple class names (e.g. ``UZr``). In the latter case, the ``CONF_MATERIAL_NAMESPACE_ORDER`` setting to allows users to choose which particular material of a common name (like UO2 or HT9) gets used. Input files usually specify a material like UO2. Which particular implementation gets used (Framework's UO2 vs. a user plugins UO2 vs. the Kentucky Transportation Cabinet's UO2) is up to the user at runtime. .. impl:: Materials can be searched across packages in a defined namespace. 
:id: I_ARMI_MAT_NAMESPACE :implements: R_ARMI_MAT_NAMESPACE During the runtime of an ARMI application, but particularly during the construction of the reactor in memory, materials will be requested by name. At that point, this code is called to search for that material name. The search goes through the ordered list of Python namespaces provided. The first time an instance of that material is found, it is returned. In this way, the first items in the material namespace list take precedence. When a material name is passed to this function, it may be either a simple name like the string ``"UO2"`` or it may be much more specific, like ``armi.materials.uraniumOxide:UO2``. Parameters ---------- name : str The material class name to find, e.g. ``"UO2"``. Optionally, a module path and class name can be provided with a colon separator as ``module:className``, e.g. ``armi.materials.uraniumOxide:UO2`` for direct specification. namespaceOrder : list of str, optional A list of namespaces in order of preference in which to search for the material. If not passed, the value in the global ``MATERIAL_NAMESPACE_ORDER`` will be used, which is often set by the ``CONF_MATERIAL_NAMESPACE_ORDER`` setting (e.g. during reactor construction). Any value passed into this argument will be ignored if the ``name`` is provided with a ``modulePath``. Returns ------- matCls : armi.materials.material.Material The material Raises ------ KeyError When material of name cannot be found in namespaces. Examples -------- >>> resolveMaterialClassByName("UO2", ["something.else.materials", "armi.materials"]) <class 'something.else.materials.UO2'> See Also -------- armi.reactor.reactors.factory Applies user settings to default namespace order. 
""" if ":" in name: # assume direct package path like `armi.materials.uZr:UZr` modPath, clsName = name.split(":") mod = importlib.import_module(modPath) return getattr(mod, clsName) namespaceOrder = namespaceOrder or _MATERIAL_NAMESPACE_ORDER for namespace in namespaceOrder: mod = importlib.import_module(namespace) if hasattr(mod, name): return getattr(mod, name) raise KeyError( f"Cannot find material named `{name}` in any of: {str(namespaceOrder)}. " "Please update inputs or plugins. See CONF_MATERIAL_NAMESPACE_ORDER setting." ) ================================================ FILE: armi/materials/air.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple air material. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials import material from armi.utils.units import G_PER_CM3_TO_KG_PER_M3, getTk class Air(material.Fluid): """ Dry, Near Sea Level. Correlations based off of values in Incropera, Frank P., et al. Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002. Elemental composition from PNNL-15870 Rev. 
1 https://www.pnnl.gov/main/publications/external/technical_reports/PNNL-15870Rev1.pdf """ """ temperature ranges based on where values are more than 1% off of reference """ propertyValidTemperature = { "pseudoDensity": ((100, 2400), "K"), "heat capacity": ((100, 1300), "K"), "thermal conductivity": ((200, 850), "K"), } def setDefaultMassFracs(self): """ Set mass fractions. Notes ----- Mass fraction reference McConn, Ronald J., et al. Compendium of material composition data for radiation transport modeling. No. PNNL-15870 Rev. 1. Pacific Northwest National Lab.(PNNL), Richland, WA (United States), 2011. https://www.pnnl.gov/main/publications/external/technical_reports/PNNL-15870Rev1.pdf """ self.setMassFrac("C", 0.000124) self.setMassFrac("N", 0.755268) self.setMassFrac("O", 0.231781) self.setMassFrac("AR", 0.012827) def pseudoDensity( self, Tk=None, Tc=None, ): """ Returns density of Air in g/cc. This is from Table A.4 in Fundamentals of Heat and Mass Transfer Incropera, DeWitt Parameters ---------- Tk : float, optional temperature in degrees Kelvin Tc : float, optional temperature in degrees Celsius Notes ----- In ARMI, we define pseudoDensity() and density() as the same for Fluids. Returns ------- density : float mass density in g/cc """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("pseudoDensity", Tk) inv_Tk = 1.0 / Tk rho_kgPerM3 = 1.15675e03 * inv_Tk**2 + 3.43413e02 * inv_Tk + 2.99731e-03 return rho_kgPerM3 / G_PER_CM3_TO_KG_PER_M3 def specificVolumeLiquid(self, Tk=None, Tc=None): """Returns the liquid specific volume in m^3/kg of this material given Tk in K or Tc in C.""" return 1 / (1000.0 * self.pseudoDensity(Tk, Tc)) def thermalConductivity(self, Tk=None, Tc=None): """ Returns thermal conductivity of Air in g/cc. 
This is from Table A.4 in Fundamentals of Heat and Mass Transfer Incropera, DeWitt Parameters ---------- Tk : float, optional temperature in degrees Kelvin Tc : float, optional temperature in degrees Celsius Returns ------- thermalConductivity : float thermal conductivity in W/m*K """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("thermal conductivity", Tk) thermalConductivity = 2.13014e-08 * Tk**3 - 6.31916e-05 * Tk**2 + 1.11629e-01 * Tk - 2.00043e00 return thermalConductivity * 1e-3 def heatCapacity(self, Tk=None, Tc=None): """ Returns heat capacity of Air in g/cc. This is from Table A.4 in Fundamentals of Heat and Mass Transfer Incropera, DeWitt Parameters ---------- Tk : float, optional temperature in degrees Kelvin Tc : float, optional temperature in degrees Celsius Returns ------- heatCapacity : float heat capacity in J/kg*K """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("heat capacity", Tk) return ( sum( [ +1.38642e-13 * Tk**4, -6.47481e-10 * Tk**3, +1.02345e-06 * Tk**2, -4.32829e-04 * Tk, +1.06133e00, ] ) * 1000.0 ) # kJ / kg K to J / kg K ================================================ FILE: armi/materials/alloy200.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Alloy-200 are wrought commercially pure nickel. The data in this file exists for testing and demonstration purposes only. 
Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from numpy import interp from armi.materials.material import Material from armi.utils.units import getTk class Alloy200(Material): references = { "linearExpansion": [ "Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf" ], "refDens": ["Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf"], "referenceMaxPercentImpurites": [ "Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf" ], } modelConst = { "a0": 1.21620e-5, "a1": 8.30010e-9, "a2": -3.94985e-12, "TRefa": 20, # Constants for thermal expansion } propertyValidTemperature = {"linear expansion": ((73.15, 1273.15), "K")} referenceMaxPercentImpurites = [ ("C", 0.15), ("MN", 0.35), ("S", 0.01), ("SI", 0.35), ("CU", 0.25), ("FE", 0.40), ] linearExpansionTableK = [ 73.15, 173.15, 373.15, 473.15, 573.15, 673.15, 773.15, 873.15, 973.15, 1073.15, 1173.15, 1273.15, ] linearExpansionTable = [ 10.1e-6, 11.3e-6, 13.3e-6, 13.9e-6, 14.3e-6, 14.8e-6, 15.2e-6, 15.6e-6, 15.8e-6, 16.2e-6, 16.5e-6, 16.7e-6, ] def linearExpansion(self, Tk=None, Tc=None): r""" Returns instantaneous coefficient of thermal expansion of Alloy 200. Parameters ---------- Tk : float, optional temperature in degrees Kelvin Tc : float, optional temperature in degrees Celsius Returns ------- linearExpansion : float instantaneous coefficient of thermal expansion of Alloy 200 (1/C) """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion", Tk) return interp(Tk, self.linearExpansionTableK, self.linearExpansionTable) def setDefaultMassFracs(self): """ Notes ----- It is assumed half the max composition for the impurities and the rest is Ni. 
""" nickleMassFrac = 1.0 for elementSymbol, massFrac in self.referenceMaxPercentImpurites: assumedMassFrac = massFrac * 0.01 / 2.0 self.setMassFrac(elementSymbol, assumedMassFrac) nickleMassFrac -= assumedMassFrac self.setMassFrac("NI", nickleMassFrac) self.refDens = 8.9 ================================================ FILE: armi/materials/b4c.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Boron carbide; a very typical reactor control material. Note that this material defaults to a theoretical density fraction of 0.9, reflecting the difficulty of producing B4C at 100% theoretical density in real life. To get different fraction, use the `TD_frac` material modification in your assembly definition. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. 
""" from armi import runLog from armi.materials import material from armi.utils.units import getTc class B4C(material.Material): DEFAULT_MASS_DENSITY = 2.52 DEFAULT_THEORETICAL_DENSITY_FRAC = 0.90 enrichedNuclide = "B10" NATURAL_B10_NUM_FRAC = 0.199 propertyValidTemperature = {"linear expansion percent": ((25, 600), "C")} def __init__(self): self.b10NumFrac = self.NATURAL_B10_NUM_FRAC super().__init__() def applyInputParams(self, B10_wt_frac=None, theoretical_density=None, TD_frac=None, *args, **kwargs): if B10_wt_frac is not None: # we can't just use the generic enrichment adjustment here because the # carbon has to change with enrich. self.adjustMassEnrichment(B10_wt_frac) if theoretical_density is not None: runLog.warning( "The 'theoretical_density' material modification for B4C will be " "deprecated. Update your inputs to use 'TD_frac' instead.", single=True, ) if TD_frac is not None: runLog.warning( f"Both 'theoretical_density' and 'TD_frac' are specified for {self}. 'TD_frac' will be used." ) else: self.updateTD(theoretical_density) if TD_frac is not None: self.updateTD(TD_frac) def updateTD(self, td: float) -> None: self.theoreticalDensityFrac = td self.clearCache() def setNewMassFracsFromMassEnrich(self, massEnrichment): r""" Calculate the mass fractions for a given mass enrichment and set it on any parent. Parameters ---------- massEnrichment : float The mass enrichment as a fraction. 
Returns ------- boron10MassGrams, boron11MassGrams, carbonMassGrams : float The resulting mass of each nuclide/element Notes ----- B-10: 10.012 g/mol B-11: 11.009 g/mol Carbon: 12.0107 g/mol 4 moles of boron/1 mole of carbon grams of boron-10 = 10.012 g/mol* 4 mol * 0.199 = 7.969552 g grams of boron-11 = 11.009 g/mol* 4 mol * 0.801 = 35.272836 g grams of carbon= 12.0107 g/mol * 1 mol = 12.0107 g from number enrichment mi: mB10 = nB10*AB10 /(nB10*AB10 + nB11*AB11) """ if massEnrichment < 0 or massEnrichment > 1: raise ValueError(f"massEnrichment {massEnrichment} is unphysical for B4C") nb = self.parent.nuclideBases if self.parent else None if nb is None: b10AtomicMass = 10.01293728 b11AtomicMass = 11.0093054803 cAtomicMass = 12.011137118560828 else: b10AtomicMass = nb.byName["B10"].weight b11AtomicMass = nb.byName["B11"].weight cAtomicMass = nb.byName["C"].weight b10NumEnrich = (massEnrichment / b10AtomicMass) / ( massEnrichment / b10AtomicMass + (1 - massEnrichment) / b11AtomicMass ) b11NumEnrich = 1.0 - b10NumEnrich boron10MassGrams = b10AtomicMass * b10NumEnrich * 4.0 boron11MassGrams = b11AtomicMass * b11NumEnrich * 4.0 carbonMassGrams = cAtomicMass gTotal = boron10MassGrams + boron11MassGrams + carbonMassGrams boron10MassGrams /= gTotal boron11MassGrams /= gTotal carbonMassGrams /= gTotal if self.parent: self.parent.setMassFracs({"B10": boron10MassGrams, "B11": boron11MassGrams, "C": carbonMassGrams}) return boron10MassGrams, boron11MassGrams, carbonMassGrams def setDefaultMassFracs(self) -> None: r"""B4C mass fractions. Using Natural B4C. 19.9% B-10/ 80.1% B-11 Boron: 10.811 g/mol Carbon: 12.0107 g/mol. 4 moles of boron/1 mole of carbon grams of boron-10 = 10.01 g/mol* 4 mol * 0.199 = 7.96796 g grams of boron-11 = 11.01 g/mol* 4 mol * 0.801 = 35.27604 g grams of carbon= 12.0107 g/mol * 1 mol = 12.0107 g total=55.2547 g. Mass fractions are computed from this. 
""" nb = self.parent.nuclideBases if self.parent else None if nb is None: b10AtomicMass = 10.01293728 b11AtomicMass = 11.0093054803 else: b10AtomicMass = nb.byName["B10"].weight b11AtomicMass = nb.byName["B11"].weight massEnrich = self.getMassEnrichmentFromNumEnrich(self.b10NumFrac, b10AtomicMass, b11AtomicMass) gBoron10, gBoron11, gCarbon = self.setNewMassFracsFromMassEnrich(massEnrichment=massEnrich) self.setMassFrac("B10", gBoron10) self.setMassFrac("B11", gBoron11) self.setMassFrac("C", gCarbon) self.refDens = self.DEFAULT_MASS_DENSITY # TD reference : Dunner, Heuvel, "Absorber Materials for control rod systems of fast breeder reactors" # Journal of nuclear materials, 124, 185-194, (1984)." self.theoreticalDensityFrac = self.DEFAULT_THEORETICAL_DENSITY_FRAC # normally is around 0.88-93. @staticmethod def getMassEnrichmentFromNumEnrich( b10NumFrac: float, b10AtomicMass: float = None, b11AtomicMass: float = None ) -> float: """Given a B10 number fraction, give the B10 weight fraction.""" if b10AtomicMass is None: b10AtomicMass = 10.01293728 if b11AtomicMass is None: b11AtomicMass = 11.0093054803 return b10NumFrac * b10AtomicMass / (b10NumFrac * b10AtomicMass + (1.0 - b10NumFrac) * b11AtomicMass) def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float: """ Return density that preserves mass when thermally expanded in 2D. Notes ----- Applies theoretical density of B4C to parent method """ return material.Material.pseudoDensity(self, Tk, Tc) * self.theoreticalDensityFrac def density(self, Tk: float = None, Tc: float = None) -> float: """ Return density that preserves mass when thermally expanded in 3D. Notes ----- Applies theoretical density of B4C to parent method """ return material.Material.density(self, Tk, Tc) * self.theoreticalDensityFrac def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float: """Boron carbide expansion. 
Very preliminary.""" Tc = getTc(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tc) deltaT = Tc - 25 dLL = deltaT * 4.5e-6 return dLL * 100 ================================================ FILE: armi/materials/be9.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Beryllium is a lightweight metal with lots of interesting nuclear use-cases. It has a nice (n,2n) reaction and is an inhalation hazard. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Material from armi.nucDirectory import thermalScattering as tsl from armi.utils.units import getTk class Be9(Material): """Beryllium.""" thermalScatteringLaws = (tsl.fromNameAndCompound("BE", tsl.BE_METAL),) propertyValidTemperature = {"linear expansion percent": ((50, 1560.0), "K")} def setDefaultMassFracs(self): self.setMassFrac("BE9", 1.0) self.refDens = 1.85 def linearExpansionPercent(self, Tk=None, Tc=None): r""" Finds the linear expansion coefficient of Be9. given T in C returns m/m-K Based on http://www-ferp.ucsd.edu/LIB/PROPS/PANOS/be.html which is in turn based on Fusion Engineering and Design . FEDEEE 5(2), 141-234 (1987). 
""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tk) return 1e-4 * (8.4305 + 1.1464e-2 * Tk - 2.9752e-6 * Tk**2) ================================================ FILE: armi/materials/caH2.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Calcium Hydride. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import SimpleSolid class CaH2(SimpleSolid): """CalciumHydride.""" def setDefaultMassFracs(self): """Default mass fractions. 
http://atom.kaeri.re.kr/ton/ iso atomic percent abundance and atomic mass of 20-calcium | 20-Ca-40 96.941% 39.9625912 | 20-Ca-42 0.647% 41.9586183 | 20-Ca-43 0.135% 42.9587668 | 20-Ca-44 2.086% 43.9554811 | 20-Ca-46 0.004% 45.9536928 | 20-Ca-48 0.187% 47.9525335 atomic weight of H2 2.01565 weight of CaH2 42.09367285 | weight% of Ca-40 in CaH2 0.920331558 | weight% of Ca-42 in CaH2 0.006449241 | weight% of Ca-43 in CaH2 0.001377745 | weight% of Ca-44 in CaH2 0.02178264 | weight% of Ca-46 in CaH2 4.3668E-05 | weight% of Ca-48 in CaH2 0.002130278 | weight% of H2 in CaH2 0.047884869 """ self.setMassFrac("CA", 0.952115131) self.setMassFrac("H", 0.047884869) def density(self, Tk=None, Tc=None): """Mass density. http://en.wikipedia.org/wiki/Calcium_hydride Returns ------- density : float grams / cc """ return 1.70 ================================================ FILE: armi/materials/californium.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Californium is a synthetic element made in nuclear reactors. It is interesting in that it has a large spontaneous fission decay mode that produces lots of neutrons. It's often used as a neutron source. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. 
The data contained in this file should not be used in production simulations. """ from armi.materials.material import SimpleSolid class Californium(SimpleSolid): def setDefaultMassFracs(self): self.setMassFrac("CF252", 1.0) def density(self, Tk=None, Tc=None): """https://en.wikipedia.org/wiki/Californium.""" return 15.1 # g/cm3 ================================================ FILE: armi/materials/concrete.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Concrete. Concrete is often used to provide structural support of nuclear equipment. It can also provide radiation shielding. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Material class Concrete(Material): """Simple concreate material. 
https://web.archive.org/web/20221103120449/https://physics.nist.gov/cgi-bin/Star/compos.pl?matno=144 """ def setDefaultMassFracs(self): self.setMassFrac("H", 0.010000) self.setMassFrac("C", 0.001000) self.setMassFrac("O16", 0.529107) self.setMassFrac("NA23", 0.016000) self.setMassFrac("MG", 0.002000) self.setMassFrac("AL", 0.033872) self.setMassFrac("SI", 0.337021) self.setMassFrac("K", 0.013000) self.setMassFrac("CA", 0.044000) self.setMassFrac("FE", 0.014000) def density(self, Tk=None, Tc=None): return 2.3000 # g/cm3 ================================================ FILE: armi/materials/copper.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Copper metal. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Material from armi.utils.units import getTk class Cu(Material): propertyValidTemperature = {"linear expansion percent": ((40.43, 788.83), "K")} def setDefaultMassFracs(self): self.setMassFrac("CU63", 0.6915) self.setMassFrac("CU65", 0.3085) def density(self, Tk=None, Tc=None): return 8.913 # g/cm3 def linearExpansionPercent(self, Tk=None, Tc=None): """ Return the linear expansion percent for Copper. 
Notes ----- Digitized using Engauge Digitizer from Figure 21 of Thrust Chamber Life Prediction - Volume I - Mechanical and Physical Properties of High Performance Rocket Nozzle Materials (NASA CR - 134806) """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tk) return 5.0298e-07 * Tk**2 + 1.3042e-03 * Tk - 4.3097e-01 ================================================ FILE: armi/materials/cs.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Cesium. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Fluid from armi.utils.units import getTk class Cs(Fluid): """Cesium.""" def setDefaultMassFracs(self): self.setMassFrac("CS133", 1.0) def pseudoDensity(self, Tk=None, Tc=None): """The 2D/3D density of liquid Cesium. https://en.wikipedia.org/wiki/Caesium Notes ----- In ARMI, we define pseudoDensity() and density() as the same for Fluids. 
""" Tk = getTk(Tc, Tk) if Tk < self.meltingPoint(): return 1.93 # g/cm3 else: return 1.843 # g/cm3 def meltingPoint(self): return 301.7 # K ================================================ FILE: armi/materials/custom.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Custom materials are ones that you can specify all the number densities yourself. Useful for benchmarking when you have a particular specified material density. Use the isotopic input described in :ref:`bp-input-file`. The density function gets applied from custom isotopics by :py:meth:`armi.reactor.blueprints.isotopicOptions.CustomIsotopic.apply`. """ from armi.materials.material import Material class Custom(Material): """Custom Materials have user input properties.""" enrichedNuclide = "U235" def __init__(self): """ During construction, set default density to 1.0. That way, people can set number densities without having to set a density and it will work. This will generally be overwritten in practice by a constant user-input density. """ Material.__init__(self) self.customDensity = 1.0 def pseudoDensity(self, Tk=None, Tc=None): """ The density value is set in the loading input. In some cases it needs to be set after full core assemblies are populated (e.g. for CustomLocation materials), so the missing density warning will appear no matter what. 
""" return self.customDensity def setMassFrac(self, *args, **kwargs): if self.customDensity == 1.0: raise ValueError("Cannot set mass fractions on Custom materials unless a density is defined.") Material.setMassFrac(self, *args, **kwargs) ================================================ FILE: armi/materials/graphite.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Graphite is often used as a moderator in gas-cooled nuclear reactors. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Material from armi.nucDirectory import thermalScattering as tsl from armi.utils import units class Graphite(Material): """ Graphite. .. [INL-EXT-16-38241] McEligot, Donald, Swank, W. David, Cottle, David L., and Valentin, Francisco I. Thermal Properties of G-348 Graphite. United States: N. p., 2016. Web. doi:10.2172/1330693. https://www.osti.gov/biblio/1330693 """ thermalScatteringLaws = (tsl.fromNameAndCompound("C", tsl.GRAPHITE_10P),) def setDefaultMassFracs(self): """ Set graphite to carbon. Room temperature density from [INL-EXT-16-38241]_, table 2. 
""" self.setMassFrac("C", 1.0) self.refDens = 1.8888 def linearExpansionPercent(self, Tk=None, Tc=None): """ This is dL/L0 for graphite. From [INL-EXT-16-38241]_, page 4. """ Tc = units.getTc(Tc, Tk) return 100 * (-1.454e-4 + 4.812e-6 * Tc + 1.145e-9 * Tc**2) ================================================ FILE: armi/materials/hafnium.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Hafnium is an element that has high capture cross section across multiple isotopes. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. 
""" from armi.materials.material import SimpleSolid from armi.nucDirectory import nucDir class Hafnium(SimpleSolid): def setDefaultMassFracs(self): for a, abund in nucDir.getNaturalMassIsotopics("HF"): self.setMassFrac("HF{0}".format(a), abund) def density(self, Tk=None, Tc=None): r"""http://www.lenntech.com/periodic/elements/hf.htm.""" return 13.07 ================================================ FILE: armi/materials/hastelloyN.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Hastelloy-N is a high-nickel structural material invented by ORNL for handling molten fluoride salts. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Material from armi.utils.units import getTc, getTk class HastelloyN(Material): r""" Hastelloy N alloy (UNS N10003). .. [Haynes] Haynes International, H-2052D 2020 (http://haynesintl.com/docs/default-source/pdfs/new-alloy-brochures/corrosion-resistant-alloys/brochures/n-brochure.pdf) .. [SAB] Sabharwall, et. al. 
Feasibility Study of Secondary Heat Exchanger Concepts
        for the Advanced High Temperature Reactor
        INL/EXT-11-23076, 2011
    """

    # NOTE: runtime string used elsewhere for reporting; do not edit casually.
    materialIntro = (
        "Hastelloy N alloy is a nickel-base alloy that was invented at Oak RIdge National Laboratories "
        "as a container material for molten fluoride salts. It has good oxidation resistance to hot fluoride "
        "salts in the temperature range of 704 to 871C (1300 to 1600F)"
    )

    # Temperature ranges (with units) over which each property correlation below is valid.
    propertyValidTemperature = {
        "thermal conductivity": ((473.15, 973.15), "K"),
        "heat capacity": ((373.15, 973.15), "K"),
        "thermal expansion": ((293.15, 1173.15), "K"),
    }

    refTempK = 293.15  # reference temperature (K) for thermal expansion

    def setDefaultMassFracs(self):
        """
        Hastelloy N mass fractions.

        From [Haynes]_.
        """
        self.setMassFrac("CR", 0.07)
        self.setMassFrac("MO", 0.16)
        self.setMassFrac("FE", 0.04)  # max.
        self.setMassFrac("SI", 0.01)  # max.
        self.setMassFrac("MN", 0.0080)  # max.
        self.setMassFrac("V", 0.0005)  # max.
        self.setMassFrac("C", 0.0006)
        self.setMassFrac("CO", 0.0020)  # max.
        self.setMassFrac("CU", 0.0035)  # max.
        self.setMassFrac("W", 0.005)  # max.
        self.setMassFrac("AL", 0.0025)  # max.
        self.setMassFrac("TI", 0.0025)  # max.
        # Nickel makes up whatever is left after the alloying elements above.
        self.setMassFrac("NI", 1.0 - sum(self.massFrac.values()))  # balance

        self.refDens = 8.86  # g/cc

    def thermalConductivity(self, Tk=None, Tc=None):
        r"""
        Calculates the thermal conductivity of Hastelloy N.

        Second order polynomial fit to data from [Haynes]_.

        Parameters
        ----------
        Tk : float
            Temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        Hastelloy N thermal conductivity (W/m-K)
        """
        Tc = getTc(Tc, Tk)
        Tk = getTk(Tc=Tc)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        return 1.92857e-05 * Tc**2 + 3.12857e-03 * Tc + 1.17743e01  # W/m-K

    def heatCapacity(self, Tk=None, Tc=None):
        r"""
        Calculates the specific heat capacity of Hastelloy N.

        Sixth order polynomial fit to data from Table 2-20 [SAB]_ (R^2=0.97).

        Parameters
        ----------
        Tk : float
            Temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        Hastelloy N specific heat capacity (J/kg-C)
        """
        Tc = getTc(Tc, Tk)
        Tk = getTk(Tc=Tc)
        self.checkPropertyTempRange("heat capacity", Tk)
        return (
            +3.19981e02
            + 2.47421e00 * Tc
            - 2.49306e-02 * Tc**2
            + 1.32517e-04 * Tc**3
            - 3.58872e-07 * Tc**4
            + 4.69003e-10 * Tc**5
            - 2.32692e-13 * Tc**6
        )

    def linearExpansionPercent(self, Tk=None, Tc=None):
        r"""
        Average thermal expansion dL/L. Used for computing hot dimensions.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        %dLL(T) in m/m/K
        """
        Tc = getTc(Tc, Tk)
        refTempC = getTc(Tk=self.refTempK)
        # Strain is mean CTE times temperature rise above reference; x100 for percent.
        return 100.0 * self.meanCoefficientThermalExpansion(Tc=Tc) * (Tc - refTempC)

    def meanCoefficientThermalExpansion(self, Tk=None, Tc=None):
        r"""
        Mean coefficient of thermal expansion for Hastelloy N.

        Second order polynomial fit of data from [Haynes]_.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        mean coefficient of thermal expansion in m/m/C
        """
        Tc = getTc(Tc, Tk)
        Tk = getTk(Tc=Tc)
        self.checkPropertyTempRange("thermal expansion", Tk)
        return 2.60282e-12 * Tc**2 + 7.69859e-10 * Tc + 1.21036e-05


================================================
FILE: armi/materials/ht9.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Simple/academic/incomplete HT9 ferritic-martensitic stainless steel material. This is a famous SFR cladding/duct material because it doesn't void swell that much. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi import materials from armi.utils import units class HT9(materials.Material): """ Simplified HT9 stainless steel. .. warning:: This is an academic-quality material. When more detail is desired, a custom material should be implemented via a user-provided plugin. .. [MFH] Metallic Fuels Handbook Hofman, G. L., Billone, M. C., Koenig, J. F., Kramer, J. M., Lambert, J. D. B., Leibowitz, L., Orechwa, Y., Pedersen, D. R., Porter, D. L., Tsai, H., and Wright, A. E. Metallic Fuels Handbook. United States: N. p., 2019. Web. doi:10.2172/1506477. https://www.osti.gov/biblio/1506477-metallic-fuels-handbook """ propertyValidTemperature = {"linear expansion": ((293, 1050), "K")} def setDefaultMassFracs(self): """ HT9 mass fractions. From E.2-1 of [MFH]_. https://www.osti.gov/biblio/1506477-metallic-fuels-handbook """ self.setMassFrac("C", 0.002) self.setMassFrac("MN", 0.005) self.setMassFrac("SI", 0.0025) self.setMassFrac("NI", 0.0055) self.setMassFrac("CR", 0.1175) self.setMassFrac("MO", 0.01) self.setMassFrac("W", 0.0055) self.setMassFrac("V", 0.0030) self.setMassFrac("FE", 1.0 - sum(self.massFrac.values())) self.refDens = 7.778 def linearExpansionPercent(self, Tk=None, Tc=None): """ Gets the linear expansion from E.2.2.2 in [MFH]_ for HT9. The ref gives dL/L0 in percent and is valid from 293 - 1050 K. 
""" tk = units.getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion", tk) return -0.16256 + 1.62307e-4 * tk + 1.42357e-6 * tk**2 - 5.50344e-10 * tk**3 def thermalConductivity(self, Tk=None, Tc=None): """ Thermal conductivity in W/m-K). From [MFH]_, E.2.2.3, eq 5. .. tip:: This can probably be sped up with a polynomial evaluator. """ Tk = units.getTk(Tc, Tk) return 29.65 - 6.668e-2 * Tk + 2.184e-4 * Tk**2 - 2.527e-7 * Tk**3 + 9.621e-11 * Tk**4 ================================================ FILE: armi/materials/inconel.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Inconel is a austenitic nickel-chromium superalloy. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. 
""" from armi.materials.material import SimpleSolid class Inconel(SimpleSolid): references = { "mass fractions": "https://www.specialmetals.com/documents/technical-bulletins/inconel/inconel-alloy-617.pdf", "density": "https://www.specialmetals.com/documents/technical-bulletins/inconel/inconel-alloy-617.pdf", } def setDefaultMassFracs(self): self.setMassFrac("NI", 0.52197) self.setMassFrac("CR", 0.22) self.setMassFrac("CO59", 0.125) self.setMassFrac("MO", 0.09) self.setMassFrac("AL27", 0.0115) self.setMassFrac("C", 0.001) self.setMassFrac("FE", 0.015) self.setMassFrac("MN55", 0.005) self.setMassFrac("SI", 0.005) self.setMassFrac("TI", 0.003) self.setMassFrac("CU", 0.0025) self.setMassFrac("B10", 0.00003 * 0.1997) self.setMassFrac("B11", 0.00003 * (1.0 - 0.1997)) def density(self, Tk=None, Tc=None): return 8.3600 class Inconel617(Inconel): """ Note: historically the 'Inconel' material represented the high-nickel alloy Inconel 617. This material enables the user to know with certainty that this material represents Inconel 617 and doesn't break any older models. """ ================================================ FILE: armi/materials/inconel600.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Inconel600. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. 
And this material has proven useful for testing. The data contained in this file should not be
used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTc


class Inconel600(Material):
    """Inconel alloy 600 (UNS N06600); correlations fit from the Special Metals datasheet."""

    # Temperature ranges (with units) over which each property correlation below is valid.
    propertyValidTemperature = {
        "heat capacity": ((20, 900), "C"),
        "linear expansion": ((21.0, 900.0), "C"),
        "linear expansion percent": ((21.0, 900.0), "C"),
        "thermal conductivity": ((20.0, 800.0), "C"),
    }

    references = {
        "mass fractions": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "density": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "thermalConductivity": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "specific heat": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "linear expansion percent": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "linear expansion": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
    }

    refTempK = 294.15  # reference temperature (K, i.e. 21C) for thermal expansion

    def __init__(self):
        Material.__init__(self)
        self.refDens = 8.47  # g/cc
        # Only density measurement presented in the reference. Presumed to be performed at 21C since
        # this was the reference temperature for linear expansion measurements.

    def setDefaultMassFracs(self):
        # Nominal alloy 600 composition from the datasheet.
        massFracs = {
            "NI": 0.7541,
            "CR": 0.1550,
            "FE": 0.0800,
            "C": 0.0008,
            "MN55": 0.0050,
            "S": 0.0001,
            "SI": 0.0025,
            "CU": 0.0025,
        }
        for element, massFrac in massFracs.items():
            self.setMassFrac(element, massFrac)

    def thermalConductivity(self, Tk=None, Tc=None):
        r"""
        Returns the thermal conductivity of Inconel600.

        Parameters
        ----------
        Tk : float, optional
            temperature in (K)
        Tc : float, optional
            Temperature in (C)

        Returns
        -------
        thermalCond : float
            thermal conductivity in W/m/C
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tc)
        thermalCond = 3.4938e-6 * Tc**2 + 1.3403e-2 * Tc + 14.572
        return thermalCond  # W/m-C

    def heatCapacity(self, Tk=None, Tc=None):
        r"""
        Returns the specific heat capacity of Inconel600.

        Parameters
        ----------
        Tk : float, optional
            Temperature in Kelvin.
        Tc : float, optional
            Temperature in degrees Celsius.

        Returns
        -------
        heatCapacity : float
            heat capacity in J/kg/C
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tc)
        heatCapacity = 7.4021e-6 * Tc**2 + 0.20573 * Tc + 441.3
        return heatCapacity  # J/kg-C

    def linearExpansionPercent(self, Tk=None, Tc=None):
        r"""
        Returns percent linear expansion of Inconel600.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        linExpPercent in %-m/m/C
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tc)
        linExpPercent = 3.722e-7 * Tc**2 + 1.303e-3 * Tc - 2.863e-2
        return linExpPercent

    def linearExpansion(self, Tk=None, Tc=None):
        r"""
        From http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf.

        Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by
        100 to convert from percent strain to strain, then differentiated with respect to
        temperature to find the correlation for instantaneous linear expansion.

        i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion
        correlation is 2*a/100*Tc + b/100

        2*(3.722e-7/100.0)*Tc + 1.303e-3/100.0

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        linExp in m/m/C
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tc)
        linExp = 7.444e-9 * Tc + 1.303e-5
        return linExp


================================================
FILE: armi/materials/inconel625.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inconel625.

The data in this file exists for testing and demonstration purposes only. Developers of ARMI
applications can refer to this file for a fully worked example of an ARMI material. And this
material has proven useful for testing. The data contained in this file should not be used in
production simulations.
""" from armi.materials.material import Material from armi.utils.units import getTc class Inconel625(Material): propertyValidTemperature = { "heat capacity": ((221.0, 1093.0), "C"), "linear expansion": ((21.0, 927.0), "C"), "linear expansion percent": ((21.0, 927.0), "C"), "thermal conductivity": ((21.0, 982.0), "C"), } references = { "mass fractions": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf", "density": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf", "linearExpansionPercent": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf", "linearExpansion": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf", "thermalConductivity": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf", "specific heat": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf", } refTempK = 294.15 def __init__(self): Material.__init__(self) self.refDens = 8.44 # g/cc # Only density measurement presented in the reference. # Presumed to be performed at 21C since this was the reference temperature for linear expansion measurements. def setDefaultMassFracs(self): massFracs = { "NI": 0.6188, "CR": 0.2150, "FE": 0.0250, "MO": 0.0900, "TA181": 0.0365, "C": 0.0005, "MN55": 0.0025, "SI": 0.0025, "P31": 0.0001, "S": 0.0001, "AL27": 0.0020, "TI": 0.0020, "CO59": 0.0050, } for element, massFrac in massFracs.items(): self.setMassFrac(element, massFrac) def thermalConductivity(self, Tk=None, Tc=None): r""" Returns the thermal conductivity of Inconel625. Parameters ---------- Tk : float, optional Temperature in Kelvin. Tc : float, optional Temperature in degrees Celsius. 
Returns ------- thermalCond : float thermal conductivity in W/m/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("thermal conductivity", Tc) thermalCond = 2.7474e-6 * Tc**2 + 0.012907 * Tc + 9.62532 return thermalCond # W/m-C def heatCapacity(self, Tk=None, Tc=None): """ Returns the specific heat capacity of Inconel625. Parameters ---------- Tk : float, optional Temperature in Kelvin. Tc : float, optional Temperature in degrees Celsius. Returns ------- heatCapacity : float heat capacity in J/kg/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("heat capacity", Tc) heatCapacity = -5.3777e-6 * Tc**2 + 0.25 * Tc + 404.26 return heatCapacity # J/kg-C def linearExpansionPercent(self, Tk=None, Tc=None): """ Returns percent linear expansion of Inconel625. Parameters ---------- Tk : float temperature in (K) Tc : float Temperature in (C) Returns ------- linExpPercent in %-m/m/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tc) linExpPercent = 5.083e-7 * Tc**2 + 1.125e-3 * Tc - 1.804e-2 return linExpPercent def linearExpansion(self, Tk=None, Tc=None): r""" From http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf. Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert from percent strain to strain, then differentiated with respect to temperature to find the correlation for instantaneous linear expansion. i.e. 
for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100 2*(5.083e-7/100.0)*Tc + 1.125e-3/100.0 Parameters ---------- Tk : float temperature in (K) Tc : float Temperature in (C) Returns ------- linExp in m/m/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("linear expansion", Tc) linExp = 1.0166e-8 * Tc + 1.125e-5 return linExp ================================================ FILE: armi/materials/inconel800.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Incoloy 800. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Material from armi.utils.units import getTc class Inconel800(Material): r""" Incoloy 800/800H (UNS N08800/N08810). .. [SM] Special Metals - Incoloy alloy 800 (https://www.specialmetals.com/assets/smc/documents/alloys/incoloy/incoloy-alloy-800.pdf) """ propertyValidTemperature = {"thermal expansion": ((20.0, 800.0), "C")} refTempK = 294.15 def setDefaultMassFracs(self): """ Incoloy 800H mass fractions. From [SM]_. """ self.setMassFrac("NI", 0.325) # ave. self.setMassFrac("CR", 0.21) # ave. self.setMassFrac("C", 0.00075) # ave. 
800H self.setMassFrac("MN", 0.015) # max. self.setMassFrac("S", 0.00015) # max. self.setMassFrac("SI", 0.01) # max. self.setMassFrac("CU", 0.0075) # max. self.setMassFrac("AL", 0.00375) # ave. self.setMassFrac("TI", 0.00375) # ave. self.setMassFrac("FE", 1.0 - sum(self.massFrac.values())) # balance, 0.395 min. self.refDens = 7.94 def linearExpansionPercent(self, Tk=None, Tc=None): """ Average thermal expansion dL/L. Used for computing hot dimensions. Parameters ---------- Tk : float temperature in (K) Tc : float Temperature in (C) Returns ------- %dLL(T) in m/m/K """ Tc = getTc(Tc, Tk) refTempC = getTc(Tk=self.refTempK) return 100.0 * self.meanCoefficientThermalExpansion(Tc=Tc) * (Tc - refTempC) def meanCoefficientThermalExpansion(self, Tk=None, Tc=None): """ Mean coefficient of thermal expansion for Incoloy 800. Third order polynomial fit of table 5 from [SM]_. Parameters ---------- Tk : float temperature in (K) Tc : float Temperature in (C) Returns ------- mean coefficient of thermal expansion in m/m/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("thermal expansion", Tc) return 2.52525e-14 * Tc**3 - 3.77814e-11 * Tc**2 + 2.06360e-08 * Tc + 1.28071e-05 ================================================ FILE: armi/materials/inconelPE16.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Inconel PE16. The data in this file exists for testing and demonstration purposes only. 
Developers of ARMI applications can refer to this file for a fully worked example of an ARMI
material. And this material has proven useful for testing. The data contained in this file
should not be used in production simulations.
"""
from armi import runLog
from armi.materials.material import SimpleSolid


class InconelPE16(SimpleSolid):
    """Nimonic alloy PE16; composition from the Special Metals datasheet."""

    references = {
        "mass fractions": r"http://www.specialmetals.com/assets/documents/alloys/nimonic/nimonic-alloy-pe16.pdf",
        "density": r"http://www.specialmetals.com/assets/documents/alloys/nimonic/nimonic-alloy-pe16.pdf",
    }

    def setDefaultMassFracs(self):
        # Use the parent reactor's nuclide directory when attached; otherwise fall
        # back to hard-coded natural abundances so the material works standalone.
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            ag107abundance = 0.51839001
            ag109abundance = 0.48160999
            b10abundance = 0.19799999
            b11abundance = 0.80199997
        else:
            ag107abundance = nb.byName["AG107"].abundance
            ag109abundance = nb.byName["AG109"].abundance
            b10abundance = nb.byName["B10"].abundance
            b11abundance = nb.byName["B11"].abundance

        massFracs = {
            "C": 0.0006,
            "SI": 0.0025,
            "MN55": 0.001,
            "S": 0.000075,
            # Trace silver and boron are split into isotopes by natural abundance.
            "AG107": 0.0000025 * ag107abundance,
            "AG109": 0.0000025 * ag109abundance,
            "AL27": 0.012,
            "B10": 0.000025 * b10abundance,
            "B11": 0.000025 * b11abundance,
            "BI209": 0.0000005,
            "CO59": 0.01,
            "CR": 0.165,
            "CU": 0.0025,
            "MO": 0.033,
            "NI": 0.425,
            "PB": 0.0000075,
            "TI": 0.012,
            "ZR": 0.0003,
        }
        massFracs["FE"] = 1 - sum(massFracs.values())  # balance*
        # *Reference to the 'balance' of a composition does not guarantee this is exclusively of the element mentioned
        # but that it predominates and others are present only in minimal quantities.
        for element, massFrac in massFracs.items():
            self.setMassFrac(element, massFrac)

    def density(self, Tk=None, Tc=None):
        # Room-temperature value only; warn (once) so users know it ignores temperature.
        runLog.warning(
            "PE16 mass density is not temperature dependent, using room temperature value",
            single=True,
            label="InconelPE16 density",
        )
        return 8.00


================================================
FILE: armi/materials/inconelX750.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inconel X750.

The data in this file exists for testing and demonstration purposes only. Developers of ARMI
applications can refer to this file for a fully worked example of an ARMI material. And this
material has proven useful for testing. The data contained in this file should not be used in
production simulations.
""" from armi.materials.material import Material from armi.utils.units import getTc class InconelX750(Material): propertyValidTemperature = { "heat capacity": ((-18.0, 1093.0), "C"), "linear expansion": ((21.1, 982.2), "C"), "linear expansion percent": ((21.1, 982.2), "C"), "thermal conductivity": ((-156.7, 871.1), "C"), } references = { "mass fractions": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf", "density": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf", "thermalConductivity": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf", "specific heat": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf", "linearExpansionPercent": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf", "linearExpansion": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf", } refTempK = 294.15 def __init__(self): Material.__init__(self) self.refDens = 8.28 # g/cc # Only density measurement presented in the reference. # Presumed to be performed at 21C since this was the reference temperature for linear # expansion measurements. def setDefaultMassFracs(self): massFracs = { "NI": 0.7180, "CR": 0.1550, "FE": 0.0700, "TI": 0.0250, "AL27": 0.0070, "NB93": 0.0095, "MN55": 0.0050, "SI": 0.0025, "S": 0.0001, "CU": 0.0025, "C": 0.0004, "CO59": 0.0050, } for element, massFrac in massFracs.items(): self.setMassFrac(element, massFrac) def thermalConductivity(self, Tk=None, Tc=None): r""" Returns the thermal conductivity of InconelX750. Parameters ---------- Tk : float, optional Temperature in Kelvin. Tc : float, optional Temperature in degrees Celsius. Returns ------- thermalCond : float thermal conductivity in W/m/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("thermal conductivity", Tc) thermalCond = 1.4835e-6 * Tc**2 + 1.2668e-2 * Tc + 11.632 return thermalCond # W/m-C def heatCapacity(self, Tk=None, Tc=None): r""" Returns the specific heat capacity of InconelX750. 
Parameters ---------- Tk : float, optional Temperature in Kelvin. Tc : float, optional Temperature in degrees Celsius. Returns ------- heatCapacity : float heat capacity in J/kg/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("heat capacity", Tc) heatCapacity = 9.2261e-7 * Tc**3 - 9.6368e-4 * Tc**2 + 4.7778e-1 * Tc + 420.55 return heatCapacity # J/kg-C def linearExpansionPercent(self, Tk=None, Tc=None): r""" Returns percent linear expansion of InconelX750. Parameters ---------- Tk : float temperature in (K) Tc : float Temperature in (C) Returns ------- linExpPercent in %-m/m/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tc) linExpPercent = 6.8378e-7 * Tc**2 + 1.056e-3 * Tc - 1.3161e-2 return linExpPercent def linearExpansion(self, Tk=None, Tc=None): r""" From http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf. Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert from percent strain to strain, then differentiated with respect to temperature to find the correlation for instantaneous linear expansion. i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100 2*(6.8378e-7/100.0)*Tc + 1.056e-3/100.0 Parameters ---------- Tk : float temperature in (K) Tc : float Temperature in (C) Returns ------- linExp in m/m/C """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("linear expansion", Tc) linExp = 1.36756e-8 * Tc + 1.056e-5 return linExp ================================================ FILE: armi/materials/lead.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lead.

The data in this file exists for testing and demonstration purposes only. Developers of ARMI
applications can refer to this file for a fully worked example of an ARMI material. And this
material has proven useful for testing. The data contained in this file should not be used in
production simulations.
"""
from armi.materials import material
from armi.utils.units import getTk


class Lead(material.Fluid):
    """Natural lead."""

    # Temperature ranges (K) over which each property correlation below is valid.
    propertyValidTemperature = {
        "density": ((600, 1700), "K"),
        "heat capacity": ((600, 1500), "K"),
        "volumetric expansion": ((600, 1700), "K"),
    }

    def volumetricExpansion(self, Tk=None, Tc=None):
        r"""Volumetric expansion inferred from density.

        NOT BASED ON MEASUREMENT.

        Done by V. Sobolev / J Nucl Mat 362 (2007) 235-247
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("volumetric expansion", Tk)
        return 1.0 / (9516.9 - Tk)

    def setDefaultMassFracs(self):
        """Mass fractions."""
        self.setMassFrac("PB", 1)

    def pseudoDensity(self, Tk=None, Tc=None):
        """Density in g/cc from V. Sobolev / J Nucl Mat 362 (2007) 235-247."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", Tk)
        return 11.367 - 0.0011944 * Tk  # pre-converted from kg/m^3 to g/cc

    def heatCapacity(self, Tk=None, Tc=None):
        """Heat capacity in J/kg/K from Sobolev."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tk)
        return 162.9 - 3.022e-2 * Tk + 8.341e-6 * Tk**2


================================================
FILE: armi/materials/leadBismuth.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lead-Bismuth eutectic.

This is a great coolant for superfast neutron reactors. It's heavy though.

The data in this file exists for testing and demonstration purposes only. Developers of ARMI
applications can refer to this file for a fully worked example of an ARMI material. And this
material has proven useful for testing. The data contained in this file should not be used in
production simulations.
""" import math from armi.materials import material from armi.utils.units import getTk class LeadBismuth(material.Fluid): """Lead bismuth eutectic.""" propertyValidTemperature = { "density": ((400, 1300), "K"), "dynamic visc": ((400, 1100), "K"), "heat capacity": ((400, 1100), "K"), "thermal conductivity": ((400, 1100), "K"), "volumetric expansion": ((400, 1300), "K"), } def setDefaultMassFracs(self): r"""Mass fractions.""" self.setMassFrac("PB", 0.445) self.setMassFrac("BI209", 0.555) def pseudoDensity(self, Tk=None, Tc=None): r"""Density in g/cc from V. sobolev/ J Nucl Mat 362 (2007) 235-247.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("density", Tk) return 11.096 - 0.0013236 * Tk # pre-converted from kg/m^3 to g/cc def dynamicVisc(self, Tk=None, Tc=None): r"""Dynamic viscosity in Pa-s from Sobolev. Accessed online at: http://www.oecd-nea.org/science/reports/2007/nea6195-handbook.html on 11/9/12 """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("dynamic visc", Tk) return 4.94e-4 * math.exp(754.1 / Tk) def heatCapacity(self, Tk=None, Tc=None): r"""Heat ccapacity in J/kg/K from Sobolev. Expected accuracy 5%.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("heat capacity", Tk) return 159 - 2.72e-2 * Tk + 7.12e-6 * Tk**2 def thermalConductivity(self, Tk=None, Tc=None): r"""Thermal conductivity in W/m/K from Sobolev. Accessed online at: http://www.oecd-nea.org/science/reports/2007/nea6195-handbook.html on 11/9/12 """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("thermal conductivity", Tk) return 2.45 * Tk / (86.334 + 0.0511 * Tk) def volumetricExpansion(self, Tk=None, Tc=None): r"""Volumetric expansion inferred from density. NOT BASED ON MEASUREMENT. Done by V. 
class Lithium(material.Fluid):
    """Liquid lithium; a simple constant-property model for testing/demonstration.

    Warning
    -------
    Whenever you irradiate lithium you will get tritium.
    """

    references = {"density": "Wikipedia"}
    enrichedNuclide = "LI6"

    def applyInputParams(self, LI_wt_frac=None, LI6_wt_frac=None, *args, **kwargs):
        """Apply LI6 enrichment material modifications.

        ``LI_wt_frac`` is a deprecated alias for ``LI6_wt_frac``; when both are
        given, ``LI6_wt_frac`` wins.
        """
        if LI_wt_frac is not None:
            runLog.warning(
                "The 'LI_wt_frac' material modification for Lithium will be deprecated."
                " Update your inputs to use 'LI6_wt_frac' instead.",
                single=True,
                label="Lithium applyInputParams 1",
            )
            if LI6_wt_frac is not None:
                runLog.warning(
                    f"Both 'LI_wt_frac' and 'LI6_wt_frac' are specified for {self}. 'LI6_wt_frac' will be used.",
                    single=True,
                    label="Lithium applyInputParams 2",
                )

        LI6_wt_frac = LI6_wt_frac or LI_wt_frac

        enrich = getFloat(LI6_wt_frac)
        # allow 0.0 to pass in!
        if enrich is not None:
            # BUG FIX: previously the raw (possibly string) input was passed to
            # adjustMassEnrichment instead of the float produced by getFloat,
            # which would fail the numeric range check downstream.
            self.adjustMassEnrichment(enrich)

    def pseudoDensity(self, Tk=None, Tc=None):
        r"""Density (g/cc) from Wikipedia.

        Will be liquid above 180C.

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.
        """
        return 0.512

    def setDefaultMassFracs(self):
        """Set natural-abundance Li mass fractions.

        Falls back to hard-coded natural abundances when the material is not
        attached to a parent with a nuclide directory.
        """
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            li6abundance = 0.0759
            li7abundance = 0.92410004
        else:
            li6abundance = nb.byName["LI6"].abundance
            li7abundance = nb.byName["LI7"].abundance
        self.setMassFrac("LI6", li6abundance)
        self.setMassFrac("LI7", li7abundance)

    def meltingPoint(self):
        """Melting point in K."""
        return 453.69  # K

    def boilingPoint(self):
        """Boiling point in K."""
        return 1615.0  # K

    def thermalConductivity(self, Tk=None, Tc=None):
        """Thermal conductivity in W/m-K (Wikipedia)."""
        return 84.8  # W/m-K

    def heatCapacity(self, Tk=None, Tc=None):
        """Heat capacity in J/kg/K."""
        return 3570.0
class Magnesium(material.Fluid):
    """Liquid magnesium (testing/demonstration material)."""

    propertyValidTemperature = {"density": ((923, 1390), "K")}

    def setDefaultMassFracs(self):
        """Pure natural magnesium."""
        self.setMassFrac("MG", 1.0)

    def pseudoDensity(self, Tk=None, Tc=None):
        """Returns mass density of magnesium in g/cm3.

        The Liquid Temperature Range, Density and Constants of Magnesium.
        P.J. McGonigal. Temple University 1961.

        Notes
        -----
        For Fluids, ARMI defines this 2D pseudodensity is the same as the usual
        3D physical density.
        """
        kelvin = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", kelvin)
        return 1.834 - 2.647e-4 * kelvin
def parentAwareDensityRedirect(f):
    """Wrap ``Material.density`` to warn people about potential problems.

    If a Material is linked to a Component, ``Material.density`` may produce
    different results from ``Component.density``. The component's density is
    considered the source of truth because it incorporates changes in volume,
    composition, and temperature in concert with the state of the reactor.
    """

    @functools.wraps(f)
    def inner(self: "Material", *args, **kwargs) -> float:
        if self.parent is not None:
            # Second-to-last stack frame is whatever called density().
            caller = traceback.extract_stack()[-2]
            label = f"Found call to Material.density in {caller.filename} at line {caller.lineno}"
            runLog.warning(
                f"{label}. Calls to Material.density when attached to a component have the potential to induce "
                "subtle differences as Component.density and Material.density can diverge.",
                single=True,
                label=label,
            )
        return f(self, *args, **kwargs)

    return inner
Attributes ---------- parent : Component The component to which this material belongs massFrac : dict Mass fractions for all nuclides in the material keyed on the nuclide symbols refDens : float A reference density used by some materials, for instance `SimpleSolid`\ s, during thermal expansion theoreticalDensityFrac : float Fraction of the material's density in reality, which is commonly different from 1.0 in solid materials due to the manufacturing process. Can often be set from the blueprints input via the TD_frac material modification. For programmatic setting, use `adjustTD()`. Notes ----- Specific material classes may have many more attributes specific to the implementation for that material. """ def __init_subclass__(cls) -> None: # Apply the density decorator to every subclass if not hasattr(cls.density, "__wrapped__"): cls.density = parentAwareDensityRedirect(cls.density) DATA_SOURCE = "ARMI" """Indication of where the material is loaded from (may be plugin name)""" references = {} """The literature references {property : citation}""" enrichedNuclide = None """Name of enriched nuclide to be interpreted by enrichment modification methods""" modelConst = {} """Constants that may be used in interpolation functions for property lookups""" propertyValidTemperature = {} """Dictionary of valid temperatures over which the property models are valid in the format 'Property Name': ((Temperature_Lower_Limit, Temperature_Upper_Limit), Temperature_Units)""" thermalScatteringLaws = () """A tuple of :py:class:`~armi.nucDirectory.thermalScattering.ThermalScatteringLabels` instances with information about thermal scattering.""" def __init__(self): self.parent = None self.massFrac = {} self.refDens = 0.0 self.theoreticalDensityFrac = 1.0 self.cached = {} self._backupCache = None self._name = self.__class__.__name__ # call subclass implementations self.setDefaultMassFracs() def __repr__(self): return f"<Material: {self._name}>" @property def name(self): """Getter for the 
private name attribute of this Material.""" return self._name @name.setter def name(self, nomen): """Setter for the private name attribute of this Material. Warning ------- Some code in ARMI expects the "name" of a material matches its class name. So you use this method at your own risk. See Also -------- armi.materials.resolveMaterialClassByName """ self._name = nomen def getName(self): """Duplicate of name property, kept for backwards compatibility.""" return self._name def getChildren(self, deep=False, generationNum=1, includeMaterials=False, predicate=None): """Return empty list, representing that materials have no children.""" return [] def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=True): """Return empty list, representing that this object has no children.""" return [] def backUp(self): """Create and store a backup of the state.""" self._backupCache = (self.cached, self._backupCache) self.cached = {} # don't .clear(), using reference above! def restoreBackup(self, paramsToApply): """Restore the parameters from previously created backup.""" self.cached, self._backupCache = self._backupCache def clearCache(self): """Clear the cache so all new values are recomputed.""" self.cached = {} def _getCached(self, name): """Obtain a value from the cache.""" return self.cached.get(name, None) def _setCache(self, name, val): """ Set a value in the cache. See Also -------- _getCached : returns a previously-cached value """ self.cached[name] = val def duplicate(self): """Copy without needing a deepcopy.""" m = self.__class__() m.massFrac = {} for key, val in self.massFrac.items(): m.massFrac[key] = val m.parent = self.parent m.refDens = self.refDens m.theoreticalDensityFrac = self.theoreticalDensityFrac return m def linearExpansion(self, Tk: float = None, Tc: float = None) -> float: """ The instantaneous linear expansion coefficient (dL/L)/dT. This is used for reactivity coefficients, etc. but will not affect density or dimensions. 
See Also -------- linearExpansionPercent : average linear thermal expansion to affect dimensions and density """ raise NotImplementedError(f"{self} does not have a linear expansion property defined") def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float: """ Average thermal expansion dL/L. Used for computing hot dimensions and density. Defaults to 0.0 for materials that don't expand. Parameters ---------- Tk : float temperature in (K) Tc : float Temperature in (C) Returns ------- dLL(T) in % m/m/K See Also -------- linearExpansion : handle instantaneous thermal expansion coefficients """ return 0.0 def linearExpansionFactor(self, Tc: float, T0: float) -> float: """ Return a dL/L factor relative to T0 instead of the material-dependent reference temperature. Notes ----- For a detailed description of the linear expansion methodology, see "thermalExpansion" in the documentation. Parameters ---------- Tc : float Current (hot) temperature in C T0 : float Cold temperature in C Returns ------- dLL: float The average thermal expansion between Tc and T0. If there is no dLL, it should return 0.0. See Also -------- linearExpansionPercent """ dLLhot = self.linearExpansionPercent(Tc=Tc) dLLcold = self.linearExpansionPercent(Tc=T0) return (dLLhot - dLLcold) / (100.0 + dLLcold) def getThermalExpansionDensityReduction(self, prevTempInC: float, newTempInC: float) -> float: """Return the factor required to update thermal expansion going from temperatureInC to temperatureInCNew.""" dLL = self.linearExpansionFactor(Tc=newTempInC, T0=prevTempInC) return 1.0 / (1 + dLL) ** 2 def setDefaultMassFracs(self): """Mass fractions.""" pass def setMassFrac(self, nucName: str, massFrac: float) -> None: """ Assigns the mass fraction of a nuclide within the material. Notes ----- This will try to convert the provided ``massFrac`` into a float for assignment. If the conversion cannot occur then an error will be thrown. 
""" try: massFrac = float(massFrac) except Exception as ee: raise TypeError( f"Error in converting the mass fraction of {massFrac} " f"for nuclide {nucName} in {self} to a float. " f"Exception: {ee}" ) if massFrac < 0.0 or massFrac > 1.0: raise ValueError(f"Mass fraction of {massFrac} for {nucName} is not between 0 and 1.") self.massFrac[nucName] = massFrac def applyInputParams(self): """Apply material-specific material input parameters.""" pass def adjustMassEnrichment(self, massEnrichment: float) -> None: """ Adjust the enrichment of the material. See Also -------- adjustMassFrac """ self.adjustMassFrac(self.enrichedNuclide, massEnrichment) def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None: """ Change the mass fraction of the specified nuclide. This adjusts the mass fraction of a specified nuclide relative to other nuclides of the same element. If there are no other nuclides within the element, then it is enriched relative to the entire material. For example, enriching U235 in UZr would enrich U235 relative to U238 and other naturally occurring uranium isotopes. Likewise, enriching ZR in UZr would enrich ZR relative to uranium. The method maintains a constant number of atoms, and adjusts ``refDens`` accordingly. Parameters ---------- nuclideName : str Name of nuclide to enrich. massFraction : float New mass fraction to achieve. """ if massFraction > 1.0 or massFraction < 0.0: raise ValueError(f"Cannot enrich to massFraction of {massFraction}, must be between 0 and 1") nucsNames = list(self.massFrac) # refDens could be zero, but cannot normalize to zero. 
    def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
        """
        Change the mass fraction of the specified nuclide.

        This adjusts the mass fraction of a specified nuclide relative to other
        nuclides of the same element. If there are no other nuclides within the
        element, then it is enriched relative to the entire material. For
        example, enriching U235 in UZr would enrich U235 relative to U238 and
        other naturally occurring uranium isotopes. Likewise, enriching ZR in
        UZr would enrich ZR relative to uranium.

        The method maintains a constant number of atoms, and adjusts ``refDens``
        accordingly.

        Parameters
        ----------
        nuclideName : str
            Name of nuclide to enrich.
        massFraction : float
            New mass fraction to achieve.
        """
        if massFraction > 1.0 or massFraction < 0.0:
            raise ValueError(f"Cannot enrich to massFraction of {massFraction}, must be between 0 and 1")

        nucsNames = list(self.massFrac)

        # refDens could be zero, but cannot normalize to zero.
        density = self.refDens or 1.0
        # per-nuclide partial mass densities (g/cc); ratios are what matter here
        massDensities = np.array([self.massFrac[nuc] for nuc in nucsNames]) * density
        atomicMasses = np.array([nuclideBases.byName[nuc].weight for nuc in nucsNames])  # in AMU
        molesPerCC = massDensities / atomicMasses  # item-wise division

        enrichedIndex = nucsNames.index(nuclideName)
        isoAndEles = nuclideBases.byName[nuclideName].element.nuclides
        # indices of this material's nuclides that share the enriched nuclide's element
        allIndicesUpdated = [nucsNames.index(nuc.name) for nuc in isoAndEles if nuc.name in self.massFrac]

        if len(allIndicesUpdated) == 1:
            if isinstance(
                nuclideBases.byName[nuclideName], nuclideBases.NaturalNuclideBase
            ) or nuclideBases.isMonoIsotopicElement(nuclideName):
                # If there are not any other nuclides, assume we are enriching an entire element.
                # Consequently, allIndicesUpdated is no longer the element's indices, but the materials indices
                allIndicesUpdated = range(len(nucsNames))
            else:
                raise ValueError(  # could be warning if problematic
                    f"Nuclide {nuclideName} was to be enriched in material {self}, but there were no other isotopes of "
                    "that element. Could not assume the enrichment of the entire element as there were other possible "
                    "isotopes that did not exist in this material."
                )

        if massFraction == 1.0:
            # fully enriched: zero out the siblings, keep only the target
            massDensities[allIndicesUpdated] = 0.0
            massDensities[enrichedIndex] = 1.0
        else:
            # mass of all the *other* updated nuclides (excludes the enriched one)
            balanceWeight = massDensities[allIndicesUpdated].sum() - massDensities[enrichedIndex]
            if balanceWeight == 0.0:
                onlyOneOtherFracToDetermine = len(allIndicesUpdated) == 2
                if not onlyOneOtherFracToDetermine:
                    raise ValueError(
                        f"Material {self} has too many masses set to zero. cannot enrich {nuclideName} to "
                        f"{massFraction}. Current mass fractions: {self.massFrac}"
                    )
                # massDensities get normalized later when conserving atoms; these are just ratios
                massDensities[allIndicesUpdated] = 1 - massFraction  # there is only one other.
                massDensities[enrichedIndex] = massFraction
            else:
                # derived from solving the following equation for enrichedWeight:
                # massFraction = enrichedWeight / (enrichedWeight + balanceWeight)
                massDensities[enrichedIndex] = massFraction * balanceWeight / (1 - massFraction)

        # ratio is set by here but atoms not conserved yet
        updatedNucsMolesPerCC = massDensities[allIndicesUpdated] / atomicMasses[allIndicesUpdated]
        # rescale so total moles of the updated element(s) are unchanged
        updatedNucsMolesPerCC *= molesPerCC[allIndicesUpdated].sum() / updatedNucsMolesPerCC.sum()  # conserve atoms
        molesPerCC[allIndicesUpdated] = updatedNucsMolesPerCC

        updatedMassDensities = molesPerCC * atomicMasses
        updatedDensity = updatedMassDensities.sum()
        massFracs = updatedMassDensities / updatedDensity
        if not np.isclose(sum(massFracs), 1.0, atol=1e-10):
            raise RuntimeError(f"The mass fractions {massFracs} in {self} do not sum to 1.0.")

        self.massFrac = {nuc: weight for nuc, weight in zip(nucsNames, massFracs)}

        if self.refDens != 0.0:  # don't update density if not assigned
            self.refDens = updatedDensity
massDensities[enrichedIndex] = massFraction else: # derived from solving the following equation for enrchedWeight: # massFraction = enrichedWeight / (enrichedWeight + balanceWeight) massDensities[enrichedIndex] = massFraction * balanceWeight / (1 - massFraction) # ratio is set by here but atoms not conserved yet updatedNucsMolesPerCC = massDensities[allIndicesUpdated] / atomicMasses[allIndicesUpdated] updatedNucsMolesPerCC *= molesPerCC[allIndicesUpdated].sum() / updatedNucsMolesPerCC.sum() # conserve atoms molesPerCC[allIndicesUpdated] = updatedNucsMolesPerCC updatedMassDensities = molesPerCC * atomicMasses updatedDensity = updatedMassDensities.sum() massFracs = updatedMassDensities / updatedDensity if not np.isclose(sum(massFracs), 1.0, atol=1e-10): raise RuntimeError(f"The mass fractions {massFracs} in {self} do not sum to 1.0.") self.massFrac = {nuc: weight for nuc, weight in zip(nucsNames, massFracs)} if self.refDens != 0.0: # don't update density if not assigned self.refDens = updatedDensity def volumetricExpansion(self, Tk=None, Tc=None): pass def getTemperatureAtDensity(self, targetDensity: float, tempGuessInC: float) -> float: """Get the temperature at which the perturbed density occurs (in Celsius).""" # 0 at tempertature of targetDensity densFunc = lambda temp: self.density(Tc=temp) - targetDensity # is a numpy array if fsolve is called tAtTargetDensity = float(fsolve(densFunc, tempGuessInC)[0]) return tAtTargetDensity @property def liquidPorosity(self) -> float: """Fraction of the material that is liquid void (unitless).""" return 0.0 if self.parent is None else self.parent.liquidPorosity @property def gasPorosity(self) -> float: """Fraction of the material that is gas void (unitless).""" return 0.0 if self.parent is None else self.parent.gasPorosity def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float: """ Return density that preserves mass when thermally expanded in 2D (in g/cm^3). 
Warning ------- This will not typically agree with ``Material.density()`` or ``Component.density()`` since this method only expands in 2 dimensions. Depending on your use of ``inputHeightsConsideredHot`` and ``Component.temperatureInC``, ``Material.psuedoDensity()`` may be a factor of (1+dLL) different than ``Material.density()`` or ``Component.density()``. In the case of fluids, density and pseudoDensity are the same as density is not driven by linear expansion, but rather an explicit density function dependent on temperature. ``Material.linearExpansionPercent()`` is zero for a fluid. See Also -------- density armi.reactor.components.component.Component.density """ Tk = getTk(Tc, Tk) dLL = self.linearExpansionPercent(Tk=Tk) if self.refDens is None: runLog.warning( f"{self} has no reference density", single=True, label="No refD " + self.getName(), ) self.refDens = 0.0 f = (1.0 + dLL / 100.0) ** 2 return self.refDens / f def pseudoDensityKgM3(self, Tk: float = None, Tc: float = None) -> float: """ Return density that preserves mass when thermally expanded in 2D in units of kg/m^3. See Also -------- density: Arguments are forwarded to the g/cc version """ return self.pseudoDensity(Tk, Tc) * 1000.0 def density(self, Tk: float = None, Tc: float = None) -> float: """ Return density that preserves mass when thermally expanded in 3D (in g/cm^3). Notes ----- Since refDens is specified at the material-dep reference case, we don't need to specify the reference temperature. It is already consistent with linearExpansion Percent. 
- p*(dp/p(T) + 1) =p*( p + dp(T) )/p = p + dp(T) = p(T) - dp/p = (1-(1 + dL/L)**3)/(1 + dL/L)**3 """ Tk = getTk(Tc, Tk) dLL = self.linearExpansionPercent(Tk=Tk) refD = self.refDens if refD is None: runLog.warning( "{0} has no reference density".format(self), single=True, label="No refD " + self.getName(), ) return None f = (1.0 + dLL / 100.0) ** 3 return refD / f def densityKgM3(self, Tk: float = None, Tc: float = None) -> float: """Return density that preserves mass when thermally expanded in 3D in units of kg/m^3. See Also -------- density: Arguments are forwarded to the g/cc version """ return self.density(Tk, Tc) * 1000.0 def getCorrosionRate(self, Tk: float = None, Tc: float = None) -> float: """Given a temperature, get the corrosion rate of the material (in microns/year).""" return 0.0 def yieldStrength(self, Tk: float = None, Tc: float = None) -> float: """Returns yield strength at given T in MPa.""" pass def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float: """Thermal conductivity for given T (in units of W/m/K).""" pass def getProperty(self, propName: str, Tk: float = None, Tc: float = None, **kwargs) -> float: """Gets properties in a way that caches them.""" Tk = getTk(Tc, Tk) cached = self._getCached(propName) if cached and cached[0] == Tk: # only use cached value if the temperature at which it is cached is the same. return cached[1] else: # go look it up from material properties. val = getattr(self, propName)(Tk=Tk, **kwargs) # cache only one value for each property. Prevents unbounded cache explosion. self._setCache(propName, (Tk, val)) return val def getMassFrac( self, nucName=None, normalized=True, expandFissionProducts=False, ): """ Return mass fraction of nucName. Parameters ---------- nucName : str, optional Nuclide name to return ('ZR','PU239',etc.) normalized : bool, optional Return the mass fraction such that the sum of all nuclides is sum to 1.0. 
Default True Notes ----- self.massFrac are modified mass fractions that may not add up to 1.0 (for instance, after a axial expansion, the modified mass fracs will sum to less than one. The alternative is to put a multiplier on the density. They're mathematically equivalent. This function returns the normalized mass fraction (they will add to 1.0) as long as the mass fracs are modified only by get and setMassFrac This is a performance-critical method as it is called millions of times in a typical ARMI run. See Also -------- setMassFrac """ return self.massFrac.get(nucName, 0.0) def clearMassFrac(self) -> None: """Zero out all nuclide mass fractions.""" self.massFrac.clear() def removeNucMassFrac(self, nuc: str) -> None: self.setMassFrac(nuc, 0) try: del self.massFrac[nuc] except KeyError: # the nuc isn't in the mass Frac vector pass def checkPropertyTempRange(self, label, val): """Checks if the given property / value combination fall between the min and max valid temperatures provided in the propertyValidTemperature object. Parameters ---------- label : str The name of the function or property that is being checked. val : float The value to check whether it is between minT and maxT. Notes ----- This was designed as a convenience method for ``checkTempRange``. """ (minT, maxT) = self.propertyValidTemperature[label][0] self.checkTempRange(minT, maxT, val, label) def checkTempRange(self, minT, maxT, val, label=""): """ Checks if the given temperature (val) is between the minT and maxT temperature limits supplied. Label identifies what material type or element is being evaluated in the check. Parameters ---------- minT, maxT : float The minimum and maximum values that val is allowed to have. val : float The value to check whether it is between minT and maxT. label : str The name of the function or property that is being checked. 
""" if not minT <= val <= maxT: msg = "Temperature {0} out of range ({1} to {2}) for {3} {4}".format(val, minT, maxT, self.name, label) if FAIL_ON_RANGE or np.isnan(val): runLog.error(msg) raise ValueError(msg) else: runLog.warning( msg, single=True, label=f"T out of bounds for {self.name} {label}", ) def densityTimesHeatCapacity(self, Tk: float = None, Tc: float = None) -> float: """ Return heat capacity * density at a temperature. Parameters ---------- Tk : float, optional Temperature in Kelvin. Tc : float, optional Temperature in degrees Celsius Returns ------- rhoCP : float Calculated value for the HT9 density* heat capacity unit (J/m^3-K) """ Tc = getTc(Tc, Tk) rhoCp = self.density(Tc=Tc) * 1000.0 * self.heatCapacity(Tc=Tc) return rhoCp def getNuclides(self): """ Return nuclides in the component that contains this Material. Notes ----- This method is the only reason Materials still have self.parent. Essentially, we want to change that, but right now the logic for finding nuclides in the Reactor is recursive and considers Materials first. The bulk of the work in finally removing this method will come in downstream repos, where users have fully embraced this method and call it directly in many, many places. Please do not use this method, as it is being deprecated. 
""" warnings.warn("Material.getNuclides is being deprecated.", DeprecationWarning) return self.parent.getNuclides() def getTempChangeForDensityChange(self, Tc: float, densityFrac: float, quiet: bool = True) -> float: """Return a temperature difference for a given density perturbation.""" linearExpansion = self.linearExpansion(Tc=Tc) linearChange = densityFrac ** (-1.0 / 3.0) - 1.0 deltaT = linearChange / linearExpansion if not quiet: runLog.info( f"The linear expansion for {self.getName()} at initial temperature of {Tc} C is " f"{linearExpansion}.\nA change in density of {(densityFrac - 1.0) * 100.0} percent " "at would require a change in temperature of {deltaT} C.", single=True, ) return deltaT def heatCapacity(self, Tk=None, Tc=None): """Returns heat capacity in units of J/kg/C.""" raise NotImplementedError(f"Material {type(self).__name__} does not implement heatCapacity") def getTD(self): """Get the fraction of theoretical density for this material.""" return self.theoreticalDensityFrac def adjustTD(self, val): """Set or change the fraction of theoretical density for this material.""" self.theoreticalDensityFrac = val self.clearCache() class Fluid(Material): """A material that fills its container. Could also be a gas.""" def __init_subclass__(cls): # Undo the parent-aware density wrapping. Fluids do not expand in the same way solids, so # Fluid.density(T) is correct. This does not hold for solids because they thermally expand. if hasattr(cls.density, "__wrapped__"): cls.density = cls.density.__wrapped__ def getThermalExpansionDensityReduction(self, prevTempInC, newTempInC): """Return the factor required to update thermal expansion going from one temperature (in Celsius) to a new temperature. 
""" rho0 = self.pseudoDensity(Tc=prevTempInC) if not rho0: return 1.0 rho1 = self.pseudoDensity(Tc=newTempInC) return rho1 / rho0 def linearExpansion(self, Tk=None, Tc=None): """For void, lets just not allow temperature changes to change dimensions since it is a liquid it will fill its space. .. impl:: Fluid materials are not thermally expandable. :id: I_ARMI_MAT_FLUID :implements: R_ARMI_MAT_FLUID ARMI does not model thermal expansion of fluids. The ``Fluid`` superclass therefore sets the thermal expansion coefficient to zero. All fluids subclassing the ``Fluid`` material will inherit this method which sets the linear expansion coefficient to zero at all temperatures. """ return 0.0 def getTempChangeForDensityChange(self, Tc: float, densityFrac: float, quiet: bool = True) -> float: """Return a temperature difference for a given density perturbation.""" currentDensity = self.pseudoDensity(Tc=Tc) perturbedDensity = currentDensity * densityFrac tAtPerturbedDensity = self.getTemperatureAtDensity(perturbedDensity, Tc) deltaT = tAtPerturbedDensity - Tc if not quiet: runLog.info( "A change in density of {} percent in {} at an initial temperature of {} C would " "require a change in temperature of {} C.".format( (densityFrac - 1.0) * 100.0, self.getName(), Tc, deltaT ), single=True, ) return deltaT def density(self, Tk=None, Tc=None): """ Return the density at the specified temperature for 3D expansion (in g/cm^3). Notes ----- For fluids, there is no such thing as 2D expansion so pseudoDensity() is already 3D. """ return self.pseudoDensity(Tk=Tk, Tc=Tc) class SimpleSolid(Material): """ Base material for a simple material that primarily defines density. See Also -------- armi.materials.pseudoDensity: armi.materials.density: """ refTempK = 300 def __init__(self): Material.__init__(self) self.refDens = self.density(Tk=self.refTempK) def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float: """ Average thermal expansion dL/L. 
Used for computing hot dimensions and density. Defaults to 0.0 for materials that don't expand. Parameters ---------- Tk : float temperature in (K) Tc : float Temperature in (C) Returns ------- dLL(T) in % m/m/K Notes ----- This only method only works for Simple Solid Materials which assumes the density function returns 'free expansion' density as a function temperature """ density1 = self.density(Tk=self.refTempK) density2 = self.density(Tk=Tk, Tc=Tc) if density1 == density2: return 0 else: return 100 * ((density1 / density2) ** (1.0 / 3.0) - 1) def density(self, Tk: float = None, Tc: float = None) -> float: """Material density (in g/cm^3).""" return 0.0 def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float: """ The same method as the parent class, but with the ability to apply a non-unity theoretical density (in g/cm^3). """ return Material.pseudoDensity(self, Tk=Tk, Tc=Tc) * self.getTD() class FuelMaterial(Material): """ Material that is considered a nuclear fuel. All this really does is enable the special class 1/class 2 isotopics input option. """ class1_wt_frac = None class1_custom_isotopics = None class2_custom_isotopics = None def applyInputParams( self, class1_custom_isotopics=None, class2_custom_isotopics=None, class1_wt_frac=None, customIsotopics=None, ): """Apply optional class 1/class 2 custom enrichment input. Notes ----- This is often overridden to insert customized material modification parameters but then this parent should always be called at the end in case users want to use this style of custom input. This is only applied to materials considered fuel so we don't apply these kinds of parameters to coolants and structural material, which are often not parameterized with any kind of enrichment. """ if class1_wt_frac: if not 0 <= class1_wt_frac <= 1: raise ValueError( f"class1_wt_frac must be between 0 and 1 (inclusive). Right now it is {class1_wt_frac}." 
) validIsotopics = customIsotopics.keys() errMsg = "{} '{}' not found in the defined custom isotopics." if class1_custom_isotopics not in validIsotopics: raise KeyError(errMsg.format("class1_custom_isotopics", class1_custom_isotopics)) if class2_custom_isotopics not in validIsotopics: raise KeyError(errMsg.format("class2_custom_isotopics", class2_custom_isotopics)) if class1_custom_isotopics == class2_custom_isotopics: runLog.warning( "The custom isotopics specified for the class1/class2 materials are both " f"'{class1_custom_isotopics}'. You are not actually blending anything!" ) self.class1_wt_frac = class1_wt_frac self.class1_custom_isotopics = class1_custom_isotopics self.class2_custom_isotopics = class2_custom_isotopics self._applyIsotopicsMixFromCustomIsotopicsInput(customIsotopics) def _applyIsotopicsMixFromCustomIsotopicsInput(self, customIsotopics): """ Apply a Class 1/Class 2 mixture of custom isotopics at input. Only adjust heavy metal. This may also be needed for building charge assemblies during reprocessing, but will take input from the SFP rather than from the input external feeds. 
""" class1Isotopics = customIsotopics[self.class1_custom_isotopics] class2Isotopics = customIsotopics[self.class2_custom_isotopics] densityTools.applyIsotopicsMix(self, class1Isotopics, class2Isotopics) def duplicate(self): """Copy without needing a deepcopy.""" m = self.__class__() m.massFrac = {} for key, val in self.massFrac.items(): m.massFrac[key] = val m.parent = self.parent m.refDens = self.refDens m.theoreticalDensityFrac = self.theoreticalDensityFrac m.class1_wt_frac = self.class1_wt_frac m.class1_custom_isotopics = self.class1_custom_isotopics m.class2_custom_isotopics = self.class2_custom_isotopics return m ================================================ FILE: armi/materials/mgO.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Magnesium Oxide. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Material from armi.utils.units import getTc, getTk class MgO(Material): """MagnesiumOxide.""" propertyValidTemperature = { "density": ((273, 1273), "K"), "linear expansion percent": ((273.15, 1273.15), "K"), } def __init__(self): Material.__init__(self) """Same reference as linear expansion. Table II. 
Reference density is from Wolfram Alpha At STP (273 K) """ self.refDens = 3.58 def setDefaultMassFracs(self): """Mass fractions.""" self.setMassFrac("MG", 0.603035897) self.setMassFrac("O16", 0.396964103) def linearExpansionPercent(self, Tk=None, Tc=None): """The coefficient of expansion of magnesium oxide. Milo A. Durand Journal of Applied Physics 7, 297 (1936); doi: 10.1063/1.174539 This is based on a 3rd order polynomial fit of the data in Table I. """ Tc = getTc(Tc, Tk) Tk = getTk(Tc=Tc) self.checkPropertyTempRange("linear expansion percent", Tk) return 1.0489e-5 * Tc + 6.0458e-9 * Tc**2 - 2.6875e-12 * Tc**3 ================================================ FILE: armi/materials/mixture.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Homogenized mixture material.""" from armi import materials class _Mixture(materials.Material): """ Homogenized mixture of materials. :meta public: .. warning:: This class is meant to be used for homogenized block models for neutronics and other physics solvers. Notes ----- This material class can be used to represent a homognized mixture of materials within a block. This would be done for performance reasons. It allows ARMI to avoid copying and carrying around the detailed, explicit representation of components within a block to be used in a physics solver when that solver only needs to know the homogenized number density within a block. 
See Also -------- armi.reactor.blocks.HexBlock.createHomogenizedCopy """ ================================================ FILE: armi/materials/molybdenum.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Molybdenum. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import SimpleSolid class Molybdenum(SimpleSolid): def setDefaultMassFracs(self): """Moly mass fractions.""" self.setMassFrac("MO", 1.0) def density(self, Tk=None, Tc=None): return 10.28 # g/cc ================================================ FILE: armi/materials/mox.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Mixed-oxide (MOX) ceramic fuel. A definitive source for these properties is [#ornltm20002]_. .. [#ornltm20002] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. S.G. Popov, et.al. Oak Ridge National Laboratory. ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi import runLog from armi.materials import material from armi.materials.uraniumOxide import UraniumOxide from armi.nucDirectory import nucDir class MOX(UraniumOxide): """ MOX fuel. Some parameters (density, thermal conductivity, etc) are inherited from UraniumOxide. These parameters are sufficiently equivalent to pure UO2 in the literature to leave them unchanged. Specific MOX mixtures may be defined in blueprints under custom isotopics. 
""" enrichedNuclide = "U235" def __init__(self): UraniumOxide.__init__(self) def applyInputParams(self, U235_wt_frac=None, TD_frac=None, mass_frac_PU02=None, *args, **kwargs): if U235_wt_frac is not None: self.adjustMassEnrichment(U235_wt_frac) td = TD_frac if td is not None: if td > 1.0: runLog.warning( "Theoretical density frac for {0} is {1}, which is >1".format(self, td), single=True, label="Large theoretical density", ) elif td == 0: runLog.warning( "Theoretical density frac for {self} is zero!", single=True, label="Zero theoretical density", ) self.adjustTD(td) if mass_frac_PU02 is not None: self.setMassFracPuO2(mass_frac_PU02) material.FuelMaterial.applyInputParams(self, *args, **kwargs) def getMassFracPuO2(self): massFracPu = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol="PU")]) massFracU = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol="U")]) return massFracPu / (massFracPu + massFracU) def setMassFracPuO2(self, massFracPuO2): massFracPu = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol="PU")]) massFracU = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol="U")]) total = massFracU + massFracPu for Pu in nucDir.getNuclideNames("PU"): self.setMassFrac(Pu, self.getMassFrac(Pu) / massFracPu * massFracPuO2 * total) for U in nucDir.getNuclideNames("PU"): self.setMassFrac(U, self.getMassFrac(U) / massFracU * (1 - massFracPuO2) * total) def getMolFracPuO2(self): molweightUO2 = 270.02771 # Approximation, does not include variance due to isotopes molweightPuO2 = 275.9988 # Approximation, does not include variance due to isotopes massFracPuO2 = self.getMassFracPuO2() massFracUO2 = 1 - massFracPuO2 return massFracPuO2 * molweightUO2 / massFracUO2 / molweightPuO2 def setDefaultMassFracs(self): r"""UO2 + PuO2 mixture mass fractions. 
Pu238: 238.0495599 g/mol Pu239: 239.0521634 g/mol Pu240: 240.0538135 g/mol Pu241: 241.0568515 g/mol Pu242: 242.0587426 g/mol Am241: 241.0568291 g/mol U-235: 235.0439299 g/mol U-238: 238.0507882 g/mol Oxygen: 15.9994 g/mol JOYO MOX mass fraction calculation: Pu mixture: 0.1% Pu238 + 76.82% Pu239 + 19.23% Pu240 + 2.66% Pu241 + 0.55% Pu242 + 0.64% Am241 Pu atomic mass: 239.326469 g/mol U mixture: 22.99% U-235 + 77.01% U-238 U atomic mass: 237.359511 g/mol UPu mixture: 17.7% Pu mixture + 82.3% U mixture UPu atomic mass: 237.70766 g/mol 2 moles of oxygen/1 mole of UPu grams of UPu = 237.70766 g/mol* 1 mol = 237.70766 g grams of oxygen= 15.9994 g/mol * 2 mol = 31.9988 g total= 269.70646 g. Mass fraction UPu : 237.70766/269.70646 = 0.881357 Mass fraction Pu mixture: 0.177*237.70766/269.70646 = 0.156000 Mass fraction U mixture: 0.823*237.70766/269.70646 = 0.725356 Mass fraction Pu238: 0.001*42.074256/269.70646 = 0.000156 Mass fraction Pu239: 0.7682*42.074256/269.70646 = 0.119839 Mass fraction Pu240: 0.1923*42.074256/269.70646 = 0.029999 Mass fraction Pu241: 0.0266*42.074256/269.70646 = 0.004150 Mass fraction Pu242: 0.0055*42.074256/269.70646 = 0.000858 Mass fraction Am241: 0.0064*42.074256/269.70646 = 0.000998 Mass fraction U-235: 0.2299*195.633404/269.70646 = 0.166759 Mass fraction U-238: 0.7701*195.633404/269.70646 = 0.558597 Mass fraction O: 31.9988/269.70646 = 0.118643 """ self.setMassFrac("PU238", 0.000156) self.setMassFrac("PU239", 0.119839) self.setMassFrac("PU240", 0.029999) self.setMassFrac("PU241", 0.004150) self.setMassFrac("PU242", 0.000858) self.setMassFrac("AM241", 0.000998) self.setMassFrac("U235", 0.166759) self.setMassFrac("U238", 0.558597) self.setMassFrac("O16", 0.118643) def meltingPoint(self): """ Melting point in K - ORNL/TM-2000/351. Melting point is a function of PuO2 mol fraction. 
The liquidus Tl and solidus Ts temperatures in K are given by: Tl(y) = 3120.0 - 388.1*y - 30.4*y^2 Ts(y) = 3120.0 - 655.3*y + 336.4*y^2 - 99.9*y^3 where y is the mole fraction of PuO2 This function returns the solidus temperature. Does not take into account changes in the melting temp due to burnup. """ molFracPuO2 = self.getMolFracPuO2() return 3120.0 - 655.3 * molFracPuO2 + 336.4 * molFracPuO2**2 - 99.9 * molFracPuO2**3 ================================================ FILE: armi/materials/nZ.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Niobium Zirconium Alloy. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import SimpleSolid class NZ(SimpleSolid): def setDefaultMassFracs(self): self.setMassFrac("NB93", 0.99) self.setMassFrac("ZR", 0.01) def density(self, Tk=None, Tc=None): return 8.66 # g/cc ================================================ FILE: armi/materials/potassium.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Potassium. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials import material from armi.utils.units import getTc, getTk class Potassium(material.Fluid): """ Molten pure Potassium. From Foust, O.J. Sodium-NaK Engineering Handbook Vol. 1. New York: Gordon and Breach, 1972. """ propertyValidTemperature = {"density": ((63.2, 1250), "C")} def pseudoDensity(self, Tk=None, Tc=None): r""" Calculates the density of molten Potassium in g/cc. From Foust, O.J. Sodium-NaK Engineering Handbook Vol. 1. New York: Gordon and Breach, 1972. Page 18. Notes ----- In ARMI, we define pseudoDensity() and density() as the same for Fluids. """ Tc = getTc(Tc, Tk) Tk = getTk(Tc=Tc) self.checkPropertyTempRange("density", Tc) return 0.8415 - 2.172e-4 * Tc - 2.70e-8 * Tc**2 + 4.77e-12 * Tc**3 ================================================ FILE: armi/materials/scandiumOxide.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Scandium Oxide. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import Material from armi.utils.units import getTk class Sc2O3(Material): propertyValidTemperature = {"linear expansion percent": ((273.15, 1573.15), "K")} def __init__(self): Material.__init__(self) """ https://en.wikipedia.org/wiki/Scandium_oxide """ self.refDens = 3.86 def setDefaultMassFracs(self): self.setMassFrac("SC45", 0.6520) self.setMassFrac("O16", 0.3480) def linearExpansionPercent(self, Tk=None, Tc=None): """ Return the linear expansion percent for Scandium Oxide (Scandia). Notes ----- From Table 4 of "Thermal Expansion and Phase Inversion of Rare-Earth Oxides. """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tk) return 2.6045e-07 * Tk**2 + 4.6374e-04 * Tk - 1.4696e-01 ================================================ FILE: armi/materials/siC.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Silicon Carbide.

The data in this file exists for testing and demonstration purposes only. Developers of ARMI
applications can refer to this file for a fully worked example of an ARMI material. And this
material has proven useful for testing. The data contained in this file should not be used in
production simulations.
"""

import math

from armi.materials.material import Material
from armi.nucDirectory import thermalScattering as tsl
from armi.utils.units import getTc


class SiC(Material):
    """Silicon Carbide."""

    thermalScatteringLaws = (tsl.fromNameAndCompound("C", tsl.SIC), tsl.fromNameAndCompound("SI", tsl.SIC))

    # Source citations for each property, keyed by property name.
    references = {
        "heat capacity": ["Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997"],
        "cumulative linear expansion": [
            "Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997"
        ],
        "density": ["Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997"],
        "thermal conductivity": ["Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997"],
    }

    # Human-readable forms of the correlations implemented by the methods below.
    # NOTE(review): the "density" equation here carries a 1.0E3 factor (kg/m^3) while
    # pseudoDensity() below returns g/cc without that factor — presumably this dict is
    # descriptive metadata in the units given by propertyUnits; confirm before relying on it.
    propertyEquation = {
        "heat capacity": "1110 + 0.15*Tc - 425*math.exp(-0.003*Tc)",
        "cumulative linear expansion": "(4.22 + 8.33E-4*Tc-3.51*math.exp(-0.00527*Tc))*1.0E-6",
        "density": "(rho0*(1 + cA*(Tc - Tc0))**(-3))*1.0E3",
        "thermal conductivity": "(52000*math.exp(-1.24E-5*Tc))/(Tc+437)",
    }

    propertyUnits = {
        "melting point": "K",
        "heat capacity": "J kg^-1 K^-1",
        "cumulative linear expansion": "K^-1",
        "density": "kg m^-3",
        "thermal conductivity": "W m^-1 K^-1",
    }

    propertyNotes = {}

    # Valid temperature ranges (all specified in Celsius) for the correlations.
    propertyValidTemperature = {
        "cumulative linear expansion": ((0, 1500), "C"),
        "density": ((0, 1500), "C"),
        "heat capacity": ((0, 2000), "C"),
        "thermal conductivity": ((0, 2000), "C"),
    }

    refTempK = 298.15

    def setDefaultMassFracs(self):
        """Stoichiometric SiC mass fractions and reference density (g/cc)."""
        self.setMassFrac("C", 0.299547726)
        self.setMassFrac("SI", 0.700452274)
        self.refDens = 3.21

    def meltingPoint(self):
        """Melting point in K."""
        return 3003.0

    def heatCapacity(self, Tc=None, Tk=None):
        """Specific heat capacity in J/kg-K (Munro 1997)."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tc)
        return 1110 + 0.15 * Tc - 425 * math.exp(-0.003 * Tc)

    def cumulativeLinearExpansion(self, Tk=None, Tc=None):
        """Cumulative linear expansion coefficient in 1/K (Munro 1997)."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("cumulative linear expansion", Tc)
        return (4.22 + 8.33e-4 * Tc - 3.51 * math.exp(-0.00527 * Tc)) * 1.0e-6

    def pseudoDensity(self, Tc=None, Tk=None):
        """Density in g/cc, reduced from the 0 C value by thermal expansion (Munro 1997)."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("density", Tc)
        rho0 = 3.16  # g/cc at Tc0
        Tc0 = 0.0
        cA = self.cumulativeLinearExpansion(Tc=Tc)
        # Volume grows as (1 + linear strain)^3, so density shrinks by the inverse cube.
        return rho0 * (1 + cA * (Tc - Tc0)) ** (-3)

    def thermalConductivity(self, Tc=None, Tk=None):
        """Thermal conductivity in W/m-K (Munro 1997)."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tc)
        return (52000 * math.exp(-1.24e-5 * Tc)) / (Tc + 437)


================================================ FILE: armi/materials/sodium.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sodium material.

The data in this file exists for testing and demonstration purposes only. Developers of ARMI
applications can refer to this file for a fully worked example of an ARMI material. And this
material has proven useful for testing. The data contained in this file should not be used in
production simulations.
"""

from armi import runLog
from armi.materials import material
from armi.utils.units import getTc, getTk


class Sodium(material.Fluid):
    """
    Simplified sodium material.

    .. warning:: This is an academic-quality material. Bring in user-provided material
        properties through plugins as necessary.

    Most info from [ANL-RE-95-2]_

    .. [ANL-RE-95-2] Fink, J.K., and Leibowitz, L. Thermodynamic and transport properties of
        sodium liquid and vapor. United States: N. p., 1995. Web. doi:10.2172/94649.
        https://www.osti.gov/biblio/94649-gXNdLI/webviewable/
    """

    propertyValidTemperature = {
        "density": ((97.85, 2230.55), "C"),
        "enthalpy": ((371.0, 2000.0), "K"),
        "thermal conductivity": ((371.5, 1500), "K"),
    }

    def setDefaultMassFracs(self):
        """It's just sodium."""
        self.setMassFrac("NA", 1.0)
        self.refDens = 0.968

    def pseudoDensity(self, Tk=None, Tc=None):
        """
        Returns density of Sodium in g/cc.

        This is from 1.3.1 in [ANL-RE-95-2]_.

        Parameters
        ----------
        Tk : float, optional
            temperature in degrees Kelvin
        Tc : float, optional
            temperature in degrees Celsius

        Returns
        -------
        density : float
            mass density in g/cc
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("density", Tc)
        if (Tc is not None) and (Tc <= 97.72):
            # Below the melting point; the liquid correlation is extrapolated.
            runLog.warning(
                "Sodium frozen at Tc: {0}".format(Tc),
                label="Sodium frozen at Tc={0}".format(Tc),
                single=True,
            )
        critDens = 219  # critical density
        f = 275.32
        g = 511.58
        h = 0.5
        Tcrit = 2503.7  # critical temperature
        return (
            critDens + f * (1 - (Tc + 273.15) / Tcrit) + g * (1 - (Tc + 273.15) / Tcrit) ** h
        ) / 1000.0  # convert from kg/m^3 to g/cc.

    def specificVolumeLiquid(self, Tk=None, Tc=None):
        """Returns the liquid specific volume in m^3/kg of this material given Tk in K or Tc in C."""
        # pseudoDensity is g/cc; multiply by 1000 for kg/m^3 before inverting.
        return 1 / (1000.0 * self.pseudoDensity(Tk, Tc))

    def enthalpy(self, Tk=None, Tc=None):
        """
        Return enthalpy in J/kg.

        From [ANL-RE-95-2]_, Table 1.1-2.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("enthalpy", Tk)
        enthalpy = -365.77 + 1.6582 * Tk - 4.2395e-4 * Tk**2 + 1.4847e-7 * Tk**3 + 2992.6 / Tk
        enthalpy = enthalpy * 1000  # convert from kJ/kg to J/kg
        return enthalpy

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Returns thermal conductivity of Sodium.

        From [ANL-RE-95-2]_, Table 2.1-2

        Parameters
        ----------
        Tk : float, optional
            temperature in degrees Kelvin
        Tc : float, optional
            temperature in degrees Celsius

        Returns
        -------
        thermalConductivity : float
            thermal conductivity of Sodium (W/m-K)
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        thermalConductivity = 124.67 - 0.11381 * Tk + 5.5226e-5 * Tk**2 - 1.1842e-8 * Tk**3
        return thermalConductivity


================================================ FILE: armi/materials/sodiumChloride.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Sodium Chloride salt. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. Notes ----- This is a very simple description of this material. """ from armi.materials.material import SimpleSolid from armi.utils.units import getTk class NaCl(SimpleSolid): def setDefaultMassFracs(self): self.setMassFrac("NA23", 0.3934) self.setMassFrac("CL35", 0.4596) self.setMassFrac("CL37", 0.1470) def density(self, Tk=None, Tc=None): """ Return the density of Sodium Chloride. Notes ----- From equation 10 of Thermophysical Properties of NaCl NaBr and NaF by y-ray attenuation technique """ Tk = getTk(Tc, Tk) return -3.130e-04 * Tk + 2.23 ================================================ FILE: armi/materials/sulfur.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Sulfur. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi import runLog from armi.materials import material from armi.utils.mathematics import linearInterpolation from armi.utils.units import getTk class Sulfur(material.Fluid): propertyValidTemperature = { "density": ((334, 430), "K"), "volumetric expansion": ((334, 430), "K"), } def applyInputParams(self, sulfur_density_frac=None, TD_frac=None): if sulfur_density_frac is not None: runLog.warning( "The 'sulfur_density_frac' material modification for Sulfur " "will be deprecated. Update your inputs to use 'TD_frac' instead.", single=True, ) if TD_frac is not None: runLog.warning( f"Both 'sulfur_density_frac' and 'TD_frac' are specified for {self}. 'TD_frac' will be used." ) else: self.updateTD(sulfur_density_frac) if TD_frac is not None: self.updateTD(TD_frac) def updateTD(self, TD): self.fullDensFrac = float(TD) def setDefaultMassFracs(self): """Mass fractions.""" self.fullDensFrac = 1.0 self.setMassFrac("S32", 0.9493) self.setMassFrac("S33", 0.0076) self.setMassFrac("S34", 0.0429) self.setMassFrac("S36", 0.002) def pseudoDensity(self, Tk=None, Tc=None): """Density of Liquid Sulfur. Ref: P. Espeau, R. Ceolin "density of molten sulfur in the 334-508K range" Notes ----- In ARMI, we define pseudoDensity() and density() as the same for Fluids. """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("density", Tk) return (2.18835 - 0.00098187 * Tk) * (self.fullDensFrac) def volumetricExpansion(self, Tk=None, Tc=None): """ This is just a two-point interpolation. P. Espeau, R. 
Ceolin "density of molten sulfur in the 334-508K range" """ Tk = getTk(Tc, Tk) (Tmin, Tmax) = self.propertyValidTemperature["volumetric expansion"][0] self.checkPropertyTempRange("volumetric expansion", Tk) return linearInterpolation(x0=334, y0=5.28e-4, x1=430, y1=5.56e-4, targetX=Tk) ================================================ FILE: armi/materials/tZM.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TZM. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from numpy import interp from armi.materials.material import Material from armi.utils.units import getTc class TZM(Material): propertyValidTemperature = {"linear expansion percent": ((21.11, 1382.22), "C")} references = { "linear expansion percent": "Report on the Mechanical and Thermal Properties of Tungsten \ and TZM Sheet Produced in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau \ of Naval Weapons Contract No. 
N600(19)-59530, Southern Research Institute" } temperatureC = [ 21.11, 456.11, 574.44, 702.22, 840.56, 846.11, 948.89, 1023.89, 1146.11, 1287.78, 1382.22, ] percentThermalExpansion = [ 0, 1.60e-01, 2.03e-01, 2.53e-01, 3.03e-01, 3.03e-01, 3.42e-01, 3.66e-01, 4.21e-01, 4.68e-01, 5.04e-01, ] def __init__(self): Material.__init__(self) self.refDens = 10.16 def setDefaultMassFracs(self): self.setMassFrac("C", 2.50749e-05) self.setMassFrac("TI", 0.002502504) self.setMassFrac("ZR", 0.000761199) self.setMassFrac("MO", 0.996711222) def linearExpansionPercent(self, Tk=None, Tc=None): """ Return linear expansion in %dL/L from interpolation of tabular data. This function is used to expand a material from its reference temperature (21C) to a particular hot temperature. Parameters ---------- Tk : float temperature in K Tc : float temperature in C Source: Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons Contract No. N600(19)-59530, 1966 Southern Research Institute. See Table viii-b, Appendix B, page 181. """ Tc = getTc(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tc) return interp(Tc, self.temperatureC, self.percentThermalExpansion) ================================================ FILE: armi/materials/tantalum.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tantalum. 
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import SimpleSolid class Tantalum(SimpleSolid): def setDefaultMassFracs(self): self.setMassFrac("TA181", 1) def density(self, Tk=None, Tc=None): return 16.6 # g/cc ================================================ FILE: armi/materials/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/materials/tests/test__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""This module tests the __init__.py file since it has rather unique behavior.""" import unittest from armi import materials def betterSubClassCheck(item, superClass): try: return issubclass(item, superClass) except TypeError: return False class Materials__init__Tests(unittest.TestCase): def test_canAccessClassesFromPackage(self): klasses = [kk for _, kk in vars(materials).items() if betterSubClassCheck(kk, materials.material.Material)] self.assertGreater(len(klasses), 10) def test_packageClassesEqualModuleClasses(self): self.assertEqual(materials.UraniumOxide, materials.uraniumOxide.UraniumOxide) ================================================ FILE: armi/materials/tests/test_air.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for air materials.""" import math import unittest from armi.materials.air import Air from armi.utils import densityTools from armi.utils.units import getTc """ Reference thermal physical properties from Table A.4 in Incropera, Frank P., et al. Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002. 
""" REFERENCE_Tk = [ 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 3000, ] REFERENCE_DENSITY_KG_PER_M3 = [ 3.5562, 2.3364, 1.7458, 1.3947, 1.1614, 0.995, 0.8711, 0.774, 0.6964, 0.6329, 0.5804, 0.5356, 0.4972, 0.4643, 0.4354, 0.4097, 0.3868, 0.3666, 0.3482, 0.3166, 0.2902, 0.2679, 0.2488, 0.2322, 0.2177, 0.2049, 0.1935, 0.1833, 0.1741, 0.1658, 0.1582, 0.1513, 0.1448, 0.1389, 0.1135, ] REFERENCE_HEAT_CAPACITY_kJ_PER_KG_K = [ 1.032, 1.012, 1.007, 1.006, 1.007, 1.009, 1.014, 1.021, 1.03, 1.04, 1.051, 1.063, 1.075, 1.087, 1.099, 1.11, 1.121, 1.131, 1.141, 1.159, 1.175, 1.189, 1.207, 1.23, 1.248, 1.267, 1.286, 1.307, 1.337, 1.372, 1.417, 1.478, 1.558, 1.665, 2.726, ] REFERENCE_THERMAL_CONDUCTIVITY_mJ_PER_M_K = [ 9.34, 13.8, 18.1, 22.3, 26.3, 30, 33.8, 37.3, 40.7, 43.9, 46.9, 49.7, 52.4, 54.9, 57.3, 59.6, 62, 64.3, 66.7, 71.5, 76.3, 82, 91, 100, 106, 113, 120, 128, 137, 147, 160, 175, 196, 222, ] class TestAir(unittest.TestCase): """unit tests for air materials. .. test:: There is a base class for fluid materials. :id: T_ARMI_MAT_FLUID1 :tests: R_ARMI_MAT_FLUID """ def test_pseudoDensity(self): """ Reproduce verification results at 300K from Incropera, Frank P., et al. Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002. """ air = Air() for Tk, densKgPerM3 in zip(REFERENCE_Tk, REFERENCE_DENSITY_KG_PER_M3): if Tk < 2400: error = math.fabs((air.pseudoDensityKgM3(Tk=Tk) - densKgPerM3) / densKgPerM3) self.assertLess(error, 1e-2) error = math.fabs((air.pseudoDensityKgM3(Tc=getTc(Tk=Tk)) - densKgPerM3) / densKgPerM3) self.assertLess(error, 1e-2) def test_heatCapacity(self): """ Reproduce verification results at 300K from Incropera, Frank P., et al. Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002. 
""" air = Air() for Tk, heatCapacity in zip(REFERENCE_Tk, REFERENCE_HEAT_CAPACITY_kJ_PER_KG_K): if Tk < 1300: error = math.fabs((air.heatCapacity(Tk=Tk) - heatCapacity * 1e3) / (heatCapacity * 1e3)) self.assertLess(error, 1e-2) def test_thermalConductivity(self): """ Reproduce verification results at 300K from Incropera, Frank P., et al. Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002. """ air = Air() for Tk, thermalConductivity in zip(REFERENCE_Tk, REFERENCE_THERMAL_CONDUCTIVITY_mJ_PER_M_K): if Tk > 200 and Tk < 850: error = math.fabs( (air.thermalConductivity(Tk=Tk) - thermalConductivity * 1e-3) / (thermalConductivity * 1e-3) ) self.assertLess(error, 1e-2) def test_massFrac(self): """Reproduce the number ratios results to PNNL-15870 Rev 1.""" air = Air() refC = 0.000150 refN = 0.784431 refO = 0.210748 refAR = 0.004671 nuclides, nDens = densityTools.getNDensFromMasses(air.pseudoDensity(Tk=300), air.massFrac) diff = 1e-4 error = abs(nDens[0] / sum(nDens) - refC) self.assertLess(error, diff) error = abs(nDens[1] / sum(nDens) - refN) self.assertLess(error, diff) error = abs(nDens[2] / sum(nDens) - refO) self.assertLess(error, diff) error = abs(nDens[3] / sum(nDens) - refAR) self.assertLess(error, diff) self.assertEqual(nuclides[0].decode(), "C") self.assertEqual(nuclides[1].decode(), "N") self.assertEqual(nuclides[2].decode(), "O") self.assertEqual(nuclides[3].decode(), "AR") def test_validRanges(self): air = Air() den0 = air.density(Tk=101) denf = air.density(Tk=2399) self.assertLess(denf, den0) hc0 = air.heatCapacity(Tk=101) hcf = air.heatCapacity(Tk=1299) self.assertGreater(hcf, hc0) tc0 = air.thermalConductivity(Tk=201) tcf = air.thermalConductivity(Tk=849) self.assertGreater(tcf, tc0) ================================================ FILE: armi/materials/tests/test_b4c.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this 
class B4C_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for boron carbide."""

    MAT_CLASS = B4C

    def setUp(self):
        AbstractMaterialTest.setUp(self)
        self.mat = B4C()
        self.B4C_theoretical_density = B4C()
        self.B4C_theoretical_density.applyInputParams(theoretical_density=0.5)
        self.B4C_TD_frac = B4C()
        self.B4C_TD_frac.applyInputParams(TD_frac=0.4)
        self.B4C_both = B4C()
        self.B4C_both.applyInputParams(theoretical_density=0.5, TD_frac=0.4)

    def test_theoretical_pseudoDensity(self):
        """Reduced-TD inputs scale pseudo-density relative to the default TD fraction."""
        nominal = self.mat.pseudoDensity(500)
        # when both params are given, TD_frac (0.4) wins over theoretical_density (0.5)
        for reducedMat, tdFrac in [
            (self.B4C_theoretical_density, 0.5),
            (self.B4C_TD_frac, 0.4),
            (self.B4C_both, 0.4),
        ]:
            self.assertAlmostEqual(
                nominal * tdFrac / B4C.DEFAULT_THEORETICAL_DENSITY_FRAC,
                reducedMat.pseudoDensity(500),
            )

    def test_propertyValidTemperature(self):
        """B4C declares at least one bounded property temperature range."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)

    def test_variousEdgeCases(self):
        """Mass enrichments outside [0, 1] are rejected."""
        for badEnrichment in (-0.001, 1.001):
            with self.assertRaises(ValueError):
                self.mat.setNewMassFracsFromMassEnrich(badEnrichment)
Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit test for Beryllium.""" import unittest from armi.materials.be9 import Be9 from armi.materials.tests import test_materials class TestBe9(test_materials.AbstractMaterialTest, unittest.TestCase): """Beryllium tests.""" MAT_CLASS = Be9 def test_pseudoDensity(self): cur = self.mat.pseudoDensity(Tc=25) ref = 1.85 delta = ref * 0.001 self.assertAlmostEqual(cur, ref, delta=delta) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) ================================================ FILE: armi/materials/tests/test_fluids.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for fluid-specific behaviors. The ARMI framework has a lot of thermal expansion machinery that applies to all components but doesn't make sense for fluids. The tests here help show fluid materials still play nice with the rest of the framework. 
""" from unittest import TestCase from armi.materials.material import Fluid, Material from armi.reactor.components import Circle from armi.tests import mockRunLogs class TestFluids(TestCase): class MyFluid(Fluid): """Stand-in fluid that doesn't provide lots of functionality.""" class MySolid(Material): """Stand-in solid that doesn't provide lots of functionality.""" def test_fluidDensityWrapperNoWarning(self): """Test that Component.material.density does not raise a warning for fluids. The ARMI Framework contains a mechanism to warn users if they ask for the density of a material attached to a component. But the component is the source of truth for volume and composition. And can be thermally expanded during operation. Much of the framework operates on ``Component.density`` and other ``Component`` methods for mass accounting. However, ``comp.material.density`` does not know about the new composition or volumes and can diverge from ``component.density``. Additionally, the framework does not do any thermal expansion on fluids. So the above calls to ``component.material.density`` are warranted for fluids. 
""" self._checkCompDensityLogs( mat=self.MySolid(), nExpectedWarnings=1, msg="Solids should have the density warning logged.", ) self._checkCompDensityLogs( mat=self.MyFluid(), nExpectedWarnings=0, msg="Fluids should not have the density warning logged.", ) def _checkCompDensityLogs(self, mat: Material, nExpectedWarnings: int, msg: str): comp = Circle(name="test", material=mat, Tinput=20, Thot=20, id=0, od=1, mult=1) with mockRunLogs.LogCounter() as logs: comp.material.density(Tc=comp.temperatureInC) self.assertEqual(logs.messageCounts["warning"], nExpectedWarnings, msg=msg) ================================================ FILE: armi/materials/tests/test_graphite.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for graphite material.""" import math import unittest from armi.materials.graphite import Graphite class Graphite_TestCase(unittest.TestCase): MAT_CLASS = Graphite def setUp(self): self.mat = self.MAT_CLASS() def test_linearExpansionPercent(self): accuracy = 2 cur = self.mat.linearExpansionPercent(330) ref = 0.013186 self.assertAlmostEqual(cur, ref, accuracy) cur = self.mat.linearExpansionPercent(1500) ref = 0.748161 self.assertAlmostEqual(cur, ref, accuracy) cur = self.mat.linearExpansionPercent(3000) ref = 2.149009 self.assertAlmostEqual(cur, ref, accuracy) def test_propertyValidTemperature(self): self.assertEqual(len(self.mat.propertyValidTemperature), 0) def test_density(self): """Test to reproduce density measurements results in table 2 from [INL-EXT-16-38241].""" uncertainty = 0.01 for Tc, ref_rho in [ # sample G-348-1 (22.6, 1.8885), (401.6, 1.8772), (801.3, 1.8634), # sample G-348-2 (23.5, 1.9001), (401.0, 1.8888), (800.9, 1.8748), ]: test_rho = self.mat.density(Tc=Tc) error = math.fabs((ref_rho - test_rho) / ref_rho) self.assertLess(error, uncertainty) ================================================ FILE: armi/materials/tests/test_lithium.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for lithium.""" import unittest from armi.materials.lithium import Lithium from armi.materials.tests.test_materials import AbstractMaterialTest class Lithium_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = Lithium def setUp(self): AbstractMaterialTest.setUp(self) self.mat = Lithium() self.Lithium_LI_wt_frac = Lithium() self.Lithium_LI_wt_frac.applyInputParams(LI6_wt_frac=0.5) self.Lithium_LI6_wt_frac = Lithium() self.Lithium_LI6_wt_frac.applyInputParams(LI6_wt_frac=0.6) self.Lithium_both = Lithium() self.Lithium_both.applyInputParams(LI6_wt_frac=0.8) def test_Lithium_material_modifications(self): self.assertEqual(self.mat.getMassFrac("LI6"), 0.0759) self.assertAlmostEqual(self.Lithium_LI_wt_frac.getMassFrac("LI6"), 0.5, places=10) self.assertAlmostEqual(self.Lithium_LI6_wt_frac.getMassFrac("LI6"), 0.6, places=10) self.assertAlmostEqual(self.Lithium_both.getMassFrac("LI6"), 0.8, places=10) def test_pseudoDensity(self): ref = self.mat.pseudoDensity(Tc=100) self.assertAlmostEqual(ref, 0.512, delta=abs(ref * 0.001)) ref = self.mat.pseudoDensity(Tc=200) self.assertAlmostEqual(ref, 0.512, delta=abs(ref * 0.001)) def test_meltingPoint(self): ref = self.mat.meltingPoint() cur = 453.69 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) def test_boilingPoint(self): ref = self.mat.boilingPoint() cur = 1615.0 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) def test_heatCapacity(self): ref = self.mat.heatCapacity(Tc=100) cur = 3570.0 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) ref = self.mat.heatCapacity(Tc=200) cur = 3570.0 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) def test_propertyValidTemperature(self): self.assertEqual(len(self.mat.propertyValidTemperature), 0) ================================================ FILE: armi/materials/tests/test_materials.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you 
class AbstractMaterialTest:
    """Base for material tests.

    Mixed into concrete ``unittest.TestCase`` subclasses that set ``MAT_CLASS``;
    provides tests common to every material.
    """

    # concrete subclasses must set this to the material class under test
    MAT_CLASS = None
    # a temperature (K) inside the valid property range of the materials tested
    VALID_TEMP_K = 500

    def setUp(self):
        self.mat = self.MAT_CLASS()

    def test_isPicklable(self):
        """Test that all materials are picklable so we can do MPI communication of state."""
        stream = pickle.dumps(self.mat)
        mat = pickle.loads(stream)
        # check a property that is sometimes interpolated.
        self.assertEqual(self.mat.thermalConductivity(self.VALID_TEMP_K), mat.thermalConductivity(self.VALID_TEMP_K))

    def test_density(self):
        """Test that all materials produce a non-zero density from density."""
        self.assertNotEqual(self.mat.density(self.VALID_TEMP_K), 0)

    def test_TD(self):
        """Test the material theoretical-density fraction accessors and cache invalidation."""
        self.assertEqual(self.mat.getTD(), self.mat.theoreticalDensityFrac)

        self.mat.clearCache()
        self.mat._setCache("dummy", 666)
        self.assertEqual(self.mat.cached, {"dummy": 666})

        # adjusting TD must both update the fraction and clear any cached values
        self.mat.adjustTD(0.5)
        self.assertEqual(0.5, self.mat.theoreticalDensityFrac)
        self.assertEqual(self.mat.cached, {})

    def test_duplicate(self):
        """Test the material duplication."""
        mat = self.mat.duplicate()

        # the copy carries over composition, parent, and density state
        self.assertEqual(len(mat.massFrac), len(self.mat.massFrac))
        for key in self.mat.massFrac:
            self.assertEqual(mat.massFrac[key], self.mat.massFrac[key])

        self.assertEqual(mat.parent, self.mat.parent)
        self.assertEqual(mat.refDens, self.mat.refDens)
        self.assertEqual(mat.theoreticalDensityFrac, self.mat.theoreticalDensityFrac)

    def test_cache(self):
        """Test the material cache set/get round trip."""
        self.mat.clearCache()
        self.assertEqual(len(self.mat.cached), 0)

        self.mat._setCache("Emmy", "Noether")
        self.assertEqual(len(self.mat.cached), 1)

        val = self.mat._getCached("Emmy")
        self.assertEqual(val, "Noether")

    def test_densityKgM3(self):
        """Test the density for kg/m^3 (exactly 1000x the g/cc value)."""
        dens = self.mat.density(self.VALID_TEMP_K)
        densKgM3 = self.mat.densityKgM3(self.VALID_TEMP_K)
        self.assertEqual(dens * 1000.0, densKgM3)

    def test_pseudoDensityKgM3(self):
        """Test the pseudo density for kg/m^3 (exactly 1000x the g/cc value)."""
        dens = self.mat.pseudoDensity(self.VALID_TEMP_K)
        densKgM3 = self.mat.pseudoDensityKgM3(self.VALID_TEMP_K)
        self.assertEqual(dens * 1000.0, densKgM3)

    def test_wrappedDensity(self):
        """Test that the density decorator is applied to non-fluids."""
        # fluids are exempt from the density wrapper; everything else must have it
        self.assertEqual(
            hasattr(self.mat.density, "__wrapped__"),
            not isinstance(self.mat, materials.Fluid),
            msg=self.mat,
        )
class MaterialFindingTests(unittest.TestCase):
    """Make sure materials are discoverable as designed."""

    def test_findMaterial(self):
        """Test resolveMaterialClassByName() function.

        .. test:: Materials can be grabbed from a list of namespaces.
            :id: T_ARMI_MAT_NAMESPACE0
            :tests: R_ARMI_MAT_NAMESPACE
        """
        # resolvable from the package, from the specific module, and from a later
        # entry when an earlier namespace does not contain the material
        self.assertIs(
            materials.resolveMaterialClassByName("Void", namespaceOrder=["armi.materials"]),
            materials.Void,
        )
        self.assertIs(
            materials.resolveMaterialClassByName("Void", namespaceOrder=["armi.materials.void"]),
            materials.Void,
        )
        self.assertIs(
            materials.resolveMaterialClassByName("Void", namespaceOrder=["armi.materials.mox", "armi.materials.void"]),
            materials.Void,
        )
        # a non-importable namespace fails fast even if a later entry would match
        with self.assertRaises(ModuleNotFoundError):
            materials.resolveMaterialClassByName("Void", namespaceOrder=["invalid.namespace", "armi.materials.void"])
        # unknown material names raise KeyError
        with self.assertRaises(KeyError):
            materials.resolveMaterialClassByName("Unobtanium", namespaceOrder=["armi.materials"])

    def __validateMaterialNamespace(self):
        """Helper method to validate the material namespace a little."""
        self.assertTrue(isinstance(_MATERIAL_NAMESPACE_ORDER, list))
        self.assertGreater(len(_MATERIAL_NAMESPACE_ORDER), 0)
        for nameSpace in _MATERIAL_NAMESPACE_ORDER:
            self.assertTrue(isinstance(nameSpace, str))

    @unittest.skipUnless(context.MPI_RANK == 0, "test only on root node")
    def test_namespacing(self):
        """Test loading materials with different material namespaces, to cover how they work.

        .. test:: Material can be found in defined packages.
            :id: T_ARMI_MAT_NAMESPACE1
            :tests: R_ARMI_MAT_NAMESPACE

        .. test:: Material namespaces register materials with an order of priority.
            :id: T_ARMI_MAT_ORDER
            :tests: R_ARMI_MAT_ORDER
        """
        # let's do a quick test of getting a material from the default namespace
        setMaterialNamespaceOrder(["armi.materials"])
        uraniumOxide = materials.resolveMaterialClassByName("UraniumOxide", namespaceOrder=["armi.materials"])
        self.assertGreater(uraniumOxide().density(500), 0)

        # validate the default namespace in ARMI
        self.__validateMaterialNamespace()

        # show you can add a material namespace
        newMats = "armi.utils.tests.test_densityTools"
        setMaterialNamespaceOrder(["armi.materials", newMats])
        self.__validateMaterialNamespace()

        # in the case of duplicate materials, show that the material namespace determines
        # which material is chosen (the test namespace's UraniumOxide has zero density)
        uraniumOxideTest = materials.resolveMaterialClassByName(
            "UraniumOxide", namespaceOrder=[newMats, "armi.materials"]
        )
        for t in range(200, 600):
            self.assertEqual(uraniumOxideTest().density(t), 0)
            self.assertEqual(uraniumOxideTest().pseudoDensity(t), 0)

        # for safety, reset the material namespace list and order
        setMaterialNamespaceOrder(["armi.materials"])
class Magnesium_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the magnesium material."""

    MAT_CLASS = materials.Magnesium
    # magnesium properties are checked at an elevated temperature
    VALID_TEMP_K = 1000

    def test_pseudoDensity(self):
        """Spot-check pseudo-density at two temperatures (K)."""
        for Tk, expected in ((923, 1.5897), (1390, 1.4661)):
            self.assertAlmostEqual(self.mat.pseudoDensity(Tk), expected, delta=expected * 0.0001)

    def test_propertyValidTemperature(self):
        """Magnesium declares at least one bounded property temperature range."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class NaCl_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the sodium chloride material."""

    MAT_CLASS = materials.NaCl

    def test_density(self):
        """Spot-check density at two temperatures (C)."""
        for tempC, expected in ((100, 2.113204), (300, 2.050604)):
            self.assertAlmostEqual(self.mat.density(Tc=tempC), expected, delta=abs(expected * 0.001))

    def test_propertyValidTemperature(self):
        """NaCl declares no bounded property temperature ranges."""
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class ScandiumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the scandium oxide material."""

    MAT_CLASS = materials.Sc2O3

    def test_pseudoDensity(self):
        """Room-temperature pseudo-density matches the 3.86 g/cc reference."""
        self.assertAlmostEqual(self.mat.pseudoDensity(Tc=25), 3.86, delta=abs(3.86 * 0.001))

    def test_linearExpansionPercent(self):
        """Spot-check linear expansion percent at two temperatures (C)."""
        for tempC, expected in ((100, 0.0623499), (400, 0.28322)):
            self.assertAlmostEqual(
                self.mat.linearExpansionPercent(Tc=tempC), expected, delta=abs(expected * 0.001)
            )

    def test_propertyValidTemperature(self):
        """Sc2O3 declares at least one bounded property temperature range."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Tantalum_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the tantalum material."""

    MAT_CLASS = materials.Tantalum

    def test_pseudoDensity(self):
        """Both test temperatures give the same 16.6 g/cc pseudo-density."""
        for tempC in (100, 300):
            self.assertAlmostEqual(self.mat.pseudoDensity(Tc=tempC), 16.6, delta=abs(16.6 * 0.001))

    def test_propertyValidTemperature(self):
        """Tantalum declares no bounded property temperature ranges."""
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class Uranium_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the uranium metal material."""

    MAT_CLASS = materials.Uranium

    def test_applyInputParams(self):
        # check the defaults when applyInputParams is applied without arguments
        U235_wt_frac_default = 0.0071136523
        self.mat.applyInputParams()
        self.assertAlmostEqual(self.mat.massFrac["U235"], U235_wt_frac_default)
        # the first density-table point should be reproduced exactly at its temperature
        densityTemp = materials.Uranium._densityTableK[0]
        density0 = self.mat.density(Tk=materials.Uranium._densityTableK[0])
        expectedDensity = materials.Uranium._densityTable[0]
        self.assertEqual(density0, expectedDensity)
        # now override enrichment and theoretical-density fraction
        newWtFrac = 1.0
        newTDFrac = 0.5
        self.mat.applyInputParams(U235_wt_frac=newWtFrac, TD_frac=newTDFrac)
        self.assertEqual(self.mat.massFrac["U235"], newWtFrac)
        # TD_frac scales the tabulated density linearly
        self.assertEqual(self.mat.density(Tk=densityTemp), expectedDensity * newTDFrac)
        self.assertAlmostEqual(self.mat.pseudoDensity(Tk=densityTemp), 9.415418593432646)

    def test_thermalConductivity(self):
        # spot-check thermal conductivity (W/m-K) at five temperatures (C)
        cur = self.mat.thermalConductivity(Tc=100)
        ref = 28.489312629207500293659904855
        self.assertAlmostEqual(cur, ref, delta=10e-10)

        cur = self.mat.thermalConductivity(Tc=300)
        ref = 32.789271449207497255429188954
        self.assertAlmostEqual(cur, ref, delta=10e-10)

        cur = self.mat.thermalConductivity(Tc=500)
        ref = 37.561790269207499193271360127
        self.assertAlmostEqual(cur, ref, delta=10e-10)

        cur = self.mat.thermalConductivity(Tc=700)
        ref = 42.806869089207502554472739575
        self.assertAlmostEqual(cur, ref, delta=10e-10)

        cur = self.mat.thermalConductivity(Tc=900)
        ref = 48.524507909207507339033327298
        self.assertAlmostEqual(cur, ref, delta=10e-10)

    def test_propertyValidTemperature(self):
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)

        # ensure that material properties check the bounds and that the bounds
        # align with what is expected
        for propName, methodName in zip(
            [
                "thermal conductivity",
                "heat capacity",
                "density",
                "linear expansion",
                "linear expansion percent",
            ],
            [
                "thermalConductivity",
                "heatCapacity",
                "density",
                "linearExpansion",
                "linearExpansionPercent",
            ],
        ):
            lowerBound = self.mat.propertyValidTemperature[propName][0][0]
            upperBound = self.mat.propertyValidTemperature[propName][0][1]
            # just outside either bound must raise
            with self.assertRaises(ValueError):
                getattr(self.mat, methodName)(lowerBound - 1)
            with self.assertRaises(ValueError):
                getattr(self.mat, methodName)(upperBound + 1)

    def test_pseudoDensity(self):
        # spot-check pseudo-density at two temperatures (C)
        cur = self.mat.pseudoDensity(Tc=500)
        ref = 18.74504534852846
        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))

        cur = self.mat.pseudoDensity(Tc=1000)
        ref = 18.1280492780791
        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))
Table 3.2 "Parameters of # thermal expansion of stoichiometric MOX fuel and density of UO2 as a function of # temperature" cur = self.mat.density(Tk=700) ref = 1.0832e4 * 0.001 # Convert to grams/cc delta = ref * 0.02 self.assertAlmostEqual(cur, ref, delta=delta) cur = self.mat.density(Tk=2600) ref = 9.9698e3 * 0.001 # Convert to grams/cc delta = ref * 0.02 self.assertAlmostEqual(cur, ref, delta=delta) def test_thermalConductivity(self): cur = self.mat.thermalConductivity(600) ref = 4.864 accuracy = 3 self.assertAlmostEqual(cur, ref, accuracy) cur = self.mat.thermalConductivity(1800) ref = 2.294 accuracy = 3 self.assertAlmostEqual(cur, ref, accuracy) cur = self.mat.thermalConductivity(2700) ref = 1.847 accuracy = 3 self.assertAlmostEqual(cur, ref, accuracy) def test_linearExpansion(self): cur = self.mat.linearExpansion(300) ref = 9.93e-6 accuracy = 2 self.assertAlmostEqual(cur, ref, accuracy) cur = self.mat.linearExpansion(1500) ref = 1.0639e-5 accuracy = 2 self.assertAlmostEqual(cur, ref, accuracy) cur = self.mat.linearExpansion(3000) ref = 1.5821e-5 accuracy = 2 self.assertAlmostEqual(cur, ref, accuracy) def test_linearExpansionPercent(self): cur = self.mat.linearExpansionPercent(Tk=500) ref = 0.222826 self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001)) cur = self.mat.linearExpansionPercent(Tk=950) ref = 0.677347 self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001)) def test_heatCapacity(self): """Check against Figure 4.2 from ORNL 2000-1723 EFG.""" self.assertAlmostEqual(self.mat.heatCapacity(300), 230.0, delta=20) self.assertAlmostEqual(self.mat.heatCapacity(1000), 320.0, delta=20) self.assertAlmostEqual(self.mat.heatCapacity(2000), 380.0, delta=20) def test_getTemperatureAtDensity(self): expectedTemperature = 100.0 tAtTargetDensity = self.mat.getTemperatureAtDensity(self.mat.density(Tc=expectedTemperature), 30.0) self.assertAlmostEqual(expectedTemperature, tAtTargetDensity) def test_getDensityExpansion3D(self): expectedTemperature = 100.0 ref_density = 
10.86792660463439e3 test_density = self.mat.densityKgM3(Tc=expectedTemperature) error = math.fabs((ref_density - test_density) / ref_density) self.assertLess(error, 0.005) def test_removeNucMassFrac(self): self.mat.removeNucMassFrac("O") massFracs = [str(k) for k in self.mat.massFrac.keys()] self.assertListEqual(["U235", "U238"], massFracs) def test_densityTimesHeatCapactiy(self): Tc = 500.0 expectedRhoCp = self.mat.density(Tc=Tc) * 1000.0 * self.mat.heatCapacity(Tc=Tc) self.assertAlmostEqual(expectedRhoCp, self.mat.densityTimesHeatCapacity(Tc=Tc)) def test_getTempChangeForDensityChange(self): Tc = 500.0 linearExpansion = self.mat.linearExpansion(Tc=Tc) densityFrac = 1.001 linearChange = densityFrac ** (-1.0 / 3.0) - 1.0 expectedDeltaT = linearChange / linearExpansion actualDeltaT = self.mat.getTempChangeForDensityChange(Tc, densityFrac, quiet=False) self.assertAlmostEqual(expectedDeltaT, actualDeltaT) def test_duplicate(self): """Test the material duplication. .. test:: Materials shall calc mass fracs at init. 
:id: T_ARMI_MAT_FRACS4 :tests: R_ARMI_MAT_FRACS """ duplicateU = self.mat.duplicate() for key in self.mat.massFrac: self.assertEqual(duplicateU.massFrac[key], self.mat.massFrac[key]) duplicateMassFrac = deepcopy(self.mat.massFrac) for key in self.mat.massFrac.keys(): self.assertEqual(duplicateMassFrac[key], self.mat.massFrac[key]) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) def test_applyInputParams(self): UO2_TD = materials.UraniumOxide() original = UO2_TD.density(500) UO2_TD.applyInputParams(TD_frac=0.1) new = UO2_TD.density(500) ratio = new / original self.assertAlmostEqual(ratio, 0.1) UO2_TD = materials.UraniumOxide() original = UO2_TD.pseudoDensity(500) UO2_TD.applyInputParams(TD_frac=0.1) new = UO2_TD.pseudoDensity(500) ratio = new / original self.assertAlmostEqual(ratio, 0.1) class Thorium_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Thorium def test_setDefaultMassFracs(self): """ Test default mass fractions. .. test:: The materials generate nuclide mass fractions. 
:id: T_ARMI_MAT_FRACS0 :tests: R_ARMI_MAT_FRACS """ self.mat.setDefaultMassFracs() cur = self.mat.massFrac ref = {"TH232": 1.0} self.assertEqual(cur, ref) def test_pseudoDensity(self): cur = self.mat.pseudoDensity(30) ref = 11.68 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_linearExpansion(self): cur = self.mat.linearExpansion(400) ref = 11.9e-6 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_thermalConductivity(self): cur = self.mat.thermalConductivity(400) ref = 43.1 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_meltingPoint(self): cur = self.mat.meltingPoint() ref = 2025.0 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class ThoriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.ThoriumOxide def test_density(self): cur = self.mat.density(Tc=25) ref = 10.00 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) # make sure that material modifications are correctly applied self.mat.applyInputParams(TD_frac=0.1) cur = self.mat.density(Tc=25) self.assertAlmostEqual(cur, ref * 0.1, accuracy) def test_linearExpansion(self): cur = self.mat.linearExpansion(400) ref = 9.67e-6 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_thermalConductivity(self): cur = self.mat.thermalConductivity(400) ref = 6.20 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_meltingPoint(self): cur = self.mat.meltingPoint() ref = 3643.0 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class Void_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Void def test_pseudoDensity(self): """This material has a no pseudo-density.""" self.mat.setDefaultMassFracs() cur = self.mat.pseudoDensity() self.assertEqual(cur, 0.0) def test_density(self): 
"""This material has no density.""" self.assertEqual(self.mat.density(500), 0) self.mat.setDefaultMassFracs() cur = self.mat.density() self.assertEqual(cur, 0.0) def test_linearExpansion(self): """This material does not expand linearly.""" cur = self.mat.linearExpansion(400) ref = 0.0 self.assertEqual(cur, ref) def test_propertyValidTemperature(self): """This material has no valid temperatures.""" self.assertEqual(len(self.mat.propertyValidTemperature), 0) class Mixture_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials._Mixture def test_density(self): """This material has no density function.""" self.assertEqual(self.mat.density(500), 0) def test_setDefaultMassFracs(self): """ Test default mass fractions. .. test:: The materials generate nuclide mass fractions. :id: T_ARMI_MAT_FRACS1 :tests: R_ARMI_MAT_FRACS """ self.mat.setDefaultMassFracs() cur = self.mat.pseudoDensity(500) self.assertEqual(cur, 0.0) def test_linearExpansion(self): with self.assertRaises(NotImplementedError): _cur = self.mat.linearExpansion(400) def test_propertyValidTemperature(self): self.assertEqual(len(self.mat.propertyValidTemperature), 0) class Lead_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Lead VALID_TEMP_K = 600 def test_volumetricExpansion(self): self.assertAlmostEqual( self.mat.volumetricExpansion(800), 1.1472e-4, 4, msg="\n\nIncorrect Lead volumetricExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( self.mat.volumetricExpansion(800), 1.1472e-4 ), ) self.assertAlmostEqual( self.mat.volumetricExpansion(1200), 1.20237e-4, 4, msg="\n\nIncorrect Lead volumetricExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( self.mat.volumetricExpansion(1200), 1.20237e-4 ), ) def test_linearExpansion(self): """Unit tests for lead materials linear expansion. .. test:: Fluid materials do not linearly expand, at any temperature. 
:id: T_ARMI_MAT_FLUID2 :tests: R_ARMI_MAT_FLUID """ for t in range(300, 901, 25): cur = self.mat.linearExpansion(t) self.assertEqual(cur, 0) def test_setDefaultMassFracs(self): """ Test default mass fractions. .. test:: The materials generate nuclide mass fractions. :id: T_ARMI_MAT_FRACS2 :tests: R_ARMI_MAT_FRACS """ self.mat.setDefaultMassFracs() cur = self.mat.massFrac ref = {"PB": 1} self.assertEqual(cur, ref) def test_pseudoDensity(self): cur = self.mat.pseudoDensity(634.39) ref = 10.6120 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) cur = self.mat.pseudoDensity(1673.25) ref = 9.4231 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) def test_heatCapacity(self): cur = self.mat.heatCapacity(1200) ref = 138.647 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class LeadBismuth_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.LeadBismuth def test_setDefaultMassFracs(self): """ Test default mass fractions. .. test:: The materials generate nuclide mass fractions. 
:id: T_ARMI_MAT_FRACS3 :tests: R_ARMI_MAT_FRACS """ self.mat.setDefaultMassFracs() cur = self.mat.massFrac ref = {"BI209": 0.555, "PB": 0.445} self.assertEqual(cur, ref) def test_pseudoDensity(self): cur = self.mat.pseudoDensity(404.77) ref = 10.5617 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) cur = self.mat.pseudoDensity(1274.20) ref = 9.3627 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) def test_volumetricExpansion(self): cur = self.mat.volumetricExpansion(400) ref = 1.2526e-4 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) cur = self.mat.volumetricExpansion(800) ref = 1.3187e-4 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_heatCapacity(self): cur = self.mat.heatCapacity(400) ref = 149.2592 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) cur = self.mat.heatCapacity(800) ref = 141.7968 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) def test_getTempChangeForDensityChange(self): Tc = 800.0 densityFrac = 1.001 currentDensity = self.mat.pseudoDensity(Tc=Tc) perturbedDensity = currentDensity * densityFrac tAtPerturbedDensity = self.mat.getTemperatureAtDensity(perturbedDensity, Tc) expectedDeltaT = tAtPerturbedDensity - Tc actualDeltaT = self.mat.getTempChangeForDensityChange(Tc, densityFrac, quiet=False) self.assertAlmostEqual(expectedDeltaT, actualDeltaT) def test_dynamicVisc(self): ref = self.mat.dynamicVisc(Tc=150) cur = 0.0029355 self.assertAlmostEqual(ref, cur, delta=ref * 0.001) ref = self.mat.dynamicVisc(Tc=200) cur = 0.0024316 self.assertAlmostEqual(ref, cur, delta=ref * 0.001) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class Copper_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Cu def test_setDefaultMassFracs(self): cur = self.mat.massFrac ref = {"CU63": 0.6915, "CU65": 0.3085} self.assertEqual(cur, ref) def test_densityNeverChanges(self): for tk in [200.0, 400.0, 800.0, 
1111.1]: cur = self.mat.density(tk) self.assertAlmostEqual(cur, 8.913, 4) def test_linearExpansionPercent(self): temps = [100.0, 200.0, 600.0] expansions = [-0.2955, -0.1500, 0.5326] for i, temp in enumerate(temps): cur = self.mat.linearExpansionPercent(Tk=temp) self.assertAlmostEqual(cur, expansions[i], 4) def test_getChildren(self): self.assertEqual(len(self.mat.getChildren()), 0) def test_getChildrenWithFlags(self): self.assertEqual(len(self.mat.getChildrenWithFlags("anything")), 0) class Sulfur_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Sulfur VALID_TEMP_K = 400 def test_setDefaultMassFracs(self): cur = self.mat.massFrac ref = {"S34": 0.0429, "S36": 0.002, "S33": 0.0076, "S32": 0.9493} self.assertEqual(cur, ref) def test_pseudoDensity(self): cur = self.mat.pseudoDensity(400) ref = 1.7956 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_volumetricExpansion(self): cur = self.mat.volumetricExpansion(334) ref = 5.28e-4 accuracy = 4 self.assertAlmostEqual(cur, ref, accuracy) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class Zr_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Zr def test_thermalConductivity(self): cur = self.mat.thermalConductivity(372.7273) ref = 19.8718698709447 self.assertAlmostEqual(cur, ref) cur = self.mat.thermalConductivity(1172.727) ref = 23.193177102455 self.assertAlmostEqual(cur, ref) def test_linearExpansion(self): cur = self.mat.linearExpansion(400) ref = 5.9e-6 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) cur = self.mat.linearExpansion(800) ref = 7.9e-6 delta = ref * 0.05 self.assertAlmostEqual(cur, ref, delta=delta) def test_linearExpansionPercent(self): testTemperaturesInK = [ 293, 400, 500, 600, 700, 800, 900, 1000, 1100, 1137, 1200, 1400, 1600, 1800, ] expectedLinearExpansionValues = [ 0.0007078312624, 0.0602048, 0.123025, 0.1917312, 0.2652626, 0.3425584, 0.4225578, 0.5042, 0.5864242, 
0.481608769233, 0.5390352, 0.7249496, 0.9221264, 1.1380488, ] for i, temp in enumerate(testTemperaturesInK): Tk = temp Tc = temp - units.C_TO_K self.assertAlmostEqual(self.mat.linearExpansionPercent(Tc=Tc), expectedLinearExpansionValues[i]) self.assertAlmostEqual(self.mat.linearExpansionPercent(Tk=Tk), expectedLinearExpansionValues[i]) def test_pseudoDensity(self): testTemperaturesInK = [ 293, 298.15, 400, 500, 600, 700, 800, 900, 1000, 1100, 1137, 1200, 1400, 1600, 1800, ] expectedDensityValues = [ 6.56990469455, 6.56955491852, 6.56209393299, 6.55386200572, 6.54487650252, 6.53528040809, 6.52521578203, 6.51482358662, 6.50424356114, 6.49361414192, 6.50716858169, 6.49973710507, 6.47576529821, 6.45048593916, 6.4229727005, ] for i, temp in enumerate(testTemperaturesInK): Tk = temp Tc = temp - units.C_TO_K self.assertAlmostEqual(self.mat.pseudoDensity(Tc=Tc), expectedDensityValues[i]) self.assertAlmostEqual(self.mat.pseudoDensity(Tk=Tk), expectedDensityValues[i]) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class Inconel_TestCase(AbstractMaterialTest, unittest.TestCase): def setUp(self): self.Inconel = materials.Inconel() self.Inconel800 = materials.Inconel800() self.InconelPE16 = materials.InconelPE16() self.mat = self.Inconel def tearDown(self): self.Inconel = None self.Inconel800 = None self.InconelPE16 = None def test_setDefaultMassFracs(self): self.Inconel.setDefaultMassFracs() self.Inconel800.setDefaultMassFracs() self.InconelPE16.setDefaultMassFracs() self.assertAlmostEqual(self.Inconel.getMassFrac("MO"), 0.09) self.assertAlmostEqual(self.Inconel800.getMassFrac("AL"), 0.00375) self.assertAlmostEqual(self.InconelPE16.getMassFrac("CR"), 0.165) def test_pseudoDensity(self): self.assertEqual(self.Inconel.pseudoDensity(Tc=25), 8.3600) self.assertEqual(self.Inconel800.pseudoDensity(Tc=21.0), 7.94) self.assertEqual(self.InconelPE16.pseudoDensity(Tc=25), 8.00) def test_Iconel800_linearExpansion(self): TcList = 
[100, 200, 300, 400, 500, 600, 700, 800] refList = [ 0.11469329415, 0.27968864560, 0.454195022850, 0.63037690440, 0.80645936875, 0.98672809440, 1.18152935985, 1.4072700436, ] for Tc, val in zip(TcList, refList): cur = self.Inconel800.linearExpansionPercent(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Inconel 800 linearExpansionPercent()\nReceived:{}\nExpected:{}\n".format(cur, ref) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_propertyValidTemperature(self): self.assertEqual(len(self.Inconel.propertyValidTemperature), 0) self.assertGreater(len(self.Inconel800.propertyValidTemperature), 0) self.assertEqual(len(self.InconelPE16.propertyValidTemperature), 0) self.assertEqual(len(self.mat.propertyValidTemperature), 0) class Inconel600_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Inconel600 def test_00_setDefaultMassFracs(self): massFracNameList = ["NI", "CR", "FE", "C", "MN55", "S", "SI", "CU"] massFracRefValList = [ 0.7541, 0.1550, 0.0800, 0.0008, 0.0050, 0.0001, 0.0025, 0.0025, ] for name, frac in zip(massFracNameList, massFracRefValList): cur = self.mat.getMassFrac(name) ref = frac self.assertAlmostEqual(cur, ref) def test_01_linearExpansionPercent(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 0.105392, 0.24685800000000002, 0.39576799999999995, 0.552122, 0.7159199999999999, 0.8871619999999999, 1.065848, 1.251978, ] for Tc, val in zip(TcList, refList): cur = self.mat.linearExpansionPercent(Tc=Tc) ref = val errorMsg = ( "\n\nIncorrect Inconel 600 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_02_linearExpansion(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 1.3774400000000001e-05, 1.45188e-05, 1.52632e-05, 1.60076e-05, 1.6752e-05, 1.74964e-05, 1.82408e-05, 1.8985200000000002e-05, ] for Tc, val in zip(TcList, refList): cur = self.mat.linearExpansion(Tc=Tc) ref = val 
errorMsg = "\n\nIncorrect Inconel 600 linearExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_03_pseudoDensity(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 8.452174779681522, 8.428336592376965, 8.40335281361706, 8.377239465159116, 8.35001319823814, 8.321691270531865, 8.292291522488402, 8.261832353071625, ] for Tc, val in zip(TcList, refList): cur = self.mat.pseudoDensity(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Inconel 600 pseudoDensity(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_heatCapacity(self): ref = self.mat.heatCapacity(Tc=100) cur = 461.947021 self.assertAlmostEqual(ref, cur, delta=cur * 0.001) ref = self.mat.heatCapacity(Tc=200) cur = 482.742084 self.assertAlmostEqual(ref, cur, delta=cur * 0.001) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class Inconel625_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Inconel625 def test_00_setDefaultMassFracs(self): massFracNameList = [ "NI", "CR", "FE", "MO", "TA181", "C", "MN55", "SI", "P31", "S", "AL27", "TI", "CO59", ] massFracRefValList = [ 0.6188, 0.2150, 0.0250, 0.0900, 0.0365, 0.0005, 0.0025, 0.0025, 0.0001, 0.0001, 0.0020, 0.0020, 0.0050, ] for name, frac in zip(massFracNameList, massFracRefValList): cur = self.mat.getMassFrac(name) ref = frac self.assertAlmostEqual(cur, ref) def test_01_linearExpansionPercent(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 0.09954299999999999, 0.22729199999999997, 0.36520699999999995, 0.513288, 0.671535, 0.8399479999999999, 1.018527, 1.207272, ] for Tc, val in zip(TcList, refList): cur = self.mat.linearExpansionPercent(Tc=Tc) ref = val errorMsg = ( "\n\nIncorrect Inconel 625 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) ) 
self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_02_linearExpansion(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 1.22666e-05, 1.32832e-05, 1.4299800000000002e-05, 1.53164e-05, 1.6333e-05, 1.73496e-05, 1.83662e-05, 1.93828e-05, ] for Tc, val in zip(TcList, refList): cur = self.mat.linearExpansion(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Inconel 625 linearExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_03_pseudoDensity(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 8.423222197446128, 8.401763522409897, 8.378689129846913, 8.354019541533887, 8.327776582263244, 8.299983337593213, 8.270664109510587, 8.239844370152333, ] for Tc, val in zip(TcList, refList): cur = self.mat.pseudoDensity(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Inconel 625 pseudoDensity(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_heatCapacity(self): ref = self.mat.heatCapacity(Tc=300) cur = 478.776007 self.assertAlmostEqual(ref, cur, delta=cur * 0.001) ref = self.mat.heatCapacity(Tc=400) cur = 503.399568 self.assertAlmostEqual(ref, cur, delta=cur * 0.001) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class InconelX750_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.InconelX750 def test_00_setDefaultMassFracs(self): massFracNameList = [ "NI", "CR", "FE", "TI", "AL27", "NB93", "MN55", "SI", "S", "CU", "C", "CO59", ] massFracRefValList = [ 0.7180, 0.1550, 0.0700, 0.0250, 0.0070, 0.0095, 0.0050, 0.0025, 0.0001, 0.0025, 0.0004, 0.0050, ] for name, frac in zip(massFracNameList, massFracRefValList): cur = self.mat.getMassFrac(name) ref = frac self.assertAlmostEqual(cur, ref) def test_01_linearExpansionPercent(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 
0.09927680000000001, 0.2253902, 0.36517920000000004, 0.5186438000000001, 0.6857840000000001, 0.8665998000000001, 1.0610912000000001, 1.2692582000000001, ] for Tc, val in zip(TcList, refList): cur = self.mat.linearExpansionPercent(Tc=Tc) ref = val errorMsg = ( "\n\nIncorrect Inconel X750 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_02_linearExpansion(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 1.1927560000000001e-05, 1.329512e-05, 1.466268e-05, 1.603024e-05, 1.73978e-05, 1.876536e-05, 2.013292e-05, 2.150048e-05, ] for Tc, val in zip(TcList, refList): cur = self.mat.linearExpansion(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Inconel X750 linearExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_03_pseudoDensity(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 8.263584211566972, 8.242801193765645, 8.219855974833411, 8.194776170511199, 8.167591802868142, 8.138335221416156, 8.107041018806447, 8.073745941486463, ] for Tc, val in zip(TcList, refList): cur = self.mat.pseudoDensity(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Inconel X750 pseudoDensity(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_heatCapacity(self): ref = self.mat.heatCapacity(Tc=100) cur = 459.61381 self.assertAlmostEqual(ref, cur, delta=cur * 0.001) ref = self.mat.heatCapacity(Tc=200) cur = 484.93968 self.assertAlmostEqual(ref, cur, delta=cur * 0.001) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class Alloy200_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Alloy200 def test_nickleContent(self): """Assert alloy 200 has more than 99% nickel per its spec.""" 
self.assertGreater(self.mat.massFrac["NI"], 0.99) def test_linearExpansion(self): ref = self.mat.linearExpansion(Tc=100) cur = 13.3e-6 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) def test_linearExpansionHotter(self): ref = self.mat.linearExpansion(Tk=873.15) cur = 15.6e-6 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class CaH2_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.CaH2 def test_pseudoDensity(self): cur = 1.7 ref = self.mat.pseudoDensity(Tc=100) self.assertAlmostEqual(cur, ref, ref * 0.01) ref = self.mat.pseudoDensity(Tc=300) self.assertAlmostEqual(cur, ref, ref * 0.01) def test_propertyValidTemperature(self): self.assertEqual(len(self.mat.propertyValidTemperature), 0) class Hafnium_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Hafnium def test_pseudoDensity(self): cur = 13.07 ref = self.mat.pseudoDensity(Tc=100) self.assertAlmostEqual(cur, ref, ref * 0.01) ref = self.mat.pseudoDensity(Tc=300) self.assertAlmostEqual(cur, ref, ref * 0.01) def test_propertyValidTemperature(self): self.assertEqual(len(self.mat.propertyValidTemperature), 0) class HastelloyN_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.HastelloyN def test_thermalConductivity(self): TcList = [200, 300, 400, 500, 600, 700] refList = [ 13.171442, 14.448584, 16.11144, 18.16001, 20.594294, 23.414292, ] for Tc, val in zip(TcList, refList): cur = self.mat.thermalConductivity(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Hastelloy N thermalConductivity()\nReceived:{}\nExpected:{}\n".format(cur, ref) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_heatCapacity(self): TcList = [100, 200, 300, 400, 500, 600, 700] refList = [ 419.183138, 438.728472, 459.630622, 464.218088, 480.092250, 556.547128, 573.450902, ] for Tc, val in zip(TcList, refList): cur = self.mat.heatCapacity(Tc=Tc) ref 
= val errorMsg = "\n\nIncorrect Hastelloy N heatCapacity()\nReceived:{}\nExpected:{}\n".format(cur, ref) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_linearExpansionPercent(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 0.0976529128, 0.2225103228, 0.351926722, 0.4874638024, 0.630683256, 0.7831467748, 0.9464160508, 1.122052776, ] for Tc, val in zip(TcList, refList): cur = self.mat.linearExpansionPercent(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Hastelloy N linearExpansionPercent()\nReceived:{}\nExpected:{}\n".format(cur, ref) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_meanCoefficientThermalExpansion(self): TcList = [100, 200, 300, 400, 500, 600, 700, 800] refList = [ 1.22066141e-05, 1.23616846e-05, 1.25688115e-05, 1.28279948e-05, 1.31392345e-05, 1.35025306e-05, 1.39178831e-05, 1.4385292e-05, ] for Tc, val in zip(TcList, refList): cur = self.mat.meanCoefficientThermalExpansion(Tc=Tc) ref = val errorMsg = "\n\nIncorrect Hastelloy N meanCoefficientThermalExpansion()\nReceived:{}\nExpected:{}\n".format( cur, ref ) self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class TZM_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.TZM def test_00_applyInputParams(self): massFracNameList = ["C", "TI", "ZR", "MO"] massFracRefValList = [2.50749e-05, 0.002502504, 0.000761199, 0.996711222] self.mat.applyInputParams() for name, frac in zip(massFracNameList, massFracRefValList): cur = self.mat.massFrac[name] ref = frac self.assertEqual(cur, ref) def test_01_pseudoDensity(self): ref = 10.16 # g/cc cur = self.mat.pseudoDensity(Tc=21.11) self.assertEqual(cur, ref) def test_02_linearExpansionPercent(self): TcList = [ 21.11, 456.11, 574.44, 702.22, 840.56, 846.11, 948.89, 1023.89, 1146.11, 1287.78, 1382.22, ] refList = [ 0.0, 1.60e-01, 2.03e-01, 2.53e-01, 3.03e-01, 3.03e-01, 3.42e-01, 
3.66e-01, 4.21e-01, 4.68e-01, 5.04e-01, ] for Tc, val in zip(TcList, refList): cur = self.mat.linearExpansionPercent(Tc=Tc) ref = val errorMsg = "\n\nIncorrect TZM linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format( cur, ref ) self.assertAlmostEqual(cur, ref, delta=10e-3, msg=errorMsg) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class YttriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.Y2O3 def test_pseudoDensity(self): cur = 5.03 ref = self.mat.pseudoDensity(Tc=25) self.assertAlmostEqual(cur, ref, 2) def test_linearExpansionPercent(self): ref = self.mat.linearExpansionPercent(Tc=100) cur = 0.069662 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) ref = self.mat.linearExpansionPercent(Tc=100) cur = 0.0696622 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class ZincOxide_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = materials.ZnO def test_density(self): cur = 5.61 ref = self.mat.density(Tk=10.12) self.assertAlmostEqual(cur, ref, 2) def test_linearExpansionPercent(self): ref = self.mat.linearExpansionPercent(Tc=100) cur = 0.04899694350661124 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) ref = self.mat.linearExpansionPercent(Tc=300) cur = 0.15825020246870625 self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001)) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) class FuelMaterial_TestCase(unittest.TestCase): baseInput = r""" nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} custom isotopics: customIsotopic1: input format: mass fractions density: 1 U: 1 customIsotopic2: input format: mass fractions density: 1 ZR: 1 blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 
mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel] height: [1.0] axial mesh points: [1] xs types: [A] """ def loadAssembly(self, materialModifications): yamlString = self.baseInput + "\n" + materialModifications design = blueprints.Blueprints.load(yamlString) design._prepConstruction(settings.Settings()) return design.assemblies["fuel a"] def test_class1Class2_class1_wt_frac(self): # should error because class1_wt_frac not in (0,1) with self.assertRaises(ValueError): _a = self.loadAssembly( """ material modifications: class1_wt_frac: [2.0] class1_custom_isotopics: [customIsotopic1] class2_custom_isotopics: [customIsotopic2] """ ) def test_class1Class2_classX_custom_isotopics(self): # should error because class1_custom_isotopics doesn't exist with self.assertRaises(KeyError): _a = self.loadAssembly( """ material modifications: class1_wt_frac: [0.5] class1_custom_isotopics: [fakeIsotopic] class2_custom_isotopics: [customIsotopic2] """ ) # should error because class2_custom_isotopics doesn't exist with self.assertRaises(KeyError): _a = self.loadAssembly( """ material modifications: class1_wt_frac: [0.5] class1_custom_isotopics: [customIsotopic1] class2_custom_isotopics: [fakeIsotopic] """ ) ================================================ FILE: armi/materials/tests/test_sic.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Test for SiC.""" import unittest from armi.materials.siC import SiC from armi.materials.tests import test_materials class TestSiC(test_materials.AbstractMaterialTest, unittest.TestCase): """SiC tests.""" MAT_CLASS = SiC def test_pseudoDensity(self): cur = self.mat.pseudoDensity(Tc=25) ref = 3.159 delta = ref * 0.001 self.assertAlmostEqual(cur, ref, delta=delta) def test_meltingPoint(self): cur = self.mat.meltingPoint() ref = 3003 delta = ref * 0.0001 self.assertAlmostEqual(cur, ref, delta=delta) def test_heatCapacity(self): delta = 0.0001 cur = self.mat.heatCapacity(300) ref = 982.20789 self.assertAlmostEqual(cur, ref, delta=delta) cur = self.mat.heatCapacity(1500) ref = 1330.27867 self.assertAlmostEqual(cur, ref, delta=delta) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) ================================================ FILE: armi/materials/tests/test_sulfur.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for sulfur.""" import unittest from armi.materials.sulfur import Sulfur from armi.materials.tests.test_materials import AbstractMaterialTest class Sulfur_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = Sulfur VALID_TEMP_K = 400 def setUp(self): AbstractMaterialTest.setUp(self) self.mat = Sulfur() self.Sulfur_sulfur_density_frac = Sulfur() self.Sulfur_sulfur_density_frac.applyInputParams(sulfur_density_frac=0.5) self.Sulfur_TD_frac = Sulfur() self.Sulfur_TD_frac.applyInputParams(TD_frac=0.4) self.Sulfur_both = Sulfur() self.Sulfur_both.applyInputParams(sulfur_density_frac=0.5, TD_frac=0.4) def test_sulfur_density_frac(self): tk = 410 ref = self.mat.pseudoDensity(tk) reduced = self.Sulfur_sulfur_density_frac.pseudoDensity(tk) self.assertAlmostEqual(ref * 0.5, reduced) reduced = self.Sulfur_TD_frac.pseudoDensity(tk) self.assertAlmostEqual(ref * 0.4, reduced) reduced = self.Sulfur_both.pseudoDensity(tk) self.assertAlmostEqual(ref * 0.4, reduced) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) ================================================ FILE: armi/materials/tests/test_thoriumOxide.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for ThO2.""" import unittest from armi.materials.tests.test_materials import AbstractMaterialTest from armi.materials.thoriumOxide import ThoriumOxide class ThoriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase): MAT_CLASS = ThoriumOxide def setUp(self): AbstractMaterialTest.setUp(self) self.mat = ThoriumOxide() self.ThoriumOxide_TD_frac = ThoriumOxide() self.ThoriumOxide_TD_frac.applyInputParams(TD_frac=0.4) def test_theoretical_pseudoDensity(self): ref = self.mat.pseudoDensity(500) reduced = self.ThoriumOxide_TD_frac.pseudoDensity(500) self.assertAlmostEqual(ref * 0.4, reduced) def test_linearExpansionPercent(self): self.assertAlmostEqual(self.mat.linearExpansionPercent(Tk=500), 0.195334) def test_propertyValidTemperature(self): self.assertGreater(len(self.mat.propertyValidTemperature), 0) ================================================ FILE: armi/materials/tests/test_uZr.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for simplified UZr material.""" import pickle from unittest import TestCase from armi.materials.uZr import UZr class UZR_TestCase(TestCase): MAT_CLASS = UZr def setUp(self): self.mat = self.MAT_CLASS() def test_isPicklable(self): """Test that materials are picklable so we can do MPI communication of state. .. test:: Test the material base class has temp-dependent thermal conductivity curves. 
:id: T_ARMI_MAT_PROPERTIES0 :tests: R_ARMI_MAT_PROPERTIES """ stream = pickle.dumps(self.mat) mat = pickle.loads(stream) # check a property that is sometimes interpolated. self.assertEqual(self.mat.thermalConductivity(500), mat.thermalConductivity(500)) def test_TD(self): """Test the material theoretical density.""" self.assertEqual(self.mat.getTD(), self.mat.theoreticalDensityFrac) self.mat.clearCache() self.mat._setCache("dummy", 666) self.assertEqual(self.mat.cached, {"dummy": 666}) self.mat.adjustTD(0.5) self.assertEqual(0.5, self.mat.theoreticalDensityFrac) self.assertEqual(self.mat.cached, {}) def test_duplicate(self): """Test the material duplication. .. test:: Materials shall calc mass fracs at init. :id: T_ARMI_MAT_FRACS5 :tests: R_ARMI_MAT_FRACS """ mat = self.mat.duplicate() self.assertEqual(len(mat.massFrac), len(self.mat.massFrac)) for key in self.mat.massFrac: self.assertEqual(mat.massFrac[key], self.mat.massFrac[key]) self.assertEqual(mat.parent, self.mat.parent) self.assertEqual(mat.refDens, self.mat.refDens) self.assertEqual(mat.theoreticalDensityFrac, self.mat.theoreticalDensityFrac) def test_cache(self): """Test the material cache.""" self.mat.clearCache() self.assertEqual(len(self.mat.cached), 0) self.mat._setCache("Emmy", "Noether") self.assertEqual(len(self.mat.cached), 1) val = self.mat._getCached("Emmy") self.assertEqual(val, "Noether") def test_densityKgM3(self): """Test the density for kg/m^3. .. test:: Test the material base class has temp-dependent density. :id: T_ARMI_MAT_PROPERTIES2 :tests: R_ARMI_MAT_PROPERTIES """ dens = self.mat.density(500) densKgM3 = self.mat.densityKgM3(500) self.assertEqual(dens * 1000.0, densKgM3) def test_pseudoDensityKgM3(self): """Test the pseudo density for kg/m^3. .. test:: Test the material base class has temp-dependent 2D density. 
:id: T_ARMI_MAT_PROPERTIES3 :tests: R_ARMI_MAT_PROPERTIES """ dens = self.mat.pseudoDensity(500) densKgM3 = self.mat.pseudoDensityKgM3(500) self.assertEqual(dens * 1000.0, densKgM3) def test_density(self): """Test that all materials produce a zero density from density. .. test:: Test the material base class has temp-dependent density. :id: T_ARMI_MAT_PROPERTIES1 :tests: R_ARMI_MAT_PROPERTIES """ self.assertNotEqual(self.mat.density(500), 0) cur = self.mat.density(400) ref = 15.94 delta = ref * 0.01 self.assertAlmostEqual(cur, ref, delta=delta) def test_propertyValidTemperature(self): self.assertEqual(len(self.mat.propertyValidTemperature), 0) ================================================ FILE: armi/materials/tests/test_water.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for water materials.""" import unittest from armi.materials.water import SaturatedSteam, SaturatedWater, Water class TestWater(unittest.TestCase): """Unit tests for water materials.""" def test_waterAtFreezing(self): """ Reproduce verification results from IAPWS-IF97 for water at 0C. http://www.iapws.org/relguide/supsat.pdf .. test:: There is a base class for fluid materials. 
:id: T_ARMI_MAT_FLUID0 :tests: R_ARMI_MAT_FLUID """ water = SaturatedWater() steam = SaturatedSteam() Tk = 273.16 ref_vapor_pressure = 611.657 ref_dp_dT = 44.436693 ref_saturated_water_rho = 999.789 ref_saturated_steam_rho = 0.00485426 ref_alpha = -11.529101 ref_saturated_water_enthalpy = 0.611786 ref_saturated_steam_enthalpy = 2500.5e3 ref_phi = -0.04 ref_saturated_water_entropy = 0 ref_saturated_steam_entropy = 9.154e3 self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3) self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3) self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3) self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3) self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0) self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0) self.assertAlmostEqual(ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3) self.assertAlmostEqual(ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3) self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2) self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2) self.assertAlmostEqual(ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2) self.assertAlmostEqual(ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2) self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3) self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3) def test_waterAtBoiling(self): """ Reproduce verification results from IAPWS-IF97 for water at 100C. 
http://www.iapws.org/relguide/supsat.pdf """ water = SaturatedWater() steam = SaturatedSteam() Tk = 373.1243 ref_vapor_pressure = 0.101325e6 ref_dp_dT = 3.616e3 ref_saturated_water_rho = 958.365 ref_saturated_steam_rho = 0.597586 ref_alpha = 417.65e3 ref_saturated_water_enthalpy = 417.05e3 ref_saturated_steam_enthalpy = 2675.7e3 ref_phi = 1.303e3 ref_saturated_water_entropy = 1.307e3 ref_saturated_steam_entropy = 7.355e3 self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0) self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0) self.assertAlmostEqual(ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2) self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2) self.assertAlmostEqual(ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3) def test_waterAtCritcalPoint(self): """ Reproduce verification results from IAPWS-IF97 for water at 647.096K. 
http://www.iapws.org/relguide/supsat.pdf """ water = SaturatedWater() steam = SaturatedSteam() Tk = 647.096 ref_vapor_pressure = 22.064e6 ref_dp_dT = 268e3 ref_saturated_water_rho = 322 ref_saturated_steam_rho = 322 ref_alpha = 1548e3 ref_saturated_water_enthalpy = 2086.6e3 ref_saturated_steam_enthalpy = 2086.6e3 ref_phi = 3.578e3 ref_saturated_water_entropy = 4.410e3 ref_saturated_steam_entropy = 4.410e3 self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0) self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0) self.assertAlmostEqual(ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2) self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2) self.assertAlmostEqual(ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3) self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3) def test_massFrac(self): for water in [SaturatedWater(), SaturatedSteam()]: massFracO = water.getMassFrac("O") massFracH = water.getMassFrac("H") self.assertAlmostEqual(massFracO, 0.888, places=3) self.assertAlmostEqual(massFracO + massFracH, 1.0) def test_propertyValidTemperature(self): water = SaturatedWater() self.assertEqual(len(water.propertyValidTemperature), 0) steam = SaturatedSteam() 
self.assertEqual(len(steam.propertyValidTemperature), 0) def test_validateNames(self): water = Water() self.assertEqual(water.name, "Water") sat = SaturatedWater() self.assertEqual(sat.name, "SaturatedWater") steam = SaturatedSteam() self.assertEqual(steam.name, "SaturatedSteam") ================================================ FILE: armi/materials/thU.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Thorium Uranium metal. Data is from [IAEA-TECDOC-1450]_. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. 
""" from armi import runLog from armi.materials.material import FuelMaterial from armi.utils.units import getTk class ThU(FuelMaterial): enrichedNuclide = "U233" propertyValidTemperature = {"linear expansion": ((30, 600), "K")} def __init__(self): FuelMaterial.__init__(self) # density in g/cc from IAEA TE 1450 self.refDens = 11.68 def getEnrichment(self): return self.getMassFrac("U233") / (self.getMassFrac("U233") + self.getMassFrac("TH232")) def applyInputParams(self, U233_wt_frac=None, *args, **kwargs): runLog.warning( "Material {} has not yet been tested for accuracy".format("ThU"), single=True, label="ThU applyInputParams", ) if U233_wt_frac is not None: self.adjustMassEnrichment(U233_wt_frac) FuelMaterial.applyInputParams(self, *args, **kwargs) def setDefaultMassFracs(self): self.setMassFrac("TH232", 1.0) self.setMassFrac("U233", 0.0) def linearExpansion(self, Tk=None, Tc=None): """Linear expansion in m/m/K from IAEA TE 1450.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion", Tk) return 11.9e-6 def thermalConductivity(self, Tk=None, Tc=None): """Thermal conductivity in W/m-K from IAEA TE 1450.""" Tk = getTk(Tc, Tk) return 43.1 def meltingPoint(self): """Melting point in K from IAEA TE 1450.""" return 2025.0 ================================================ FILE: armi/materials/thorium.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Thorium Metal. 
Data is from [IAEA-TECDOC-1450]_. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi.materials.material import FuelMaterial from armi.utils.units import getTk class Thorium(FuelMaterial): propertyValidTemperature = {"linear expansion": ((30, 600), "K")} def __init__(self): FuelMaterial.__init__(self) self.refDens = 11.68 def setDefaultMassFracs(self): self.setMassFrac("TH232", 1.0) def linearExpansion(self, Tk=None, Tc=None): r"""Linear Expansion in m/m/K from IAEA TECDOC 1450.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion", Tk) return 11.9e-6 def thermalConductivity(self, Tk=None, Tc=None): r"""W/m-K from IAEA TE 1450.""" return 43.1 def meltingPoint(self): """Melting point in K from IAEA TE 1450.""" return 2025.0 ================================================ FILE: armi/materials/thoriumOxide.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Thorium Oxide solid ceramic. Data is from [IAEA-TECDOC-1450]_. .. [IAEA-TECDOC-1450] Thorium fuel cycle -- Potential benefits and challenges, IAEA-TECDOC-1450 (2005). 
https://www-pub.iaea.org/mtcd/publications/pdf/te_1450_web.pdf The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. """ from armi import runLog from armi.materials.material import FuelMaterial, Material, SimpleSolid from armi.utils.units import getTk class ThoriumOxide(FuelMaterial, SimpleSolid): propertyValidTemperature = {"linear expansion": ((298, 1223), "K")} def __init__(self): Material.__init__(self) self.refDens = 10.00 def applyInputParams(self, TD_frac=None, *args, **kwargs): if TD_frac is not None: if TD_frac > 1.0: runLog.warning( f"Theoretical density frac for {self} is {TD_frac}, which is >1", single=True, label="Large theoretical density", ) elif TD_frac == 0: runLog.warning( f"Theoretical density frac for {self} is zero!", single=True, label="Zero theoretical density", ) elif TD_frac < 0: runLog.error( "TD_frac is entered as negative. This is not allowed!", single=True, label="Negative TD_frac", ) self.adjustTD(TD_frac) FuelMaterial.applyInputParams(self, *args, **kwargs) def setDefaultMassFracs(self): r"""ThO2 mass fractions. Using Pure Th-232. 100% 232. Thorium: 232.030806 g/mol Oxygen: 15.9994 g/mol 2 moles of oxygen/1 mole of Thorium grams of Th-232 = 232.030806 g/mol* 1 mol = 232.030806 g grams of Oxygen = 15.9994 g/mol* 2 mol = 31.9988 g total=264.029606 g. Mass fractions are computed from this. 
""" self.setMassFrac("TH232", 0.8788) self.setMassFrac("O16", 0.1212) def linearExpansion(self, Tk=None, Tc=None): r"""Linear expansion in m/m/K from IAEA TE 1450.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion", Tk) return 9.67e-6 def linearExpansionPercent(self, Tk=None, Tc=None): """ Approximate the linear thermal expansion percent from the linear expansion coefficient, taking 298K as the reference temperature. """ Tk = getTk(Tc=Tc, Tk=Tk) linearExpansionCoef = self.linearExpansion(Tk=Tk) return 100 * (linearExpansionCoef * (Tk - 298)) def thermalConductivity(self, Tk=None, Tc=None): r"""Thermal conductivity in W/m-K from IAEA TE 1450.""" return 6.20 def meltingPoint(self): r"""Melting point in K from IAEA TE 1450.""" return 3643.0 def density(self, Tk=None, Tc=None): return Material.density(self, Tk, Tc) * self.getTD() class ThO2(ThoriumOxide): """Another name for ThoriumOxide.""" pass ================================================ FILE: armi/materials/uZr.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simplified UZr alloy. This is a notional U-10Zr material based on [Chandrabhanu]_. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. 
The data contained in this file should not be used in production simulations. """ from armi.materials import material from armi.utils import units class UZr(material.FuelMaterial): """ Simplified UZr fuel alloy. .. warning:: This is an academic-quality material. Only the 10% Zr-frac properties are present. If you use a Zr-frac other than 10%, these properties will be incorrect. Bring in user-provided materials via plugins when necessary. .. [Chandrabhanu] Chandrabhanu Basak, G.J. Prasad, H.S. Kamath, N. Prabhu, An evaluation of the properties of As-cast U-rich UZr alloys, Journal of Alloys and Compounds, Volume 480, Issue 2, 2009, Pages 857-862, ISSN 0925-8388, https://doi.org/10.1016/j.jallcom.2009.02.077. """ enrichedNuclide = "U235" zrFracDefault = 0.10 uFracDefault = 1.0 - zrFracDefault def __init__(self): material.Material.__init__(self) def setDefaultMassFracs(self): """U-Pu-Zr mass fractions.""" u235Enrichment = 0.1 self.setMassFrac("ZR", self.zrFracDefault) self.setMassFrac("U235", u235Enrichment * self.uFracDefault) self.setMassFrac("U238", (1.0 - u235Enrichment) * self.uFracDefault) self._calculateReferenceDensity(self.zrFracDefault, self.uFracDefault) def applyInputParams(self, U235_wt_frac=None, ZR_wt_frac=None, *args, **kwargs): """Apply user input.""" ZR_wt_frac = self.zrFracDefault if ZR_wt_frac is None else ZR_wt_frac U235_wt_frac = 0.1 if U235_wt_frac is None else U235_wt_frac uFrac = 1.0 - ZR_wt_frac self.setMassFrac("ZR", ZR_wt_frac) self.setMassFrac("U235", U235_wt_frac * uFrac) self.setMassFrac("U238", (1.0 - U235_wt_frac) * uFrac) self._calculateReferenceDensity(ZR_wt_frac, uFrac) material.FuelMaterial.applyInputParams(self, *args, **kwargs) def _calculateReferenceDensity(self, zrFrac, uFrac): """Calculates the reference mass density in g/cc of a U-Pu-Zr alloy at 293K with Vergard's law.""" # use Vergard's law to mix densities by weight fraction at 293K u0 = 19.1 zr0 = 6.52 specificVolume = uFrac / u0 + zrFrac / zr0 self.refDens = 1.0 / 
specificVolume def linearExpansionPercent(self, Tk=None, Tc=None): """Gets the linear expansion from eq. 3 in [Chandrabhanu]_ for U-10Zr.""" tk = units.getTk(Tc, Tk) tk2 = tk * tk tk3 = tk2 * tk return -0.73 + 3.489e-3 * tk - 5.154e-6 * tk2 + 4.39e-9 * tk3 ================================================ FILE: armi/materials/uranium.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Uranium metal. Much info is from [AAAFuels]_. .. [AAAFuels] Kim, Y S, and Hofman, G L. AAA fuels handbook.. United States: N. p., 2003. Web. doi:10.2172/822554. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. 
""" from numpy import interp from armi import runLog from armi.materials.material import FuelMaterial from armi.utils.units import getTk class Uranium(FuelMaterial): enrichedNuclide = "U235" materialIntro = "" propertyNotes = {"thermal conductivity": ""} propertyRawData = {"thermal conductivity": ""} propertyUnits = {"thermal conductivity": "W/m-K", "heat capacity": "J/kg-K"} propertyEquation = {"thermal conductivity": "21.73 + 0.01591T + 5.907×10<super>-6</super>T<super>2</super>"} _heatCapacityTableK = [ 298, 300, 400, 500, 600, 700, 800, 900, 941.9, 942, 1000, 1048.9, 1049, 1100, 1200, 1300, 1400, 1407.9, 1408, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2400, ] _heatCapacityTable = [ 27.665, 27.700, 29.684, 31.997, 34.762, 38.021, 41.791, 46.081, 48.038, 42.928, 42.928, 42.928, 38.284, 38.284, 38.284, 38.284, 38.284, 38.284, 48.660, 48.660, 48.660, 48.660, 48.660, 48.660, 48.660, 48.660, 48.660, 48.660, ] # J/K/mol _densityTableK = [ 293, 400, 500, 600, 700, 800, 900, 940.9, 941, 1000, 1047.9, 1048, 1100, 1200, 1400, 1407.9, 1408, 1500, 1600, ] _densityTable = [ 19.07, 18.98, 18.89, 18.79, 18.68, 18.55, 18.41, 18.39, 18.16, 18.11, 18.07, 17.94, 17.88, 17.76, 17.53, 17.52, 16.95, 16.84, 16.71, ] # g/cc _linearExpansionPercent = [ 0.000, 0.157, 0.315, 0.494, 0.697, 0.924, 1.186, 1.300, 1.635, 1.737, 1.820, 2.050, 2.168, 2.398, 2.855, 2.866, 4.006, 4.232, 4.502, ] # % _linearExpansionTable = [ 13.9, 15.2, 16.9, 19.0, 21.4, 24.3, 27.7, 29.1, 17.3, 17.3, 17.3, 22.9, 22.9, 22.9, 22.9, 22.9, 25.5, 25.5, 25.5, ] # 1e6/K propertyValidTemperature = { "thermal conductivity": ((255.4, 1173.2), "K"), "heat capacity": ((_heatCapacityTableK[0], _heatCapacityTableK[-1]), "K"), "density": ((_densityTableK[0], _densityTableK[-1]), "K"), "linear expansion": ((_densityTableK[0], _densityTableK[-1]), "K"), "linear expansion percent": ((_densityTableK[0], _densityTableK[-1]), "K"), } references = { "thermal conductivity": ["AAA Fuels Handbook by YS Kim and G.L. 
Hofman, ANL, Section 6.1.1"], "heat capacity": ["AAA Fuels Handbook by YS Kim and GL Hofman, Table 2-14"], "melting point": ["AAA Fuels Handbook by YS Kim and GL Hofman, Table 2-13"], "density": ["Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1"], "linear expansion": ["Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1"], "linear expansion percent": ["Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1"], } def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float: """The thermal conductivity of pure U in W-m/K.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("thermal conductivity", Tk) kU = 21.73 + (0.01591 * Tk) + (0.000005907 * Tk**2) return kU def heatCapacity(self, Tk: float = None, Tc: float = None) -> float: """Heat capacity in J/kg-K.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("heat capacity", Tk) return interp(Tk, self._heatCapacityTableK, self._heatCapacityTable) def setDefaultMassFracs(self) -> None: nb = self.parent.nuclideBases if self.parent else None if nb is None: u235Weight = 235.043929425 u238Weight = 238.050788298 u235Abundance = 0.007204 else: u235Weight = nb.byLabel["U235"].weight u238Weight = nb.byLabel["U238"].weight u235Abundance = nb.byLabel["U235"].abundance u238Abundance = 1.0 - u235Abundance # neglect U234 and keep U235 at natural level gramsIn1Mol = u235Abundance * u235Weight + u238Abundance * u238Weight self.setMassFrac("U235", u235Weight * u235Abundance / gramsIn1Mol) self.setMassFrac("U238", u238Weight * u238Abundance / gramsIn1Mol) self.refDens = 19.07 def applyInputParams(self, U235_wt_frac: float = None, TD_frac: float = None, *args, **kwargs): if U235_wt_frac is not None: self.adjustMassEnrichment(U235_wt_frac) td = TD_frac if td is not None: if td > 1.0: runLog.warning( f"Theoretical density frac for {self} is {td}, which is >1", single=True, label="Large theoretical density", ) elif td == 0: runLog.warning( f"Theoretical density frac for {self} is zero!", single=True, label="Zero theoretical density", ) 
self.adjustTD(td) FuelMaterial.applyInputParams(self, *args, **kwargs) def meltingPoint(self): """Melting point in K.""" return 1408 def density(self, Tk: float = None, Tc: float = None) -> float: """Density in g/cc.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("density", Tk) return interp(Tk, self._densityTableK, self._densityTable) * self.getTD() def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float: """2D-expanded density in g/cc.""" return super().pseudoDensity(Tk=Tk, Tc=Tc) * self.getTD() def linearExpansion(self, Tk: float = None, Tc: float = None) -> float: """Linear expansion coefficient in 1/K.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion", Tk) return interp(Tk, self._densityTableK, self._linearExpansionTable) / 1e6 def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float: """Linear expansion percent.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tk) return interp(Tk, self._densityTableK, self._linearExpansionPercent) ================================================ FILE: armi/materials/uraniumOxide.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Uranium Oxide properties. UO2 is a common ceramic nuclear fuel form. It's properties are well known. This mostly uses data from [#ornltm2000]_. .. [#ornltm2000] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. 
import collections
import math

from numpy import interp

from armi import runLog
from armi.materials import material
from armi.nucDirectory import thermalScattering as tsl
from armi.utils.units import getTk

# Constants for the heat-capacity correlation, eq. 4.2 of ORNL/TM-2000/351.
HeatCapacityConstants = collections.namedtuple("HeatCapacityConstants", ["c1", "c2", "c3", "theta", "Ea"])


class UraniumOxide(material.FuelMaterial, material.SimpleSolid):
    """
    Uranium dioxide ceramic fuel.

    Data are primarily from ORNL/TM-2000/351; intended for testing and
    demonstration, not production simulations.
    """

    enrichedNuclide = "U235"

    REFERENCE_TEMPERATURE = 27

    # ORNL/TM-2000/351 section 4.3
    heatCapacityConstants = HeatCapacityConstants(c1=302.27, c2=8.463e-3, c3=8.741e7, theta=548.68, Ea=18531.7)

    # Melting point (K); name-mangled, accessed as self.__meltingPoint below.
    __meltingPoint = 3123.0

    propertyUnits = {"heat capacity": "J/mol-K"}

    propertyValidTemperature = {
        "density": ((293.15, 3100), "K"),
        "heat capacity": ((298.15, 3120), "K"),
        "linear expansion": ((273, 3120), "K"),
        "linear expansion percent": ((273, __meltingPoint), "K"),
        "thermal conductivity": ((300, 3000), "K"),
    }

    references = {
        "thermal conductivity": "Thermal conductivity of uranium dioxide by nonequilibrium molecular dynamics "
        + "simulation. S. Motoyama. Physical Review B, Volume 60, Number 1, July 1999",
        "linear expansion": "Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. "
        + "S.G. Popov, et.al. Oak Ridge National Laboratory. ORNL/TM-2000/351",
        "heat capacity": "ORNL/TM-2000/351",
    }

    thermalScatteringLaws = (tsl.fromNameAndCompound("U", tsl.UO2), tsl.fromNameAndCompound("O", tsl.UO2))

    # Thermal conductivity values taken from:
    # Thermal conductivity of uranium dioxide by nonequilibrium molecular dynamics simulation. S. Motoyama.
    # Physical Review B, Volume 60, Number 1, July 1999
    thermalConductivityTableK = [
        300,
        600,
        900,
        1200,
        1500,
        1800,
        2100,
        2400,
        2700,
        3000,
    ]
    thermalConductivityTable = [
        7.991,
        4.864,
        3.640,
        2.768,
        2.567,
        2.294,
        2.073,
        1.891,
        1.847,
        1.718,
    ]

    def __init__(self):
        material.FuelMaterial.__init__(self)
        # Cache the density at the reference temperature as the reference density.
        self.refDens = self.density(Tk=self.refTempK)

    def applyInputParams(self, U235_wt_frac: float = None, TD_frac: float = None, *args, **kwargs) -> None:
        """
        Apply user-supplied enrichment and theoretical-density fraction.

        Parameters
        ----------
        U235_wt_frac : float, optional
            Target U235 weight fraction for mass-enrichment adjustment.
        TD_frac : float, optional
            Theoretical density fraction. Values > 1 or == 0 are suspicious
            and trigger a (single-shot) warning, but are still applied.
        """
        if U235_wt_frac is not None:
            self.adjustMassEnrichment(U235_wt_frac)

        td = TD_frac
        if td is not None:
            if td > 1.0:
                # NOTE: converted from str.format to an f-string for consistency
                # with the uranium metal material's applyInputParams (same output).
                runLog.warning(
                    f"Theoretical density frac for {self} is {td}, which is >1",
                    single=True,
                    label="Large theoretical density",
                )
            elif td == 0:
                runLog.warning(
                    f"Theoretical density frac for {self} is zero!",
                    single=True,
                    label="Zero theoretical density",
                )
            self.adjustTD(td)

        material.FuelMaterial.applyInputParams(self, *args, **kwargs)

    def setDefaultMassFracs(self) -> None:
        """UO2 mass fractions. Using Natural Uranium without U234."""
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            # Fallback values when no nuclide directory is attached yet.
            u235Weight = 235.043929425
            u238Weight = 238.050788298
            oxygenWeight = 15.999304875697801
            u235Abundance = 0.007204
        else:
            u235Weight = nb.byName["U235"].weight
            u238Weight = nb.byName["U238"].weight
            oxygenWeight = nb.byName["O"].weight
            u235Abundance = nb.byName["U235"].abundance

        u238Abundance = 1.0 - u235Abundance  # neglect U234 and keep U235 at natural level
        gramsIn1Mol = 2 * oxygenWeight + u235Abundance * u235Weight + u238Abundance * u238Weight
        self.setMassFrac("U235", u235Weight * u235Abundance / gramsIn1Mol)
        self.setMassFrac("U238", u238Weight * u238Abundance / gramsIn1Mol)
        self.setMassFrac("O", 2 * oxygenWeight / gramsIn1Mol)

    def meltingPoint(self):
        """
        Melting point in K.

        From [#ornltm2000]_.
        """
        return self.__meltingPoint

    def density(self, Tk: float = None, Tc: float = None) -> float:
        """
        Density in (g/cc).

        Polynomial line fit to data from [#ornltm2000]_ on page 11,
        scaled by the theoretical-density fraction.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", Tk)
        return (-1.01147e-7 * Tk**2 - 1.29933e-4 * Tk + 1.09805e1) * self.getTD()

    def heatCapacity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Heat capacity in J/kg-K.

        From Section 4.3 in [#ornltm2000]_ (eq. 4.2).
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tk)
        hcc = self.heatCapacityConstants
        # eq 4.2: Einstein-oscillator term + linear term + Arrhenius defect term.
        specificHeatCapacity = (
            hcc.c1 * (hcc.theta / Tk) ** 2 * math.exp(hcc.theta / Tk) / (math.exp(hcc.theta / Tk) - 1.0) ** 2
            + 2 * hcc.c2 * Tk
            + hcc.c3 * hcc.Ea * math.exp(-hcc.Ea / Tk) / Tk**2
        )
        return specificHeatCapacity

    def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:
        """
        Linear expansion coefficient.

        Curve fit from data in [#ornltm2000]_
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tk)
        return 1.06817e-12 * Tk**2 - 1.37322e-9 * Tk + 1.02863e-5

    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return dL/L.

        From Section 3.3 of [#ornltm2000]_; separate fits below and above 923 K.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tk)
        if Tk >= 273.0 and Tk < 923.0:
            return (-2.66e-03 + 9.802e-06 * Tk - 2.705e-10 * Tk**2 + 4.391e-13 * Tk**3) * 100.0
        else:
            return (-3.28e-03 + 1.179e-05 * Tk - 2.429e-09 * Tk**2 + 1.219e-12 * Tk**3) * 100.0

    def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Thermal conductivity.

        Ref: Thermal conductivity of uranium dioxide by nonequilibrium
        molecular dynamics simulation. S. Motoyama.
        Physical Review B, Volume 60, Number 1, July 1999
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        return interp(Tk, self.thermalConductivityTableK, self.thermalConductivityTable)


class UO2(UraniumOxide):
    """Another name for UraniumOxide."""

    def __init__(self):
        UraniumOxide.__init__(self)
        self._name = "UraniumOxide"


class Void(material.Fluid):
    """
    A Void material is a bookkeeping material with zero density.

    Use this to fill empty spaces while maintaining proper volume fractions.
    """

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """Always zero; a void contains no mass."""
        return 0.0

    def density(self, Tk: float = None, Tc: float = None) -> float:
        """Always zero; a void contains no mass."""
        return 0.0
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic water material.

The data in this file exists for testing and demonstration purposes only. Developers of ARMI
applications can refer to this file for a fully worked example of an ARMI material. And this
material has proven useful for testing. The data contained in this file should not be used in
production simulations.
"""

import math

from armi.materials.material import Fluid
from armi.nucDirectory import elements
from armi.nucDirectory import thermalScattering as tsl
from armi.utils import units
from armi.utils.units import getTk

_REF_SR1_86 = "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam"


class Water(Fluid):
    """
    Water.

    This is a good faith implementation of the Revised Supplementary Properties of
    Ordinary Water Substance (1992) by IAPWS -- International Association for the
    Properties of Water and Steam.

    This is an abstract class implemented on the Saturated Water Material and the
    Saturated Steam Material Class, which should be good enough for most uses.

    http://www.iapws.org/relguide/supsat.pdf
    IAPWS-IF97 is now the international standard for calculations in the steam power industry
    """

    thermalScatteringLaws = (tsl.fromNameAndCompound("H", tsl.H2O),)

    references = {
        "vapor pressure": _REF_SR1_86,
        "enthalpy (saturated water)": _REF_SR1_86,
        "enthalpy (saturated steam)": _REF_SR1_86,
        "entropy (saturated water)": _REF_SR1_86,
        "entropy (saturated steam)": _REF_SR1_86,
        "density (saturated water)": _REF_SR1_86,
        "density (saturated steam)": _REF_SR1_86,
    }

    # Critical-point constants of water (IAPWS).
    TEMPERATURE_CRITICAL_K = 647.096

    DENSITY_CRITICAL_KGPERCUBICMETER = 322.0
    DENSITY_CRITICAL_GPERCUBICCENTIMETER = DENSITY_CRITICAL_KGPERCUBICMETER * units.G_PER_KG / units.CM3_PER_M3

    VAPOR_PRESSURE_CRITICAL_MPA = 22.064
    VAPOR_PRESSURE_CRITICAL_PA = VAPOR_PRESSURE_CRITICAL_MPA * 1e6

    # Scale factors that convert the dimensionless auxiliary quantities
    # (alpha for enthalpy, phi for entropy) back to J/kg and J/(kg-K).
    ALPHA_0 = 1000
    PHI_0 = ALPHA_0 / TEMPERATURE_CRITICAL_K

    # coefficients for auxiliary quantity for enthalpy and entropy
    # kept as d to match original source
    d = {
        1: -5.65134998e-08,
        2: 2690.66631,
        3: 127.287297,
        4: -135.003439,
        5: 0.981825814,
        "alpha": -1135.905627715,
        "phi": 2319.5246,
    }

    def setDefaultMassFracs(self) -> None:
        """Set H/O mass fractions for H2O from standard atomic weights."""
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            # Fallback values when no nuclide directory is attached yet.
            massHydrogen = 1.007976004510346
            massOxygen = 15.999304715704756
        else:
            massHydrogen = elements.bySymbol["H"].standardWeight
            massOxygen = elements.bySymbol["O"].standardWeight

        totalMass = 2 * massHydrogen + massOxygen
        massFrac = {"H": 2.0 * massHydrogen / totalMass, "O": massOxygen / totalMass}
        for nucName, mfrac in massFrac.items():
            self.setMassFrac(nucName, mfrac)

    def theta(self, Tk: float = None, Tc: float = None) -> float:
        """Returns temperature normalized to the critical temperature."""
        return getTk(Tc=Tc, Tk=Tk) / self.TEMPERATURE_CRITICAL_K

    def tau(self, Tc: float = None, Tk: float = None) -> float:
        """
        Returns 1 - temperature normalized to the critical temperature.

        Notes
        -----
        thermophysical correlations are give in Tau rather than Tk or Tc
        """
        return 1.0 - self.theta(Tc=Tc, Tk=Tk)

    def vaporPressure(self, Tk: float = None, Tc: float = None) -> float:
        """
        Returns vapor pressure in (Pa).

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        vaporPressure: float
            vapor pressure in Pa

        Notes
        -----
        IAPWS-IF97
        http://www.iapws.org/relguide/supsat.pdf
        IAPWS-IF97 is now the international standard for calculations in the steam power industry
        """
        tau = self.tau(Tc=Tc, Tk=Tk)
        T_ratio = self.TEMPERATURE_CRITICAL_K / getTk(Tc=Tc, Tk=Tk)

        # Wagner-type correlating equation: ln(p/p_c) = (T_c/T) * sum(a_i * tau^n_i)
        a1 = -7.85951783
        a2 = 1.84408259
        a3 = -11.7866497
        a4 = 22.6807411
        a5 = -15.9618719
        a6 = 1.80122502

        sum_coefficients = a1 * tau + a2 * tau**1.5 + a3 * tau**3 + a4 * tau**3.5 + a5 * tau**4 + a6 * tau**7.5

        log_vapor_pressure = T_ratio * sum_coefficients
        vapor_pressure = self.VAPOR_PRESSURE_CRITICAL_PA * math.e ** (log_vapor_pressure)

        # past the supercritical point tau's raised to .5 cause complex #'s
        return vapor_pressure.real

    def vaporPressurePrime(self, Tk: float = None, Tc: float = None, dT: float = 1e-6) -> float:
        """
        Approximation of derivative of vapor pressure wrt temperature.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius
        dT: float
            step size (K) for the central finite difference

        Note
        ----
        This uses a numerical approximation
        """
        # Central difference about the requested temperature.
        Tcold = getTk(Tc=Tc, Tk=Tk) - dT / 2.0
        Thot = Tcold + dT
        dp = self.vaporPressure(Tk=Thot) - self.vaporPressure(Tk=Tcold)
        return dp / dT

    def auxiliaryQuantitySpecificEnthalpy(self, Tk: float = None, Tc: float = None) -> float:
        """
        Returns the auxiliary quantity for specific enthalpy.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        alpha: float
            specific quantity for enthalpy in J/kg

        Notes
        -----
        IAPWS-IF97
        http://www.iapws.org/relguide/supsat.pdf
        IAPWS-IF97 is now the international standard for calculations in the steam power industry

        alpha is used in the relations for enthalpy h = alpha + T/pressure*dp/dT
        """
        theta = self.theta(Tc=Tc, Tk=Tk)
        # Dimensionless alpha from the d-coefficients; scaled by ALPHA_0 below.
        normalized_alpha = (
            self.d["alpha"]
            + self.d[1] * theta**-19
            + self.d[2] * theta
            + self.d[3] * theta**4.5
            + self.d[4] * theta**5.0
            + self.d[5] * theta**54.5
        )
        # past the supercritical point tau's raised to .5 cause complex #'s
        return normalized_alpha.real * self.ALPHA_0

    def auxiliaryQuantitySpecificEntropy(self, Tk: float = None, Tc: float = None) -> float:
        """
        Returns the auxiliary quantity for specific entropy.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        phi: float
            specific quantity for entropy in J/(kgK)

        Notes
        -----
        IAPWS-IF97
        http://www.iapws.org/relguide/supsat.pdf
        IAPWS-IF97 is now the international standard for calculations in the steam power industry

        phi is used in the relations for entropy s = phi + 1/pressure*dp/dT
        """
        theta = self.theta(Tc=Tc, Tk=Tk)
        # Dimensionless phi: the analytic theta-integral of alpha/theta,
        # hence the rational prefactors on each d-coefficient.
        normalized_phi = (
            self.d["phi"]
            + 19.0 / 20.0 * self.d[1] * theta**-20.0
            + self.d[2] * math.log(theta)
            + 9.0 / 7.0 * self.d[3] * theta**3.5
            + 5.0 / 4.0 * self.d[4] * theta**4.0
            + 109.0 / 107.0 * self.d[5] * theta**53.5
        )
        # past the supercritical point tau's raised to .5 cause complex #'s
        return normalized_phi.real * self.PHI_0

    def enthalpy(self, Tk: float = None, Tc: float = None) -> float:
        """
        Returns enthalpy of saturated water.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        enthalpy: float
            enthalpy in J/kg

        Notes
        -----
        IAPWS-IF97
        http://www.iapws.org/relguide/supsat.pdf
        IAPWS-IF97 is now the international standard for calculations in the steam power industry
        """
        alpha = self.auxiliaryQuantitySpecificEnthalpy(Tc=Tc, Tk=Tk)
        T = getTk(Tc=Tc, Tk=Tk)
        # pseudoDensityKgM3 presumably comes from the Fluid base class
        # (g/cc converted to kg/m^3) -- not visible here; confirm.
        rho = self.pseudoDensityKgM3(Tc=Tc, Tk=Tk)
        dp_dT = self.vaporPressurePrime(Tc=Tc, Tk=Tk)
        # h = alpha + T/rho * dp/dT (Clausius-Clapeyron style relation)
        return alpha + T / rho * dp_dT

    def entropy(self, Tk: float = None, Tc: float = None) -> float:
        """
        Returns entropy of saturated water.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        entropy: float
            entropy in J/(kgK)

        Notes
        -----
        IAPWS-IF97
        http://www.iapws.org/relguide/supsat.pdf
        IAPWS-IF97 is now the international standard for calculations in the steam power industry
        """
        phi = self.auxiliaryQuantitySpecificEntropy(Tc=Tc, Tk=Tk)
        rho = self.pseudoDensityKgM3(Tc=Tc, Tk=Tk)
        dp_dT = self.vaporPressurePrime(Tc=Tc, Tk=Tk)
        # s = phi + (1/rho) * dp/dT
        return phi + 1.0 / rho * dp_dT

    def pseudoDensity(self, Tk=None, Tc=None):
        """
        Density for arbitrary forms of water.

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.

        Raises
        ------
        NotImplementedError
            Water is abstract here; use SaturatedWater or SaturatedSteam.
        """
        raise NotImplementedError("Please use a concrete instance: SaturatedWater or SaturatedSteam.")


class SaturatedWater(Water):
    """
    Saturated Water.

    This is a good faith implementation of the Revised Supplementary Properties of
    Ordinary Water Substance (1992) by IAPWS -- International Association for the
    Properties of Water and Steam.

    This is the Saturated Liquid Water Material Class. For steam look to the
    Saturated Steam Material Class.
    """

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Returns density in g/cc.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        density: float
            density in g/cc

        Note
        ----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.

        IAPWS-IF97
        http://www.iapws.org/relguide/supsat.pdf
        IAPWS-IF97 is now the international standard for calculations in the steam power industry
        """
        tau = self.tau(Tc=Tc, Tk=Tk)

        # SR1-86 saturated-liquid density correlation coefficients.
        b1 = 1.99274064
        b2 = 1.09965342
        b3 = -0.510839303
        b4 = -1.75493479
        b5 = -45.5170352
        b6 = -6.74694450e5

        # rho/rho_c as a polynomial in fractional powers of tau.
        normalized_rho = (
            1
            + b1 * tau ** (1.0 / 3.0)
            + b2 * tau ** (2.0 / 3.0)
            + b3 * tau ** (5.0 / 3.0)
            + b4 * tau ** (16.0 / 3.0)
            + b5 * tau ** (43.0 / 3.0)
            + b6 * tau ** (111.0 / 3.0)
        )

        # past the supercritical point tau's raised to .5 cause complex #'s
        return normalized_rho.real * self.DENSITY_CRITICAL_GPERCUBICCENTIMETER


class SaturatedSteam(Water):
    """
    Saturated Steam.

    This is a good faith implementation of the Revised Supplementary Properties of
    Ordinary Water Substance (1992) by IAPWS -- International Association for the
    Properties of Water and Steam.

    This is the Saturated Steam Material Class. For liquid water look to the
    Saturated Water Material Class.
    """

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Returns density in g/cc.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        density: float
            density in g/cc

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.

        IAPWS-IF97
        http://www.iapws.org/relguide/supsat.pdf
        IAPWS-IF97 is now the international standard for calculations in the steam power industry
        """
        tau = self.tau(Tc=Tc, Tk=Tk)

        # SR1-86 saturated-vapor density correlation coefficients; unlike the
        # liquid branch, this correlation gives ln(rho/rho_c).
        c1 = -2.03150240
        c2 = -2.68302940
        c3 = -5.38626492
        c4 = -17.2991605
        c5 = -44.7586581
        c6 = -63.9201063

        log_normalized_rho = (
            c1 * tau ** (2.0 / 6.0)
            + c2 * tau ** (4.0 / 6.0)
            + c3 * tau ** (8.0 / 6.0)
            + c4 * tau ** (18.0 / 6.0)
            + c5 * tau ** (37.0 / 6.0)
            + c6 * tau ** (71.0 / 6.0)
        )

        # past the supercritical point tau's raised to .5 cause complex #'s
        return math.e**log_normalized_rho.real * self.DENSITY_CRITICAL_GPERCUBICCENTIMETER
""" from armi.materials.material import Material from armi.utils.units import getTk class Y2O3(Material): propertyValidTemperature = {"linear expansion percent": ((273.15, 1573.15), "K")} def __init__(self): Material.__init__(self) self.refDens = 5.03 def setDefaultMassFracs(self): self.setMassFrac("Y89", 0.7875) self.setMassFrac("O16", 0.2125) def linearExpansionPercent(self, Tk=None, Tc=None): """ Return the linear expansion percent for Yttrium Oxide (Yttria). Notes ----- From Table 5 of "Thermal Expansion and Phase Inversion of Rare-Earth Oxides. """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tk) return 1.4922e-07 * Tk**2 + 6.2448e-04 * Tk - 1.8414e-01 ================================================ FILE: armi/materials/zincOxide.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Zinc Oxide. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. 
""" from armi.materials.material import Material from armi.utils.units import getTk class ZnO(Material): propertyValidTemperature = {"linear expansion percent": ((10.12, 1491.28), "K")} def setDefaultMassFracs(self): self.setMassFrac("ZN", 0.8034) self.setMassFrac("O16", 0.1966) def density(self, Tk=None, Tc=None): return 5.61 def linearExpansionPercent(self, Tk=None, Tc=None): """ Return the linear expansion percent for Polycrystalline ZnO. Notes ----- Digitized from Figure 1.24 from Zinc Oxide: Fundamentals, Materials and Device Technology """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tk) return -1.9183e-10 * Tk**3 + 6.5944e-07 * Tk**2 + 5.2992e-05 * Tk - 5.2631e-02 ================================================ FILE: armi/materials/zr.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Zirconium metal. The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data contained in this file should not be used in production simulations. 
""" from numpy import interp from armi.materials.material import Material from armi.utils.units import getTk class Zr(Material): """Metallic zirconium.""" propertyValidTemperature = { "density": ((293, 1800), "K"), "linear expansion": ((293, 1800), "K"), "linear expansion percent": ((293, 1800), "K"), "thermal conductivity": ((298, 2000), "K"), } references = { "density": "AAA Materials Handbook 45803", "thermal conductivity": "AAA Fuels handbook. ANL", "linear expansion": "Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion, " + "Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975)", "linear expansion percent": "Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion, " + "Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975)", } linearExpansionTableK = [ 293, 400, 500, 600, 700, 800, 900, 1000, 1100, 1136.99999, 1137, 1200, 1400, 1600, 1800, ] linearExpansionTable = [ 5.70e-6, 5.90e-6, 6.60e-6, 7.10e-6, 7.60e-6, 7.90e-6, 8.00e-6, 8.20e-6, 8.20e-6, 8.20e-6, 9.00e-6, 9.10e-6, 9.50e-6, 1.03e-5, 1.13e-5, ] refTempK = 298.15 def __init__(self): Material.__init__(self) self.refDens = self._computeReferenceDensity(Tk=self.refTempK) def setDefaultMassFracs(self): self.setMassFrac("ZR", 1.0) def _computeReferenceDensity(self, Tk=None, Tc=None): r"""AAA Materials Handbook 45803.""" Tk = getTk(Tc, Tk) self.checkPropertyTempRange("density", Tk) if Tk < 1135: return -3.29256e-8 * Tk**2 - 9.67145e-5 * Tk + 6.60176 else: return -2.61683e-8 * Tk**2 - 1.11331e-4 * Tk + 6.63616 def thermalConductivity(self, Tk=None, Tc=None): """ Thermal conductivity in W/mK. Reference: AAA Fuels handbook. ANL. """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("thermal conductivity", Tk) return 8.853 + (0.007082 * Tk) + (0.000002533 * Tk**2) + (2992.0 / Tk) def linearExpansion(self, Tk=None, Tc=None): r"""Linear expansion in m/mK. Reference: Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. 
Desai, Thermal Expansion, Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975) See page 400 """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion", Tk) return interp(Tk, self.linearExpansionTableK, self.linearExpansionTable) def linearExpansionPercent(self, Tk=None, Tc=None): r"""Linear expansion in dL/L. Reference: Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion, Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975) See page 400 """ Tk = getTk(Tc, Tk) self.checkPropertyTempRange("linear expansion percent", Tk) # NOTE: checkPropertyTempRange takes care of lower/upper limits if Tk < 1137: return -0.111 + (2.325e-4 * Tk) + (5.595e-7 * Tk**2) - (1.768e-10 * Tk**3) else: return -0.759 + (1.474e-3 * Tk) - (5.140e-7 * Tk**2) + (1.559e-10 * Tk**3) ================================================ FILE: armi/meta.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Metadata describing an ARMI distribution.""" try: # Python 3.x < 3.8 from importlib import metadata except ImportError: # Python >= 3.8 import importlib_metadata as metadata __version__ = metadata.version("armi") ================================================ FILE: armi/migration/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Migrate input/output from one version of ARMI to another. Users want to be able to upgrade to the latest version of the code without having to invest a bunch of time in updating their previous input and output files. Users have up to thousands of inputs that they want to keep working. Even more serious, follow-on analysts who got an output database (including associated inputs) from an ARMI power-user strongly prefer to be able to migrate old cases. Oftentimes, an output database can be many GB large and be the result of many CPU-weeks, so there's monetary and temporal value to be preserved. Meanwhile, developers want to be able to make upgrades to the input and/or output to fix bugs, ease the training and cognitive burden of new users, and so on. Migrations are key to getting both of these big needs. Migrations should generally happen in the background from the user's perspective, just like happens in mainstream applications like word processors and spreadsheets. 
""" from armi.migration import ( m0_1_3, m0_1_6, ) ACTIVE_MIGRATIONS = [ m0_1_3.RemoveCentersFromBlueprints, m0_1_3.UpdateElementalNuclides, m0_1_6.ConvertAlphanumLocationSettingsToNum, ] ================================================ FILE: armi/migration/base.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Base migration classes. A classic migration takes a file name, read the files, migrates the data, and re-writes the file. Some migrations need to happen live on a stream. For example, if an old/invalid input file is being read in from an old database. The migration class defined here chooses this behavior based on whether the ``stream`` or ``path`` variables are given in the constructor. """ import os import shutil from armi import runLog from armi.settings import caseSettings class Migration: """Generic migration. To implement a concrete Migration, one must often only implement the ``_applyToStream`` method. """ fromVersion = "x.x.x" toVersion = "x.x.x" def __init__(self, stream=None, path=None): if not (bool(stream) ^ bool(path)): # XOR raise RuntimeError("Stream and path inputs to migration aremutually exclusive. Choose one or the other.") self.stream = stream self.path = path def __repr__(self): return f"<Migration from {self.fromVersion}: {self.__doc__[:40]}..." def apply(self): """ Apply migration. This is generally called from a subclass. 
""" runLog.info(f"Applying {self}") if self.path: self._loadStreamFromPath() newStream = self._applyToStream() if self.path: self._backupOriginal() self._writeNewFile(newStream) return newStream def _loadStreamFromPath(self): """Common stream-loading code. Must be extended to actually load. The operative subclasses implementing this method are below. """ if not os.path.exists(self.path): raise ValueError(f"File {self.path} does not exist") def _applyToStream(self): """Add actual migration code here in a subclass.""" raise NotImplementedError() def _backupOriginal(self): # must be called after _loadStreamFromPath self.stream.close() shutil.move(self.path, self.path + "-migrated") def _writeNewFile(self, newStream): i = 0 while os.path.exists(self.path): # don't overwrite files (could be blueprints) name, ext = os.path.splitext(self.path) self.path = name + f"{i}" + ext i += 1 with open(self.path, "w") as f: f.write(newStream.read()) class BlueprintsMigration(Migration): """Migration for blueprints input.""" def _loadStreamFromPath(self): from armi.physics.neutronics.settings import CONF_LOADING_FILE Migration._loadStreamFromPath(self) cs = caseSettings.Settings(fName=self.path) self.path = cs[CONF_LOADING_FILE] self.stream = open(self.path) class SettingsMigration(Migration): """Migration for settings input.""" def _loadStreamFromPath(self): Migration._loadStreamFromPath(self) self.stream = open(self.path) class DatabaseMigration(Migration): """Migration for db output.""" pass ================================================ FILE: armi/migration/m0_1_3.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
"""Cleans up blueprints."""

import io
import re

from armi import runLog
from armi.migration.base import BlueprintsMigration


class RemoveCentersFromBlueprints(BlueprintsMigration):
    """Removes now-invalid `centers:` lines from auto-generated component inputs."""

    fromVersion = "0.1.2"
    toVersion = "0.1.3"

    def _applyToStream(self):
        """Drop every line consisting solely of a ``centers:`` key."""
        runLog.info("Removing `centers:` sections.")
        migrated = []
        for line in self.stream.read().split("\n"):
            if re.search(r"^\s*centers:\s*$", line):
                continue
            migrated.append(line)
        result = "\n".join(migrated)
        return io.StringIO(result)


class UpdateElementalNuclides(BlueprintsMigration):
    """Update elemental nuclide flags."""

    fromVersion = "0.1.2"
    toVersion = "0.1.3"

    swaps = (
        ("NA23", "NA"),
        ("MN55", "MN"),
        ("HE4", "HE"),
        ("W182", "W"),
        ("O16", "O"),
        ("AL27", "AL"),
        ("N14", "N"),
    )
    # these get absorbed into W
    deletions = ("W183", "W184", "W186")

    def _applyToStream(self):
        """Rename isotope keys to their elemental forms and drop absorbed isotopes."""
        # Change both nuclide flags as well as custom isotopics
        # Custom isotopics: `  MN: 0.0015135`
        # Nuclide flags:    `  MN55: {burn: false, xs: true}`
        migrated = []
        for line in self.stream.read().split("\n"):
            # BUG FIX: the original `continue` sat inside a `for deletion in
            # self.deletions` loop, so it only advanced that inner loop and the
            # line was always appended anyway -- W183/W184/W186 entries were
            # never actually deleted. Test the whole tuple, then skip the line.
            if any(re.search(r"^\s*{0}: ".format(deletion), line) for deletion in self.deletions):
                continue
            for swapFrom, swapTo in self.swaps:
                line = re.sub(
                    r"^(\s+)({0})(:.+)".format(swapFrom),
                    r"\1{0}\3".format(swapTo),
                    line,
                )
            migrated.append(line)
        result = "\n".join(migrated)
        return io.StringIO(result)
"""Migrate ARMI settings that have alphanumeric location labels to new numeric mode."""

import io
import re

from armi import runLog
from armi.migration.base import SettingsMigration
from armi.settings import caseSettings, settingsIO
from armi.utils.units import ASCII_LETTER_A, ASCII_ZERO

# valid axial-position characters: 26 uppercase letters, 10 digits, then 58 more codes
AXIAL_CHARS = [
    chr(code)
    for code in (
        list(range(ASCII_LETTER_A, ASCII_LETTER_A + 26))
        + list(range(ASCII_ZERO, ASCII_ZERO + 10))
        + list(range(ASCII_LETTER_A + 26, ASCII_LETTER_A + 26 + 32))
    )
]


class ConvertAlphanumLocationSettingsToNum(SettingsMigration):
    """Convert old location label values to new style."""

    fromVersion = "0.1.6"
    toVersion = "0.1.7"

    def _applyToStream(self):
        """Read settings off the stream, convert any old labels, and return a fresh YAML stream."""
        cs = caseSettings.Settings()
        reader = settingsIO.SettingsReader(cs)
        reader.readFromStream(self.stream)

        if reader.invalidSettings:
            runLog.info(
                "The following deprecated settings will be deleted:\n * {}".format(
                    "\n * ".join(list(reader.invalidSettings))
                )
            )

        cs = _modify_settings(cs)
        writer = settingsIO.SettingsWriter(cs)
        out = io.StringIO()
        writer.writeYaml(out)
        out.seek(0)
        return out


def _modify_settings(cs):
    """Rewrite any old-style ``detailAssemLocationsBOL`` labels as ``RRR-PPP`` strings."""
    if cs["detailAssemLocationsBOL"]:
        updatedLocs = []
        for loc in cs["detailAssemLocationsBOL"]:
            if "-" not in loc:
                # no dash: assume it is old style assem location.
                ring, pos, _axial = getIndicesFromDIF3DStyleLocatorLabel(loc)
                newLoc = f"{ring:03d}-{pos:03d}"
                runLog.info(f"Converting old-style location label `{loc}` to `{newLoc}`, assuming hex geom")
                loc = newLoc
            updatedLocs.append(loc)
        cs = cs.modified(newSettings={"detailAssemLocationsBOL": updatedLocs})
    return cs


def getIndicesFromDIF3DStyleLocatorLabel(label):
    """Convert a ring-based label like A2003B into 1-based ring, location indices."""
    locMatch = re.search(r"([A-Z]\d)(\d\d\d)([A-Z]?)", label)
    if not locMatch:
        raise RuntimeError("No Indices found for DIF3D-style label: {0}".format(label))

    # valid location label: e.g. A4 -> ring 04, B2 -> ring 12, etc.
    ringLabel, posLabel, axLabel = locMatch.groups()
    firstDigit = ord(ringLabel[0]) - ASCII_LETTER_A
    if firstDigit >= 10:
        raise RuntimeError("invalid label {0}. 1st character too large.".format(label))
    i = int("{0}{1}".format(firstDigit, ringLabel[1]))
    j = int(posLabel)
    k = AXIAL_CHARS.index(axLabel) if axLabel else None
    return i, j, k
"""Test location label migration."""

import io
import unittest

from armi.migration.m0_1_6 import ConvertAlphanumLocationSettingsToNum
from armi.settings import caseSettings
from armi.settings.settingsIO import SettingsReader, SettingsWriter


class TestMigration(unittest.TestCase):
    def test_locationLabelMigration(self):
        """Make a setting with an old value and make sure it migrates to expected new value."""
        # build a settings file containing an old-style DIF3D label
        cs = caseSettings.Settings().modified(newSettings={"detailAssemLocationsBOL": ["B1012"]})
        yamlStream = io.StringIO()
        SettingsWriter(cs).writeYaml(yamlStream)
        yamlStream.seek(0)

        # migrate and read the result back into a fresh settings object
        converter = ConvertAlphanumLocationSettingsToNum(stream=yamlStream)
        migratedCs = caseSettings.Settings()
        SettingsReader(migratedCs).readFromStream(converter.apply())

        self.assertEqual(migratedCs["detailAssemLocationsBOL"][0], "011-012")
"""Test base migration classes."""

import os
import unittest

from armi.migration.base import Migration, SettingsMigration
from armi.tests import TEST_ROOT


class TestMigrationBases(unittest.TestCase):
    def test_basic_validation(self):
        # exactly one of stream/path may be provided
        with self.assertRaises(RuntimeError):
            _m = Migration(None, None)
        with self.assertRaises(RuntimeError):
            _m = Migration("fake_stream", "fake_path")

        Migration("fake_stream", None)
        migration = Migration(None, "fake_path")
        # a nonexistent path must be rejected
        with self.assertRaises(ValueError):
            migration._loadStreamFromPath()


class TestSettingsMigration(unittest.TestCase):
    def test_loadStreamFromPath(self):
        settingsFile = os.path.join(TEST_ROOT, "armiRun.yaml")
        migration = SettingsMigration(None, settingsFile)
        migration._loadStreamFromPath()
        self.assertIsNotNone(migration.stream)
"""
Abstract base class and helpers for "MPI actions".

MPI actions are tasks, activities, or work that can be executed on the worker nodes.
The standard workflow: the primary node creates an :py:class:`~armi.mpiActions.MpiAction`,
broadcasts it to the workers (who are waiting in
:py:meth:`~armi.operators.MpiOperator.workerOperate`), and then both the primary and the
workers :py:meth:`invoke() <armi.mpiActions.MpiAction.invoke>` it together so that any
collective broadcast or receive inside the action stays synchronized.

To create a new, custom MPI Action, inherit from :py:class:`~armi.mpiActions.MpiAction`
and override the :py:meth:`~armi.mpiActions.MpiAction.invokeHook` method.
"""

import collections
import gc
import math
import pickle
import timeit

from armi import context, interfaces, runLog, settings, utils
from armi.reactor import reactors
from armi.reactor.parameters import parameterDefinitions
from armi.utils import iterables, tabulate


class MpiAction:
    """Base of all MPI actions.

    MPI Actions are tasks that can be executed without needing lots of other information.
    When a worker node sits in its main loop and receives an MPI Action, it simply calls
    :py:meth:`~armi.mpiActions.MpiAction.invoke`.
    """

    def __init__(self):
        self.o = None
        self.r = None
        self.cs = None
        self.serial = False
        # Exclusive actions take considerably longer: they are queued first, and their
        # CPUs are not used for any other purpose (except when the number of exclusive
        # actions exceeds the number of CPU groups).
        self.runActionExclusive = False
        # Lower number is higher priority; 5 is halfway between 1-10. We probably don't
        # need more than 10 priorities, but negative numbers work too.
        self.priority = 5

    @property
    def parallel(self):
        return not self.serial

    @classmethod
    def invokeAsMaster(cls, o, r, cs):
        """
        Simplified method to call from the primary process.

        Equivalent to constructing the action, broadcasting it, and invoking it. Either
        both primary and workers run this simultaneously, or (most commonly) the primary
        runs it alone and the workers pick the action up in their main loop.

        .. warning:: This will not work if the subclass constructor (i.e. :code:`__init__`)
            requires additional arguments. Since the method body is so simple, adding
            :code:`*args`/:code:`**kwargs` here is strongly discouraged.

        Parameters
        ----------
        o : :py:class:`armi.operators.Operator`
            If an operator is not necessary, supply :code:`None`.
        r : :py:class:`armi.operators.Reactor`
            If a reactor is not necessary, supply :code:`None`.
        """
        action = cls()
        action.broadcast()
        return action.invoke(o, r, cs)

    def _mpiOperationHelper(self, obj, mpiFunction):
        """Strip o/r/cs off this action while it is the MPI payload; restore them after."""
        if obj is None or obj is self:
            # prevent sending o, r, and cs; they are handled appropriately by the other
            # nodes and are reattached in the finally clause below
            obj = self
            stashedO, stashedR, stashedCs = self.o, self.r, self.cs
            self.o = self.r = self.cs = None
        try:
            return mpiFunction(obj, root=0)
        except pickle.PicklingError as error:
            runLog.error("Failed to {} {}.".format(mpiFunction.__name__, obj))
            runLog.error(error)
            raise
        finally:
            if obj is self:
                self.o, self.r, self.cs = stashedO, stashedR, stashedCs

    def broadcast(self, obj=None):
        """
        A wrapper around ``bcast`` usable identically on primary and worker nodes.

        Parameters
        ----------
        obj :
            Any broadcastable object. When ``None``, the action broadcasts itself, which
            triggers it to run on workers sitting in their main loop.

        See Also
        --------
        armi.operators.operator.OperatorMPI.workerOperate : receives this on the workers
            and calls ``invoke``

        Notes
        -----
        The standard ``bcast`` creates a new instance even on the root process, which can
        break references to the original object. This method therefore returns the
        original object on the primary node and the broadcasted copy on the workers.
        """
        if self.serial:
            return obj if obj is not None else self
        if context.MPI_SIZE > 1:
            result = self._mpiOperationHelper(obj, context.MPI_COMM.bcast)
        # the following if-branch prevents the creation of duplicate objects on the
        # primary node; if the object is large with lots of links, it is prudent to
        # call gc.collect()
        if obj is None and context.MPI_RANK == 0:
            return self
        elif context.MPI_RANK == 0:
            return obj
        else:
            return result

    def gather(self, obj=None):
        """A wrapper around ``MPI_COMM.gather``.

        Parameters
        ----------
        obj :
            Any gatherable object; when ``None``, the action gathers itself.

        Notes
        -----
        The returned list contains a reference to the original gathered object, without
        making a copy of it.
        """
        if self.serial:
            return [obj if obj is not None else self]
        if context.MPI_SIZE > 1:
            result = self._mpiOperationHelper(obj, context.MPI_COMM.gather)
            if context.MPI_RANK == 0:
                # this cannot be `result[0] = obj or self`: 0.0, 0, [] all eval to False
                result[0] = self if obj is None else obj
            else:
                result = []
        else:
            result = [obj if obj is not None else self]
        return result

    def invoke(self, o, r, cs):
        """
        Entry point called by worker nodes with their operator, reactor, and settings.

        Parameters
        ----------
        o : :py:class:`armi.operators.operator.Operator`
            the operator for this process
        r : :py:class:`armi.reactor.reactors.Reactor`
            the reactor represented in this process
        cs : :py:class:`armi.settings.caseSettings.Settings`
            the case settings

        Returns
        -------
        result : object
            result from invokeHook
        """
        self.o = o
        self.r = r
        self.cs = cs
        return self.invokeHook()

    @staticmethod
    def mpiFlatten(allCPUResults):
        """
        Flatten results to the same order they were in before making a list of mpiIter results.

        See Also
        --------
        mpiIter : used for distributing objects/tasks
        """
        return iterables.flatten(allCPUResults)

    @staticmethod
    def mpiIter(objectsForAllCoresToIter):
        """
        Yield the subset of objects this MPI rank is responsible for.

        Notes
        -----
        Each CPU gets a similar number of objects; e.g. 12 objects over 5 CPUs gives the
        first 2 CPUs 3 objects each and the remaining 3 CPUs 2 objects each.

        Parameters
        ----------
        objectsForAllCoresToIter : list
            All objects needing an MPI calculation. Must support ``len()``, so this
            method cannot accept a generator.

        See Also
        --------
        mpiFlatten : used for collecting results
        """
        share, leftover = divmod(len(objectsForAllCoresToIter), context.MPI_SIZE)
        if context.MPI_RANK < leftover:
            share += 1
            start = context.MPI_RANK * share
        else:
            start = context.MPI_RANK * share + leftover
        yield from objectsForAllCoresToIter[start : start + share]

    def invokeHook(self):
        """
        Do the real work; must be overridden in subclasses.

        Runs on worker nodes with access to the node's operator, reactor, and settings
        (through :code:`self.o`, :code:`self.r`, and :code:`self.cs`).

        Returns
        -------
        result : object
            Dependent on implementation
        """
        raise NotImplementedError()
def runActions(o, r, cs, actions, numPerNode=None, serial=False):
    """Run a series of MpiActions in parallel, or in series if :code:`serial=True`.

    Notes
    -----
    The number of actions DOES NOT need to match :code:`context.MPI_SIZE`. Calling this
    method may invoke an MPI Split, which changes MPI_SIZE for the duration of the
    action, so MPI operations are not blocked by tasks doing unrelated work.
    """
    if not context.MPI_DISTRIBUTABLE or serial:
        return runActionsInSerial(o, r, cs, actions)

    useForComputation = [True] * context.MPI_SIZE
    if numPerNode is not None:
        if numPerNode < 1:
            raise ValueError("numPerNode must be >= 1")
        perNodeCount = {nodeName: 0 for nodeName in context.MPI_NODENAMES}
        for rank, nodeName in enumerate(context.MPI_NODENAMES):
            # if we have more processors than tasks allowed per node, disable the extras
            useForComputation[rank] = perNodeCount[nodeName] < numPerNode
            perNodeCount[nodeName] += 1

    queue, numBatches = _makeQueue(actions, useForComputation)
    runLog.extra(f"Running {len(actions)} MPI actions in parallel over {numBatches} batches")
    results = []
    batchNum = 0
    while queue:
        batchNum += 1
        runLog.extra(f"MPI actions, batch {batchNum} of {numBatches}:\n")
        actionsThisRound = [queue.pop(0) if useRank and queue else None for useRank in useForComputation]
        distrib = distributeActions(actionsThisRound, useForComputation)
        distrib.broadcast()
        results.append(distrib.invoke(o, r, cs))
    return results


def runBatchedActions(o, r, cs, actionsByNode, serial=False):
    """Run pre-batched MpiActions in parallel, or in series if :code:`serial=True`.

    Notes
    -----
    The caller batches the actions per node beforehand. This is useful for heterogeneous
    work packages where some tasks have significantly larger or smaller memory
    requirements; an appropriate amount of work can be placed on each node.
    """
    if not context.MPI_DISTRIBUTABLE or serial:
        flattened = []
        for nodeActions in actionsByNode.values():
            flattened.extend(nodeActions)
        return runActionsInSerial(o, r, cs, flattened)

    # count how many actions will run on each node
    nodes = set(context.MPI_NODENAMES)
    numToRunOnThisNode = {nodeName: len(actionsByNode.get(nodeName, [])) for nodeName in context.MPI_NODENAMES}

    # determine which ranks will run the actions
    numAssigned = {nodeName: 0 for nodeName in nodes}
    useForComputation = [True] * len(context.MPI_NODENAMES)
    for rank, nodeName in enumerate(context.MPI_NODENAMES):
        # if we have more processors than tasks, disable the extra
        useForComputation[rank] = numAssigned[nodeName] < numToRunOnThisNode[nodeName]
        if useForComputation[rank]:
            numAssigned[nodeName] += 1

    # check that we do not request more tasks than processors on a node
    for nodeName in nodes:
        if numToRunOnThisNode[nodeName] > numAssigned[nodeName]:
            msg = (
                f"There are more actions ({numToRunOnThisNode[nodeName]}) than ranks available "
                f"({numAssigned[nodeName]}) on {nodeName}!"
            )
            runLog.error(msg)
            raise ValueError(msg)

    totalActions = sum(len(nodeActions) for nodeActions in actionsByNode.values())
    runLog.extra(f"Running {totalActions} MPI actions in parallel over {len(actionsByNode)} nodes.")
    results = []
    actionsThisRound = []
    for rank, nodeName in enumerate(context.MPI_NODENAMES):
        nodeQueue = actionsByNode.get(nodeName, [])
        actionsThisRound.append(nodeQueue.pop(0) if useForComputation[rank] and nodeQueue else None)
    distrib = distributeActions(actionsThisRound, useForComputation)
    distrib.broadcast()
    results.append(distrib.invoke(o, r, cs))
    return results


def distributeActions(actionsThisRound, useForComputation):
    """Build a DistributionAction for this round and log the rank/action assignments."""
    useForComputation = _disableForExclusiveTasks(actionsThisRound, useForComputation)
    assignments = []
    for rank, act in enumerate(actionsThisRound):
        if act is not None:
            assignments.append((context.MPI_NODENAMES[rank], rank, act))
    tableText = tabulate.tabulate(assignments, headers=["Nodename", "Rank", "Action"])
    runLog.extra(f"Distributing {len(assignments)} MPI actions for parallel processing:\n{tableText}")
    return DistributionAction(actionsThisRound)


def _disableForExclusiveTasks(actionsThisRound, useForComputation):
    """Mark ranks holding exclusive actions as unavailable for subsequent work."""
    for rank, action in enumerate(actionsThisRound):
        if action is not None and action.runActionExclusive:
            useForComputation[rank] = False
    return useForComputation


def _makeQueue(actions, useForComputation):
    """
    Sort actions by priority in a queue; if more exclusives than CPUs, make all non-exclusive.

    Notes
    -----
    All exclusive actions come before all non-exclusive actions regardless of priority;
    within each of those two bins, priority matters. If more exclusive actions are
    requested than CPUs - 1, every action is made non-exclusive and evenly balanced,
    keeping the previously evaluated order. CPUs - 1 reserves at least one CPU for
    non-exclusive actions.
    """

    def sortActionPriority(action):
        # exclusive actions first; those groups of CPUs only get 1 action
        return (1 if action.runActionExclusive else 2, action.priority)

    queue = sorted(actions, key=sortActionPriority)
    minCPUsForRemainingTasks = 1
    nExclusive = sum(1 for action in queue if action.runActionExclusive)
    nAvailable = sum(1 for flag in useForComputation if flag)
    if nExclusive + minCPUsForRemainingTasks > nAvailable:
        # there are more exclusive tasks than sets of CPUs, so just make them all
        # non-exclusive and evenly balance them
        for action in queue:
            action.runActionExclusive = False
        numBatches = int(math.ceil(len(actions) / float(nAvailable)))
    else:
        leftoverCPUs = nAvailable - nExclusive
        leftoverActions = len(actions) - nExclusive
        numBatches = int(math.ceil(leftoverActions / leftoverCPUs))
    return queue, numBatches


def runActionsInSerial(o, r, cs, actions):
    """Run a series of MpiActions in serial.

    Notes
    -----
    Sets each action's `serial` attribute to :code:`True` so that its `broadcast` and
    `gather` methods simply return the supplied value.
    """
    results = []
    numActions = len(actions)
    runLog.extra("Running {} MPI actions in serial".format(numActions))
    for num, action in enumerate(actions, start=1):
        wasDistributable = context.MPI_DISTRIBUTABLE
        action.serial = True
        context.MPI_DISTRIBUTABLE = False
        runLog.extra("Running action {} of {}: {}".format(num, numActions, action))
        results.append(action.invoke(o, r, cs))
        action.serial = False
        # return to original state
        context.MPI_DISTRIBUTABLE = wasDistributable
    return results
Eventually, it would be nice to make it possible for sub-tasks to manage their own communicators and spawn their own work within some sub-communicator. This performs an MPI Split operation and takes over the context.MPI_COMM and associated variables. For this reason, it is possible that when someone thinks they have distributed information to all nodes, it may only be a subset that was necessary to perform the number of actions needed by this DsitributionAction. """ def __init__(self, actions): MpiAction.__init__(self) self._actions = actions def __reduce__(self): """Reduce prevents from unnecessary actions to others, after all we only want to scatter. Consequently, the worker nodes _actions will be None. """ return DistributionAction, (None,) def invokeHook(self): """ Overrides invokeHook to distribute work amongst available resources as requested. Notes ----- Two things about this method make it non-recursive """ canDistribute = context.MPI_DISTRIBUTABLE mpiComm = context.MPI_COMM mpiRank = context.MPI_RANK mpiSize = context.MPI_SIZE mpiNodeNames = context.MPI_NODENAMES if self.cs["verbosity"] == "debug" and mpiRank == 0: runLog.debug("Printing diagnostics for MPI actions!") objectCountDict = collections.defaultdict(int) for debugAction in self._actions: utils.classesInHierarchy(debugAction, objectCountDict) for objekt, count in objectCountDict.items(): runLog.debug("There are {} {} in MPI action {}".format(count, objekt, debugAction)) actionResult = None try: action = mpiComm.scatter(self._actions, root=0) # create a new communicator that only has these specific processes running hasAction = action is not None context.MPI_COMM = mpiComm.Split(int(hasAction)) context.MPI_RANK = context.MPI_COMM.Get_rank() context.MPI_SIZE = context.MPI_COMM.Get_size() context.MPI_DISTRIBUTABLE = context.MPI_SIZE > 1 context.MPI_NODENAMES = context.MPI_COMM.allgather(context.MPI_NODENAME) if hasAction: actionResult = action.invoke(self.o, self.r, self.cs) finally: # restore the 
global variables context.MPI_DISTRIBUTABLE = canDistribute context.MPI_COMM = mpiComm context.MPI_RANK = mpiRank context.MPI_SIZE = mpiSize context.MPI_NODENAMES = mpiNodeNames return actionResult class MpiActionError(Exception): """Exception class raised when error conditions occur during an MpiAction.""" class DistributeStateAction(MpiAction): def __init__(self, skipInterfaces=False): MpiAction.__init__(self) self._skipInterfaces = skipInterfaces def invokeHook(self): """Sync up all nodes with the reactor, the cs, and the interfaces. Notes ----- This is run by all workers and the primary any time the code needs to sync all processors. """ if context.MPI_SIZE <= 1: runLog.extra("Not distributing state because there is only one processor") return # Detach phase: # The Reactor and the interfaces have links to the Operator, which contains Un-MPI-able objects # like the MPI Comm and the SQL database connections. runLog.info("Distributing State") start = timeit.default_timer() try: cs = self._distributeSettings() self._distributeReactor(cs) DistributeStateAction._distributeParamAssignments() if self._skipInterfaces: self.o.reattach(self.r, cs) else: self._distributeInterfaces() # Lastly, make sure the reactor knows it is up to date. The operator/interface # attachment may invalidate some of the cache, but since all the underlying data is the # same, ultimately all state should be (initially) the same. self.r._markSynchronized() except (pickle.PicklingError, TypeError) as error: runLog.error("Failed to transmit on distribute state root MPI bcast") runLog.error(error) # workers are still waiting for a reactor object if context.MPI_RANK == 0: context.MPI_COMM.bcast("quit") # try to get the workers to quit raise if context.MPI_RANK != 0: self.r.core.regenAssemblyLists() # check to make sure that everything has been properly reattached if self.r.core.getFirstBlock().core.r is not self.r: raise RuntimeError("Block.core.r is not self.r. 
Reattach the blocks!") beforeCollection = timeit.default_timer() # force collection; we've just created a bunch of objects that don't need to be used again. runLog.debug("Forcing garbage collection.") gc.collect() stop = timeit.default_timer() runLog.extra( "Distributed state in {}s, garbage collection took {}s".format( beforeCollection - start, stop - beforeCollection ) ) def _distributeSettings(self): if context.MPI_RANK == 0: runLog.debug("Sending the settings object") self.cs = cs = self.broadcast(self.o.cs) if isinstance(cs, settings.Settings): runLog.setVerbosity(cs["verbosity"] if context.MPI_RANK == 0 else cs["branchVerbosity"]) runLog.debug("Received settings object") else: raise RuntimeError("Failed to transmit settings, received: {}".format(cs)) if context.MPI_RANK != 0: self.o.cs = cs return cs def _distributeReactor(self, cs): runLog.debug("Sending the Reactor object") r = self.broadcast(self.r) if isinstance(r, reactors.Reactor): runLog.debug("Received reactor") else: raise RuntimeError("Failed to transmit reactor, received: {}".format(r)) if context.MPI_RANK == 0: # on the primary node this unfortunately created a __deepcopy__ of the reactor, delete it del r else: # maintain original reactor object on primary self.r = r self.o.r = r self.r.o = self.o runLog.debug(f"The reactor has {len(self.r.core)} assemblies") # attach here so any interface actions use a properly-setup reactor. self.o.reattach(self.r, cs) # sets r and cs @staticmethod def _distributeParamAssignments(): data = dict() if context.MPI_RANK == 0: data = { (pName, pdType.__name__): pDef.assigned for ( pName, pdType, ), pDef in parameterDefinitions.ALL_DEFINITIONS.items() } data = context.MPI_COMM.bcast(data, root=0) if context.MPI_RANK != 0: for (pName, pdType), pDef in parameterDefinitions.ALL_DEFINITIONS.items(): pDef.assigned = data[pName, pdType.__name__] def _distributeInterfaces(self): """ Distribute the interfaces to all MPI nodes. 
Interface copy description Since interfaces store information that can influence a calculation, it is important in branch searches to make sure that no information is carried forward from these runs on either the primary node or the workers. However, there are interfaces that cannot be distributed, making this a challenge. To solve this problem, any interface that cannot be distributed is simply re-initialized. If any information needs to be given to the worker nodes on a non-distributable interface, additional function definitions (and likely soul searching as to why needed distributable information is on a non-distributable interface) are required to pass the information around. See Also -------- armi.interfaces.Interface.preDistributeState : runs on primary before DS armi.interfaces.Interface.postDistributeState : runs on primary after DS armi.interfaces.Interface.interactDistributeState : runs on workers after DS """ if context.MPI_RANK == 0: # These run on the primary node. (Worker nodes run synchronized code below) toRestore = {} for i in self.o.getInterfaces(): if i.distributable() == interfaces.Interface.Distribute.DUPLICATE: runLog.debug("detaching interface {0}".format(i.name)) i.detachReactor() toRestore[i] = i.preDistributeState() # Verify that the interface stacks are identical. runLog.debug("Sending the interface names and flags") _dumIList = self.broadcast([(i.name, i.distributable()) for i in self.o.getInterfaces()]) # transmit interfaces for i in self.o.getInterfaces(): # avoid sending things that don't pickle, like the database. if i.distributable() == interfaces.Interface.Distribute.DUPLICATE: runLog.debug("Sending the interface {0}".format(i)) _idum = self.broadcast(i) # don't send the reactor or operator i.postDistributeState(toRestore[i]) i.attachReactor(self.o, self.r) else: # These run on the worker nodes. 
# verify identical interface stack # This list is (interfaceName, distributable) tuples) interfaceList = self.broadcast(None) for iName, distributable in interfaceList: iOld = self.o.getInterface(iName) if distributable == interfaces.Interface.Distribute.DUPLICATE: # expect a transmission of the interface as a whole. runLog.debug("Receiving new {0}".format(iName)) iNew = self.broadcast(None) runLog.debug("Received {0}".format(iNew)) if iNew == "quit": return self.o.removeInterface(iOld) self.o.addInterface(iNew) iNew.interactDistributeState() elif distributable == interfaces.Interface.Distribute.NEW: runLog.debug("Initializing new interface {0}".format(iName)) # make a fresh instance of the non-transmittable interface. self.o.removeInterface(iOld) iNew = iOld.__class__(self.r, self.cs) if not iNew: for i in self.o.getInterfaces(): runLog.warning(i) raise RuntimeError( "Non-distributable interface {0} exists on the primary MPI process " "but not on the workers. " "Cannot distribute state.".format(iName) ) self.o.addInterface(iNew) iNew.interactInit() iNew.interactBOL() else: runLog.debug("Skipping broadcast of interface {0}".format(iName)) if iOld: iOld.interactDistributeState() ================================================ FILE: armi/nucDirectory/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
r""" The nucDirectory module contains tools to access nuclide information in the :py:mod:`~armi.nucDirectory.nuclideBases` module, and information for :py:mod:`~armi.nucDirectory.nuclide` module. #. :ref:`Element data <doc-elements>` - name, symbol, atomic number (Z). #. :ref:`Generic nuclide data <doc-nuclide-bases>` - this includes mass, atomic number, natural abundance and various names and labels that are used in ARMI for the nuclide. It also includes decay and transmutation modes. .. _doc-elements: Elements ======== :py:class:`Elements <armi.nucDirectory.elements.Element>` are simple objects containing minimal information about atomic elements. This information is loaded from a data file within ARMI; elements.dat. :py:class:`Elements <armi.nucDirectory.elements.Element>` are mainly used as a building block of the nuclide objects , as discussed below. If you need to grab an element there are three available dictionaries provided for rapid access.:: >>> r = Reactor("ExampleReactor", bp) >>> elements = r.nuclideBases.elements >>> uranium = elements.byZ[92] >>> uranium.name 'uranium' >>> uranium.z 92 Likewise, elements can be retrieved by their name or symbol.:: >>> ironFromZ = elements.byZ[26] >>> ironFromName = elements.byName['iron'] >>> ironFromSymbol = elements.bySymbol['FE'] >>> ironFromZ == ironFromName == ironFromSymbol True .. note:: The :py:attr:`~armi.nucDirectory.elements.Elements.byName` and :py:attr:`~armi.nucDirectory.elements.Elements.bySymbol` are case specific; names are *lower case* and symbols are *UPPER CASE*. The elements are truly the *same* :py:class:`~armi.nucDirectory.elements.Element` object. The :py:mod:`~armi.nucDirectory` makes efficient use of the memory being used by elements and will only ever contain ~118 :py:class:`Elements <armi.nucDirectory.elements.Element>`.:: >>> id(ironFromZ) == id(ironFromName) == id(ironFromSymbol) True .. 
_doc-nuclide-bases: Nuclide Bases ============= The :py:mod:`~armi.nucDirectory` allows ARMI to get information about various nuclides, like U235 or FE56. Often times you need to look up cross section or densities for nuclides, or you might need the atomic weight or the natural isotopic distribution. The :py:mod:`~armi.nucDirectory` is here to help. The fundamental object of nuclide management in ARMI is the :py:class:`~armi.nucDirectory.nuclideBases.INuclide` object. After construction, they contain basic information, such as Z, A, and atomic weight (if known). Similar to :py:class:`Elements <armi.nucDirectory.elements.Element>`, the information is loaded from a series of data files within ARMI. The data is originally from [NIST]_:: >>> r = Reactor("ExampleReactor", bp) >>> u235= r.nuclideBases.byName['U235'] >>> u235.z 92 >>> u235.weight 235.0439299 >>> u235.a 235 .. [NIST] http://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl Upon creating a Reactor, a fully fledged ``NuclideBases`` object will be created. In that object there will be a full Upon loading the :py:mod:`armi.nucDirectory` package, inside that will be a fully instantiated ``Elements`` object and a list called.:py:data:`nuclideBases.instances <armi.nucDirectory.nuclideBases.instances>`. The ``instances`` will be filled with nuclide base objects. Nuclide bases contain a lot of basic information about a nuclide, such as the atomic mass, atomic number (Z), the mass number (A), and the natural abundance. Nuclide names, labels, and IDs ------------------------------ Nuclides have names, labels and IDs. :py:attr:`INuclide.name <armi.nucDirectory.nuclideBases.INuclide.name>` The nuclide name is what *should* be used within ARMI and ARMI-based appliations. This is a human readable name such as, ``U235`` or ``FE``. The names contain **only** capital letters and numbers, made up from the corresponding element symbol and mass number (A). 
:py:attr:`INuclide.label <armi.nucDirectory.nuclideBases.INuclide.label>` The nuclide label is a unique 4 character name which identifies the nuclide from all others. The label is fixed to 4 characters to conform with the CCCC standard files, which traditionally only allow for a maximum of 6 character labels in legacy nuclear codes. Of the 6 allowable characters, 4 are reserved for the unique identifier of the nuclide and 2 characters are reserved for cross section labels (i.e., AA, AB, ZA, etc.). The cross section labels are based on the cross section group manager implementation within the framework. These labels are not necessarily human readable/interpretable, but are generally the nuclide symbol followed by the last two digits of the mass number (A), so the nuclide for U235 has the label ``U235``, but PU239 has the label ``PU39``. For reference, the data used to build the nuclide bases in ARMI comes from a file called ``nuclides.dat``. Indices - rapid access ---------------------- There are three main ways to retrieve a nuclide, which are provided depending on what information you have about a nuclide. For example, if you know a nuclide name, use ``NuclideBases.byName`` dictionary. There are also dictionaries available for retrieving by the label, ``NuclideBases.byLabel``, and by other software-specific IDs (i.e., MCNP, MC2-2, and MC2-3). The software-specific labels are incorporated into the framework to support plugin developments and may be extended as needed by end-users as needs arise. >>> r = Reactor("testReactor", bp) >>> pu239 = r.nuclideBases.byName["PU239"] >>> pu239.z 94 Just like with elements, the item retrieved from the various dictionaries are the same object. 
>>> tinFromName = r.nuclideBases.byName["SN112"] >>> tinFromLabel = r.nuclideBases.byLabel["SN112"] >>> tinFromMcc2Id = r.nuclideBases.byName["SN1125"] >>> tinFromMcc3Id = r.nuclideBases.byLabel["SN1127"] >>> tinFromName == tinFromLabel == tinFromMcc2Id == tinFromMcc3Id True >>> id(tinFromName) == id(tinFromLabel) == id(tinFromMcc2Id) == id(tinFromMcc3Id) True """ ================================================ FILE: armi/nucDirectory/elements.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module provides fundamental element information to be used throughout the framework and applications. .. impl:: A tool for querying basic data for elements of the periodic table. :id: I_ARMI_ND_ELEMENTS0 :implements: R_ARMI_ND_ELEMENTS The :py:mod:`elements <armi.nucDirectory.elements>` module defines the :py:class:`Element <armi.nucDirectory.elements.Element>` class which acts as a data structure for organizing information about an individual element, including number of protons, name, chemical symbol, phase (at STP), periodic table group, standard weight, and a list of isotope :py:class:`nuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instances. The module includes a factory that generates the :py:class:`Element <armi.nucDirectory.elements.Element>` instances by reading from the ``elements.dat`` file stored in the ARMI resources folder. 
When an :py:class:`Element <armi.nucDirectory.elements.Element>` instance is initialized, it is added to a set of global dictionaries that are keyed by number of protons, element name, and element symbol. The module includes several helper functions for querying these global dictionaries. The element class structure is outlined :ref:`here <elements-class-diagram>`. .. _elements-class-diagram: .. pyreverse:: armi.nucDirectory.elements :align: center :width: 75% Examples -------- >>> elements.byZ[92] <Element U (Z=92), Uranium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID> >>> elements.bySymbol["U"] <Element U (Z=92), Uranium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID> >>> elements.byName["Uranium"] <Element U (Z=92), Uranium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID> Retrieve gaseous elements at Standard Temperature and Pressure (STP): >>> elements.getElementsByChemicalPhase(elements.ChemicalPhase.GAS) [<Element H (Z=1), Hydrogen, ChemicalGroup.NONMETAL, ChemicalPhase.GAS>, <Element HE (Z=2), Helium, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>, <Element N (Z=7), Nitrogen, ChemicalGroup.NONMETAL, ChemicalPhase.GAS>, <Element O (Z=8), Oxygen, ChemicalGroup.NONMETAL, ChemicalPhase.GAS>, <Element F (Z=9), Fluorine, ChemicalGroup.HALOGEN, ChemicalPhase.GAS>, <Element NE (Z=10), Neon, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>, <Element CL (Z=17), Chlorine, ChemicalGroup.HALOGEN, ChemicalPhase.GAS>, <Element AR (Z=18), Argon, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>, <Element KR (Z=36), Krypton, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>, <Element XE (Z=54), Xenon, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>, <Element RN (Z=86), Radon, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>, <Element OG (Z=118), Oganesson, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>] Retrieve elements that are classified as actinides: >>> elements.getElementsByChemicalGroup(elements.ChemicalGroup.ACTINIDE) [<Element AC (Z=89), Actinium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element 
TH (Z=90), Thorium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element PA (Z=91), Protactinium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element U (Z=92), Uranium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element NP (Z=93), Neptunium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element PU (Z=94), Plutonium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element AM (Z=95), Americium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element CM (Z=96), Curium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element BK (Z=97), Berkelium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element CF (Z=98), Californium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element ES (Z=99), Einsteinium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element FM (Z=100), Fermium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element MD (Z=101), Mendelevium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element NO (Z=102), Nobelium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>, <Element LR (Z=103), Lawrencium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>] .. only:: html For specific data on nuclides within each element, refer to the :ref:`nuclide bases summary table <nuclide-bases-table>`. .. exec:: from armi.nucDirectory.elements import Elements from armi.utils.tabulate import tabulate from dochelpers import createTable attributes = ['z', 'name', 'symbol', 'phase', 'group', 'is naturally occurring?', 'is heavy metal?', 'num. 
nuclides',] def getAttributes(element): return [ f'``{element.z}``', f'``{element.name}``', f'``{element.symbol}``', f'``{element.phase}``', f'``{element.group}``', f'``{element.isNaturallyOccurring()}``', f'``{element.isHeavyMetal()}``', f'``{len(element.nuclides)}``', ] elements = Elements() elements.factory() sortedElements = sorted(elements.byZ.values()) return createTable(tabulate(data=[getAttributes(elem) for elem in sortedElements], headers=attributes, tableFmt='rst'), caption='List of elements', label='nuclide-bases-table') Notes ----- Currently, this module contains a lot of data in the global scope. But ARMI is in the process of encapsulating this data, moving it out of the global scope, making it part of the reactor data model, and making it configurable via Settings. Pardon the mess during this transition. """ import os from enum import Enum from typing import List from armi import context from armi.utils.units import HEAVY_METAL_CUTOFF_Z elements = None byZ = None byName = None bySymbol = None class ChemicalPhase(Enum): GAS = 1 LIQUID = 2 SOLID = 3 UNKNOWN = 4 class ChemicalGroup(Enum): ALKALI_METAL = 1 ALKALINE_EARTH_METAL = 2 NONMETAL = 3 TRANSITION_METAL = 4 POST_TRANSITION_METAL = 5 METALLOID = 6 HALOGEN = 7 NOBLE_GAS = 8 LANTHANIDE = 9 ACTINIDE = 10 UNKNOWN = 11 class Element: """Represents an element defined on the Periodic Table.""" def __init__(self, z, symbol, name, phase="UNKNOWN", group="UNKNOWN"): """ Creates an instance of an Element. .. impl:: An element of the periodic table. :id: I_ARMI_ND_ELEMENTS1 :implements: R_ARMI_ND_ELEMENTS The :py:class:`Element <armi.nucDirectory.elements.Element>` class acts as a data structure for organizing information about an individual element, including number of protons, name, chemical symbol, phase (at STP), periodic table group, standard weight, and a list of isotope :py:class:`nuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instances. 
The :py:class:`Element <armi.nucDirectory.elements.Element>` class has a few methods for appending additional isotopes, checking whether an isotope is naturally occurring, retrieving the natural isotopic abundance, or whether the element is a heavy metal. Parameters ---------- z : int atomic number, number of protons symbol : str element symbol name: str element name phase : str Chemical phase of the element at standard temperature and pressure (e.g., gas, liquid, solid). group : str Chemical group of the element. """ self.z = z self.symbol = symbol self.name = name self.phase = ChemicalPhase[phase] self.group = ChemicalGroup[group] self.standardWeight = None self.nuclides = [] def __repr__(self): return f"<Element {self.symbol:>3s} (Z={self.z}), {self.name}, {self.group}, {self.phase}>" def __hash__(self): return hash((self.name, self.z, self.symbol, self.phase, self.group, len(self.nuclides))) def __lt__(self, other): return self.z < other.z def __eq__(self, other): return hash(self) == hash(other) def __iter__(self): for nuc in sorted(self.nuclides): yield nuc def append(self, nuclide): """Assigns and sorts the nuclide to the element and ensures no duplicates.""" if nuclide in self.nuclides: return self.nuclides.append(nuclide) self.nuclides = sorted(self.nuclides) def isNaturallyOccurring(self): """Return True if the element is occurs in nature.""" return any([nuc.abundance > 0.0 for nuc in self.nuclides]) def getNaturalIsotopics(self): """ Return a list of nuclides that are naturally occurring for this element. Notes ----- This method will filter out any NaturalNuclideBases from the `nuclides` attribute. """ return [nuc for nuc in self.nuclides if nuc.abundance > 0.0 and nuc.a > 0] def isHeavyMetal(self): """ Return True if all nuclides belonging to the element are heavy metals. 
Notes ----- Heavy metal in this instance is not related to an exact weight or density cut-off, but rather is designated for nuclear fuel burn-up evaluations, where the initial heavy metal mass within a component should be tracked. It is typical to include any element/nuclide above Actinium. """ return self.z > HEAVY_METAL_CUTOFF_Z def getElementsByChemicalPhase(phase: ChemicalPhase) -> List[Element]: """Pass through to Elements.getElementsByChemicalPhase() for the global Elements object.""" global elements return elements.getElementsByChemicalPhase(phase) def getElementsByChemicalGroup(group: ChemicalGroup) -> List[Element]: """Pass through to Elements.getElementsByChemicalGroup() for the global Elements object.""" global elements return elements.getElementsByChemicalGroup(group) def getName(z: int = None, symbol: str = None) -> str: """Pass through to Elements.getName() for the global Elements object.""" global elements return elements.getName(z, symbol) def getSymbol(z: int = None, name: str = None) -> str: """Pass through to Elements.getSymbol() for the global Elements object.""" global elements return elements.getSymbol(z, name) def getElementZ(symbol: str = None, name: str = None) -> int: """Pass through to Elements.getElementZ() for the global Elements object.""" global elements return elements.getElementZ(symbol, name) def factory(elementsFile: str = None): """Pass through to Elements.factory() for the global Elements object.""" global elements global byZ global byName global bySymbol elements = Elements() elements.factory(elementsFile) byZ = elements.byZ byName = elements.byName bySymbol = elements.bySymbol def addGlobalElement(element: Element): """Pass through to Elements.addElement() for the global Elements object.""" global elements elements.addElement(element) def destroyGlobalElements(): """Pass through to Elements.clear() for the global Elements object.""" global elements elements.clear() class Elements: """ A container for all the atomics elements 
information in the simulation. By design, you would only expect to have one instance of this object in memory during a simulation. Attributes ---------- byZ: dict[int, Element] A dictionary to find Element objects by atomic number (integer Z). byName: dict[str, Element] A dictionary to find Element objects by unique string identifier ("C", "PU239", "U235", etc). bySymbol: dict[str, Element] A dictionary to find Element objects by atomic symbol ("C", "N", "PU", etc). elementsFile: str File path to the custom ARMI "elements.dat" file. """ DEFAULT_ELEMENTS_FILE = os.path.join(context.RES, "elements.dat") def __init__(self, elementsFile: str = None): self.byZ: dict[int, Element] = {} self.byName: dict[str, Element] = {} self.bySymbol: dict[str, Element] = {} self.elementsFile: str = elementsFile if elementsFile else self.DEFAULT_ELEMENTS_FILE def clear(self): """Empty all the data in this collection.""" self.byZ.clear() self.byName.clear() self.bySymbol.clear() def addElement(self, element: Element): """Add an element to this collection. Raises ------ ValueError If the element already exists in the collection. """ if element.z in self.byZ or element.name in self.byName or element.symbol in self.bySymbol: raise ValueError(f"{element} has already been added and cannot be duplicated.") self.byZ[element.z] = element self.byName[element.name] = element self.bySymbol[element.symbol] = element def factory(self, elementsFile: str = None): """Generate the :class:`Elements <Element>` instances.""" self.clear() # If an input file is provided, use it, otherwise there is a class default. 
if elementsFile: self.elementsFile = elementsFile with open(self.elementsFile, "r") as f: for line in f: # Skip header lines if line.startswith("#") or line.startswith("Z"): continue # read z, symbol, name, phase, and chemical group lineData = line.split() z = int(lineData[0]) sym = lineData[1].upper() name = lineData[2] phase = lineData[3] group = lineData[4] standardWeight = lineData[5] e = Element(z, sym, name, phase, group) if standardWeight != "Derived": e.standardWeight = float(standardWeight) self.addElement(e) def getElementsByChemicalPhase(self, phase: ChemicalPhase) -> List[Element]: """ Returns all elements that are of the given chemical phase. Parameters ---------- phase: ChemicalPhase This should be one of the valid options from the `ChemicalPhase` class. Returns ------- elems : List[Element] A list of elements that are associated with the given chemical phase. """ elems = [] if not isinstance(phase, ChemicalPhase): raise TypeError(f"{phase} is not an instance of {ChemicalPhase}") for element in self.byName.values(): if element.phase == phase: elems.append(element) return elems def getElementsByChemicalGroup(self, group: ChemicalGroup) -> List[Element]: """ Returns all elements that are of the given chemical group. Parameters ---------- group: ChemicalGroup This should be one of the valid options from the `ChemicalGroup` class. Returns ------- elems : List[Element] A list of elements that are associated with the given chemical group. """ elems = [] if not isinstance(group, ChemicalGroup): raise ValueError(f"{group} is not an instance of {ChemicalGroup}") for element in self.byName.values(): if element.group == group: elems.append(element) return elems def getName(self, z: int = None, symbol: str = None) -> str: r""" Returns element name. Parameters ---------- z : int Atomic number symbol : str Element abbreviation e.g. 
'Zr' Examples -------- >>> elements.getName(10) 'Neon' >>> elements.getName(symbol="Ne") 'Neon' """ element = None if z: element = self.byZ[z] else: element = self.byName[symbol.upper()] return element.name def getSymbol(self, z: int = None, name: str = None) -> str: r""" Returns element abbreviation given atomic number Z. Parameters ---------- z : int Atomic number name : str Element name E.g. Zirconium Examples -------- >>> elements.getSymbol(10) 'Ne' >>> elements.getSymbol(name="Neon") 'Ne' """ element = None if z: element = self.byZ[z] else: element = self.byName[name.lower()] return element.symbol def getElementZ(self, symbol: str = None, name: str = None) -> int: """ Get element atomic number given a symbol or name. Parameters ---------- symbol : str Element symbol e.g. 'Zr' name : str Element name e.g. 'Zirconium' Examples -------- >>> elements.getZ("Zr") 40 >>> elements.getZ(name="Zirconium") 40 Notes ----- Element Z is stored in elementZBySymbol, indexed by upper-case element symbol. """ if not symbol and not name: return None element = None if symbol: element = self.bySymbol[symbol.upper()] else: element = self.byName[name.lower()] return element.z factory() ================================================ FILE: armi/nucDirectory/nucDir.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Some original nuclide directory code. Notes ----- This may be deprecated. 
Consider using the appropriate instance methods available through the :py:class:`armi.nucDirectory.nuclideBases.INuclide` objects and/or the :py:mod:`armi.nucDirectory.nuclideBases` module. """ import re from armi.nucDirectory import elements, nuclideBases nuclidePattern = re.compile(r"([A-Za-z]+)-?(\d{0,3})(\d*)(\S*)") zaPat = re.compile(r"([A-Za-z]+)-?([0-9]+)") # Partially from table 2.2 in Was # See also: Table 2.4 in Primary Radiation Damage in Materials # https://www.oecd-nea.org/science/docs/2015/nsc-doc2015-9.pdf eDisplacement = { "H": 10.0, "C": 31.0, "N": 30.0, "NA": 25.0, "SI": 25.0, "V": 40.0, "CR": 40.0, "MN": 40.0, "NI": 40.0, "MO": 60.0, "FE": 40.0, "W": 90.0, "TI": 30.0, "NB": 60.0, "ZR": 40.0, "CU": 30.0, "CO": 40.0, "AL": 25.0, "PB": 25.0, "TA": 90.0, } def getNuclideFromName(name): actualName = name if "-" in name: actualName = name.replace("-", "") if "_" in name: actualName = name.replace("_", "") return nuclideBases.byName[actualName] def getNaturalIsotopics(elementSymbol=None, z=None): """ Determines the atom fractions of all natural isotopes. Parameters ---------- elementSymbol : str, optional The element symbol, e.g. Zr, U z : int, optional The atomic number of the element Returns ------- abundances : list A list of (A,fraction) tuples where A is the mass number of the isotopes """ element = None if z: element = elements.byZ[z] else: element = elements.bySymbol[elementSymbol] return [(nn.a, nn.abundance) for nn in element.getNaturalIsotopics()] def getNaturalMassIsotopics(elementSymbol=None, z=None): """Return mass fractions of all natural isotopes. To convert number fractions to mass fractions, we multiply by A. """ numIso = getNaturalIsotopics(elementSymbol, z) terms = [] for a, frac in numIso: terms.append(a * frac) s = sum(terms) massIso = [] for i, (a, frac) in enumerate(numIso): massIso.append((a, terms[i] / s)) return massIso def getMc2Label(name): """ Return a MC2 prefix label without a xstype suffix. 
MC**2 has labels and library names. The labels are like U235IA, ZIRCFB, etc. and the library names are references to specific data sets on the MC**2 libraries (e.g. U-2355, etc.) This method returns the labels without the xstype suffixes (IA, FB). Rather than maintaining a lookup table, this simply converts the ARMI nuclide names to MC**2 names. Parameters ---------- name : str ARMI nuclide name of the nuclide Returns ------- mc2LibLabel : str The MC**2 prefix for this nuclide. Examples -------- >>> nucDir.getMc2Label("U235") 'U235' >> nucDir.getMc2Label('FE') 'FE' >>> nucDir.getMc2Label("IRON") 'FE' >>> nucDir.getMc2Label("AM242") A242 """ # First translate to the proper nuclide. CARB->C nuc = getNuclide(name) return nuc.label def getElementName(z=None, symbol=None): """ Returns element name. Parameters ---------- z : int Atomic number symbol : str Element abbreviation e.g. 'Zr' Examples -------- >>> nucDir.getElementName(10) 'Neon' >>> nucDir.getElementName(symbol="Zr") 'Neon' """ element = None if z: element = elements.byZ[z] else: element = elements.byName[symbol.upper()] return element.name def getElementSymbol(z=None, name=None): """ Returns element abbreviation given atomic number Z. Parameters ---------- z : int Atomic number name : str Element name E.g. Zirconium Examples -------- >>> nucDir.getElementSymbol(10) 'Ne' >>> nucDir.getElementSymbol(name="Neon") 'Ne' """ element = None if z: element = elements.byZ[z] else: element = elements.byName[name.lower()] return element.symbol def getNuclide(nucName): """ Looks up the ARMI nuclide object that has this name. Parameters ---------- nucName : str A nuclide name like U-235 or AM241, AM242M, AM242M Returns ------- nuc : Nuclide An armi nuclide object. 
""" nuc = nuclideBases.byName.get(nucName, None) if nucName and not nuc: nuc = getNuclideFromName(nucName) if not nuc: raise KeyError(f"Nuclide name {nucName} is invalid.") return nuc def getNuclides(nucName=None, elementSymbol=None): """ Returns a list of nuclide names in a particular nuclide or element. If no arguments, returns all nuclideBases in the directory Used to convert things to DB name, to adjustNuclides, etc. Parameters ---------- nucName : str ARMI nuclide label elementSymbol : str Element symbol e.g. 'Zr' """ if nucName: # just spit back the nuclide if it's in here. Useful when iterating over the result. nucList = [getNuclide(nucName)] elif elementSymbol: nucList = elements.bySymbol[elementSymbol].nuclides else: # all nuclideBases, including shortcut nuclideBases ('CARB') nucList = [nuc for nuc in nuclideBases.instances if nuc.getMcc2Id() is not None] return nucList def getNuclideNames(nucName=None, elementSymbol=None): """ Returns a list of nuclide names in a particular nuclide or element. If no arguments, returns all nuclideBases in the directory. .. warning:: You will get both isotopes and NaturalNuclideBases for each element. Parameters ---------- nucName : str ARMI nuclide label elementSymbol : str Element symbol e.g. 'Zr' """ nucList = getNuclides(nucName, elementSymbol) return [nn.name for nn in nucList] def getAtomicWeight(lab=None, z=None, a=None): """ Returns atomic weight in g/mole. 
Parameters ---------- lab : str, optional nuclide label, like U235 z : int, optional atomic number a : int, optional mass number Returns ------- aMass : float Atomic weight in grams /mole from NIST, or just mass number if not in library (U239 gives 239) Examples -------- >>> from armi.nucDirectory import nucDir >>> nucDir.getAtomicWeight("U235") 235.0439299 >>> nucDir.getAtomicWeight("U239") 239 >>> nucDir.getAtomicWeight("U238") 238.0507882 >>> nucDir.getAtomicWeight(z=94, a=239) 239.0521634 """ if lab: nuclide = None if lab in nuclideBases.byLabel: nuclide = nuclideBases.byLabel[lab] elif lab in nuclideBases.byMcc3Id: nuclide = nuclideBases.byMcc3Id[lab] else: nuclide = getNuclideFromName(lab) return nuclide.weight elif z == 0 and a == 0: return 0.0 if a == 0 and z: element = elements.byZ[z] return element.standardWeight else: nuclide = nuclideBases.single(lambda nn: nn.a == a and nn.z == z) return nuclide.weight def isHeavyMetal(name): try: return getNuclide(name).isHeavyMetal() except AttributeError: raise AttributeError("The nuclide {0} is not found in the nuclide directory".format(name)) def isFissile(name): try: return getNuclide(name).isFissile() except AttributeError: raise AttributeError("The nuclide {0} is not found in the nuclide directory".format(name)) def getThresholdDisplacementEnergy(nuc): """ Return the Lindhard cutoff; the energy required to displace an atom. From SPECTER.pdf Table II Greenwood, "SPECTER: Neutron Damage Calculations for Materials Irradiations", ANL.FPP/TM-197, Argonne National Lab., (1985). Parameters ---------- nuc : str nuclide name Returns ------- Ed : float The cutoff energy in eV """ nuc = getNuclide(nuc) el = elements.byZ[nuc.z] try: ed = eDisplacement[el.symbol] except KeyError: print( "The element {0} of nuclide {1} does not have a displacement energy in the library. 
Please add one.".format( el, nuc ) ) raise return ed ================================================ FILE: armi/nucDirectory/nuclideBases.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" This module provides access to fundamental nuclide information to be used throughout the framework and applications. .. impl:: Isotopes and isomers can be queried by name, label, MC2-3 ID, MCNP ID, and AAAZZZS ID. :id: I_ARMI_ND_ISOTOPES0 :implements: R_ARMI_ND_ISOTOPES The :py:mod:`nuclideBases <armi.nucDirectory.nuclideBases>` module defines the :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` class which is used to organize and store metadata about each nuclide. The metadata is read from a provided ``nuclides.dat`` file, which contains metadata for thousands of isotopes. The module also contains classes for special types of nuclides, including :py:class:`DummyNuclideBase <armi.nucDirectory.nuclideBases.DummyNuclideBase>` for dummy nuclides, :py:class:`LumpNuclideBase <armi.nucDirectory.nuclideBases.LumpNuclideBase>`, for lumped fission product nuclides, and :py:class:`NaturalNuclideBase <armi.nucDirectory.nuclideBases.NaturalNuclideBase>` for when data is given collectively for an element at natural abundance rather than for individual isotopes. 
The :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` provides a data structure for information about a single nuclide, including the atom number, atomic weight, element, isomeric state, half-life, and name. The :py:mod:`nuclideBases <armi.nucDirectory.nuclideBases>` module provides a factory and associated functions for instantiating the :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` objects. It is expected that during a simulation, the ``Reactor`` will contain an instance of ``NuclideBases`` to handle building the nuclide data dictionaries, including: * ``elements`` (collection of Element objects) * ``instances`` (list of INuclide objects) * ``byName`` (keyed by name, e.g., ``U235``) * ``byDBName`` (keyed by database name, e.g., ``nU235``) * ``byLabel`` (keyed by label, e.g., ``U235``) * ``byMcc2Id`` (keyed by MC\ :sup:`2`-2 ID, e.g., ``U-2355``) * ``byMcc3Id`` (keyed by MC\ :sup:`2`-3 ID, e.g., ``U235_7``) * ``byMcc3IdEndfbVII0`` (keyed by MC\ :sup:`2`-3 ID, e.g., ``U235_7``) * ``byMcc3IdEndfbVII1`` (keyed by MC\ :sup:`2`-3 ID, e.g., ``U235_7``) * ``byMcnpId`` (keyed by MCNP ID, e.g., ``92235``) * ``byAAAZZZSId`` (keyed by AAAZZZS, e.g., ``2350920``) The nuclide class structure is outlined :ref:`here <nuclide-bases-class-diagram>`. .. _nuclide-bases-class-diagram: .. pyreverse:: armi.nucDirectory.nuclideBases :align: center :width: 75% Class inheritance diagram for :py:class:`INuclide`. 
Examples -------- >>> r = Reactor("ExampleReactor", bp) >>> r.nuclideBases.byName["U235"] <NuclideBase U235: Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03> >>> r.nuclideBases.byLabel["U235"] <NuclideBase U235: Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03> Retrieve U-235 by the MC2-2 ID: >>> r.nuclideBases.byMcc2Id["U-2355"] <NuclideBase U235: Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03> Retrieve U-235 by the MC2-3 ID: >>> r.nuclideBases.byMcc3IdEndfVII0["U235_7"] <NuclideBase U235: Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03> Retrieve U-235 by the MCNP ID: >>> r.nuclideBases.byMcnpId["92235"] <NuclideBase U235: Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03> Retrieve U-235 by the AAAZZZS ID: >>> r.nuclideBases.byAAAZZZSId["2350920"] <NuclideBase U235: Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03> Notes ----- Currently, this module contains a lot of data in the global scope. But ARMI is in the process of encapsulating this data, moving it out of the global scope, making it part of the reactor data model, and making it configurable via Settings. Pardon the mess during this transition. """ import os import numpy as np from ruamel.yaml import YAML from armi import context, runLog from armi.nucDirectory import elements, transmutations from armi.utils.units import HEAVY_METAL_CUTOFF_Z # Global nuclide and nuclideBases data nuclideBases = None instances = [] burnChainImposed = False byName = None byDBName = None byLabel = None byMcc2Id = None byMcc3Id = None # for backwards compatibility. 
# Remaining module-level lookup dictionaries. These are populated by factory()
# from the global NuclideBases instance and kept at module scope for
# backwards compatibility.
byMcc3IdEndfbVII0 = None
byMcc3IdEndfbVII1 = None
byMcnpId = None
byAAAZZZSId = None

# Base mass numbers used when deriving ENDF/B-VII.0 MAT numbers for elements
# with no stable isotope (or other special cases), keyed by element symbol.
# lookup table from https://t2.lanl.gov/nis/data/endf/endfvii-n.html
BASE_ENDFB7_MAT_NUM = {
    "PM": 139,
    "RA": 223,
    "AC": 225,
    "TH": 227,
    "PA": 229,
    "NP": 230,
    "PU": 235,
    "AM": 235,
    "CM": 240,
    "BK": 240,
    "CF": 240,
    "TC": 99,
}


class NuclideInterface:
    """An abstract nuclide implementation defining the various methods required for a nuclide object."""

    def getDatabaseName(self):
        """Return the nuclide label for the ARMI database (i.e. "nPu239")."""
        raise NotImplementedError

    def getDecay(self, decayType):
        """
        Return a :py:class:`~armi.nucDirectory.transmutations.DecayMode` object.

        Parameters
        ----------
        decayType : str
            Name of decay mode, e.g. 'sf', 'alpha'

        Returns
        -------
        decay : :py:class:`DecayModes <armi.nucDirectory.transmutations.DecayMode>`
        """
        raise NotImplementedError

    def getMcc2Id(self):
        """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library."""
        raise NotImplementedError

    def getMcc3Id(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        raise NotImplementedError

    def getMcc3IdEndfbVII0(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library."""
        raise NotImplementedError

    def getMcc3IdEndfbVII1(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        raise NotImplementedError

    def getSerpentId(self):
        """Get the Serpent nuclide identification label."""
        raise NotImplementedError

    def getNaturalIsotopics(self):
        """Return the natural isotopics root :py:class:`~elements.Element`."""
        raise NotImplementedError

    def isFissile(self):
        """Return boolean value indicating whether this nuclide is fissile."""
        raise NotImplementedError

    def isHeavyMetal(self):
        """Return boolean value indicating whether this nuclide is a heavy metal."""
        raise NotImplementedError
class NuclideWrapper(NuclideInterface):
    """Base class for nuclides that wrap an entry in a nuclear data file.

    Nearly every lookup delegates to ``self._base``, the underlying
    :py:class:`INuclide`, which concrete subclasses are expected to assign.
    """

    def __init__(self, container, key):
        self._base = None
        self.containerKey = key
        self.container = container
        # Drop the trailing two characters of the key to recover the nuclide
        # label (assumes a 2-character suffix, e.g. an XS-type tag — see
        # NuclideBase._createLabel).
        self.nucLabel = key[:-2]

    def __repr__(self):
        return "<{} {}>".format(type(self).__name__, self.containerKey)

    def __format__(self, format_spec):
        # The spec string is used as a template into which the repr is substituted.
        rep = repr(self)
        return format_spec.format(rep)

    @property
    def name(self):
        """
        Name of the wrapped nuclide (i.e. "PU239").

        Notes
        -----
        The nuclide name consists of the capitalized 2 character element symbol and atomic mass number.
        """
        return self._base.name

    @property
    def weight(self):
        """Atomic weight of the wrapped nuclide."""
        return self._base.weight

    def getDatabaseName(self):
        """Return the database name of the wrapped nuclide (i.e. "nPu239")."""
        return self._base.getDatabaseName()

    def getDecay(self, decayType):
        """
        Return a :py:class:`~armi.nucDirectory.transmutations.DecayMode` object.

        Parameters
        ----------
        decayType : str
            Name of decay mode, e.g. 'sf', 'alpha'

        Returns
        -------
        decay : :py:class:`DecayModes <armi.nucDirectory.transmutations.DecayMode>`
        """
        return self._base.getDecay(decayType)

    def getMcc2Id(self):
        """Return the MC2-2 ID (ENDF/B-V.2 library) of the wrapped nuclide."""
        return self._base.getMcc2Id()

    def getMcc3Id(self):
        """Return the MC2-3 ID (ENDF/B-VII.1 library) of the wrapped nuclide."""
        return self.getMcc3IdEndfbVII1()

    def getMcc3IdEndfbVII0(self):
        """Return the MC2-3 ID (ENDF/B-VII.0 library) of the wrapped nuclide."""
        return self._base.getMcc3IdEndfbVII0()

    def getMcc3IdEndfbVII1(self):
        """Return the MC2-3 ID (ENDF/B-VII.1 library) of the wrapped nuclide."""
        return self._base.getMcc3IdEndfbVII1()

    def getNaturalIsotopics(self):
        """Return the natural isotopics root :py:class:`~elements.Element`."""
        return self._base.getNaturalIsotopics()

    def isFissile(self):
        """Return True if the wrapped nuclide is fissile."""
        return self._base.isFissile()

    def isHeavyMetal(self):
        """Return True if the wrapped nuclide is a heavy metal."""
        return self._base.isHeavyMetal()
class INuclide(NuclideInterface):
    """
    Nuclide interface, the base of all nuclide objects.

    Attributes
    ----------
    z : int
        Number of protons.
    a : int
        Number of nucleons.
    state : int
        Indicates excitement, 1 is more excited than 0.
    abundance : float
        Isotopic fraction of a naturally occurring nuclide. The sum of all nuclide abundances for a
        naturally occurring element should be 1.0. This is atom fraction, not mass fraction.
    name : str
        ARMI's unique name for the given nuclide.
    label : str
        ARMI's unique 4 character label for the nuclide. These are not human readable, but do not
        lose any information. The label is effectively the
        :py:attr:`Element.symbol <armi.nucDirectory.elements.Element.symbol>` padded to two
        characters, plus the mass number (A) in base-26 (0-9, A-Z). Additional support for
        meta-states is provided by adding 100 * the state to the mass number (A).
    nuSF : float
        Neutrons released per spontaneous fission. This should probably be moved at some point.
    """

    fissile = ["U235", "PU239", "PU241", "AM242M", "CM244", "U233"]
    TRANSMUTATION = "transmutation"
    DECAY = "decay"
    SPONTANEOUS_FISSION = "nuSF"

    def __init__(
        self,
        element,
        a,
        state,
        weight,
        abundance,
        halflife,
        name,
        label,
        mcc2id=None,
        mcc3idEndfbVII0=None,
        mcc3idEndfbVII1=None,
    ):
        """
        Create an instance of an INuclide.

        Warning
        -------
        Do not call this constructor directly; use the factory instead.
        """
        # Validate the user-provided state and half-life before storing anything.
        if state < 0:
            raise ValueError(
                f"Error in initializing nuclide {name}. An invalid state {state} is provided. The state must be a "
                "positive integer."
            )
        if halflife < 0.0:
            raise ValueError(f"Error in initializing nuclide {name}. The halflife must be a positive value.")
        self.element = element
        self.z = element.z
        self.a = a
        self.state = state
        self.decays = []  # DecayMode objects, filled in by _processBurnData
        self.trans = []  # Transmutation objects, filled in by _processBurnData
        self.weight = weight
        self.abundance = abundance
        self.halflife = halflife
        self.name = name
        self.label = label
        self.nuSF = 0.0
        # Empty string stands in for "no ID defined in this library".
        self.mcc2id = mcc2id or ""
        self.mcc3idEndfbVII0 = mcc3idEndfbVII0 or ""
        self.mcc3idEndfbVII1 = mcc3idEndfbVII1 or ""
        # Register this nuclide with its parent element.
        self.element.append(self)

    def __hash__(self):
        # Identity of a nuclide is its (A, Z, isomeric state) triple.
        return hash((self.a, self.z, self.state))

    def __reduce__(self):
        # Pickle by name so unpickling resolves against the live nuclide directory.
        return fromName, (self.name,)

    def __lt__(self, other):
        return (self.z, self.a, self.state) < (other.z, other.a, other.state)

    def __eq__(self, other):
        # NOTE(review): equality is delegated to hash equality, so any object
        # whose hash collides with (a, z, state) compares equal — confirm this
        # is intentional before relying on __eq__ against non-nuclides.
        return hash(self) == hash(other)

    def _processBurnData(self, burnInfo):
        """
        Process YAML burn transmutation, decay, and spontaneous fission data for this nuclide.

        This clears out any existing transmutation/decay information before processing.

        Parameters
        ----------
        burnInfo : list
            List of dictionaries containing burn information for the current nuclide
        """
        self.decays = []
        self.trans = []
        for nuclideBurnCategory in burnInfo:
            # Check that the burn category has only one defined burn type
            if len(nuclideBurnCategory) > 1:
                raise ValueError(
                    f"Improperly defined ``burn-chain`` of {self}. {nuclideBurnCategory.keys()} should be a single "
                    "burn type."
                )
            nuclideBurnType = list(nuclideBurnCategory.keys())[0]
            if nuclideBurnType == self.TRANSMUTATION:
                self.trans.append(transmutations.Transmutation(self, nuclideBurnCategory[nuclideBurnType]))
            elif nuclideBurnType == self.DECAY:
                self.decays.append(transmutations.DecayMode(self, nuclideBurnCategory[nuclideBurnType]))
            elif nuclideBurnType == self.SPONTANEOUS_FISSION:
                userSpontaneousFissionYield = nuclideBurnCategory.get(nuclideBurnType, None)
                # Check for user-defined value of nuSF within the burn-chain data. If this is updated then prefer the
                # user change and then note this to the user. Otherwise, maintain the default loaded from the nuclide
                # bases.
                if userSpontaneousFissionYield:
                    if userSpontaneousFissionYield != self.nuSF:
                        runLog.info(
                            f"nuSF provided for {self} will be updated from {self.nuSF:<8.6e} to "
                            f"{userSpontaneousFissionYield:<8.6e} based on user provided burn-chain data."
                        )
                        self.nuSF = userSpontaneousFissionYield
            else:
                raise Exception(
                    f"Undefined Burn Data {nuclideBurnType} for {self}. Expected {self.TRANSMUTATION}, {self.DECAY}, "
                    f"or {self.SPONTANEOUS_FISSION}."
                )

    def getDecay(self, decayType):
        """Get a :py:class:`~armi.nucDirectory.transmutations.DecayMode`.

        Retrieve the first :py:class:`~armi.nucDirectory.transmutations.DecayMode` matching the
        specified decType, or None when no decay mode matches.

        Parameters
        ----------
        decayType : str
            Name of decay mode e.g. 'sf', 'alpha'

        Returns
        -------
        decay : :py:class:`DecayModes <armi.nucDirectory.transmutations.DecayMode>`
        """
        for d in self.decays:
            if d.type == decayType:
                return d
        return None

    def isFissile(self):
        """Determine if the nuclide is fissile.

        Returns
        -------
        answer : bool
            True if the :py:class:`INuclide` is fissile (by membership in the class-level
            ``fissile`` name list), otherwise False.
        """
        return self.name in self.fissile

    def getNaturalIsotopics(self):
        r"""Gets the naturally occurring nuclides for this nuclide.

        Abstract method, see concrete types for implementation.

        Returns
        -------
        nuclides : list
            List of :py:class:`INuclides <INuclide>`

        See Also
        --------
        :meth:`NuclideBase.getNaturalIsotopics`
        :meth:`NaturalNuclideBase.getNaturalIsotopics`
        :meth:`LumpNuclideBase.getNaturalIsotopics`
        :meth:`DummyNuclideBase.getNaturalIsotopics`
        """
        raise NotImplementedError

    def getDatabaseName(self):
        """Get the name of the nuclide used in the database (i.e. "nPu239")."""
        return f"n{self.name.capitalize()}"

    def isHeavyMetal(self):
        """Return True when this nuclide's atomic number is above the heavy-metal cutoff."""
        return self.z > HEAVY_METAL_CUTOFF_Z


class IMcnpNuclide:
    """Abstract class for retrieving nuclide identifiers for the MCNP software."""

    def getMcnpId(self):
        """Return a string that represents a nuclide label for a material card in MCNP."""
        raise NotImplementedError

    def getAAAZZZSId(self):
        """Return a string that is ordered by the mass number, A, the atomic number, Z, and the isomeric state, S."""
        raise NotImplementedError
""" def __init__(self, element, a, weight, abundance, state, halflife): IMcnpNuclide.__init__(self) INuclide.__init__( self, element=element, a=a, state=state, weight=weight, abundance=abundance, halflife=halflife, name=NuclideBase._createName(element, a, state), label=NuclideBase._createLabel(element, a, state), ) def __repr__(self): return ( f"<{self.__class__.__name__} {self.name}: Z:{self.z}, A:{self.a}, S:{self.state}, " + f"W:{self.weight:<12.6e}, Label:{self.label}>, HL:{self.halflife:<15.11e}, " + f"Abund:{self.abundance:<8.6e}>" ) @staticmethod def _createName(element, a, state): metaChar = ["", "M", "M2", "M3"] if state > len(metaChar): raise ValueError(f"The state of NuclideBase is not valid and must not be larger than {len(metaChar)}.") return f"{element.symbol}{a}{metaChar[state]}" @staticmethod def _createLabel(element, a, state): """ Make label for nuclide base. The logic causes labels for things with A<10 to be zero padded like H03 or tritium instead of H3. This avoids the metastable tritium collision which would look like elemental HE. It also allows things like MO100 to be held within 4 characters, which is a constraint of the ISOTXS format if we append 2 characters for XS type. """ # len(e.symbol) is 1 or 2 => a % (either 1000 or 100) # => gives exact a, or last two digits. # the division by 10 removes the last digit. firstTwoDigits = (a % (10 ** (4 - len(element.symbol)))) // 10 # the last digit is either 0-9 if state=0, or A-J if state=1, or K-T if state=2, or U-d if state=3 lastDigit = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcd"[(a % 10) + state * 10] return f"{element.symbol}{firstTwoDigits}{lastDigit}" def getNaturalIsotopics(self): """Gets the natural isotopics root :py:class:`~elements.Element`. Gets the naturally occurring nuclides for this nuclide. 
Returns ------- nuclides: list List of :py:class:`INuclides <INuclide>` See Also -------- :meth:`INuclide.getNaturalIsotopics` """ return self.element.getNaturalIsotopics() def getMcc2Id(self): """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library. .. impl:: Isotopes and isomers can be queried by MC2-2 ID. :id: I_ARMI_ND_ISOTOPES2 :implements: R_ARMI_ND_ISOTOPES This method returns the ``mcc2id`` attribute of a :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instance. This attribute is initially populated by reading from the mcc-nuclides.yaml file in the ARMI resources folder. """ return self.mcc2id def getMcc3Id(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.""" return self.getMcc3IdEndfbVII1() def getMcc3IdEndfbVII0(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library. .. impl:: Isotopes and isomers can be queried by MC2-3 ENDF/B-VII.0 ID. :id: I_ARMI_ND_ISOTOPES3 :implements: R_ARMI_ND_ISOTOPES This method returns the ``mcc3idEndfbVII0`` attribute of a :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instance. This attribute is initially populated by reading from the mcc-nuclides.yaml file in the ARMI resources folder. """ return self.mcc3idEndfbVII0 def getMcc3IdEndfbVII1(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library. .. impl:: Isotopes and isomers can be queried by MC2-3 ENDF/B-VII.1 ID. :id: I_ARMI_ND_ISOTOPES7 :implements: R_ARMI_ND_ISOTOPES This method returns the ``mcc3idEndfbVII1`` attribute of a :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instance. This attribute is initially populated by reading from the mcc-nuclides.yaml file in the ARMI resources folder. """ return self.mcc3idEndfbVII1 def getMcnpId(self): """ Gets the MCNP label for this nuclide. .. 
impl:: Isotopes and isomers can be queried by MCNP ID. :id: I_ARMI_ND_ISOTOPES4 :implements: R_ARMI_ND_ISOTOPES This method generates the MCNP ID for an isotope using the standard MCNP format based on the atomic number A, number of protons Z, and excited state. The implementation includes the special rule for Am-242m, which is 95242. 95642 is used for the less common ground state Am-242. Returns ------- id : str The MCNP ID e.g. ``92235``, ``94239``, ``6000`` """ z, a = self.z, self.a if z == 95 and a == 242: # Am242 has special rules if self.state != 1: # MCNP uses base state for the common metastable state AM242M, so AM242M is just 95242 # AM242 base state is called 95642 (+400) in mcnp. # see https://mcnp.lanl.gov/pdf_files/la-ur-08-1999.pdf # New ACE-Formatted Neutron and Proton Libraries Based on ENDF/B-VII.0 a += 300 + 100 * max(self.state, 1) elif self.state > 0: # in general mcnp adds 300 + 100*m to the Z number for metastables. see above source a += 300 + 100 * self.state return "{z:d}{a:03d}".format(z=z, a=a) def getAAAZZZSId(self): """ Return a string that is ordered by the mass number, A, the atomic number, Z, and the isomeric state, S. .. impl:: Isotopes and isomers can be queried by AAAZZZS ID. :id: I_ARMI_ND_ISOTOPES5 :implements: R_ARMI_ND_ISOTOPES This method generates the AAAZZZS format ID for an isotope. Where AAA is the mass number, ZZZ is the atomic number, and S is the isomeric state. This is a general format independent of any code that precisely defines an isotope or isomer. Notes ----- An example would be for U235, where A=235, Z=92, and S=0, returning ``2350920``. """ return f"{self.a}{self.z:>03d}{self.state}" def getSerpentId(self): """ Returns the SERPENT style ID for this nuclide. Returns ------- id: str The ID of this nuclide based on it's elemental name, weight, and state, eg ``U-235``, ``Te-129m``. 
""" symbol = self.element.symbol.capitalize() return f"{symbol}-{self.a}{'m' if self.state else ''}" def getEndfMatNum(self): """ Gets the ENDF MAT number. MAT numbers are defined as described in section 0.4.1 of the NJOY manual. Basically, it's Z * 100 + I where I is an isotope number. I=25 is defined as the lightest known stable isotope of element Z, so for Uranium, Z=92 and I=25 refers to U234. The values of I go up by 3 for each mass number, so U235 is 9228. This leaves room for three isomeric states of each nuclide. Returns ------- id : str The MAT number e.g. ``9237`` for U238 """ z, a = self.z, self.a if self.element.symbol in BASE_ENDFB7_MAT_NUM: # no stable isotopes (or other special case). Use lookup table smallestStableA = BASE_ENDFB7_MAT_NUM[self.element.symbol] else: naturalIsotopes = self.getNaturalIsotopics() if naturalIsotopes: smallestStableA = min(ni.a for ni in naturalIsotopes) # no guarantee they were sorted else: raise KeyError(f"Nuclide {self} is unknown in the MAT number lookup") isotopeNum = (a - smallestStableA) * 3 + self.state + 25 mat = z * 100 + isotopeNum return str(mat) class NaturalNuclideBase(INuclide, IMcnpNuclide): """ Represents an individual nuclide/isotope that is naturally occurring. Notes ----- This is meant to represent the combination of all naturally occurring nuclides within an element. The abundance is forced to zero here so that it does not have any interactions with the NuclideBase objects. """ def __init__(self, name, element): INuclide.__init__( self, element=element, a=0, state=0, weight=sum([nn.weight * nn.abundance for nn in element.getNaturalIsotopics()]), abundance=0.0, halflife=np.inf, name=name, label=name, ) def __repr__(self): return f"<{self.__class__.__name__} {self.name}: Z:{self.z}, W:{self.weight:<12.6e}, Label:{self.label}>" def getNaturalIsotopics(self): """Gets the natural isotopics root :py:class:`~elements.Element`. Gets the naturally occurring nuclides for this nuclide. 
class NaturalNuclideBase(INuclide, IMcnpNuclide):
    """
    Represents an individual nuclide/isotope that is naturally occurring.

    Notes
    -----
    This is meant to represent the combination of all naturally occurring nuclides within an
    element. The abundance is forced to zero here so that it does not have any interactions with
    the NuclideBase objects.
    """

    def __init__(self, name, element):
        INuclide.__init__(
            self,
            element=element,
            a=0,
            state=0,
            # Abundance-weighted mean of the element's natural isotope weights.
            weight=sum([nn.weight * nn.abundance for nn in element.getNaturalIsotopics()]),
            abundance=0.0,  # see class Notes: forced to zero deliberately
            halflife=np.inf,
            name=name,
            label=name,
        )

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.name}: Z:{self.z}, W:{self.weight:<12.6e}, Label:{self.label}>"

    def getNaturalIsotopics(self):
        """Gets the natural isotopics root :py:class:`~elements.Element`.

        Gets the naturally occurring nuclides for this nuclide.

        Returns
        -------
        nuclides : list
            List of :py:class:`INuclides <INuclide>`.

        See Also
        --------
        :meth:`INuclide.getNaturalIsotopics`
        """
        return self.element.getNaturalIsotopics()

    def getMcnpId(self):
        """Gets the MCNP ID for this element.

        Returns
        -------
        id : str
            The MCNP ID e.g. ``1000``, ``92000``. Not zero-padded on the left.
        """
        return "{0:d}000".format(self.z)

    def getMcc2Id(self):
        """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library."""
        return self.mcc2id

    def getMcc3Id(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.getMcc3IdEndfbVII1()

    def getMcc3IdEndfbVII0(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library."""
        return self.mcc3idEndfbVII0

    def getMcc3IdEndfbVII1(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.mcc3idEndfbVII1

    def getSerpentId(self):
        """Gets the SERPENT ID for this natural nuclide.

        Returns
        -------
        id : str
            SERPENT ID: ``C-nat``, ``Fe-nat``
        """
        return f"{self.element.symbol.capitalize()}-nat"

    def getEndfMatNum(self):
        """Get the ENDF mat number for this element."""
        if self.z != 6:
            # BUGFIX: the concatenated message previously read "...{self} and" + "will...",
            # producing "andwill"; a separating space was added.
            runLog.warning(
                f"The only elemental in ENDF/B VII.1 is carbon. ENDF mat num was requested for the elemental {self} "
                "and will not be helpful for working with ENDF/B VII.1. Try to expandElementalsToIsotopics"
            )
        return str(self.z * 100)
""" def __init__(self, element, name, weight): INuclide.__init__( self, element=element, a=0, state=0, weight=weight, abundance=0.0, halflife=np.inf, name=name, label="DMP" + name[4], ) def __repr__(self): return f"<{self.__class__.__name__} {self.name}: W:{self.weight:<12.6e}, Label:{self.label}>" def __hash__(self): return hash((self.a, self.z, self.state, self.weight)) def __lt__(self, other): return (self.z, self.a, self.state, self.weight) < ( other.z, other.a, other.state, other.weight, ) def getNaturalIsotopics(self): """Gets the natural isotopics, an empty iterator. Gets the naturally occurring nuclides for this nuclide. Returns ------- empty: iterator An empty generator See Also -------- :meth:`INuclide.getNaturalIsotopics` """ return yield def isHeavyMetal(self): return False def getMcc2Id(self): """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library.""" return self.mcc2id def getMcc3Id(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.""" return self.getMcc3IdEndfbVII1() def getMcc3IdEndfbVII0(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library.""" return self.mcc3idEndfbVII0 def getMcc3IdEndfbVII1(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.""" return self.mcc3idEndfbVII1 class LumpNuclideBase(INuclide): """ Represents a combination of many nuclides from `NuclideBases` into a single lumped nuclide. See Also -------- armi.physics.neutronics.fissionProduct model: Describes what nuclides LumpNuclideBase is expend to. 
""" def __init__(self, element, name, weight): INuclide.__init__( self, element=element, a=0, state=0, weight=weight, abundance=0.0, halflife=np.inf, name=name, label=name[1:], ) def __repr__(self): return f"<{self.__class__.__name__} {self.name}: W:{self.weight:<12.6e}, Label:{self.label}>" def __hash__(self): return hash((self.a, self.z, self.state, self.weight)) def __lt__(self, other): return (self.z, self.a, self.state, self.weight) < ( other.z, other.a, other.state, other.weight, ) def getNaturalIsotopics(self): """Gets the natural isotopics, an empty iterator. Gets the naturally occurring nuclides for this nuclide. Returns ------- empty: iterator An empty generator See Also -------- :meth:`INuclide.getNaturalIsotopics` """ return yield def isHeavyMetal(self): return False def getMcc2Id(self): """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library.""" return self.mcc2id def getMcc3Id(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.""" return self.getMcc3IdEndfbVII1() def getMcc3IdEndfbVII0(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library.""" return self.mcc3idEndfbVII0 def getMcc3IdEndfbVII1(self): """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.""" return self.mcc3idEndfbVII1 def initReachableActiveNuclidesThroughBurnChain(nuclides, numberDensities, activeNuclides): """Pass through to NuclideBases.initReachableActiveNuclidesThroughBurnChain() for the global NuclideBases object.""" global nuclideBases return nuclideBases.initReachableActiveNuclidesThroughBurnChain(nuclides, numberDensities, activeNuclides) def getIsotopics(nucName): """Pass through to NuclideBases.getIsotopics() for the global NuclideBases object.""" global nuclideBases return nuclideBases.getIsotopics(nucName) def fromName(name): """Pass through to NuclideBases.fromName() for the global 
NuclideBases object.""" global nuclideBases return nuclideBases.fromName(name) def isMonoIsotopicElement(name): """Pass through to NuclideBases.isMonoIsotopicElement() for the global NuclideBases object.""" global nuclideBases return nuclideBases.isMonoIsotopicElement(name) def where(predicate): """Pass through to NuclideBases.where() for the global NuclideBases object.""" global nuclideBases return nuclideBases.where(predicate) def single(predicate): """Pass through to NuclideBases.single() for the global NuclideBases object.""" global nuclideBases return nuclideBases.single(predicate) def changeLabel(nuclideBase, newLabel): """Pass through to NuclideBases.changeLabel() for the global NuclideBases object.""" global nuclideBases nuclideBases.changeLabel(nuclideBase, newLabel) def getDepletableNuclides(activeNuclides, obj): """Get nuclides in this object that are in the burn chain.""" return sorted(set(activeNuclides) & set(obj.getNuclides())) def imposeBurnChain(burnChainStream): """Pass through to NuclideBases.imposeBurnChain() for the global NuclideBases object.""" global nuclideBases nuclideBases.imposeBurnChain(burnChainStream) def factory(): """Pass through to NuclideBases.factory() for the global NuclideBases object.""" global nuclideBases global burnChainImposed global instances global byName global byDBName global byLabel global byMcc2Id global byMcc3Id global byMcc3IdEndfbVII0 global byMcc3IdEndfbVII1 global byMcnpId global byAAAZZZSId nuclideBases = NuclideBases() instances = nuclideBases.instances burnChainImposed = nuclideBases.burnChainImposed byName = nuclideBases.byName byDBName = nuclideBases.byDBName byLabel = nuclideBases.byLabel byMcc2Id = nuclideBases.byMcc2Id byMcc3Id = nuclideBases.byMcc3Id # for backwards compatibility. 
def factory():
    """Pass through to NuclideBases.factory() for the global NuclideBases object.

    Builds a fresh global NuclideBases instance and re-points every module-level
    lookup dictionary at that instance's containers.
    """
    global nuclideBases
    global burnChainImposed
    global instances
    global byName
    global byDBName
    global byLabel
    global byMcc2Id
    global byMcc3Id
    global byMcc3IdEndfbVII0
    global byMcc3IdEndfbVII1
    global byMcnpId
    global byAAAZZZSId
    nuclideBases = NuclideBases()
    instances = nuclideBases.instances
    burnChainImposed = nuclideBases.burnChainImposed
    byName = nuclideBases.byName
    byDBName = nuclideBases.byDBName
    byLabel = nuclideBases.byLabel
    byMcc2Id = nuclideBases.byMcc2Id
    byMcc3Id = nuclideBases.byMcc3Id  # for backwards compatibility. Identical to byMcc3IdEndfbVII1
    byMcc3IdEndfbVII0 = nuclideBases.byMcc3IdEndfbVII0
    byMcc3IdEndfbVII1 = nuclideBases.byMcc3IdEndfbVII1
    byMcnpId = nuclideBases.byMcnpId
    byAAAZZZSId = nuclideBases.byAAAZZZSId


def addNuclideBases():
    """Pass through to NuclideBases.addNuclideBases() for the global NuclideBases object."""
    global nuclideBases
    nuclideBases.addNuclideBases()


def readMCCNuclideData():
    """Pass through to NuclideBases.readMCCNuclideData() for the global NuclideBases object."""
    global nuclideBases
    nuclideBases.readMCCNuclideData()


def updateNuclideBasesForSpecialCases():
    """Pass through to NuclideBases.updateNuclideBasesForSpecialCases() for the global NuclideBases object."""
    global nuclideBases
    nuclideBases.updateNuclideBasesForSpecialCases()


def addGlobalNuclide(nuclide: NuclideBase):
    """Pass through to NuclideBases.addNuclide() for the global NuclideBases object."""
    global nuclideBases
    nuclideBases.addNuclide(nuclide)


def destroyGlobalNuclides():
    """Pass through to NuclideBases.clear() for the global NuclideBases object."""
    global nuclideBases
    nuclideBases.clear()
(This exists for backwards compat. Identical to byMcc3IdEndfbVII1.) byMcc3IdEndfbVII0: dict[str, INuclide] A dictionary of the nuclides in this class, keyed by MC2-3 ID, e.g., "U235_7". byMcc3IdEndfbVII1: dict[str, INuclide] A dictionary of the nuclides in this class, keyed by MC2-3 ID, e.g., "U235_7". byMcnpId: dict[str, INuclide] A dictionary of the nuclides in this class, keyed by MCNP ID, e.g., 92235. byAAAZZZSId: dict[int, INuclide] A dictionary of the nuclides in this class, keyed by AAAZZZS, e.g., 2350920. elements: Elements A container for all the atomics elements information in the simulation. nuclidesFile: str File path to the custom ARMI "nuclides.dat" file, containing a plain text description of all the nuclides to be modeled including: Z, number of neutrons, mass number, amu, natural abundance, half life and nu-bar and more. mccNuclidesFile: str File path to the "mcc-nuclides.yaml" file, containing nuclides defined by the MC2-2 and MC2-3 codes, with various ENDF/B-V mappings. 
""" DEFAULT_NUCLIDES_FILE = os.path.join(context.RES, "nuclides.dat") DEFAULT_MCC_NUCLIDES_FILE = os.path.join(context.RES, "mcc-nuclides.yaml") def __init__(self, nuclidesFile=None, mccNuclidesFile=None): self.burnChainImposed: bool = False self.elements = None self.instances: list[INuclide] = [] self.byName: dict[str, INuclide] = {} self.byDBName: dict[str, INuclide] = {} self.byLabel: dict[str, INuclide] = {} self.byMcc2Id: dict[str, INuclide] = {} self.byMcc3Id: dict[str, INuclide] = {} self.byMcc3IdEndfbVII0: dict[str, INuclide] = {} self.byMcc3IdEndfbVII1: dict[str, INuclide] = {} self.byMcnpId: dict[str, INuclide] = {} self.byAAAZZZSId: dict[int, INuclide] = {} self.nuclidesFile: str = nuclidesFile if nuclidesFile else self.DEFAULT_NUCLIDES_FILE self.mccNuclidesFile: str = mccNuclidesFile if mccNuclidesFile else self.DEFAULT_MCC_NUCLIDES_FILE self.factory() def clear(self): """Empty all the data containers in this object.""" # grab all the globals global burnChainImposed global instances global byName global byDBName global byLabel global byMcc2Id global byMcc3Id global byMcc3IdEndfbVII0 global byMcc3IdEndfbVII1 global byMcnpId global byAAAZZZSId # reset the class attributes self.burnChainImposed = False self.elements = None self.instances = [] self.byName = {} self.byDBName = {} self.byLabel = {} self.byMcc2Id = {} self.byMcc3Id = {} self.byMcc3IdEndfbVII0 = {} self.byMcc3IdEndfbVII1 = {} self.byMcnpId = {} self.byAAAZZZSId = {} # reset the globals instances = self.instances burnChainImposed = self.burnChainImposed byName = self.byName byDBName = self.byDBName byLabel = self.byLabel byMcc2Id = self.byMcc2Id byMcc3Id = self.byMcc3Id byMcc3IdEndfbVII0 = self.byMcc3IdEndfbVII0 byMcc3IdEndfbVII1 = self.byMcc3IdEndfbVII1 byMcnpId = self.byMcnpId byAAAZZZSId = self.byAAAZZZSId def addNuclide(self, nuclide: INuclide): """Add an element to the dictionaries in this class.""" if nuclide.name in self.byName or nuclide.getDatabaseName() in self.byDBName or 
nuclide.label in self.byLabel: raise ValueError(f"{nuclide} has already been added.") self.instances.append(nuclide) self.byName[nuclide.name] = nuclide self.byDBName[nuclide.getDatabaseName()] = nuclide self.byLabel[nuclide.label] = nuclide # Add look-up based on the MCNP nuclide ID if isinstance(nuclide, IMcnpNuclide): if nuclide.getMcnpId() in self.byMcnpId: raise ValueError(f"{nuclide} with McnpId {nuclide.getMcnpId()} has already been added.") self.byMcnpId[nuclide.getMcnpId()] = nuclide if not isinstance(nuclide, (NaturalNuclideBase, LumpNuclideBase, DummyNuclideBase)): self.byAAAZZZSId[nuclide.getAAAZZZSId()] = nuclide def factory(self, nuclidesFile: str = None, mccNuclidesFile: str = None, elementsFile: str = None): """ Reads data files to instantiate the :py:class:`INuclides <INuclide>`. Reads NIST, MC**2 and burn chain data files to instantiate the :py:class:`INuclides <INuclide>`. Also clears and fills in the class attibues: instances, byName, byLabel, byMcc3IdEndfbVII0, and byMcc3IdEndfbVII1. This method is automatically run upon initializing the class, hence it is not usually necessary to re-run it unless there is a change to the data files, which should not happen during run time, or a *bad* :py:class`INuclide` is created. Parameters ---------- nuclidesFile: str File path to the custom ARMI "nuclides.dat" file, containing a plain text description of all nuclides to be modeled including: Z, number of neutrons, mass number, AMU, natural abundance, half life, nu-bar and more. mccNuclidesFile: str File path to the "mcc-nuclides.yaml" file, containing nuclides defined by the MC2-2 and MC2-3 codes, with various ENDF/B-V mappings. elementsFile: str File path to the custom ARMI "elements.dat" file. Notes ----- This cannot be run more than once. NuclideBase instances are used throughout the ARMI ecosystem and are even class attributes in some cases. Re-instantiating them would orphan any existing ones and break everything. 
""" if len(self.instances) != 0: raise RuntimeError( "Nuclides are already initialized and cannot be re-initialized unless `nuclideBases.clear()` is called " "first." ) # If an input file is provided, use it, otherwise there is a class default. if nuclidesFile: self.nuclidesFile = nuclidesFile if mccNuclidesFile: self.mccNuclidesFile = mccNuclidesFile # load the fundamental elements library elements.factory(elementsFile) self.elements = elements.elements # load the isotopes and isomers library self.addNuclideBases(self.nuclidesFile) self.__addNaturalNuclideBases() self.__addDummyNuclideBases() self.__addLumpedFissionProductNuclideBases() self.updateNuclideBasesForSpecialCases() self.readMCCNuclideData(self.mccNuclidesFile) self.__renormalizeNuclideToElementRelationship() self.__deriveElementalWeightsByNaturalNuclideAbundances() def initReachableActiveNuclidesThroughBurnChain(self, nuclides, numberDensities, activeNuclides): """ March through the depletion chain and find all nuclides that can be reached by depleting nuclides passed in. This limits depletion to the smallest set of nuclides that matters. Parameters ---------- nuclides : np.array, dtype="S6" Starting array of nuclide names numberDensities : np.array, dtype=np.float64 Starting array of number densities activeNuclides : OrderedSet Active nuclides defined on the reactor blueprints object. 
    def initReachableActiveNuclidesThroughBurnChain(self, nuclides, numberDensities, activeNuclides):
        """
        March through the depletion chain and find all nuclides that can be reached by depleting nuclides passed in.

        This limits depletion to the smallest set of nuclides that matters.

        Parameters
        ----------
        nuclides : np.array, dtype="S6"
            Starting array of nuclide names
        numberDensities : np.array, dtype=np.float64
            Starting array of number densities
        activeNuclides : OrderedSet
            Active nuclides defined on the reactor blueprints object. See: armi.reactor.blueprints.py

        Returns
        -------
        tuple of (np.array, np.array)
            The (possibly extended) nuclide-name and number-density arrays; newly
            reached nuclides are appended with zero density.
        """
        # Without an imposed burn chain there is nothing to traverse.
        if not self.burnChainImposed:
            return nuclides, numberDensities

        missingActiveNuclides = set()
        memo = set()
        nucNames = [nucName.decode() for nucName in nuclides]
        difference = set(nucNames).difference(memo)
        # NOTE(review): `any(difference)` tests element truthiness; nuclide names
        # are non-empty strings, so this behaves like `while difference`.
        while any(difference):
            newNucs = set()
            # Visit one not-yet-processed nuclide per pass.
            nuclide = difference.pop()
            memo.add(nuclide)
            # Skip the nuclide if it is not `active` in the burn-chain
            if nuclide not in activeNuclides:
                continue
            nuclideObj = self.byName[nuclide]
            for interaction in nuclideObj.trans + nuclideObj.decays:
                try:
                    # Interaction nuclides can only be added to the number density dictionary if they are a part of the
                    # user-defined active nuclides
                    productNuclide = interaction.getPreferredProduct(activeNuclides)
                    if productNuclide not in nucNames:
                        newNucs.add(productNuclide.encode())
                except KeyError:
                    # Keep track of the first production nuclide
                    # NOTE(review): assumes `interaction.productNuclides` is
                    # hashable (e.g., a tuple) — confirm in the transmutations module.
                    missingActiveNuclides.add(interaction.productNuclides)

            # add the new nuclides to the number density arrays (zero density)
            newNDens = np.zeros(len(newNucs), dtype=np.float64)
            nuclides = np.append(nuclides, list(newNucs))
            numberDensities = np.append(numberDensities, newNDens)
            # Re-derive the decoded name list and the unvisited frontier.
            nucNames = [nucName.decode() for nucName in nuclides]
            difference = set(nucNames).difference(memo)

        if self.burnChainImposed and missingActiveNuclides:
            self._failOnMissingActiveNuclides(missingActiveNuclides)

        return nuclides, numberDensities
Add the following nuclides:" for i, nucList in enumerate(missingActiveNuclides, 1): msg += f"\n {i} - " # Index of for j, nuc in enumerate(nucList, 1): delimiter = " or " if j < len(nucList) else "" msg += f"{nuc}{delimiter}" raise ValueError(msg) def getIsotopics(self, nucName): """Expand elemental nuc name to isotopic nuc bases.""" nb = self.byName[nucName] if isinstance(nb, (LumpNuclideBase, DummyNuclideBase)): # skip lumped fission products or dumps return [] elif isinstance(nb, NaturalNuclideBase): isotopics = nb.getNaturalIsotopics() else: isotopics = [nb] return isotopics def fromName(self, name): """Return a nuclide from its name.""" matches = [nn for nn in self.instances if nn.name == name] if len(matches) != 1: raise Exception(f"Too many or too few ({len(matches)}) matches for {name}") return matches[0] def isMonoIsotopicElement(self, name): """Return true if this is the only naturally occurring isotope of its element.""" base = self.byName[name] return base.abundance > 0 and len([e for e in base.element.nuclides if e.abundance > 0]) == 1 def where(self, predicate): """ Return all :py:class:`INuclides <INuclide>` objects matching a condition. Returns an iterator of :py:class:`INuclides <INuclide>` matching the specified condition. Parameters ---------- predicate: lambda A lambda, or function, accepting a :py:class:`INuclide` as a parameter Examples -------- >>> from armi.nucDirectory.nuclideBases import NuclideBases >>> nuclideBases = NuclideBases() >>> [nn.name for nn in nuclideBases.where(lambda nb: "Z" in nb.name)] ['ZN64', 'ZN66', 'ZN67', 'ZN68', 'ZN70', 'ZR90', 'ZR91', 'ZR92', 'ZR94', 'ZR96', 'ZR93', 'ZR95', 'ZR'] >>> # in order to get length, convert to list >>> isomers90 = list(nuclideBases.where(lambda nb: nb.a == 95)) >>> len(isomers90) 3 >>> for iso in isomers: ... 
print(iso) <NuclideBase MO95: Z:42, A:95, S:0, label:MO2N> <NuclideBase NB95: Z:41, A:95, S:0, label:NB2N> <NuclideBase ZR95: Z:40, A:95, S:0, label:ZR2N> """ return filter(predicate, self.instances) def single(self, predicate): """ Return a single :py:class:`INuclide` object meeting the specified condition. Similar to :py:func:`where`, this function uses a lambda input to filter the :py:attr:`INuclide instances <instances>`. If there is not 1 and only 1 match for the specified condition, an exception is raised. Examples -------- >>> from armi.nucDirectory import nuclideBases >>> nuclideBases.single(lambda nb: nb.name == "C") <NaturalNuclideBase C: Z:6, w:12.0107358968, label:C> >>> nuclideBases.single(lambda nb: nb.z == 95 and nb.a == 242 and nb.state == 1) <NuclideBase AM242M: Z:95, A:242, S:1, label:AM4C> """ matches = [nuc for nuc in self.instances if predicate(nuc)] if len(matches) != 1: raise IndexError( "Expected single match, but got {} matches:\n {}".format( len(matches), "\n ".join(str(mo) for mo in matches) ) ) return matches[0] def changeLabel(self, nuclideBase, newLabel): """ Updates a nuclide label and modifies the ``byLabel`` look-up dictionary. Notes ----- Since nuclide objects are defined and stored globally, any change to the attributes will be maintained. """ nuclideBase.label = newLabel self.byLabel[newLabel] = nuclideBase def imposeBurnChain(self, burnChainStream): """ Apply transmutation and decay information to each nuclide. Notes ----- You cannot impose a burn chain twice. Doing so would require that you clean out the transmutations and decays from all the module-level nuclide bases, which generally requires that you rebuild them. But rebuilding those is not an option because some of them get set as class-level attributes and would be orphaned. If a need to change burn chains mid-run re-arises, then a better nuclideBase-level burnchain cleanup should be implemented so the objects don't have to change identity. 
    def imposeBurnChain(self, burnChainStream):
        """
        Apply transmutation and decay information to each nuclide.

        Parameters
        ----------
        burnChainStream : stream
            An open file-like object (or string) holding the burn-chain YAML;
            passed directly to ``yaml.load``.

        Notes
        -----
        You cannot impose a burn chain twice. Doing so would require that you clean out the transmutations and
        decays from all the module-level nuclide bases, which generally requires that you rebuild them. But
        rebuilding those is not an option because some of them get set as class-level attributes and would be
        orphaned. If a need to change burn chains mid-run re-arises, then a better nuclideBase-level burnchain
        cleanup should be implemented so the objects don't have to change identity.

        See Also
        --------
        armi.nucDirectory.transmutations : describes file format
        """
        if self.burnChainImposed:
            # The only time this should happen is if in a unit test that has already processed conftest.py and is now
            # building a Case that also imposes this.
            runLog.warning("Burn chain already imposed. Skipping reimposition.")
            return

        # Keep the module-level flag (legacy global API) in sync with this instance.
        self.burnChainImposed = True
        global burnChainImposed
        burnChainImposed = True

        # Round-trip loader; duplicate nuclide keys in the YAML are an input error.
        yaml = YAML(typ="rt")
        yaml.allow_duplicate_keys = False
        burnData = yaml.load(burnChainStream)

        for nucName, burnInfo in burnData.items():
            nuclide = self.byName[nucName]
            # think of this protected stuff as "module level protection" rather than class.
            nuclide._processBurnData(burnInfo)
""" with open(nuclidesFile, "r") as f: for line in f: # Skip header lines if line.startswith("#") or line.startswith("Z"): continue lineData = line.split() _z = int(lineData[0]) _n = int(lineData[1]) a = int(lineData[2]) state = int(lineData[3]) sym = lineData[4].upper() mass = float(lineData[5]) abun = float(lineData[6]) halflife = lineData[7] if halflife == "inf": halflife = np.inf else: halflife = float(halflife) nuSF = float(lineData[8]) element = self.elements.bySymbol[sym] nb = NuclideBase(element, a, mass, abun, state, halflife) nb.nuSF = nuSF self.addNuclide(nb) def __addNaturalNuclideBases(self): """Generates a complete set of nuclide bases for each naturally occurring element.""" for element in self.elements.byZ.values(): if element.symbol not in self.byName: if element.isNaturallyOccurring(): self.addNuclide(NaturalNuclideBase(element.symbol, element)) def __addDummyNuclideBases(self): """ Generates a set of dummy nuclides. Notes ----- These nuclides can be used to truncate a depletion / burn-up chain within the MC2 program. 
""" self.addNuclide(DummyNuclideBase(element=self.elements.byName["Dummy"], name="DUMP1", weight=10.0)) self.addNuclide(DummyNuclideBase(element=self.elements.byName["Dummy"], name="DUMP2", weight=240.0)) def __addLumpedFissionProductNuclideBases(self): """Generates a set of nuclides for use as lumped fission products.""" self.addNuclide( LumpNuclideBase(element=self.elements.byName["LumpedFissionProduct"], name="LFP35", weight=233.273) ) self.addNuclide( LumpNuclideBase(element=self.elements.byName["LumpedFissionProduct"], name="LFP38", weight=235.78) ) self.addNuclide( LumpNuclideBase(element=self.elements.byName["LumpedFissionProduct"], name="LFP39", weight=236.898) ) self.addNuclide( LumpNuclideBase(element=self.elements.byName["LumpedFissionProduct"], name="LFP40", weight=237.7) ) self.addNuclide( LumpNuclideBase(element=self.elements.byName["LumpedFissionProduct"], name="LFP41", weight=238.812) ) self.addNuclide(LumpNuclideBase(element=self.elements.byName["LumpedFissionProduct"], name="LREGN", weight=1.0)) def readMCCNuclideData(self, mccNuclidesFile): r"""Read in the label data for the MC2-2 and MC2-3 cross section codes to the nuclide bases. .. impl:: Separating MCC data from code. :id: I_ARMI_ND_DATA1 :implements: R_ARMI_ND_DATA This function reads the mcc-nuclides.yaml file from the ARMI resources folder. This file contains the MC\ :sup:`2`-2 ID (from ENDF/B-V.2) and MC\ :sup:`2`-3 ID (from ENDF/B-VII.0) for all nuclides in MC\ :sup:`2`. The ``mcc2id``, ``mcc3idEndfVII0``, and ``mcc3idEndfVII1`` attributes of each :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instance are updated as the data is read, and the global dictionaries ``byMcc2Id`` ``byMcc3IdEndfVII0`` and ``byMcc3IdEndfVII1`` are populated with the nuclide bases keyed by their corresponding ID for each code. 
""" with open(mccNuclidesFile, "r") as f: yaml = YAML(typ="rt") nuclides = yaml.load(f) for n in nuclides: nb = self.byName[n] mcc2id = nuclides[n]["ENDF/B-V.2"] mcc3idEndfbVII0 = nuclides[n]["ENDF/B-VII.0"] mcc3idEndfbVII1 = nuclides[n]["ENDF/B-VII.1"] if mcc2id is not None: nb.mcc2id = mcc2id self.byMcc2Id[nb.getMcc2Id()] = nb if mcc3idEndfbVII0 is not None: nb.mcc3idEndfbVII0 = mcc3idEndfbVII0 self.byMcc3IdEndfbVII0[nb.getMcc3IdEndfbVII0()] = nb if mcc3idEndfbVII1 is not None: nb.mcc3idEndfbVII1 = mcc3idEndfbVII1 self.byMcc3IdEndfbVII1[nb.getMcc3IdEndfbVII1()] = nb # Have the byMcc3Id dictionary be VII.1 IDs. self.byMcc3Id = self.byMcc3IdEndfbVII1 def updateNuclideBasesForSpecialCases(self): """ Update the nuclide bases for special case name changes. .. impl:: The special case name Am242g is supported. :id: I_ARMI_ND_ISOTOPES6 :implements: R_ARMI_ND_ISOTOPES This function updates the keys for the :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instances for Am-242m and Am-242 in the ``byName`` and ``byDBName`` global dictionaries. This function associates the more common isomer Am-242m with the name "AM242", and uses "AM242G" to denote the ground state. Notes ----- This function is specifically added to change the definition of `AM242` to refer to its metastable isomer, `AM242M` by default. `AM242M` is most common isomer of `AM242` and is typically the desired isomer when being requested rather than than the ground state (i.e., S=0) of `AM242`. """ # Change the name of `AM242` to specific represent its ground state. am242g = self.byName["AM242"] am242g.name = "AM242G" self.byName["AM242G"] = am242g self.byDBName[self.byName["AM242G"].getDatabaseName()] = am242g # Update the pointer of `AM242` to refer to `AM242M`. 
    def updateNuclideBasesForSpecialCases(self):
        """
        Update the nuclide bases for special case name changes.

        .. impl:: The special case name Am242g is supported.
            :id: I_ARMI_ND_ISOTOPES6
            :implements: R_ARMI_ND_ISOTOPES

            This function updates the keys for the :py:class:`NuclideBase
            <armi.nucDirectory.nuclideBases.NuclideBase>` instances for Am-242m and Am-242 in the ``byName`` and
            ``byDBName`` global dictionaries. This function associates the more common isomer Am-242m with the name
            "AM242", and uses "AM242G" to denote the ground state.

        Notes
        -----
        This function is specifically added to change the definition of `AM242` to refer to its metastable isomer,
        `AM242M` by default. `AM242M` is most common isomer of `AM242` and is typically the desired isomer when
        being requested rather than than the ground state (i.e., S=0) of `AM242`.
        """
        # Change the name of `AM242` to specific represent its ground state.
        am242g = self.byName["AM242"]
        am242g.name = "AM242G"
        self.byName["AM242G"] = am242g
        self.byDBName[self.byName["AM242G"].getDatabaseName()] = am242g

        # Update the pointer of `AM242` to refer to `AM242M`.
        am242m = self.byName["AM242M"]
        self.byName["AM242"] = am242m
        # Hard-coded DB key: the plain "nAm242" database name also resolves to the metastable isomer.
        self.byDBName["nAm242"] = am242m
        self.byDBName[self.byName["AM242"].getDatabaseName()] = am242m

    def __renormalizeNuclideToElementRelationship(self):
        """Fill in the missing element data for each nuclide."""
        for nuc in self.instances:
            if nuc.element is None:
                nuc.element = self.elements.byZ[nuc.z]
                # NOTE(review): Element.append presumably registers the nuclide
                # on the element's nuclide list — confirm in the elements module.
                nuc.element.append(nuc)

    def __deriveElementalWeightsByNaturalNuclideAbundances(self):
        """Derives and sets the standard atomic weights for each element that has naturally occurring nuclides."""
        for element in self.elements.byName.values():
            numer = 0.0
            denom = 0.0
            for nb in element.getNaturalIsotopics():
                # Abundance-weighted average of the natural isotopes' masses.
                numer += nb.weight * nb.abundance
                denom += nb.abundance

            # Elements with no natural isotopes (denom == 0) keep their existing weight.
            if denom:
                element.standardWeight = numer / denom


# Build the module-level nuclide directory at import time.
# NOTE(review): this `factory` is the module-level function (defined outside this
# view), not the method above — confirm against the top of the module.
factory()



================================================
FILE: armi/nucDirectory/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path

# Directory holding these tests; used by tests to locate fixture data files.
NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH = path.dirname(__file__)


================================================
FILE: armi/nucDirectory/tests/test_elements.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Elements."""

import unittest

from armi.nucDirectory.elements import Element, Elements


class TestElements(unittest.TestCase):
    """Tests for the Elements container and its look-up dictionaries."""

    def setUp(self):
        # A fresh container per test, loaded from the default element data.
        self.elements = Elements()

    def test_elements_elementBulkProperties(self):
        # All three look-up dictionaries must index the same element population.
        numElements = len(self.elements.byZ)
        self.assertEqual(numElements, len(self.elements.byZ.values()))
        self.assertEqual(numElements, len(self.elements.byName))
        self.assertEqual(numElements, len(self.elements.bySymbol))

    def test_element_elementByNameReturnsElement(self):
        """Get elements by name.

        .. test:: Get elements by name.
            :id: T_ARMI_ND_ELEMENTS0
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            self.assertIs(ee, self.elements.byName[ee.name])

    def test_element_elementByZReturnsElement(self):
        """Get elements by Z.

        .. test:: Get elements by Z.
            :id: T_ARMI_ND_ELEMENTS1
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            self.assertIs(ee, self.elements.byZ[ee.z])

    def test_element_elementBySymbolReturnsElement(self):
        """Get elements by symbol.

        .. test:: Get elements by symbol.
            :id: T_ARMI_ND_ELEMENTS2
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            self.assertIs(ee, self.elements.bySymbol[ee.symbol])

    def test_element_addExistingElementFails(self):
        # NOTE(review): relies on the container exposing an ``Element`` attribute;
        # confirm `Elements.Element` exists and that duplicates raise ValueError.
        for ee in self.elements.byZ.values():
            with self.assertRaises(ValueError):
                self.elements.Element(ee.z, ee.symbol, ee.name)

    def test_addedElementAppearsInElementList(self):
        # A newly added element must appear in all three look-up dictionaries.
        self.assertNotIn("bacon", self.elements.byName)
        self.assertNotIn(999, self.elements.byZ)
        self.assertNotIn("BZ", self.elements.bySymbol)
        self.elements.addElement(Element(999, "BZ", "bacon"))
        self.assertIn("bacon", self.elements.byName)
        self.assertIn(999, self.elements.byZ)
        self.assertIn("BZ", self.elements.bySymbol)

    def test_elementGetNatIsosOnlyRetrievesAbund(self):
        # getNaturalIsotopics() must only yield isotopes with positive abundance.
        for ee in self.elements.byZ.values():
            if not ee.isNaturallyOccurring():
                continue
            for nuc in ee.getNaturalIsotopics():
                self.assertGreater(nuc.abundance, 0.0)
                self.assertGreater(nuc.a, 0)

    def test_elementIsNatOccurring(self):
        """
        Test isNaturallyOccurring method by manually testing all elements.

        Uses RIPL definitions of naturally occurring. Protactinium is debated as naturally occurring. Yeah it
        exists as a U235 decay product but it's kind of pseudo-natural.

        .. test:: Get elements by Z to show if they are naturally occurring.
            :id: T_ARMI_ND_ELEMENTS3
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            # Tc (43), Pm (61), Po through Ac (84-89), and Np onward (>= 93) are not natural.
            if ee.z == 43 or ee.z == 61 or 84 <= ee.z <= 89 or ee.z >= 93:
                self.assertFalse(ee.isNaturallyOccurring())
            else:
                nat = ee.isNaturallyOccurring()
                self.assertTrue(nat)

    def test_abundancesAddToOne(self):
        # Natural abundances of every naturally occurring element must sum to ~1.
        for ee in self.elements.byZ.values():
            if not ee.isNaturallyOccurring():
                continue
            totAbund = sum([iso.abundance for iso in ee.nuclides])
            self.assertAlmostEqual(
                totAbund,
                1.0,
                places=4,
            )
    def test_isHeavyMetal(self):
        """Get elements by Z.

        .. test:: Get elements by Z to show if they are heavy metals.
            :id: T_ARMI_ND_ELEMENTS4
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            # Heavy metal here means Z above actinium (Z > 89).
            if ee.z > 89:
                self.assertTrue(ee.isHeavyMetal())
            else:
                self.assertFalse(ee.isHeavyMetal())


================================================
FILE: armi/nucDirectory/tests/test_nucDirectory.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests nuclide directory."""

import unittest

from armi.nucDirectory import nucDir
from armi.nucDirectory.nuclideBases import NuclideBases


class TestNucDirectory(unittest.TestCase):
    """Tests for the nucDir name-resolution helpers."""

    def test_nucDir_getNameForOldDashedNames(self):
        # Legacy dash-separated names (e.g., "U-235") must still resolve.
        oldNames = [
            "U-232",
            "U-233",
            "U-234",
            "U-235",
            "U-236",
            "U-238",
            "B-10",
            "B-11",
            "BE-9",
            "F-19",
            "LI-6",
            "LI-7",
            "W-182",
            "W-183",
            "W-184",
            "W-186",
            "S-32",
            "O-16",
        ]
        for oldName in oldNames:
            self.assertIsNotNone(nucDir.getNuclideFromName(oldName))

    def test_nucDir_getNucFromNucNameReturnsNuc(self):
        # Every registered nuclide base must round-trip through nucDir by name.
        nb = NuclideBases()
        for nuc in nb.instances:
            self.assertEqual(nuc, nucDir.getNuclideFromName(nuc.name))

    def test_nucDir_getNuclidesFromForBadName(self):
        with self.assertRaises(Exception):
            nucDir.getNuclideFromName("Charlie")

    def test_getDisplacementEnergy(self):
        """Test getting the displacement energy for a given nuclide."""
        ed = nucDir.getThresholdDisplacementEnergy("H1")
        self.assertEqual(ed, 10.0)
        with self.assertRaises(KeyError):
            nucDir.getThresholdDisplacementEnergy("fail")
armi/nucDirectory/tests/test_nuclideBases.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for NuclideBases.""" import math import os import random import unittest from ruamel.yaml import YAML from armi.context import RES from armi.nucDirectory.nuclideBases import ( DummyNuclideBase, LumpNuclideBase, NaturalNuclideBase, NuclideBases, ) from armi.nucDirectory.tests import NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH from armi.utils.units import AVOGADROS_NUMBER, CURIE_PER_BECQUEREL, SECONDS_PER_HOUR class TestNuclideBases(unittest.TestCase): @classmethod def setUpClass(cls): cls.nucDirectoryTestsPath = NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH cls.nuclideBases = NuclideBases() # Ensure that the burn chain data is initialized before running these tests. cls.nuclideBases.burnChainImposed = False with open(os.path.join(RES, "burn-chain.yaml"), "r") as burnChainStream: cls.nuclideBases.imposeBurnChain(burnChainStream) def test_nucBases_fromNameBadNameRaisesException(self): with self.assertRaises(KeyError): self.nuclideBases.byName["Cat"] def test_nucBase_AllAbundancesAddToOne(self): for zz in range(1, 102): nuclides = self.nuclideBases.elements.byZ[zz].nuclides # We only process nuclides with measured masses. 
Some are purely theoretical, mostly over z=100 self.assertGreater(len(nuclides), 0, msg=f"z={zz} unexpectedly has no nuclides") total = sum([nn.abundance for nn in nuclides if nn.a > 0]) self.assertAlmostEqual( any([nn.abundance > 0 for nn in nuclides]), total, delta=1e-4, msg="Abundance ({}) not 1.0 for nuclideBases:\n {}".format( total, "\n ".join(repr(nn) for nn in nuclides) ), ) def test_nucBases_AllLabelsAreUnique(self): labels = [] for nn in self.nuclideBases.instances: self.assertNotIn(nn.label, labels, f"Label already exists: {nn.label}") labels.append(nn.label) def test_nucBases_NegativeZRaisesException(self): for _ in range(0, 5): with self.assertRaises(Exception): self.nuclideBases.isotopes(random.randint(-1000, -1)) def test_nucBases_Z295RaisesException(self): with self.assertRaises(Exception): self.nuclideBases.isotopes(295) def test_nucBases_Mc2Elementals(self): notElemental = [ "LFP35", "LFP38", "LFP39", "LFP40", "LFP41", "DUMMY", "DUMP1", "DUMP2", "LREGN", ] for lump in self.nuclideBases.where(lambda nn: isinstance(nn, LumpNuclideBase)): if lump.name in notElemental: self.assertIsInstance(lump, LumpNuclideBase) else: self.assertIsInstance(lump, NaturalNuclideBase) def test_LumpNucBaseGetNatIsotopDoesNotFail(self): for nuc in self.nuclideBases.where(lambda nn: isinstance(nn, LumpNuclideBase) and nn.z == 0): self.assertEqual(0, len(list(nuc.getNaturalIsotopics())), nuc) def test_NaturalNuclideBase_getNatrualIsotpics(self): for nuc in self.nuclideBases.where(lambda nn: isinstance(nn, NaturalNuclideBase)): numNaturals = len(list(nuc.getNaturalIsotopics())) self.assertGreaterEqual(len(nuc.element.nuclides) - 1, numNaturals) def test_nucBases_singleFailsWithMultipleMatches(self): with self.assertRaises(Exception): self.nuclideBases.single(lambda nuc: nuc.z == 92) def test_nucBases_singleFailsWithNoMatches(self): with self.assertRaises(Exception): self.nuclideBases.single(lambda nuc: nuc.z == 1000) def test_nucBases_singleIsPrettySpecific(self): u235 = 
self.nuclideBases.single(lambda nuc: nuc.name == "U235") self.assertEqual(235, u235.a) self.assertEqual(92, u235.z) def test_natNucStomicWgtIsAvgOfNatIsotopes(self): for natNuk in self.nuclideBases.where(lambda nn: isinstance(nn, NaturalNuclideBase)): atomicMass = 0.0 for natIso in natNuk.getNaturalIsotopics(): atomicMass += natIso.abundance * natIso.weight self.assertAlmostEqual(atomicMass, natNuk.weight, delta=0.000001) def test_nucBasesLabelAndNameCollsAreForSameNuc(self): """The name and labels for correct for nuclides. .. test:: Validate the name, label, and DB name are accessible for nuclides. :id: T_ARMI_ND_ISOTOPES0 :tests: R_ARMI_ND_ISOTOPES """ count = 0 for nuc in self.nuclideBases.where(lambda nn: nn.name == nn.label): count += 1 self.assertIs(nuc, self.nuclideBases.byName[nuc.name]) self.assertIs(nuc, self.nuclideBases.byDBName[nuc.getDatabaseName()]) self.assertIs(nuc, self.nuclideBases.byLabel[nuc.label]) self.assertGreater(count, 10) def test_nucBases_imposeBurnChainDecayBulkStats(self): """Test must be updated manually when burn chain is modified.""" decayers = list(self.nuclideBases.where(lambda nn: len(nn.decays) > 0)) self.assertTrue(decayers) for nuc in decayers: if nuc.name in [ "U238", "PU240", "PU242", "CM242", "CM244", "CM246", "CF250", "CF252", ]: continue self.assertAlmostEqual(1.0, sum(dd.branch for dd in nuc.decays)) def test_nucBasesImposeBurnChainTransmBulkStats(self): """ Make sure all branches are equal to 1 for every transmutation type. Exception: We allow 3e-4 threshold to account for ternary fissions, which are usually < 2e-4 per fission. 
""" trasmuters = self.nuclideBases.where(lambda nn: len(nn.trans) > 0) self.assertTrue(trasmuters) for nuc in trasmuters: expected = len(set(tt.type for tt in nuc.trans)) self.assertTrue(all(0.0 <= tt.branch <= 1.0 for tt in nuc.trans)) actual = sum(tt.branch for tt in nuc.trans) # ternary fission self.assertAlmostEqual( expected, actual, msg=f"{nuc} has {expected} transmutation but the branches add up to {actual}", delta=3e-4, ) def test_nucBases_imposeBurn_nuSF(self): """Test the nuclide data from file (specifically neutrons / sponaneous fission). .. test:: Test that nuclide data was read from file instead of code. :id: T_ARMI_ND_DATA0 :tests: R_ARMI_ND_DATA """ actual = {nn.name: nn.nuSF for nn in self.nuclideBases.where(lambda nn: nn.nuSF > 0.0)} expected = { "CM248": 3.1610, "BK249": 3.4000, "CF249": 3.4000, "CF250": 3.5200, "CF252": 3.7676, "U232": 1.710000, "U234": 1.8000, "U235": 1.8700, "U236": 1.900, "U238": 2.000, "PU236": 2.1200, "PU238": 2.2100, "PU239": 2.3200, "PU240": 2.1510, "PU242": 2.1410, "CM242": 2.5280, "CM243": 0.0000, "CM244": 2.6875, "CM245": 0.0000, "CM246": 2.9480, "TH230": 1.390000, "TH232": 1.5, "NP237": 2.05, "PA231": 1.710000, "PU241": 2.25, "PU244": 2.290000, "U233": 1.76, "AM241": 2.5, "AM242M": 2.56, "AM243": 2.61, "ES253": 4.700000, } for key, val in actual.items(): self.assertEqual(val, expected[key]) def test_nucBases_databaseNamesStartWith_n(self): for nb in self.nuclideBases.instances: self.assertEqual("n", nb.getDatabaseName()[0]) def test_nucBases_AllDatabaseNamesAreUnique(self): self.assertEqual( len(self.nuclideBases.instances), len(set(nb.getDatabaseName() for nb in self.nuclideBases.instances)), ) def test_nucBases_Am242m(self): """Test the correct am242g and am242m abbreviations are supported. .. test:: Specifically test for Am242 and Am242g because it is a special case. 
        :id: T_ARMI_ND_ISOTOPES1
        :tests: R_ARMI_ND_ISOTOPES
        """
        # NOTE: in ARMI the bare name "AM242" refers to the metastable state (AM242M),
        # matching common reactor-physics convention; the ground state is "AM242G".
        am242m = self.nuclideBases.byName["AM242"]
        self.assertIs(am242m, self.nuclideBases.byName["AM242M"])
        self.assertEqual("nAm242m", am242m.getDatabaseName())
        # The un-suffixed database name also resolves to the metastable state.
        self.assertIs(am242m, self.nuclideBases.byDBName["nAm242"])
        self.assertAlmostEqual(am242m.weight, 242.059601666)

        am242g = self.nuclideBases.byName["AM242G"]
        self.assertIs(am242g, self.nuclideBases.byName["AM242G"])
        self.assertEqual("nAm242g", am242g.getDatabaseName())
        self.assertIs(am242g, self.nuclideBases.byDBName["nAm242g"])

    def test_nucBases_isHeavyMetal(self):
        """Nuclides with Z <= 89 are not heavy metal; dummy/lump bases never are."""
        for nb in self.nuclideBases.where(lambda nn: nn.z <= 89):
            self.assertFalse(nb.isHeavyMetal())
        for nb in self.nuclideBases.where(lambda nn: nn.z > 89):
            if isinstance(nb, (DummyNuclideBase, LumpNuclideBase)):
                self.assertFalse(nb.isHeavyMetal())
            else:
                self.assertTrue(nb.isHeavyMetal())

    def test_getDecay(self):
        # Grab any Z=89 (actinium) nuclide from the test library.
        nb = list(self.nuclideBases.where(lambda nn: nn.z == 89))[0]
        # This test is a bit boring, because the test nuclide library is a bit boring.
        self.assertIsNone(nb.getDecay("sf"))

    def test_getEndfMatNum(self):
        """Test the ENDF MAT number lookup for nuclides retrieved by name.

        .. test:: Test get nuclides by name.
            :id: T_ARMI_ND_ISOTOPES2
            :tests: R_ARMI_ND_ISOTOPES
        """
        self.assertEqual(self.nuclideBases.byName["U235"].getEndfMatNum(), "9228")
        self.assertEqual(self.nuclideBases.byName["U238"].getEndfMatNum(), "9237")
        self.assertEqual(self.nuclideBases.byName["PU239"].getEndfMatNum(), "9437")
        self.assertEqual(self.nuclideBases.byName["TC99"].getEndfMatNum(), "4325")
        self.assertEqual(self.nuclideBases.byName["AM242"].getEndfMatNum(), "9547")  # meta 1
        self.assertEqual(self.nuclideBases.byName["CF252"].getEndfMatNum(), "9861")
        self.assertEqual(self.nuclideBases.byName["NP237"].getEndfMatNum(), "9346")
        self.assertEqual(self.nuclideBases.byName["PM151"].getEndfMatNum(), "6161")
        self.assertEqual(self.nuclideBases.byName["PA231"].getEndfMatNum(), "9131")

    def test_NonMc2Nuclide(self):
        """Make sure nuclides that aren't in MC2 still get nuclide bases."""
        nuc = self.nuclideBases.byName["YB154"]
        self.assertEqual(nuc.a, 154)

    def test_kryptonDecayConstants(self):
        """Tests that the nuclides data contains the expected decay constants."""
        # hand calculated reference data includes stable isotopes, radioactive
        # isotopes, metastable isotopes and exercises metastable minimum halflife
        # (decay constants are in 1/s; 0 means stable)
        REF_KR_DECAY_CONSTANTS = [
            ("KR69", 24.755256448569472),
            ("KR70", 17.3286795139986),
            ("KR71", 6.93147180559945),
            ("KR72", 0.04053492283976288),
            ("KR73", 0.0253900066139174),
            ("KR74", 0.0010045611312463),
            ("KR75", 0.00251140282811574),
            ("KR76", 0.0000130095191546536),
            ("KR77", 0.000162139691359051),
            ("KR78", 0),
            ("KR79", 5.49488822742219e-06),
            ("KR79M", 0.0138629436111989),
            ("KR80", 0),
            ("KR81", 9.591693391393433e-14),
            ("KR81M", 0.0529119985160263),
            ("KR82", 0),
            ("KR83", 0),
            ("KR83M", math.log(2) / (1.83 * SECONDS_PER_HOUR)),
            ("KR84", 0),
            ("KR85", 2.0453466678736843e-09),
            ("KR85M", 4.29725468419061e-05),
            ("KR86", 0),
            ("KR87", 0.000151408296321526),
            ("KR88", 0.0000681560649518136),
            ("KR89", 0.00366744539978807),
            ("KR90", 0.021446385537127),
            ("KR91", 0.0808806511738559),
            ("KR92", 0.376710424217362),
            ("KR93", 0.538994697169475),
            ("KR94", 3.26956217245257),
            ("KR95", 6.08023842596443),
            ("KR96", 8.66433975699932),
            ("KR97", 11.0023361993642),
            ("KR98", 16.1197018734871),
            ("KR99", 53.3190138892265),
            ("KR100", 99.0210257942778),
            ("KR101", 1091570.36308652),
        ]
        for nucName, refDecayConstant in REF_KR_DECAY_CONSTANTS:
            refNb = self.nuclideBases.byName[nucName]
            # stable nuclides have an infinite half-life, giving a 0.0 decay constant
            decayConstantNb = math.log(2) / refNb.halflife
            try:
                # relative comparison to 6 places (handles the wide magnitude range)
                self.assertAlmostEqual((refDecayConstant - decayConstantNb) / refDecayConstant, 0, 6)
            except ZeroDivisionError:
                # reference value is 0 (stable isotope); require an exact match
                self.assertEqual(refDecayConstant, decayConstantNb)
            except AssertionError:
                # re-raise with a message identifying which nuclide disagreed
                errorMessage = (
                    f"{nucName} reference decay constant {refDecayConstant} ARMI decay constant {decayConstantNb}"
                )
                raise AssertionError(errorMessage)

        # spot check that effectively-stable nuclides have ~zero decay constants
        for nucName in ["XE134", "XE136", "EU151"]:
            nb = self.nuclideBases.byName[nucName]
            decayConstantNb = math.log(2) / nb.halflife
            self.assertAlmostEqual(decayConstantNb, 0, places=3)

    def test_curieDefinitionWithRa226(self):
        """
        Tests that the decay constant of Ra-226 is close to 1 Ci.

        Notes
        -----
        The original definition of 1 Ci was based on the half-life of Ra-226
        for 1 gram. The latest evaluations show that 1 gram is defined as
        0.988 Ci.
        """
        ra226 = self.nuclideBases.byName["RA226"]
        decayConstantRa226 = math.log(2) / ra226.halflife
        weight = ra226.weight
        mass = 1  # gram
        # activity [Bq] = (atoms in 1 g) * decay constant
        activity = mass * AVOGADROS_NUMBER / weight * decayConstantRa226  # 1 gram
        activity = activity * CURIE_PER_BECQUEREL
        self.assertAlmostEqual(activity, 0.9885593, places=6)

    def test_loadMcc2Data(self):
        """Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-V.2 nuclides in the
        data model.

        .. test:: Test that MCC v2 ENDF/B-V.2 IDs can be queried by nuclides.
            :id: T_ARMI_ND_ISOTOPES3
            :tests: R_ARMI_ND_ISOTOPES
        """
        with open(os.path.join(RES, "mcc-nuclides.yaml")) as f:
            yaml = YAML(typ="rt")
            data = yaml.load(f)

        # nuclides with a non-null ENDF/B-V.2 entry in the YAML are expected in the data model
        expectedNuclides = set([nuc for nuc in data.keys() if data[nuc]["ENDF/B-V.2"] is not None])
        for nuc, nb in self.nuclideBases.byMcc2Id.items():
            self.assertIn(nb.name, expectedNuclides)
            self.assertEqual(nb.getMcc2Id(), nb.mcc2id)
            self.assertEqual(nb.getMcc2Id(), nuc)

        self.assertEqual(len(self.nuclideBases.byMcc2Id), len(expectedNuclides))

    def test_loadMcc3EndfVII0Data(self):
        """Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-VII.0 nuclides in
        the data model.

        .. test:: Test that MCC v3 ENDF/B-VII.0 IDs can be queried by nuclides.
            :id: T_ARMI_ND_ISOTOPES4
            :tests: R_ARMI_ND_ISOTOPES

        .. test:: Test the MCC ENDF/B-VII.0 nuclide data that was read from file instead of code.
            :id: T_ARMI_ND_DATA1
            :tests: R_ARMI_ND_DATA
        """
        with open(os.path.join(RES, "mcc-nuclides.yaml")) as f:
            yaml = YAML(typ="rt")
            data = yaml.load(f)

        expectedNuclides = set([nuc for nuc in data.keys() if data[nuc]["ENDF/B-VII.0"] is not None])
        for nuc, nb in self.nuclideBases.byMcc3IdEndfbVII0.items():
            self.assertIn(nb.name, expectedNuclides)
            self.assertEqual(nb.getMcc3IdEndfbVII0(), nb.mcc3idEndfbVII0)
            self.assertEqual(nb.getMcc3IdEndfbVII0(), nuc)

        # Subtract 1 nuclide due to DUMP2.
        self.assertEqual(len(self.nuclideBases.byMcc3IdEndfbVII0), len(expectedNuclides) - 1)

    def test_loadMcc3EndfVII1Data(self):
        """Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-VII.1 nuclides in
        the data model.

        .. test:: Test that MCC v3 ENDF/B-VII.1 IDs can be queried by nuclides.
            :id: T_ARMI_ND_ISOTOPES6
            :tests: R_ARMI_ND_ISOTOPES

        .. test:: Test the MCC ENDF/B-VII.1 nuclide data that was read from file instead of code.
            :id: T_ARMI_ND_DATA2
            :tests: R_ARMI_ND_DATA
        """
        with open(os.path.join(RES, "mcc-nuclides.yaml")) as f:
            yaml = YAML(typ="rt")
            data = yaml.load(f)

        expectedNuclides = set([nuc for nuc in data.keys() if data[nuc]["ENDF/B-VII.1"] is not None])
        for nuc, nb in self.nuclideBases.byMcc3IdEndfbVII1.items():
            self.assertIn(nb.name, expectedNuclides)
            self.assertEqual(nb.getMcc3IdEndfbVII1(), nb.mcc3idEndfbVII1)
            self.assertEqual(nb.getMcc3IdEndfbVII1(), nuc)
            # ENDF/B-VII.1 is the default MCC v3 library
            self.assertEqual(nb.getMcc3Id(), nb.mcc3idEndfbVII1)
            self.assertEqual(nb.getMcc3Id(), nuc)

        # Subtract 1 nuclide due to DUMP2
        self.assertEqual(len(self.nuclideBases.byMcc3IdEndfbVII1), len(expectedNuclides) - 1)


class TestAAAZZZSId(unittest.TestCase):
    def test_AAAZZZSNameGenerator(self):
        """Test the AAAZZZS ID name generator.

        .. test:: Query the AAAZZS IDs can be retrieved for nuclides.
            :id: T_ARMI_ND_ISOTOPES5
            :tests: R_ARMI_ND_ISOTOPES
        """
        # (nuclide name, expected AAAZZZS string) triplets: AAA=mass, ZZZ=charge, S=meta state
        referenceNucNames = [("C12", "120060"), ("U235", "2350920"), ("AM242M", "2420951")]

        nuclideBases = NuclideBases()
        for nucName, refAaazzzs in referenceNucNames:
            nb = nuclideBases.byName[nucName]
            if refAaazzzs:
                self.assertEqual(refAaazzzs, nb.getAAAZZZSId())



================================================
FILE: armi/nucDirectory/tests/test_thermalScattering.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the composite pattern.""" import unittest from armi.nucDirectory import thermalScattering as ts class TestThermalScattering(unittest.TestCase): def test_dataValidity(self): """Ensure that over time the raw thermal scattering data in ARMI remains valid.""" for key, val in ts.BY_NAME_AND_COMPOUND.items(): # nuclide name must be a non-empty string self.assertIsInstance(key[0], str) self.assertGreater(len(key[0]), 0) if key[1] is not None: # compound CAN be None, but otherwise must be a non-empty string self.assertIsInstance(key[1], str) self.assertGreater(len(key[1]), 0) # ENDF/B-VIII label must be a non-empty string self.assertIsInstance(val[0], str) self.assertGreater(len(val[0]), 0) # ACE label must be a non-empty string self.assertIsInstance(val[1], str) self.assertGreater(len(val[1]), 0) def test_fromNameCompInvalid(self): """If the name/compound inputs aren't valid, we should get a ValueError.""" with self.assertRaises(ValueError): ts.fromNameAndCompound("hi", "mom") with self.assertRaises(ValueError): ts.fromNameAndCompound("C", None) with self.assertRaises(ValueError): ts.fromNameAndCompound("O", None) with self.assertRaises(ValueError): ts.fromNameAndCompound("FE56", "FE56") def test_fromNameCompSpotCheck(self): """Spot check some examples that should work.""" tsl = ts.fromNameAndCompound("FE56", None) self.assertIsInstance(tsl, ts.ThermalScatteringLabels) self.assertEqual(tsl.endf8Label, "tsl-026_Fe_056.endf") self.assertEqual(tsl.aceLabel, "fe-56") tsl = ts.fromNameAndCompound("H", ts.H2O) self.assertIsInstance(tsl, ts.ThermalScatteringLabels) self.assertEqual(tsl.endf8Label, "tsl-HinH2O.endf") self.assertEqual(tsl.aceLabel, "h-h2o") tsl = ts.fromNameAndCompound("O", ts.D2O) self.assertIsInstance(tsl, ts.ThermalScatteringLabels) self.assertEqual(tsl.endf8Label, f"tsl-Oin{ts.D2O}.endf") self.assertEqual(tsl.aceLabel, "o-d2o") tsl = ts.fromNameAndCompound("U", ts.UO2) self.assertIsInstance(tsl, ts.ThermalScatteringLabels) 
self.assertEqual(tsl.endf8Label, "tsl-UinUO2.endf") self.assertEqual(tsl.aceLabel, "u-uo2") ================================================ FILE: armi/nucDirectory/tests/test_transmutations.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for transmutations.""" import random import string import unittest from armi.nucDirectory import transmutations from armi.nucDirectory.nuclideBases import NuclideBases def randomString(length): return "".join(random.choice(string.ascii_lowercase) for _ in range(length)) class TransmutationTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.nuclideBases = NuclideBases() def test_Transmutation_validReactionTypes(self): data = {"products": [""]} for rxn in transmutations.TRANSMUTATION_TYPES: data["type"] = rxn temp = transmutations.Transmutation(self.nuclideBases.byName["AM242M"], data) self.assertEqual(temp.type, rxn) self.assertEqual(temp.productParticle, transmutations.PRODUCT_PARTICLES.get(temp.type)) def test_Transmutation_productParticle(self): temp = transmutations.Transmutation(self.nuclideBases.byName["AM242M"], {"products": [""], "type": "nalph"}) self.assertEqual(temp.productParticle, "HE4") def test_Transmutation_invalidReactionTypes(self): data = {"products": [""], "branch": 1.0} errorCount = 0 for _ in range(0, 5): rxn = randomString(3) data["type"] = rxn if rxn in transmutations.TRANSMUTATION_TYPES: 
self.assertIsNotNone(transmutations.Transmutation(self.nuclideBases.byName["AM242M"], data)) else: with self.assertRaises(KeyError): errorCount += 1 transmutations.Transmutation(self.nuclideBases.byName["AM242M"], data) self.assertGreater(errorCount, 2) class DecayModeTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.nuclideBases = NuclideBases() def test_DecayMode_validReactionTypes(self): data = {"products": [""], "branch": 1.0, "halfLifeInSeconds": 1.0} for rxn in transmutations.DECAY_MODES: data["type"] = rxn decay = transmutations.DecayMode(self.nuclideBases.byName["AM242M"], data) self.assertEqual(decay.type, rxn) def test_DecayMode_invalidReactionTypes(self): data = {"products": [""], "branch": 1.0, "halfLifeInSeconds": 1.0} for _ in range(0, 25): rxn = randomString(3) data["type"] = rxn if rxn in transmutations.DECAY_MODES: self.assertIsNotNone(transmutations.DecayMode(self.nuclideBases.byName["AM242M"], data)) else: with self.assertRaises(KeyError): transmutations.DecayMode(self.nuclideBases.byName["AM242M"], data) ================================================ FILE: armi/nucDirectory/thermalScattering.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Handle awareness of Thermal Scattering labels for ENDF/B-VIII and ACE. 
The information below is based on Parsons, LA-UR-18-25096, https://mcnp.lanl.gov/pdf_files/la-ur-18-25096.pdf Scattering law labels are currently available for a variety of classifications: * Element in Compound (H in H2O, Be in BeO) * Element in structure (C in Graphite, Be in metal) * Can be separated as crystalline, 30% porous, 10% porous, etc. * Element in spin isomer (para H, ortho H, para D, ortho D, etc.) * Compound in phase (solid CH4, liquid CH4, SiO2-alpha, SiO2-beta). * Just compound (benzene) * Just isotope (Fe56, Al27) The labels for these vary across evaluations (e.g. ENDF/B-VII, ENDF/B-VIII, etc.). We provide ENDF/B-III.0 and ACE labels. Other physics kernels will have to derive their own labels as appropriate in client code. """ from dataclasses import dataclass # strings that users might want to reference downstream BE_METAL = "Be-metal" BEO = "BeO" CRYSTALLINE_GRAPHITE = "crystalline-graphite" D2O = "D2O" GRAPHITE_10P = "reactor-graphite-10P" GRAPHITE_30P = "reactor-graphite-30P" H2O = "H2O" SIC = "SiC" UN = "UN" UO2 = "UO2" ZRH = "ZrH" # thermal scattering label data BY_NAME_AND_COMPOUND = { ("AL27", None): ("tsl-013_Al_027.endf", "al-27"), ("BE", BE_METAL): (f"tsl-{BE_METAL}.endf", "be-met"), ("BE", BEO): (BEO, "be-beo"), ("C", CRYSTALLINE_GRAPHITE): (f"tsl-{CRYSTALLINE_GRAPHITE}.endf", "grph"), ("C", GRAPHITE_10P): (f"tsl-{GRAPHITE_10P}.endf", "grph10"), ("C", GRAPHITE_30P): (f"tsl-{GRAPHITE_30P}.endf", "grph30"), ("C", SIC): ("tsl-CinSiC.endf", "c-sic"), ("FE56", None): ("tsl-026_Fe_056.endf", "fe-56"), ("H", H2O): ("tsl-HinH2O.endf", "h-h2o"), ("H", ZRH): ("tsl-HinZrH.endf", "h-zrh"), ("H2", D2O): (f"tsl-Din{D2O}.endf", "d-d2o"), ("N", UN): ("tsl-NinUN.endf", "n-un"), ("O", BEO): ("tsl-OinBeO.endf", "o-beo"), ("O", D2O): (f"tsl-Oin{D2O}.endf", "o-d2o"), ("O", UO2): ("tsl-OinUO2.endf", "o-uo2"), ("SI", SIC): ("tsl-SIinSiC.endf", "si-sic"), ("U", UN): ("tsl-UinUN.endf", "u-un"), ("U", UO2): ("tsl-UinUO2.endf", "u-uo2"), ("ZR", ZRH): 
("tsl-ZRinZrH.endf", "zr-zrh"), } @dataclass(frozen=True) class ThermalScatteringLabels: """Container for the labels for a particular nuclide/compound combination. Attributes ---------- name: str Name of the nuclide. This should match the string in the "byName" field in nuclideBases. compound: str Label indicating what the subjects are in (e.g. ``"Graphite"`` or ``"H2O"``. Can be left off for, e.g. Fe56. endf8Label: str Label for ENDF/B-VIII evaluation. aceLabel: str Lavel for ACE. """ name: str compound: str endf8Label: str aceLabel: str def fromNameAndCompound(name: str, compound: str): """The standard interface for getting ENDF/B-VIII and ACE labels for a given nuclide. Parameters ---------- name: str Name of the nuclide. compound: str Name of the compound (can be None). Returns ------- ThermalScatteringLabels An instance of the data class used to contain the ENDF/ACE labels for this nuclide/componound combination. Raises ------ ValueError ARMI does not store a large data set of labels. If the user requests one ARMI does not have, they get an error. """ if (name, compound) in BY_NAME_AND_COMPOUND: endf, ace = BY_NAME_AND_COMPOUND[(name, compound)] return ThermalScatteringLabels(name, compound, endf, ace) else: raise ValueError(f"No thermal scattering labels are known for name/compound: {name}/{compound}") ================================================ FILE: armi/nucDirectory/transmutations.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the definition of :py:class:`~Transmutation` and :py:class:`~Decay` classes.

.. inheritance-diagram:: Transmutation DecayMode

The mappings between active nuclides during transmutation and decay are described in a
``burn-chain.yaml`` file pointed to by the ``burnChainFileName`` setting. This file contains one
entry per nuclide that can transmute or decay that look similar to the example below::

    U238:
    - nuSF: 2.0000
    - transmutation:
        branch: 1.0
        products:
        - NP237
        type: n2n
    - transmutation:
        branch: 1.0
        products:
        - LFP38
        type: fission
    - transmutation:
        branch: 1.0
        products:
        - NP239
        - PU239
        type: nGamma
    - decay:
        branch: 5.45000e-07
        halfLifeInSeconds: 1.4099935680e+17
        products:
        - LFP38
        type: sf

This example defines 3 transmutations (an ``(n,2n)`` reaction, an ``(n,fission)`` reaction, an
``(n,gamma)`` reaction), and a spontaneous fission decay reaction with a very low branching
ratio.

Valid reaction ``type`` values are listed in
:py:class:`~armi.nucDirectory.transmutations.Transmutation` and
:py:class:`~armi.nucDirectory.transmutations.DecayMode`.

The ``branch`` entry determines the fraction of the products of a given reaction that will end up
in a particular product. The branches must never sum up to anything other than 1.0.

The ``products`` entry is a list, but only one entry will be the actual product. The list defines
a preference order. For example, if ``NP239`` is being tracked as an active nuclide in the problem
it will be the product of the ``nGamma`` reaction above. Otherwise, ``U238`` will transmute
directly to the alternate product, ``PU239``.

.. warning::
    If you track very short-lived decays explicitly then the burn matrix becomes very
    ill-conditioned and numerical solver issues can result. Specialized matrix exponential
    solvers (e.g. CRAM [1]) are required to get adequate solutions in these cases [2].

The example above also defines a ``nuSF`` item, which is how many neutrons are emitted per
spontaneous fission. This is used for intrinsic source term calculations.

[1] Pusa, Maria, and Jaakko Leppanen. "Computing the matrix exponential in burnup calculations."
    Nuclear science and engineering 164.2 (2010): 140-150.

[2] Moler, Cleve, and Charles Van Loan. "Nineteen dubious ways to compute the exponential of a
    matrix." SIAM review 20.4 (1978): 801-836.
"""

import math

from armi import runLog
from armi.utils import iterables

LN2 = math.log(2)
TRANSMUTATION_TYPES = ["n2n", "fission", "nGamma", "nalph", "np", "nd", "nt"]
DECAY_MODES = [
    "bmd",  # beta minus
    "bpd",  # beta plus
    "ad",  # alpha decay
    "ec",  # electron capture
    "sf",  # spontaneous-fission
]
# outgoing particle emitted by each reaction/decay type, where one exists
PRODUCT_PARTICLES = {"nalph": "HE4", "np": "H1", "nd": "H2", "nt": "H3", "ad": "HE4"}


class Transmutable:
    """
    Transmutable base class.

    Attributes
    ----------
    parent : NuclideBase
        The parent nuclide in this reaction.
    type : str
        The type name of reaction (e.g. ``n2n``, ``fission``, etc.)
    productNuclides : list
        The names of potential product nuclides of this reaction, in order of preference. Multiple
        options exist to allow the library to specify a transmutation to one nuclide if the user
        is modeling that nuclide, and other ones as fallbacks in case the user is not tracking the
        preferred product. Only one of these products will be created.
    productParticle : str
        The outgoing particle of this reaction. Could be HE4 for n,alpha, etc. Default is None.
    branch : float
        The fraction of the time that this transmutation occurs. Should be between 0 and 1. Less
        than 1 when a decay or reaction can branch between multiple productNuclides. Do not make
        this >1 to get more than one product because it scales the reaction cross section which
        will double-deplete the parent.

    Notes
    -----
    These are used to link two :py:class:`~armi.nucDirectory.nuclideBases.NuclideBase` objects
    through transmutation or decay.

    See Also
    --------
    Transmutation
    DecayMode
    """

    def __init__(self, parent, dataDict):
        self.parent = parent
        self.type = dataDict["type"]
        self.productNuclides = tuple(dataDict["products"])
        # Fall back to the reaction-type default outgoing particle (may be None).
        self.productParticle = dataDict.get("productParticle", PRODUCT_PARTICLES.get(self.type))
        self.branch = dataDict.get("branch", None)
        if self.branch is None:
            # A missing branching ratio is treated as 1.0; tell the user since this
            # assumption affects depletion results.
            self.branch = 1.0
            runLog.info(f"The branching ratio for {self} was not defined and is assumed to be 1.0.")

    def getPreferredProduct(self, libraryNucNames):
        """
        Get the index of the most preferred transmutation product/decay daughter.

        Notes
        -----
        The ARMI burn chain is not a full burn chain. It short circuits shorter half-lives, and
        uses lumped nuclides as catch-all objects for things that just sit around. Consequently,
        the "preferred" product/daughter may not be actual physical product/daughter.
        """
        # Products are listed in preference order; return the first one the library tracks.
        for product in self.productNuclides:
            if product in libraryNucNames:
                return product

        # No candidate found: build a readable, chunked listing of the available names
        # (about 10 chunks) for the error message.
        groupedNames = iterables.split(libraryNucNames, max(1, int(len(libraryNucNames) / 10)))
        msg = "Could not find suitable product/daughter for {}.\nThe available options were:\n {}".format(
            self, ",\n ".join(", ".join(chunk) for chunk in groupedNames)
        )
        raise KeyError(msg)


class Transmutation(Transmutable):
    r"""
    A transmutation from one nuclide to another.

    Notes
    -----
    The supported transmutation types include:

        * :math:`n,2n`
        * :math:`n,fission`
        * :math:`n,\gamma` (``nGamma``)
        * :math:`n,\alpha` (``nalph``)
        * :math:`n,p` (proton) (``np``)
        * :math:`n,d` (deuteron) (``nd``)
        * :math:`n,t` (triton) (``nt``)
    """

    def __init__(self, parent, dataDict):
        Transmutable.__init__(self, parent, dataDict)
        # Validate the reaction type after base-class construction so the error
        # message can rely on fully-initialized state.
        if self.type not in TRANSMUTATION_TYPES:
            raise KeyError("{} not in {}".format(self.type, TRANSMUTATION_TYPES))

    def __repr__(self):
        return "<Transmutation by {} from {:7s} to {} with branching ratio of {:12.5E}>".format(
            self.type, self.parent.name, self.productNuclides, self.branch
        )


class DecayMode(Transmutable):
    r"""Defines a decay from one nuclide to another.

    Notes
    -----
    The supported decay types are also all transmutations, and include:

        * :math:`\beta^-` (``bmd``)
        * :math:`\beta^+` (``bpd``)
        * :math:`\alpha` (``ad``)
        * Electron capture (``ec``)
        * Spontaneous fission (``sf``)

    Of note, the following are not supported:

        * Internal conversion
        * Gamma decay
    """

    def __init__(self, parent, dataDict):
        Transmutable.__init__(self, parent, dataDict)
        self.halfLifeInSeconds = parent.halflife

        # Check for user-defined value of half-life within the burn-chain data. If this is
        # updated then prefer the user change and then note this to the user. Otherwise,
        # maintain the default loaded from the nuclide bases.
        userHalfLife = dataDict.get("halfLifeInSeconds", None)
        if userHalfLife:
            if userHalfLife != parent.halflife:
                runLog.info(
                    f"Half-life provided for {self} will be updated from "
                    f"{parent.halflife:<15.11e} to {userHalfLife:<15.11e} seconds based on "
                    "user provided burn-chain data."
                )
            self.halfLifeInSeconds = userHalfLife

        # decay constant, reduced by branch to make it accurate
        self.decay = LN2 / self.halfLifeInSeconds * self.branch

        if self.type not in DECAY_MODES:
            raise KeyError("{} is not in {}".format(self.type, DECAY_MODES))

    def __repr__(self):
        return "<DecayMode by {} from {:7s} to {} with a half-life of {:12.5E} s>".format(
            self.type,
            self.parent.name,
            self.productNuclides,
            self.halfLifeInSeconds,
        )



================================================
FILE: armi/nuclearDataIO/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """Read and/or write data files associated with nuclear data and reactor physics data.""" # ruff: noqa: F401 # Export the cccc modules here for backward compatibility, though prefer full imports in new code. from armi.nuclearDataIO.cccc import ( compxs, dif3d, dlayxs, fixsrc, gamiso, geodst, isotxs, labels, nhflux, pmatrx, pwdint, rtflux, rzflux, ) from armi.physics import neutronics def getExpectedISOTXSFileName(cycle=None, node=None, suffix=None, xsID=None): """ Return the ISOTXS file that matches either the current cycle or xsID with a suffix. See Also -------- getExpectedCOMPXSFileName getExpectedGAMISOFileName getExpectedPMATRXFileName """ if xsID is not None and cycle is not None: raise ValueError("Both `xsID` and `cycle` cannot be specified together.") if suffix is not None and cycle is not None: raise ValueError("Both `suffix` and ``cycle cannot be specified together.") if xsID is not None: neutronFileName = neutronics.ISOTXS[:3] else: neutronFileName = neutronics.ISOTXS return _findExpectedNeutronFileName(neutronFileName, _getNeutronKeywords(cycle, node, suffix, xsID)) def getExpectedCOMPXSFileName(cycle=None, node=None): """ Return the COMPXS file that matches either the current cycle. See Also -------- getExpectedISOTXSFileName getExpectedGAMISOFileName getExpectedPMATRXFileName """ return _findExpectedNeutronFileName(neutronics.COMPXS, _getNeutronKeywords(cycle, node, suffix=None, xsID=None)) def _findExpectedNeutronFileName(fileType, fileNameKeywords): return fileType + "".join(fileNameKeywords) def _getNeutronKeywords(cycle, node, suffix, xsID): if cycle is not None and xsID is not None: raise ValueError("Keywords are over-specified. 
Choose `cycle` or `xsID` only") # If neither cycle or xsID are provided there are no additional keywords to add to the file name if cycle is None and xsID is None: keywords = [] else: # example: ISOTXS-c0 if cycle is not None: keywords = [f"-c{cycle}n{node}"] if node is not None else ["-c", str(cycle)] # example: ISOAA-test elif xsID is not None: keywords = [xsID] if suffix not in [None, ""]: keywords.append("-" + suffix) return keywords def getExpectedGAMISOFileName(cycle=None, node=None, suffix=None, xsID=None): """ Return the GAMISO file that matches either the ``cycle`` or ``xsID`` and ``suffix``. For example: If ``cycle`` is set to 0, then ``cycle0.gamiso`` will be returned. If ``xsID`` is set to ``AA`` with a ``suffix`` of ``test``, then ``AA-test.gamiso`` will be returned. See Also -------- getExpectedISOTXSFileName getExpectedCOMPXSFileName getExpectedPMATRXFileName """ if any(i is not None for i in (cycle, suffix, xsID)): # file path extensions are lower case gamiso0 = neutronics.GAMISO_EXT else: # GAMISO as a file is upper case gamiso0 = neutronics.GAMISO return _findExpectedGammaFileName(gamiso0, _getGammaKeywords(cycle, node, suffix, xsID)) def getExpectedPMATRXFileName(cycle=None, node=None, suffix=None, xsID=None): """ Return the PMATRX file that matches either the ``cycle`` or ``xsID`` and ``suffix``. For example: If ``cycle`` is set to 0 d, then ``cycle0.pmatrx`` will be returned. If ``xsID`` is set to ``AA`` with a ``suffix`` of ``test``, then ``AA-test.pmatrx`` will be returned. 
See Also -------- getExpectedISOTXSFileName getExpectedCOMPXSFileName getExpectedGAMISOFileName """ if any(i is not None for i in (cycle, suffix, xsID)): # file path extensions are lower case pmatrx0 = neutronics.PMATRX_EXT else: # PMATRX as a file is upper case pmatrx0 = neutronics.PMATRX return _findExpectedGammaFileName(pmatrx0, _getGammaKeywords(cycle, node, suffix, xsID)) def _findExpectedGammaFileName(fileType, fileNameKeywords): return "".join(fileNameKeywords) + fileType def _getGammaKeywords(cycle, node, suffix, xsID): if cycle is not None and xsID is not None: raise ValueError("Keywords are over-specified. Choose `cycle` or `xsID` only") # If neither cycle or xsID are provided there are no additional keywords to add # to the file name if cycle is None and xsID is None: keywords = [] else: # example: cycle0.gamiso if cycle is not None: keywords = [f"cycle{cycle}node{node}"] if node is not None else [f"cycle{cycle}"] elif xsID is not None: keywords = [xsID] if suffix not in [None, ""]: if not suffix.startswith("-"): suffix = "-" + suffix keywords.append(suffix) else: raise ValueError("The cycle or XS ID must be specified.") keywords.append(".") return keywords ================================================ FILE: armi/nuclearDataIO/cccc/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This subpackage reads and writes CCCC standard interface files for reactor physics codes. 
Starting in the late 1960s, the computational nuclear analysis community recognized a need to establish some standard file formats to exchange reactor descriptions and reactor physics quantities. They formed the Committee on Computer Code Coordination (CCCC) and issued several versions of their standards. The latest was issued in 1977 as [CCCC-IV]_. Many reactor codes to this day use these files. This package provides a Python abstraction to read many (though not necessarily all) of these files, manipulate the data, and write them back out to disk. Section IV of [CCCC-IV]_ defines the standard interface files that were created by the CCCC. In addition to the standard files listed in this document, software like DIF3D, PARTISN, and other reactor physics codes may have their own code-dependent interface files. In most cases, they follow a similar structure and definition as the standardized formats, but were not general enough to be used and implemented across all codes. The following are listed as the standard interface files: * ISOTXS (:py:mod:`armi.nuclearDataIO.cccc.isotxs`) - Nuclide (isotope) - ordered, multigroup neutron cross section data * GRUPXS - Group-ordered, isotopic, multigroup neutron cross section data. 
* BRKOXS - Bondarenko (Russian format) self-shielding data
* DLAYXS (:py:mod:`armi.nuclearDataIO.cccc.dlayxs`) - Delayed neutron precursor data
* ISOGXS (:py:mod:`armi.nuclearDataIO.cccc.gamiso`) - Nuclide (isotope) - ordered, multigroup gamma cross section data
* GEODST (:py:mod:`armi.nuclearDataIO.cccc.geodst`) - Geometry description
* NDXSRF - Nuclear density and cross section referencing data
* ZNATDN - Zone and subzone atomic densities
* SEARCH - Criticality search data
* SNCON - Sn (Discrete Ordinates) constants
* FIXSRC (:py:mod:`armi.nuclearDataIO.cccc.fixsrc`) - Distributed and surface fixed sources
* RTFLUX (:py:mod:`armi.nuclearDataIO.cccc.rtflux`) - Regular total (scalar) neutron flux
* ATFLUX (:py:mod:`armi.nuclearDataIO.cccc.rtflux`) - Adjoint total (scalar) neutron flux
* RCURNT - Regular neutron current
* ACURNT - Adjoint neutron current
* RAFLUX - Regular angular neutron flux
* AAFLUX - Adjoint angular neutron flux
* RZFLUX (:py:mod:`armi.nuclearDataIO.cccc.rzflux`) - Regular, zone-averaged flux by neutron group
* PWDINT (:py:mod:`armi.nuclearDataIO.cccc.pwdint`) - Power density by mesh interval
* WORTHS - Reactivity (per cc) by mesh interval

Other code-dependent interface files may also be included in this package, but it should be
documented which software they are created from and used for. The file structures should also be
provided in the module-level docstrings.

.. [CCCC-IV] R. Douglas O'Dell, "Standard Interface Files and Procedures for Reactor Physics
             Codes, Version IV," LA-6941-MS, Los Alamos National Laboratory (September 1977).
             Web. doi:10.2172/5369298. (`OSTI <https://www.osti.gov/biblio/5369298>`__)

Using the system
----------------
Most supported files are in their own module. Each has their own
:py:class:`cccc.DataContainer` to hold the data and one or more
:py:class:`cccc.Stream` objects representing different I/O formats.
The general pattern is to use any of the following methods on a ``Stream`` object:

* :py:meth:`cccc.Stream.readBinary`
* :py:meth:`cccc.Stream.readAscii`
* :py:meth:`cccc.Stream.writeBinary`
* :py:meth:`cccc.Stream.writeAscii`

For example, to get an RTFLUX data structure from a binary file named ``RTFLUX``, you run::

    >>> from armi.nuclearDataIO.cccc import rtflux
    >>> rtfluxData = rtflux.RtfluxStream.readBinary("RTFLUX")

Then if you want to write that data to an ASCII file named ``rtflux.ascii``, you run::

    >>> rtflux.RtfluxStream.writeAscii(rtfluxData, "rtflux.ascii")

Implementation details
----------------------
We have come up with a powerful but somewhat confusing-at-first implementation that allows us to
define the structure of the files in code just once, in a way that can both read and write the
files. Many methods start with the prefix ``rw`` to indicate that they are used during both
reading and writing.

Normal users of this code do not need to know the implementation details.

Discussion
----------
While loading from stream classmethods is explicit and nice and all, there has been some talk
about moving the read/write ascii/binary methods to the data classes for implementations that use
data structures. This would hide the Stream subclasses from users, which may be appropriate. On
the other hand, logic to select which stream subclass to use (e.g. adjoint vs. real) will have to
be moved into the data classes.

Notes
-----
A CCCC record consists of a leading and ending integer, which indicates the size of the record in
bytes. (This is actually just how FORTRAN unformatted sequential files are written, see e.g.
https://gcc.gnu.org/onlinedocs/gfortran/File-format-of-unformatted-sequential-files.html) As a
result, it is possible to perform a check when reading in a record to determine if it was read
correctly, by making sure the record size at the beginning and ending of a record are always
equal.
There are similarities between this code and that in the PyNE cccc subpackage. This is the original source of the code. TerraPower authorized the publication of some of the CCCC code to the PyNE project way back in the 2011 era. This code has since been updated significantly to both read and write the files. This was originally inspired by Prof. James Paul Holloway's alpha release of ccccutils written in c++ from 2001. """ from armi.nuclearDataIO.cccc.cccc import * # noqa: F403 ================================================ FILE: armi/nuclearDataIO/cccc/cccc.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Defines containers for the reading and writing standard interface files for reactor physics codes. .. impl:: Generic tool for reading and writing Committee on Computer Code Coordination (CCCC) format files for reactor physics codes :id: I_ARMI_NUCDATA :implements: R_ARMI_NUCDATA_ISOTXS, R_ARMI_NUCDATA_GAMISO, R_ARMI_NUCDATA_GEODST, R_ARMI_NUCDATA_DIF3D, R_ARMI_NUCDATA_PMATRX, R_ARMI_NUCDATA_DLAYXS This module provides a number of base classes that implement general capabilities for binary and ASCII file I/O. The :py:class:`IORecord` serves as an abstract base class that instantiates a number of methods that the binary and ASCII children classes are meant to implement. These methods, prefixed with ``rw``, are meant to convert literal data types, e.g. float or int, to either binary or ASCII. 
This base class does its own conversion for container data types, e.g. list or matrix, relying on the child implementation of the literal types that the container possesses. The binary conversion is implemented in :py:class:`BinaryRecordReader` and :py:class:`BinaryRecordWriter`. The ASCII conversion is implemented in :py:class:`AsciiRecordReader` and :py:class:`AsciiRecordWriter`. These :py:class:`IORecord` classes are used within :py:class:`Stream` objects for the data conversion. :py:class:`Stream` is a context manager that opens a file for reading or writing on the ``__enter__`` and closes that file upon ``__exit__``. :py:class:`Stream` is an abstract base class that is subclassed for each CCCC file. It is subclassed directly for the CCCC files that contain cross-section data: * :py:class:`ISOTXS <armi.nuclearDataIO.cccc.isotxs.IsotxsIO>` * :py:mod:`GAMISO <armi.nuclearDataIO.cccc.gamiso>` * :py:class:`PMATRX <armi.nuclearDataIO.cccc.pmatrx.PmatrxIO>` * :py:class:`DLAYXS <armi.nuclearDataIO.cccc.dlayxs.DlayxsIO>` * :py:mod:`COMPXS <armi.nuclearDataIO.cccc.compxs>` For the CCCC file types that are outputs from a flux solver such as DIF3D (e.g., GEODST, DIF3D, NHFLUX) the streams are subclassed from :py:class:`StreamWithDataContainer`, which is a special abstract subclass of :py:class:`Stream` that implements a common pattern used for these file types. In a :py:class:`StreamWithDataContainer`, the data is directly read to or written from a specialized data container. The data container structure for each type of CCCC file is implemented in the module for that file, as a subclass of :py:class:`DataContainer`. The subclasses for each CCCC file type define standard attribute names for the data that will be read from or written to the CCCC file. 
CCCC file types that follow this pattern include:

* :py:class:`GEODST <armi.nuclearDataIO.cccc.geodst.GeodstData>`
* :py:class:`DIF3D <armi.nuclearDataIO.cccc.dif3d.Dif3dData>`
* :py:class:`NHFLUX <armi.nuclearDataIO.cccc.nhflux.NHFLUX>` (and multiple sub-classes)
* :py:class:`LABELS <armi.nuclearDataIO.cccc.labels.LabelsData>`
* :py:class:`PWDINT <armi.nuclearDataIO.cccc.pwdint.PwdintData>`
* :py:class:`RTFLUX <armi.nuclearDataIO.cccc.rtflux.RtfluxData>`
* :py:class:`RZFLUX <armi.nuclearDataIO.cccc.rzflux.RzfluxData>`

The logic to parse or write each specific file format is contained within the
:py:meth:`Stream.readWrite` implementations of the respective subclasses.
"""

import io
import itertools
import os
import struct
from copy import deepcopy
from typing import List

import numpy as np

from armi import runLog
from armi.nuclearDataIO import nuclearFileMetadata

IMPLICIT_INT = "IJKLMN"
"""Letters that trigger implicit integer types in old FORTRAN 77 codes."""


class IORecord:
    """
    A single CCCC record.

    Reads or writes information to or from a stream.

    Parameters
    ----------
    stream
        A collection of data to be read or written

    hasRecordBoundaries : bool
        A True value means the fortran file was written using access='sequential' and
        contains a 4 byte int count at the beginning and end of each record. Otherwise,
        if False the fortran file was written using access='direct'.

    Notes
    -----
    The methods in this object often have `rw` prefixes, meaning the same method can be
    used for both reading and writing. We consider this a significant achievement that
    enforces consistency between the code for reading and writing CCCC records. The
    tradeoff is that it's a bit challenging to comprehend at first.
    """

    # struct format sizes for the primitive types handled by the record classes
    _intSize = struct.calcsize("i")
    _longSize = struct.calcsize("q")
    maxsize = len(str(2**31 - 1))  # limit to max short even though Python3 can go bigger.
    # ASCII field widths/formats; each value is preceded by a single space
    _intFormat = " {{:>+{}}}".format(maxsize)
    _intLength = maxsize + 1
    _floatSize = struct.calcsize("f")
    _floatFormat = " {:+.16E}"
    # leading space + sign + "d." (2+2) + 16 mantissa digits + "E+xx" (4)
    _floatLength = 2 + 2 + 16 + 4
    _characterSize = struct.calcsize("c")

    # running tally of records ever created (class-level counter)
    count = 0

    def __init__(self, stream, hasRecordBoundaries=True):
        IORecord.count += 1
        self._stream = stream
        self.numBytes = 0  # record size declared by the leading boundary integer (writing: accumulated size)
        self.byteCount = 0  # bytes actually read/written within the record payload
        self._hasRecordBoundaries = hasRecordBoundaries

    def __enter__(self):
        """Open the stream for reading/writing and return :code:`self`.

        See Also
        --------
        armi.nuclearDataIO.cccc.IORecord.open
        """
        self.open()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # if the body already raised, don't mask that exception with a close() failure
        if exc_type is not None:
            return
        try:
            self.close()
        except Exception as ee:
            runLog.error("Failed to close CCCC record.")
            runLog.error(ee)
            raise BufferError(
                "Failed to close record, {}.\n{}\n"
                "It is possible too much data was read from the "
                "record, and the end of the stream was reached.\n"
                "".format(self, ee)
            )

    def open(self):
        """Abstract method for opening the stream."""
        raise NotImplementedError()

    def close(self):
        """Abstract method for closing the stream."""
        raise NotImplementedError()

    def rwInt(self, val):
        """Abstract method for reading or writing an integer.

        Notes
        -----
        The method has a seemingly odd signature, because it is used for both reading and
        writing. When writing, the :code:`val` should have value, but when the record is
        being read, :code:`val` can be :code:`None` or anything else; it is ignored.
        """
        raise NotImplementedError()

    def rwBool(self, val):
        """Read or write a boolean value from an integer."""
        # non-bool inputs (e.g. None while reading) are coerced to False before packing
        val = False if not isinstance(val, bool) else val
        return bool(self.rwInt(int(val)))

    def rwFloat(self, val):
        """Abstract method for reading or writing a floating point (single precision) value.

        Notes
        -----
        The method has a seemingly odd signature, because it is used for both reading and
        writing. When writing, the :code:`val` should have value, but when the record is
        being read, :code:`val` can be :code:`None` or anything else; it is ignored.
        """
        raise NotImplementedError()

    def rwDouble(self, val):
        """Abstract method for reading or writing a floating point (double precision) value.

        Notes
        -----
        The method has a seemingly odd signature, because it is used for both reading and
        writing. When writing, the :code:`val` should have value, but when the record is
        being read, :code:`val` can be :code:`None` or anything else; it is ignored.
        """
        raise NotImplementedError()

    def rwString(self, val, length):
        """Abstract method for reading or writing a string.

        Notes
        -----
        The method has a seemingly odd signature, because it is used for both reading and
        writing. When writing, the :code:`val` should have value, but when the record is
        being read, :code:`val` can be :code:`None` or anything else; it is ignored.
        """
        raise NotImplementedError()

    def rwList(self, contents, containedType, length, strLength=0):
        """
        A method for reading and writing a (array) of items of a specific type.

        Notes
        -----
        The method has a seemingly odd signature, because it is used for both reading and
        writing. When writing, the :code:`contents` should have value, but when the record
        is being read, :code:`contents` can be :code:`None` or anything else; it is ignored.

        Warning
        -------
        If a :code:`contents` evaluates to :code:`True`, the array must be the same size
        as :code:`length`.
        """
        actions = {
            "int": self.rwInt,
            "float": self.rwFloat,
            "string": lambda val: self.rwString(val, strLength),
            "double": self.rwDouble,
        }
        action = actions.get(containedType)
        if action is None:
            raise Exception('Cannot pack or unpack the type "{}".'.format(containedType))
        # this little trick will make this work for both reading and writing, yay!
        if contents is None or len(contents) == 0:
            contents = [None for _ in range(length)]
        return np.array([action(contents[ii]) for ii in range(length)])

    def rwMatrix(self, contents, *shape):
        """A method for reading and writing a matrix of floating point values.

        Notes
        -----
        The method has a seemingly odd signature, because it is used for both reading and
        writing. When writing, the :code:`contents` should have value, but when the record
        is being read, :code:`contents` can be :code:`None` or anything else; it is ignored.

        Warning
        -------
        If a :code:`contents` is not :code:`None`, the array must be the same shape as
        :code:`*shape`.
        """
        return self._rwMatrix(contents, self.rwFloat, *shape)

    def rwDoubleMatrix(self, contents, *shape):
        """Read or write a matrix of floating point values.

        Notes
        -----
        The method has a seemingly odd signature, because it is used for both reading and
        writing. When writing, the :code:`contents` should have value, but when the record
        is being read, :code:`contents` can be :code:`None` or anything else; it is ignored.

        Warning
        -------
        If a :code:`contents` is not :code:`None`, the array must be the same shape as
        :code:`*shape`.
        """
        return self._rwMatrix(contents, self.rwDouble, *shape)

    def rwIntMatrix(self, contents, *shape):
        """Read or write a matrix of int values."""
        return self._rwMatrix(contents, self.rwInt, *shape)

    @staticmethod
    def _rwMatrix(contents, func, *shape):
        """
        Read/write a matrix.

        Notes
        -----
        This can be important for performance when reading large matrices (e.g. scatter
        matrices). It may be worth investigating ``np.frombuffer`` on read and something
        similar on write.

        With shape, the first shape argument should be the outermost loop because these are
        stored in column major order (the FORTRAN way). Note that np.ndarrays can be built
        with ``order="F"`` to have column-major ordering. So if you have
        ``((MR(I,J),I=1,NCINTI),J=1,NCINTJ)`` you would pass in the shape as
        (NCINTJ, NCINTI).
        """
        fortranShape = list(reversed(shape))
        if contents is None or contents.size == 0:
            contents = np.empty(fortranShape)
        for index in itertools.product(*[range(ii) for ii in shape]):
            # reverse the index to map the FORTRAN loop order onto the numpy array
            fortranIndex = tuple(reversed(index))
            contents[fortranIndex] = func(contents[fortranIndex])
        return contents

    def rwImplicitlyTypedMap(self, keys: List[str], contents) -> dict:
        """
        Read a dict of floats and/or ints with FORTRAN77-style implicit typing.

        Length of list is determined by length of list of keys passed in.
        """
        for key in keys:
            # ready for some implicit madness from the FORTRAN 77 days?
            # names beginning with I-N are integers; everything else is real
            if key[0].upper() in IMPLICIT_INT:
                contents[key] = self.rwInt(contents[key])
            else:
                contents[key] = self.rwFloat(contents[key])
        return contents


class BinaryRecordReader(IORecord):
    """
    Reads a single CCCC record in binary format.

    Notes
    -----
    This class reads a single CCCC record in binary format. A CCCC record consists of a
    leading and ending integer indicating how many bytes the record is. The data contained
    within the record may be integer, float, double, or string.
    """

    def open(self):
        """Open the record by reading the number of bytes in the record, this value will
        be used to ensure the entire record was read.
        """
        if not self._hasRecordBoundaries:
            return
        self.numBytes = self.rwInt(None)
        # the 4-byte boundary integer itself is not part of the record payload
        self.byteCount -= 4

    def close(self):
        """Closes the record by reading the number of bytes from the end of the record;
        if it does not match the initial value, an exception will be raised.
""" if not self._hasRecordBoundaries: return # now read end of record numBytes2 = self.rwInt(None) self.byteCount -= 4 if numBytes2 != self.numBytes: raise BufferError( "Number of bytes specified at end the of record, {}, " "does not match the originally specified number, {}.\n" "Read {} bytes.".format(numBytes2, self.numBytes, self.byteCount) ) def rwInt(self, val): """Reads an integer value from the binary stream.""" self.byteCount += self._intSize (i,) = struct.unpack("i", self._stream.read(self._intSize)) return i def rwBool(self, val): """Read or write a boolean value from an integer.""" return IORecord.rwBool(self, val) def rwLong(self, val): """Reads an integer value from the binary stream.""" self.byteCount += self._longSize (ll,) = struct.unpack("q", self._stream.read(self._longSize)) return ll def rwFloat(self, val): """Reads a single precision floating point value from the binary stream.""" self.byteCount += self._floatSize (f,) = struct.unpack("f", self._stream.read(self._floatSize)) return f def rwDouble(self, val): """Reads a double precision floating point value from the binary stream.""" self.byteCount += self._floatSize * 2 (d,) = struct.unpack("d", self._stream.read(self._floatSize * 2)) return d def rwString(self, val, length): """Reads a string of specified length from the binary stream.""" self.byteCount += length (s,) = struct.unpack("%ds" % length, self._stream.read(length)) return s.rstrip().decode() # convert bytes to string on reading. class BinaryRecordWriter(IORecord): """ Reads a single CCCC record in binary format. Reads binary information sequentially. 
""" def __init__(self, stream, hasRecordBoundaries=True): IORecord.__init__(self, stream, hasRecordBoundaries) self.data = None def open(self): self.data = [] def close(self): if self._hasRecordBoundaries: packedNumBytes = self._getPackedNumBytes() self._stream.write(packedNumBytes) for i in range(0, len(self.data) + 1, io.DEFAULT_BUFFER_SIZE): self._write_buffer_to_stream(i) if self._hasRecordBoundaries: self._stream.write(packedNumBytes) self.data = None def _getPackedNumBytes(self): return struct.pack("i", self.numBytes) def _write_buffer_to_stream(self, i): self._stream.write(b"".join(self.data[i : i + io.DEFAULT_BUFFER_SIZE])) def rwInt(self, val): self.numBytes += self._intSize self.data.append(struct.pack("i", val)) return val def rwBool(self, val): """Read or write a boolean value from an integer.""" return IORecord.rwBool(self, val) def rwLong(self, val): """Reads an integer value from the binary stream.""" self.byteCount += self._longSize self.data.append(struct.pack("q", val)) return val def rwFloat(self, val): self.numBytes += self._floatSize self.data.append(struct.pack("f", val)) return val def rwDouble(self, val): self.numBytes += self._floatSize * 2 self.data.append(struct.pack("d", val)) return val def rwString(self, val, length): self.numBytes += length * self._characterSize self.data.append(struct.pack("%ds" % length, val.ljust(length).encode("utf-8"))) return val class AsciiRecordReader(BinaryRecordReader): """ Reads a single CCCC record in ASCII format. See Also -------- AsciiRecordWriter """ def close(self): BinaryRecordReader.close(self) # read one extra character for the new line \n... python somehow correctly figures out # that on windows \r\n is really just a \n... no idea how. 
        self._stream.read(1)

    def _getPackedNumBytes(self):
        # NOTE(review): this and _write_buffer_to_stream are writer-side helpers that
        # reference ``self.data``, which this reader never populates -- they appear to be
        # dead code on the reader class; confirm before relying on them.
        return self.numBytes

    def _write_buffer_to_stream(self, i):
        self._stream.write("".join(self.data[i : i + io.DEFAULT_BUFFER_SIZE]))

    def rwInt(self, val):
        # fields are fixed-width, so read exactly the declared number of characters
        return int(self._stream.read(self._intLength))

    def rwFloat(self, val):
        return float(self._stream.read(self._floatLength))

    def rwDouble(self, val):
        # doubles use the same fixed-width text format as singles
        return self.rwFloat(val)

    def rwString(self, val, length):
        # read one space
        self._stream.read(1)
        return self._stream.read(length).rstrip()


class AsciiRecordWriter(IORecord):
    r"""
    Writes a single CCCC record in ASCII format.

    Since there is no specific format of an ASCII CCCC record, the format is roughly the
    same as the :py:class:`BinaryRecordWriter`, except that the :class:`AsciiRecordWriter`
    puts a space in front of all values (ints, floats, and strings), and puts a newline
    character :code:`\\n` at the end of all records.
    """

    def __init__(self, stream, hasRecordBoundaries=True):
        IORecord.__init__(self, stream, hasRecordBoundaries)
        # formatted text fields are buffered here and flushed on close()
        self.data = None
        self.numBytes = 0

    def open(self):
        self.data = []

    def close(self):
        # bracket the record with the byte count, mirroring the binary record boundaries
        self._stream.write(self._intFormat.format(self.numBytes))
        self._stream.write("".join(self.data))
        self._stream.write(self._intFormat.format(self.numBytes))
        self._stream.write("\n")
        self.data = None

    def rwInt(self, val):
        self.numBytes += self._intSize
        self.data.append(self._intFormat.format(val))
        return val

    def rwFloat(self, val):
        self.numBytes += self._floatSize
        self.data.append(self._floatFormat.format(val))
        return val

    def rwDouble(self, val):
        self.numBytes += self._floatSize * 2
        self.data.append(self._floatFormat.format(val))
        return val

    def rwString(self, val, length):
        self.numBytes += length * self._characterSize
        self.data.append(" {value:<{length}}".format(length=length, value=val))
        return val


class DataContainer:
    """
    Data representation that can be read/written to/from with a cccc.Stream.

    This is an optional convenience class expected to be used in concert with
    :py:class:`StreamWithDataContainer`.
""" def __init__(self): # Need Metadata subclass for default keys self.metadata = nuclearFileMetadata._Metadata() class Stream: """ An abstract CCCC IO stream. Warning ------- This is more of a stream Parser/Serializer than an actual stream. Notes ----- A concrete instance of this class should implement the :py:meth:`~armi.nuclearDataIO.cccc.Stream.readWrite` method. """ _fileModes = { "rb": BinaryRecordReader, "wb": BinaryRecordWriter, "r": AsciiRecordReader, "w": AsciiRecordWriter, } def __init__(self, fileName, fileMode): """ Create an instance of a :py:class:`~armi.nuclearDataIO.cccc.Stream`. Parameters ---------- fileName : str name of the file to be read fileMode : str the file mode, i.e. 'w' for writing ASCII, 'r' for reading ASCII, 'wb' for writing binary, and 'rb' for reading binary. """ self._fileName = fileName self._fileMode = fileMode self._stream = None if fileMode not in self._fileModes: raise KeyError("{} not in {}".format("fileMode", list(self._fileModes.keys()))) def __deepcopy__(self, memo): """Open file objects can't be deepcopied so we clear them before copying.""" cls = self.__class__ result = cls.__new__(cls) result._stream = None memo[id(self)] = result for k, v in self.__dict__.items(): if k != "_stream": setattr(result, k, deepcopy(v, memo)) return result def __repr__(self): return "<{} {}>".format(self.__class__.__name__, self._fileName) def __enter__(self): """At the inception of a with command, open up the file for a read/write.""" try: self._stream = open(self._fileName, self._fileMode) except IOError: runLog.error("Cannot find {} in {}".format(self._fileName, os.getcwd())) raise return self def __exit__(self, exc_type, exc_value, traceback): """At the termination of a with command, close the file.""" self._stream.close() def readWrite(self): """This method should be implemented on any sub-classes to specify the order of records.""" raise NotImplementedError() def createRecord(self, hasRecordBoundaries=True): recordClass = 
self._fileModes[self._fileMode] return recordClass(self._stream, hasRecordBoundaries) @classmethod def readBinary(cls, fileName: str): """Read data from a binary file into a data structure.""" return cls._read(fileName, "rb") @classmethod def readAscii(cls, fileName: str): """Read data from an ASCII file into a data structure.""" return cls._read(fileName, "r") @classmethod def _read(cls, fileName, fileMode): raise NotImplementedError() @classmethod def writeBinary(cls, data: DataContainer, fileName: str): """Write the contents of a data container to a binary file.""" return cls._write(data, fileName, "wb") @classmethod def writeAscii(cls, data: DataContainer, fileName: str): """Write the contents of a data container to an ASCII file.""" return cls._write(data, fileName, "w") @classmethod def _write(cls, lib, fileName, fileMode): raise NotImplementedError() class StreamWithDataContainer(Stream): """ A cccc.Stream that reads/writes to a specialized data container. This is a relatively common pattern so some of the boilerplate is handled here. Warning ------- This is more of a stream Parser/Serializer than an actual stream. Notes ----- It should be possible to fully merge this with ``Stream``, which may make this a little less confusing. 
    """

    def __init__(self, data: DataContainer, fileName: str, fileMode: str):
        Stream.__init__(self, fileName, fileMode)
        self._data = data
        self._metadata = self._data.metadata

    @staticmethod
    def _getDataContainer() -> DataContainer:
        # subclasses supply the concrete data container for their file type
        raise NotImplementedError()

    @classmethod
    def _read(cls, fileName: str, fileMode: str):
        data = cls._getDataContainer()
        return cls._readWrite(
            data,
            fileName,
            fileMode,
        )

    @classmethod
    def _write(cls, data: DataContainer, fileName: str, fileMode: str):
        return cls._readWrite(data, fileName, fileMode)

    @classmethod
    def _readWrite(cls, data: DataContainer, fileName: str, fileMode: str):
        # the stream context manager opens/closes the file; readWrite moves the data
        with cls(data, fileName, fileMode) as rw:
            rw.readWrite()
        return data


def getBlockBandwidth(m, nintj, nblok):
    """
    Return block bandwidth JL, JU from CCCC interface files.

    It is common for CCCC files to block data in various records with a description along
    the lines of::

        WITH M AS THE BLOCK INDEX, JL=(M-1)*((NINTJ-1)/NBLOK +1)+1
        AND JU=MIN0(NINTJ,JUP) WHERE JUP=M*((NINTJ-1)/NBLOK +1)

    This function computes JL and JU for these purposes. It also converts JL and JU to
    zero based indices rather than 1 based ones, as is almost always wanted when dealing
    with python/numpy matrices.

    The term *bandwidth* refers to a kind of sparse matrix representation. Some rows only
    have columns JL to JH in them rather than 0 to JMAX. The non-zero band from JL to JH
    is what we're talking about here.
    """
    x = (nintj - 1) // nblok + 1
    jLow = (m - 1) * x + 1
    jHigh = min(nintj, m * x)
    # convert to zero-based indices
    return jLow - 1, jHigh - 1


================================================
FILE: armi/nuclearDataIO/cccc/compxs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
COMPXS is a binary file that contains multigroup macroscopic cross sections for homogenized
regions in a full core. The file format can be found in [DIF3D]_.

.. [DIF3D] Derstine, K. L. DIF3D: A Code to Solve One-, Two-, and Three-Dimensional
    Finite-Difference Diffusion Theory Problems, report, April 1984; Argonne, Illinois.
    (https://digital.library.unt.edu/ark:/67531/metadc283553/: accessed October 17, 2019),
    University of North Texas Libraries, Digital Library, https://digital.library.unt.edu;
    crediting UNT Libraries Government Documents Department.

The file structure is listed here ::

    RECORD TYPE                           PRESENT IF
    ===================================   ==========
    SPECIFICATIONS                        ALWAYS
    COMPOSITION INDEPENDENT DATA          ALWAYS
    *********  (REPEAT FOR ALL COMPOSITIONS)
    *  COMPOSITION SPECIFICATIONS         ALWAYS
    *  ******  (REPEAT FOR ALL ENERGY GROUPS
    *  *       IN THE ORDER OF DECREASING
    *  *       ENERGY)
    *  *  COMPOSITION MACROSCOPIC GROUP   ALWAYS
    *  *  CROSS SECTIONS
    *********
    POWER CONVERSION FACTORS              ALWAYS

See Also
--------
:py:mod:`armi.nuclearDataIO.cccc.isotxs`

Examples
--------
>>> from armi.nuclearDataIO import compxs
>>> lib = compxs.readBinary("COMPXS")
>>> r0 = lib.regions[0]
>>> r0.macros.fission # returns fission XS for this region
>>> r0.macros.higherOrderScatter[1] # returns P1 scattering matrix
>>> r0.macros.higherOrderScatter[5] *= 0 # zero out P5 scattering matrix
>>> compxs.writeBinary(lib, "COMPXS2")

Notes
-----
Power conversion factors are used by some codes to determine how to scale the flux in a
region to a desired power based on either fissions/watt-second or captures/watt-second.
If the user does not plan on using these values, the COMPXS format indicates the values
should be set to ``-1E+20``.

The value of ``powerConvMult`` "times the group J integrated flux for the regions containing
the current composition yields the total power in those regions and energy group J due to
fissions and non-fission absorptions."

The ``d<1,2,3>Multiplier`` values are the first, second, and third dimension directional
diffusion coefficient multipliers, respectively. Similarly, the ``d<1,2,3>Additive`` values
are the first, second, and third dimension directional diffusion coefficient additive terms,
respectively.
"""

from traceback import format_exc

import numpy as np
from scipy.sparse import csc_matrix

from armi import runLog
from armi.nuclearDataIO import cccc
from armi.nuclearDataIO.nuclearFileMetadata import (
    COMPXS_POWER_CONVERSION_FACTORS,
    REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF,
    RegionXSMetadata,
)
from armi.nuclearDataIO.xsCollections import XSCollection
from armi.utils.properties import lockImmutableProperties, unlockImmutableProperties


def _getRegionIO():
    # indirection point so subclasses/tests can swap the region reader/writer
    return _CompxsRegionIO


def _flattenScatteringVector(colVector, group, numUpScatter, numDownScatter):
    # pull the non-zero band for this group out of the sparse column and reverse it,
    # since COMPXS stores scattering bands in decreasing-group order
    flatVector = colVector[group - numDownScatter : group + numUpScatter + 1].toarray().flatten()
    return list(reversed(flatVector))


def compare(lib1, lib2, tolerance=0.0, verbose=False):
    """
    Compare two COMPXS libraries and return True if equal, or False if not equal.

    Parameters
    ----------
    lib1: XSLibrary
        first library
    lib2: XSLibrary
        second library
    tolerance: float
        Disregard errors that are less than tolerance.
    verbose: bool
        show the macroscopic cross sections that are not equal

    Returns
    -------
    equals: bool
        True if libraries are equal, else false
    """
    # local import to avoid a circular import at module load time
    from armi.nuclearDataIO.xsLibraries import compareLibraryNeutronEnergies

    equals = True
    equals &= compareLibraryNeutronEnergies(lib1, lib2, tolerance)
    equals &= lib1.compxsMetadata.compare(lib2.compxsMetadata, lib1, lib2, tolerance)
    for regionName in set(lib1.regionLabels + lib2.regionLabels):
        region1 = lib1[regionName]
        region2 = lib2[regionName]
        if region1 is None or region2 is None:
            # region exists in only one library; report it and keep comparing the rest
            warning = "Region {} is not in library {} and cannot be compared"
            if region1:
                runLog.warning(warning.format(region1, 2))
            if region2:
                runLog.warning(warning.format(region2, 1))
            equals = False
            continue
        equals &= _compareRegionXS(region1, region2, tolerance, verbose)
    return equals


def _compareRegionXS(region1, region2, tolerance, verbose):
    """Compare the macroscopic cross sections between two homogenized regions."""
    return region1.macros.compare(region2.macros, None, tolerance, verbose)


class _CompxsIO(cccc.Stream):
    """Semi-abstract stream used for reading to/writing from a COMPXS file.

    Parameters
    ----------
    fileName: str
        path to compxs file
    lib: armi.nuclearDataIO.xsLibrary.CompxsLibrary
        Compxs library that is being written to or read from `fileName`
    fileMode: str
        string indicating if ``fileName`` is being read or written, and
        in ascii or binary format
    getRegionFunc: function
        function that returns a :py:class:`CompxsRegion` object given the name of the
        region.

    See Also
    --------
    armi.nuclearDataIO.cccc.isotxs.IsotxsIO
    """

    # 1D-record integer metadata, in the order they appear in the file
    _METADATA_TAGS = (
        "numComps",
        "numGroups",
        "fileWideChiFlag",
        "numFissComps",
        "maxUpScatterGroups",
        "maxDownScatterGroups",
        "numDelayedFam",
        "maxScatteringOrder",
    )

    def __init__(self, fileName, lib, fileMode, getRegionFunc):
        cccc.Stream.__init__(self, fileName, fileMode)
        self._lib = lib
        self._metadata = self._getFileMetadata()
        self._metadata.fileNames.append(fileName)
        self._getRegion = getRegionFunc
        self._isReading = "r" in self._fileMode

    def _getFileMetadata(self):
        return self._lib.compxsMetadata

    def isReadingCompxs(self):
        return self._isReading

    def fileMode(self):
        return self._fileMode

    @classmethod
    def _read(cls, fileName, fileMode):
        # local import to avoid a circular import at module load time
        from armi.nuclearDataIO.xsLibraries import CompxsLibrary

        lib = CompxsLibrary()
        return cls._readWrite(
            lib,
            fileName,
            fileMode,
            lambda containerKey: CompxsRegion(lib, containerKey),
        )

    @classmethod
    def _write(cls, lib, fileName, fileMode):
        return cls._readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey])

    @classmethod
    def _readWrite(cls, lib, fileName, fileMode, getRegionFunc):
        with _CompxsIO(fileName, lib, fileMode, getRegionFunc) as rw:
            rw.readWrite()
        return lib

    def readWrite(self):
        """
        Read from or write to the COMPXS file.
See Also -------- armi.nuclearDataIO.cccc.isotxs.IsotxsIO.readWrite : reading/writing ISOTXS files """ runLog.info("{} macroscopic cross library {}".format("Reading" if self._isReading else "Writing", self)) unlockImmutableProperties(self._lib) try: regNames = self._rw1DRecord(self._lib.regionLabels) self._rw2DRecord() for regLabel in regNames: region = self._getRegion(regLabel) regionIO = _getRegionIO()(region, self, self._lib) regionIO.rwRegionData() self._rw5DRecord() except Exception: raise OSError("Failed to {} {} \n\n\n{}".format("read" if self._isReading else "write", self, format_exc())) finally: lockImmutableProperties(self._lib) def _rw1DRecord(self, regNames): """Write the specifications block.""" with self.createRecord() as record: for datum in self._METADATA_TAGS: self._metadata[datum] = record.rwInt(self._metadata[datum]) self._metadata["reservedFlag1"] = record.rwInt(self._metadata["reservedFlag1"]) self._metadata["reservedFlag2"] = record.rwInt(self._metadata["reservedFlag2"]) regNames = list(range(self._metadata["numComps"])) return regNames def _rw2DRecord(self): """Write the composition independent data block.""" with self.createRecord() as record: if self._metadata["fileWideChiFlag"]: self._metadata["fileWideChi"] = record.rwMatrix( self._metadata["fileWideChi"], (self._metadata["fileWideChiFlag"], self._metadata["numGroups"]), ) self._rwLibraryEnergies(record) self._metadata["minimumNeutronEnergy"] = record.rwDouble(self._metadata["minimumNeutronEnergy"]) self._rwDelayedProperties(record, self._metadata["numDelayedFam"]) def _rwLibraryEnergies(self, record): self._lib.neutronVelocity = record.rwList(self._lib.neutronVelocity, "double", self._metadata["numGroups"]) self._lib.neutronEnergyUpperBounds = record.rwList( self._lib.neutronEnergyUpperBounds, "double", self._metadata["numGroups"] ) def _rwDelayedProperties(self, record, numDelayedFam): if numDelayedFam: self._metadata["delayedChi"] = record.rwMatrix( self._metadata["delayedChi"], 
(self._metadata["numGroups"], numDelayedFam), ) self._metadata["delayedDecayConstant"] = record.rwList( self._metadata["delayedDecayConstant"], "double", numDelayedFam ) self._metadata["compFamiliesWithPrecursors"] = record.rwList( self._metadata["compFamiliesWithPrecursors"], "int", self._metadata["numComps"], ) def _rw5DRecord(self): """Write power conversion factors.""" numComps = self._getFileMetadata()["numComps"] with self.createRecord() as record: for factor in COMPXS_POWER_CONVERSION_FACTORS: self._getFileMetadata()[factor] = record.rwList(self._getFileMetadata()[factor], "double", numComps) readBinary = _CompxsIO.readBinary readAscii = _CompxsIO.readAscii writeBinary = _CompxsIO.writeBinary writeAscii = _CompxsIO.writeAscii class _CompxsRegionIO: """ Specific object assigned a single region to read/write composition information. Used with _COMPXS object to read/write 3D and 4D records - composition specifications and compsosition macroscopic cross sections. Cross sections are read/written in order of decreasing energy. This differs from the _COMPXS object, as this object acts on a single region, but uses the file mode and file path from the _COMPXS region that instantiated this object. 
""" _ORDERED_PRIMARY_XS = ("absorption", "total", "removal", "transport") def __init__(self, region, compxsIO, lib): self._lib = lib self._compxsIO = compxsIO self._region = region self._numGroups = self._getFileMetadata()["numGroups"] self._fileMode = compxsIO.fileMode() self._isReading = compxsIO.isReadingCompxs() def _getRegionMetadata(self): return self._region.metadata def _getFileMetadata(self): return self._lib.compxsMetadata def rwRegionData(self): """Read/write the region specific information for this composition.""" self._rw3DRecord() self._rw4DRecord() def _rw3DRecord(self): r"""Write the composition specifications block.""" with self._compxsIO.createRecord() as record: self._getRegionMetadata()["chiFlag"] = record.rwInt(self._getRegionMetadata()["chiFlag"]) self._getRegionMetadata()["numUpScatterGroups"] = record.rwList( self._getRegionMetadata()["numUpScatterGroups"], "int", self._numGroups ) self._getRegionMetadata()["numDownScatterGroups"] = record.rwList( self._getRegionMetadata()["numDownScatterGroups"], "int", self._numGroups, ) if self._getRegionMetadata()["numPrecursorFamilies"]: self._getRegionMetadata()["numFamI"] = record.rwList( self._getRegionMetadata()["numFamI"], "int", self._getRegionMetadata()["numPrecursorFamilies"], ) def _rw4DRecord(self): r"""Write the composition macroscopic cross sections.""" if self._isReading: self._region.allocateXS(self._getFileMetadata()["numGroups"]) for group in range(self._getFileMetadata()["numGroups"]): with self._compxsIO.createRecord() as record: self._rwGroup4DRecord(record, group, self._region.macros) if self._isReading: self._region.makeScatteringMatrices() def _rwGroup4DRecord(self, record, group, macros): self._rwPrimaryXS(record, group, macros) self._rwScatteringMatrix(record, group, macros, 0) for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF: self._getRegionMetadata()[datum][group] = record.rwDouble(self._getRegionMetadata()[datum][group]) if 
self._getRegionMetadata()["numPrecursorFamilies"]: self._getRegionMetadata()["numPrecursorsProduced", group] = record.rwList( self._getRegionMetadata()["numPrecursorsProduced", group], "int", self._getRegionMetadata()["numPrecursorFamilies"], ) macros.n2n[group] = record.rwDouble(macros.n2n[group]) for higherOrder in range(1, self._getFileMetadata()["maxScatteringOrder"] + 1): self._rwScatteringMatrix(record, group, macros, higherOrder) def _rwPrimaryXS(self, record, group, macros): for xs in self._ORDERED_PRIMARY_XS: macros[xs][group] = record.rwDouble(macros[xs][group]) if self._getRegionMetadata()["chiFlag"]: macros["fission"][group] = record.rwDouble(macros["fission"][group]) macros["nuSigF"][group] = record.rwDouble(macros["nuSigF"][group]) macros["chi"][group] = record.rwList(macros["chi"][group], "double", self._getRegionMetadata()["chiFlag"]) def _rwScatteringMatrix(self, record, group, macros, order): numUpScatter = self._getRegionMetadata()["numUpScatterGroups"][group] numDownScatter = self._getRegionMetadata()["numDownScatterGroups"][group] sparseMat = macros.higherOrderScatter[order] if order else macros.totalScatter dataj = ( None if self._isReading else _flattenScatteringVector(sparseMat[:, group], group, numUpScatter, numDownScatter) ) dataj = record.rwList(dataj, "double", numUpScatter + 1 + numDownScatter) indicesj = list(reversed(range(group - numDownScatter, group + numUpScatter + 1))) if self._isReading: sparseMat.addColumnData(dataj, indicesj) class _CompxsScatterMatrix: """When reading COMPXS scattering blocks, store the data here and then reconstruct after.""" def __init__(self, shape): self.data = [] self.indices = [] self.indptr = [0] self.shape = shape def addColumnData(self, dataj, indicesj): self.data.extend(dataj) self.indices.extend(indicesj) self.indptr.append(len(dataj) + self.indptr[-1]) def makeSparse(self, sparseFunc=csc_matrix): self.data = np.array(self.data, dtype="d") self.indices = np.array(self.indices, dtype="d") 
self.indptr = np.array(self.indptr, dtype="d") return sparseFunc((self.data, self.indices, self.indptr), shape=self.shape) class CompxsRegion: """ Class for creating/tracking homogenized region information. Notes ----- Region objects are created from reading COMPXS files through :py:meth:`~_CompxsIO.readWrite` and connected to the resulting library, similar to instances of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`. This allows instances of :py:class:`~armi.nuclearDataIO.xsLibraries.CompxsLibrary` to read from and write to ``COMPXS`` files, access region information by name, and plot macroscopic cross sections from the homogenized regions. The main attributes for an instance of `Region` are the macroscopic cross sections, ``macros``, and the metadata. The metadata deals primarily with delayed neutron information and use of the ``fileWideChi``, if that option is set. See Also -------- armi.nuclearDataIO.xsNuclides.XSNuclide Examples -------- >>> lib = compxs.readBinary("COMPXS") >>> lib.regions <Region REG00> <Region REG01> <Region REG02> ... <Region RegNN> >>> r0 = lib.regions[0] >>> r10 = lib.regions[10] >>> r0.isFissile False >>> r10.isFissile True >>> r10.macros.fission array([0.01147095, 0.01006284, 0.0065597, 0.00660079, 0.005587, ... 
        0.08920149, 0.13035864, 0.16192732]
    """

    _primaryXS = ("absorption", "total", "removal", "transport", "n2n")

    def __init__(self, lib, regionNumber):
        # Registering with the container library also indexes this region by number.
        self.container = lib
        lib[regionNumber] = self
        self.regionNumber = regionNumber
        self.macros = XSCollection(parent=self)
        self.metadata = self._getMetadata()

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.regionNumber)

    def _getFileMetadata(self):
        # File-wide (library-level) metadata, shared by all regions.
        return self.container.compxsMetadata

    def _getMetadata(self):
        """Build region-level metadata seeded from the file-wide metadata."""
        specs = RegionXSMetadata()
        chiFlag = specs["fileWideChiFlag"] = self._getFileMetadata()["fileWideChiFlag"]
        if chiFlag:
            # With a file-wide chi, all regions share the same fission spectrum.
            self.macros.chi = specs["fileWideChi"] = self._getFileMetadata()["fileWideChi"]
        compFamiliesWithPrecursors = self._getFileMetadata()["compFamiliesWithPrecursors"]
        if compFamiliesWithPrecursors is not None and compFamiliesWithPrecursors.size:
            specs["numPrecursorFamilies"] = compFamiliesWithPrecursors[self.regionNumber]
        else:
            specs["numPrecursorFamilies"] = 0
        # NOTE(review): the per-region "chiFlag" key read by allocateXS is populated
        # later by _CompxsRegionIO._rw3DRecord, not here.
        return specs

    def initMetadata(self, groups):
        """Initialize the metadata for this region."""
        self.metadata = self._getMetadata()
        for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:
            # Additive corrections start at zero; multiplicative factors at one.
            if "Additive" in datum:
                quantity = 0.0
            else:
                quantity = 1.0
            self.metadata[datum] = groups * [quantity]
        for datum in COMPXS_POWER_CONVERSION_FACTORS:
            self.metadata[datum] = 1.0

    @property
    def isFissile(self):
        # Fission XS array exists only if this region's chiFlag was nonzero.
        return self.macros.fission is not None

    def allocateXS(self, numGroups):
        r"""
        Allocate the cross section arrays.

        When reading in the cross sections from a COMPXS file, the cross sections are read for each
        energy group, i.e.

        ..math::

            \Sigma_{a,1},\Sigma_{t,1},\Sigma_{rem,1}, \cdots,
            \Sigma_{a,2},\Sigma_{t,2},\Sigma_{rem,2}, \cdots,
            \Sigma_{a,G},\Sigma_{t,G},\Sigma_{rem,G}

        Since the cross sections can not be read in with a single read command, the arrays are
        allocated here to be populated later. Scattering matrices are read in as columns of a
        sparse scattering matrix and reconstructed after all energy groups have been read in.

        See Also
        --------
        :py:meth:`makeScatteringMatrices`
        """
        for xs in self._primaryXS:
            self.macros[xs] = np.zeros(numGroups)
        self.macros.totalScatter = _CompxsScatterMatrix((numGroups, numGroups))
        if self.metadata["chiFlag"]:
            self.macros.fission = np.zeros(numGroups)
            self.macros.nuSigF = np.zeros(numGroups)
            self.macros.chi = np.zeros((numGroups, self.metadata["chiFlag"]))
        if self._getFileMetadata()["maxScatteringOrder"]:
            # One accumulator per anisotropic scattering order (order 0 is totalScatter).
            for scatterOrder in range(1, self._getFileMetadata()["maxScatteringOrder"] + 1):
                self.macros.higherOrderScatter[scatterOrder] = _CompxsScatterMatrix((numGroups, numGroups))
        for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:
            self.metadata[datum] = (np.zeros(numGroups) if "Additive" in datum else np.ones(numGroups)).tolist()

    def makeScatteringMatrices(self):
        r"""
        Create the sparse scattering matrix from components.

        The scattering matrix :math:`S_{i,j}=\Sigma_{s,i\rightarrow j}` is read in from the COMPXS
        as segments on each column in three parts:

        ..math::

            XSCATU_J = \lbrace S_{g', J}\vert g'=J+NUP(J), J+NUP(J)-1, \cdots, J+1\rbrace

            XSCATJ_J = S_{J,J}

            XSCATD_J = \lbrace S_{g', J}\vert g'=J-1, J-2, \cdots, J-NDN(J) \rbrace

        where :math:`NUP(J)` and :math:`NDN(J)` are the number of groups that upscatter and
        downscatter into energy group :math:`J`

        See Also
        --------
        :py:class:`scipy.sparse.csc_matrix`
        """
        self.macros.totalScatter = self.macros.totalScatter.makeSparse()
        self.macros.totalScatter.eliminate_zeros()
        if self._getFileMetadata()["maxScatteringOrder"]:
            for sctOrdr, sctObj in self.macros.higherOrderScatter.items():
                self.macros.higherOrderScatter[sctOrdr] = sctObj.makeSparse()
                self.macros.higherOrderScatter[sctOrdr].eliminate_zeros()

    def getXS(self, interaction):
        """
        Get the macroscopic cross sections for a specific interaction.

        See Also
        --------
        :py:meth:`armi.nucDirectory.XSNuclide.getXS`
        """
        return self.macros[interaction]

    def merge(self, other):
        """Merge attributes of two homogenized Regions."""
        self.metadata = self.metadata.merge(other.metadata, self, other, "COMPXS", OSError)
        self.macros.merge(other.macros)



================================================
FILE: armi/nuclearDataIO/cccc/dif3d.py
================================================
# Copyright 2023 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for reading from and writing to DIF3D files, which are module dependent binary inputs for
the DIF3D code.
""" from armi import runLog from armi.nuclearDataIO import cccc FILE_SPEC_2D_PARAMS = ( [ "IPROBT", "ISOLNT", "IXTRAP", "MINBSZ", "NOUTMX", "IRSTRT", "LIMTIM", "NUPMAX", "IOSAVE", "IOMEG1", "INRMAX", "NUMORP", "IRETRN", ] + [f"IEDF{e}" for e in range(1, 11)] + [ "NOUTBQ", "I0FLUX", "NOEDIT", "NOD3ED", "ISRHED", "NSN", "NSWMAX", "NAPRX", "NAPRXZ", "NFMCMX", "NXYSWP", "NZSWP", "ISYMF", "NCMRZS", "ISEXTR", "NPNO", "NXTR", "IOMEG2", "IFULL", "NVFLAG", "ISIMPL", "IWNHFL", "IPERT", "IHARM", ] ) FILE_SPEC_3D_PARAMS = [ "EPS1", "EPS2", "EPS3", "EFFK", "FISMIN", "PSINRM", "POWIN", "SIGBAR", "EFFKQ", "EPSWP", ] + [f"DUM{e}" for e in range(1, 21)] TITLE_RANGE = 11 class Dif3dData(cccc.DataContainer): def __init__(self): cccc.DataContainer.__init__(self) self.twoD = {e: None for e in FILE_SPEC_2D_PARAMS} self.threeD = {e: None for e in FILE_SPEC_3D_PARAMS} self.fourD = None self.fiveD = None class Dif3dStream(cccc.StreamWithDataContainer): """Tool to read and write DIF3D files.""" @staticmethod def _getDataContainer() -> Dif3dData: return Dif3dData() def _rwFileID(self) -> None: """ Record for file identification information. The parameters are stored as a dictionary under the attribute `metadata`. """ with self.createRecord() as record: for param in ["HNAME", "HUSE1", "HUSE2"]: self._metadata[param] = record.rwString(self._metadata[param], 8) self._metadata["VERSION"] = record.rwInt(self._metadata["VERSION"]) def _rw1DRecord(self) -> None: """ Record for problem title, storage, and dump specifications. The parameters are stored as a dictionary under the attribute `metadata`. 
""" with self.createRecord() as record: for i in range(TITLE_RANGE): param = f"TITLE{i}" self._metadata[param] = record.rwString(self._metadata[param], 8) self._metadata["MAXSIZ"] = record.rwInt(self._metadata["MAXSIZ"]) self._metadata["MAXBLK"] = record.rwInt(self._metadata["MAXBLK"]) self._metadata["IPRINT"] = record.rwInt(self._metadata["IPRINT"]) def _rw2DRecord(self) -> None: """ Record for DIF3D integer control parameters. The parameters are stored as a dictionary under the attribute `twoD`. """ with self.createRecord() as record: for param in FILE_SPEC_2D_PARAMS: self._data.twoD[param] = record.rwInt(self._data.twoD[param]) def _rw3DRecord(self) -> None: """ Record for convergence criteria and other sundry floating point data (such as k-effective). The parameters are stored as a dictionary under the attribute `threeD`. """ with self.createRecord() as record: for param in FILE_SPEC_3D_PARAMS: self._data.threeD[param] = record.rwDouble(self._data.threeD[param]) def _rw4DRecord(self) -> None: """ Record for the optimum overrelaxation factors. This record is only present when using DIF3D-FD and if `NUMORP` is greater than 0. The parameters are stored as a dictionary under the attribute `fourD`. This could be changed into a list in the future since this record represents groupwise data. """ if self._data.twoD["NUMORP"] != 0: omegaParams = [f"OMEGA{e}" for e in range(1, self._data.twoD["NUMORP"] + 1)] with self.createRecord() as record: # Initialize the record if we're reading if self._data.fourD is None: self._data.fourD = {omegaParam: None for omegaParam in omegaParams} for omegaParam in omegaParams: self._data.fourD[omegaParam] = record.rwDouble(self._data.fourD[omegaParam]) def _rw5DRecord(self) -> None: """ Record for the axial coarse mesh rebalancing boundaries. Coarse mesh balancing is disabled in DIF3D-VARIANT, so this record is only relevant for DIF3D-Nodal. This record is only present if `NCMRZS` is greater than 0. 
The parameters are stored as a dictionary under the attribute `fiveD`. """ if self._data.twoD["NCMRZS"] != 0: zcmrcParams = [f"ZCMRC{e}" for e in range(1, self._data.twoD["NCMRZS"] + 1)] nzintsParams = [f"NZINTS{e}" for e in range(1, self._data.twoD["NCMRZS"] + 1)] with self.createRecord() as record: # Initialize the record if we're reading if self._data.fiveD is None: self._data.fiveD = {zcmrcParam: None for zcmrcParam in zcmrcParams} self._data.fiveD.update({nzintsParam: None for nzintsParam in nzintsParams}) for zcmrcParam in zcmrcParams: self._data.fiveD[zcmrcParam] = record.rwDouble(self._data.fiveD[zcmrcParam]) for nzintsParam in nzintsParams: self._data.fiveD[nzintsParam] = record.rwInt(self._data.fiveD[nzintsParam]) def readWrite(self): """Reads or writes metadata and data from the five records of the DIF3D binary file. .. impl:: Tool to read and write DIF3D files. :id: I_ARMI_NUCDATA_DIF3D :implements: R_ARMI_NUCDATA_DIF3D The reading and writing of the DIF3D binary file is performed using :py:class:`StreamWithDataContainer <.cccc.StreamWithDataContainer>` from the :py:mod:`~armi.nuclearDataIO.cccc` package. This class allows for the reading and writing of CCCC binary files, processing one record at a time using subclasses of the :py:class:`IORecord <.cccc.IORecord>`. Each record in a CCCC binary file consists of words that represent integers (short or long), floating-point numbers (single or double precision), or strings of data. One or more of these words are parsed one at a time by the reader. Multiple words processed together have meaning, such as such as groupwise overrelaxation factors. While reading, the data is stored in a Python dictionary as an attribute on the object, one for each record. The keys in each dictionary represent the parsed grouping of words in the records; for example, for the 4D record (stored as the attribute ``fourD``), each groupwise overrelaxation factor is stored as the key ``OMEGA{i}``, where ``i`` is the group number. 
See :need:`I_ARMI_NUCDATA` for more details on the general implementation. Each record is also embedded with the record size at the beginning and end of the record (always assumed to be present), which is used for error checking at the end of processing each record. The DIF3D reader processes the file identification record (stored as the attribute ``_metadata``) and the five data records for the DIF3D file, as defined in the specification for the file distributed with the DIF3D software. This class can also read and write an ASCII version of the DIF3D file. While this format is not used by the DIF3D software, it can be a useful representation for users to access the file in a human-readable format. """ msg = f"{'Reading' if 'r' in self._fileMode else 'Writing'} DIF3D binary data {self}" runLog.info(msg) self._rwFileID() self._rw1DRecord() self._rw2DRecord() self._rw3DRecord() self._rw4DRecord() self._rw5DRecord() readBinary = Dif3dStream.readBinary readAscii = Dif3dStream.readAscii writeBinary = Dif3dStream.writeBinary writeAscii = Dif3dStream.writeAscii ================================================ FILE: armi/nuclearDataIO/cccc/fixsrc.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ FIXSRC is a CCCC standard data file for storing multigroup fixed sources on a triangular mesh. Currently, the FIXSRC writing capability assumes a gamma (not neutron) fixed source problem. 
This enables photon transport problems. [CCCC-IV]_
"""

import collections

import numpy as np

from armi import runLog
from armi.nuclearDataIO import cccc


def readBinary(fileName):
    """Read a binary FIXSRC file."""
    # Shape (0, 0, 0, 0) placeholder: the real array is filled while reading.
    with FIXSRC(fileName, "rb", np.zeros((0, 0, 0, 0))) as fs:
        fs.readWrite()
    return fs.fixSrc


def writeBinary(fileName, fixSrcArray):
    """Write fixed source data to a FIXSRC file."""
    with FIXSRC(fileName, "wb", fixSrcArray) as fs:
        fs.readWrite()


class FIXSRC(cccc.Stream):
    """Read or write a binary FIXSRC file from DIF3D fixed source input."""

    def __init__(self, fileName, fileMode, fixSrc):
        """
        Initialize a gamma FIXSRC class for reading or writing a binary FIXSRC file for DIF3D
        gamma fixed source input.

        If the intent is to write a gamma FIXSRC file, the variable FIXSRC.fixSrc, which contains
        to-be-written core-wide multigroup gamma fixed source data, is constructed from an
        existing neutron RTFLUX file.

        Parameters
        ----------
        fileName : str, optional
            The file name of the RTFLUX/ATFLUX binary file to be read.

        fileMode : str, optional
            If 'wb', this class writes a FIXSRC binary file. If 'rb', this class reads a
            preexisting FIXSRC binary file.

        fixSrc : np.ndarray
            Core-wide multigroup gamma fixed-source data.
        """
        cccc.Stream.__init__(self, fileName, fileMode)

        # copied from a sample FIXSRC output from "type 19" DIF3D input
        self.label = "FIXSRC "
        self.fileId = 1
        self.fixSrc = fixSrc
        # fixSrc axes: (i, j, z, group) — see _rw3DRecord indexing below.
        ni, nj, nz, ng = self.fixSrc.shape

        # File control parameters, written in this exact key order by _rw1DRecord.
        # NOTE(review): "nedjk" looks like a typo for "nedgk", but it is only a
        # dict key (values are written positionally) — renaming would not change
        # the file contents; left as-is for consistency with existing data.
        self.fc = collections.OrderedDict(
            [
                ("itype", 0),
                ("ndim", 3),
                ("ngroup", ng),
                ("ninti", ni),
                ("nintj", nj),
                ("nintk", nz),
                ("idists", 1),
                ("ndcomp", 1),
                ("nscomp", 0),
                ("nedgi", 0),
                ("nedgj", 0),
                ("nedjk", 0),
                ("nblok", 1),
            ]
        )

    def readWrite(self):
        """Read or write a binary FIXSRC file for DIF3D fixed source input."""
        runLog.info("{} gamma fixed source file {}".format("Reading" if "r" in self._fileMode else "Writing", self))
        self._rwFileID()
        self._rw1DRecord()
        ng = self.fc["ngroup"]
        nz = self.fc["nintk"]
        # One 3D record per (group, axial node) pair.
        for g in range(ng):
            for z in range(nz):
                self._rw3DRecord(g, z)

    def _rwFileID(self):
        """Read file identification information."""
        with self.createRecord() as fileIdRecord:
            self.label = fileIdRecord.rwString(self.label, 24)
            self.fileId = fileIdRecord.rwInt(self.fileId)

    def _rw1DRecord(self):
        """Read/write parameters from/to the FIXSRC 1D block (file control)."""
        with self.createRecord() as record:
            for var in self.fc.keys():
                self.fc[var] = record.rwInt(self.fc[var])

    def _rw3DRecord(self, g, z):
        """
        Read/write fixed source data from 3D block records.

        Parameters
        ----------
        g : int
            The gamma energy group index.

        z : int
            The DIF3D axial node index.
        """
        with self.createRecord() as record:
            ni = self.fc["ninti"]
            nj = self.fc["nintj"]
            # Planar (i, j) values for this group/axial-node pair, j outer.
            for j in range(nj):
                for i in range(ni):
                    self.fixSrc[i, j, z, g] = record.rwDouble(self.fixSrc[i, j, z, g])



================================================
FILE: armi/nuclearDataIO/cccc/gamiso.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for reading GAMISO files which contains gamma cross section data.

GAMISO is a binary file created by MC**2-v3 that contains multigroup microscopic gamma cross
sections. GAMISO data is contained within a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.

.. impl:: Tool to read and write GAMISO files.
    :id: I_ARMI_NUCDATA_GAMISO
    :implements: R_ARMI_NUCDATA_GAMISO

    The majority of the functionality in this module is inherited from the
    :py:mod:`~armi.nuclearDataIO.cccc.isotxs` module. See
    :py:class:`~armi.nuclearDataIO.cccc.isotxs.IsotxsIO` and its associated implementation
    :need:`I_ARMI_NUCDATA_ISOTXS` for more information. The only difference from ISOTXS neutron
    data is a special treatment for gamma velocities, which is done by overriding
    ``_rwLibraryEnergies``. See [GAMSOR]_.

.. [GAMSOR] Smith, M. A., Lee, C. H., and Hill, R. N. GAMSOR: Gamma Source Preparation and DIF3D
    Flux Solution. United States: N. p., 2016. Web. doi:10.2172/1343095. `On OSTI
    <https://www.osti.gov/biblio/1343095-gamsor-gamma-source-preparation-dif3d-flux-solution>`__
"""

from armi import runLog
from armi.nuclearDataIO import xsLibraries, xsNuclides
from armi.nuclearDataIO.cccc import isotxs


def compare(lib1, lib2):
    """Compare two XSLibraries, and return True if equal, or False if not."""
    equal = True
    # first check the lib properties (also need to unlock to prevent from getting an exception).
    equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, "gammaEnergyUpperBounds")
    # compare the meta data
    equal &= lib1.gamisoMetadata.compare(lib2.gamisoMetadata, lib1, lib2)
    # check the nuclides
    for nucName in set(lib1.nuclideLabels + lib2.nuclideLabels):
        nuc1 = lib1.get(nucName, None)
        nuc2 = lib2.get(nucName, None)
        # Nuclides present in only one library are skipped, not counted as a diff.
        if nuc1 is None or nuc2 is None:
            continue
        equal &= compareNuclideXS(nuc1, nuc2)
    return equal


def compareNuclideXS(nuc1, nuc2):
    # True only if both the per-nuclide metadata and the gamma XS data match.
    equal = nuc1.gamisoMetadata.compare(nuc2.gamisoMetadata, nuc1.container, nuc2.container)
    equal &= nuc1.gammaXS.compare(nuc2.gammaXS, [])
    return equal


def addDummyNuclidesToLibrary(lib, dummyNuclides):
    """
    This method adds DUMMY nuclides to the current GAMISO library.

    Parameters
    ----------
    lib : obj
        GAMISO library object

    dummyNuclides: list
        List of DUMMY nuclide objects that will be copied and added to the GAMISO file

    Notes
    -----
    Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to
    provide a consistent set of nuclide-level data across all the nuclides in a
    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
    """
    if not dummyNuclides:
        runLog.important("No dummy nuclide data provided to be added to {}".format(lib))
        return False
    elif len(lib.xsIDs) > 1:
        runLog.warning(
            "Cannot add dummy nuclide data to GAMISO library {} containing data for more than 1 XS ID.".format(lib)
        )
        return False

    dummyNuclideKeysAddedToLibrary = []
    for dummyNuclide in dummyNuclides:
        dummyKey = dummyNuclide.nucLabel
        if len(lib.xsIDs):
            # Library keys are nuclide label + XS ID suffix.
            dummyKey += lib.xsIDs[0]
        if dummyKey in lib:
            continue

        runLog.debug("Adding {} nuclide data to {}".format(dummyKey, lib))
        newDummy = xsNuclides.XSNuclide(lib, dummyKey)
        # Copy gamiso metadata from the isotxs metadata of the given dummy nuclide
        for kk, vv in dummyNuclide.isotxsMetadata.items():
            if kk in ["jj", "jband"]:
                # clear out data here before populating with gamma groups
                newDummy.gamisoMetadata[kk] = {}
                for gNum in range(lib.gamisoMetadata["numGroups"]):
                    for bNum in range(lib.gamisoMetadata["maxScatteringBlocks"]):
                        newDummy.gamisoMetadata[kk][(gNum, bNum)] = 1
            else:
                newDummy.gamisoMetadata[kk] = vv
        lib[dummyKey] = newDummy
        dummyNuclideKeysAddedToLibrary.append(dummyKey)

    return any(dummyNuclideKeysAddedToLibrary)


class _GamisoIO(isotxs.IsotxsIO):
    """
    A reader/writer for GAMISO data files.

    Notes
    -----
    The GAMISO file format is identical to ISOTXS.
    """

    def _getFileMetadata(self):
        return self._lib.gamisoMetadata

    def _getNuclideIO(self):
        return _GamisoNuclideIO

    def _rwMessage(self):
        runLog.debug("{} GAMISO data {}".format("Reading" if "r" in self._fileMode else "Writing", self))

    def _rwLibraryEnergies(self, record):
        # neutron velocity (cm/s)
        metadata = self._getFileMetadata()
        metadata["gammaVelocity..NOT"] = record.rwList(
            metadata["gammaVelocity..NOT"], "float", self._metadata["numGroups"]
        )
        # read emax for each group in descending eV.
        self._lib.gammaEnergyUpperBounds = record.rwMatrix(
            self._lib.gammaEnergyUpperBounds, self._metadata["numGroups"]
        )


readBinary = _GamisoIO.readBinary
readAscii = _GamisoIO.readAscii
writeBinary = _GamisoIO.writeBinary
writeAscii = _GamisoIO.writeAscii


class _GamisoNuclideIO(isotxs._IsotxsNuclideIO):
    """
    A reader/writer for GAMISO nuclides.

    Notes
    -----
    The GAMISO file format is identical to ISOTXS.
    """

    _FILE_LABEL = "GAMISO"

    def _getFileMetadata(self):
        return self._lib.gamisoMetadata

    def _getNuclideMetadata(self):
        return self._nuclide.gamisoMetadata

    def _getMicros(self):
        return self._nuclide.gammaXS



================================================
FILE: armi/nuclearDataIO/cccc/geodst.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Read/write a CCCC GEODST geometry definition file.

GEODST files define fine and coarse meshes and mappings between region numbers and mesh indices.
They also store some zone information.

File format definition is from [CCCC-IV]_.

Examples
--------
>>> geo = geodst.readBinary("GEODST")
>>> print(geo.xmesh)
>>> geo.zmesh[-1] *= 2  # make a modification to data
>>> geodst.writeBinary(geo, "GEODST2")
"""

import numpy as np

from armi.nuclearDataIO import cccc

GEODST = "GEODST"

# See CCCC-IV documentation for definitions
FILE_SPEC_1D_KEYS = (
    "IGOM",
    "NZONE",
    "NREG",
    "NZCL",
    "NCINTI",
    "NCINTJ",
    "NCINTK",
    "NINTI",
    "NINTJ",
    "NINTK",
    "IMB1",
    "IMB2",
    "JMB1",
    "JMB2",
    "KMB1",
    "KMB2",
    "NBS",
    "NBCS",
    "NIBCS",
    "NZWBB",
    "NTRIAG",
    "NRASS",
    "NTHPT",
    "NGOP1",
    "NGOP2",
    "NGOP3",
    "NGOP4",
)


class GeodstData(cccc.DataContainer):
    """
    Data representation that can be read from or written to a GEODST file.

    The region numbers in this data structure START AT 1, not zero! Thus you must always remember
    the off-by-one conversion when comparing with list or matrix indices.

    Notes
    -----
    Analogous to a IsotxsLibrary for ISTOXS files.
    """

    def __init__(self):
        cccc.DataContainer.__init__(self)

        # 4D data
        self.xmesh = None
        self.ymesh = None
        self.zmesh = None
        self.iintervals = None
        self.jintervals = None
        self.kintervals = None

        # 5D data
        self.regionVolumes = None
        self.bucklings = None
        self.boundaryConstants = None
        self.internalBlackBoundaryConstants = None
        self.zonesWithBlackAbs = None
        self.zoneClassifications = None
        self.regionZoneNumber = None

        # 6d
        self.coarseMeshRegions = None

        # 7d
        self.fineMeshRegions = None


class GeodstStream(cccc.StreamWithDataContainer):
    """
    Stream for reading to/writing from with GEODST data.

    Parameters
    ----------
    geom : GeodstData
        Data structure

    fileName: str
        path to geodst file

    fileMode: str
        string indicating if ``fileName`` is being read or written, and
        in ascii or binary format
    """

    @staticmethod
    def _getDataContainer() -> GeodstData:
        return GeodstData()

    def readWrite(self):
        """
        Step through the structure of a GEODST file and read/write it.

        Logic to control which records will be present is here, which comes directly off the File
        specification.

        .. impl:: Tool to read and write GEODST files.
:id: I_ARMI_NUCDATA_GEODST :implements: R_ARMI_NUCDATA_GEODST Reading and writing GEODST files is performed using the general nuclear data I/O functionalities described in :need:`I_ARMI_NUCDATA`. Reading/writing a GEODST file is performed through the following steps: #. Read/write file ID record #. Read/write file specifications on 1D record. #. Based on the geometry type (``IGOM``), one of following records are read/written: * Slab (1), cylinder (3), or sphere (3): Read/write 1-D coarse mesh boundaries and fine mesh intervals. * X-Y (6), R-Z (7), Theta-R (8), uniform triangular (9), hexagonal (10), or R-Theta (11): Read/write 2-D coarse mesh boundaries and fine mesh intervals. * R-Theta-Z (12, 15), R-Theta-Alpha (13, 16), X-Y-Z (14), uniform triangular-Z (17), hexagonal-Z(18): Read/write 3-D coarse mesh boundaries and fine mesh intervals. #. If the geometry is not zero-dimensional (``IGOM`` > 0) and buckling values are specified (``NBS`` > 0): Read/write geometry data from 5D record. #. If the geometry is not zero-dimensional (``IGOM`` > 0) and region assignments are coarse-mesh-based (``NRASS`` = 0): Read/write region assignments to coarse mesh interval. #. If the geometry is not zero-dimensional (``IGOM`` > 0) and region assignments are fine-mesh-based (``NRASS`` = 1): Read/write region assignments to fine mesh interval. """ self._rwFileID() self._rw1DRecord() geomType = self._metadata["IGOM"] if 0 > geomType >= 3: self._rw2DRecord() elif 6 <= geomType <= 11: self._rw3DRecord() elif geomType >= 12: self._rw4DRecord() if geomType > 0 or self._metadata["NBS"] > 0: self._rw5DRecord() if geomType > 0: if self._metadata["NRASS"] == 0: self._rw6DRecord() elif self._metadata["NRASS"] == 1: self._rw7DRecord() def _rwFileID(self): """ Read/write file id record. Notes ----- The number 28 was actually obtained from a hex editor and may be code specific. 
""" with self.createRecord() as record: self._metadata["label"] = record.rwString(self._metadata["label"], 28) def _rw1DRecord(self): """ Read/write File specifications on 1D record. This record contains 27 integers. """ with self.createRecord() as record: for key in FILE_SPEC_1D_KEYS: self._metadata[key] = record.rwInt(self._metadata[key]) def _rw2DRecord(self): """Read/write 1-D coarse mesh boundaries and fine mesh intervals.""" with self.createRecord() as record: self._data.xmesh = record.rwList(self._data.xmesh, "double", self._metadata["NCINTI"] + 1) self._data.iintervals = record.rwList(self._data.iintervals, "int", self._metadata["NCINTI"]) def _rw3DRecord(self): """Read/write 2-D coarse mesh boundaries and fine mesh intervals.""" with self.createRecord() as record: self._data.xmesh = record.rwList(self._data.xmesh, "double", self._metadata["NCINTI"] + 1) self._data.ymesh = record.rwList(self._data.ymesh, "double", self._metadata["NCINTJ"] + 1) self._data.iintervals = record.rwList(self._data.iintervals, "int", self._metadata["NCINTI"]) self._data.jintervals = record.rwList(self._data.jintervals, "int", self._metadata["NCINTJ"]) def _rw4DRecord(self): """Read/write 3-D coarse mesh boundaries and fine mesh intervals.""" with self.createRecord() as record: self._data.xmesh = record.rwList(self._data.xmesh, "double", self._metadata["NCINTI"] + 1) self._data.ymesh = record.rwList(self._data.ymesh, "double", self._metadata["NCINTJ"] + 1) self._data.zmesh = record.rwList(self._data.zmesh, "double", self._metadata["NCINTK"] + 1) self._data.iintervals = record.rwList(self._data.iintervals, "int", self._metadata["NCINTI"]) self._data.jintervals = record.rwList(self._data.jintervals, "int", self._metadata["NCINTJ"]) self._data.kintervals = record.rwList(self._data.kintervals, "int", self._metadata["NCINTK"]) def _rw5DRecord(self): """Read/write Geometry data from 5D record.""" with self.createRecord() as record: self._data.regionVolumes = 
def _rw6DRecord(self):
    """Read/write region assignments to coarse mesh interval.

    Populates ``self._data.coarseMeshRegions``, a 3-D integer array of
    shape (NCINTI, NCINTJ, NCINTK), from one file record per axial (K)
    plane.
    """
    if self._data.coarseMeshRegions is None:
        # initialize all-zeros here before reading now that we
        # have the matrix dimension metadata available.
        self._data.coarseMeshRegions = np.zeros(
            (
                self._metadata["NCINTI"],
                self._metadata["NCINTJ"],
                self._metadata["NCINTK"],
            ),
            dtype=np.int32,
        )
    # One record per axial plane; each record carries an NCINTJ x NCINTI
    # integer matrix of region assignments for that plane.
    for ki in range(self._metadata["NCINTK"]):
        with self.createRecord() as record:
            self._data.coarseMeshRegions[:, :, ki] = record.rwIntMatrix(
                self._data.coarseMeshRegions[:, :, ki],
                self._metadata["NCINTJ"],
                self._metadata["NCINTI"],
            )
self._data.fineMeshRegions = np.zeros( ( self._metadata["NINTI"], self._metadata["NINTJ"], self._metadata["NINTK"], ), dtype=np.int16, ) for ki in range(self._metadata["NINTK"]): with self.createRecord() as record: self._data.fineMeshRegions[:, :, ki] = record.rwIntMatrix( self._data.fineMeshRegions[:, :, ki], self._metadata["NINTJ"], self._metadata["NINTI"], ) readBinary = GeodstStream.readBinary readAscii = GeodstStream.readAscii writeBinary = GeodstStream.writeBinary writeAscii = GeodstStream.writeAscii ================================================ FILE: armi/nuclearDataIO/cccc/isotxs.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module reads and writes ISOTXS files. ISOTXS is a binary file that contains multigroup microscopic cross sections. ISOTXS stands for *Isotope Cross Sections*. ISOTXS files are often created by a lattice physics code such as MC2 or DRAGON and used as input to a global flux solver such as DIF3D. This module implements reading and writing of the ISOTXS file format, consistent with [CCCC-IV]_. Examples -------- >>> from armi.nuclearDataIO.cccc import isotxs >>> myLib = isotxs.readBinary("ISOTXS-ref") >>> nuc = myLib.getNuclide("U235", "AA") >>> fis5 = nuc.micros.fission[5] >>> scat = nuc.micros.scatter[(0, 5, 6, 1)] # 1st order elastic scatter from group 5->6 >>> nuc.micros.fission[7] = fis5 * 1.01 # you can modify the isotxs too. 
def compareSet(fileNames, tolerance=0.0, verbose=False):
    """
    Read every named ISOTXS binary and compare all unique pairs.

    Parameters
    ----------
    fileNames : list of str
        Paths of the ISOTXS binaries to read and cross-compare.
    tolerance : float
        Differences below this threshold are ignored.
    verbose : bool
        Show the non-equal XS matrices; output gets VERY long.

    Notes
    -----
    Useful for finding mcc bugs when you want to compare a series of very
    similar isotxs outputs.
    """
    libraries = [readBinary(name) for name in fileNames]
    equalPairs = []
    for libA, libB in itertools.combinations(libraries, 2):
        # every unique unordered pair of libraries
        runLog.info("\n*****\n*****comparing {} and {}\n*****".format(libA, libB))
        if compare(libA, libB, tolerance, verbose):
            equalPairs.append((libA, libB))

    sameFileNames = "\n" + "".join("\t{} and {}\n".format(a, b) for a, b in equalPairs)
    if sameFileNames == "\n":
        sameFileNames += "None were the same"
    runLog.info("the following libraries are the same within the specified tolerance:{}".format(sameFileNames))
def addDummyNuclidesToLibrary(lib, dummyNuclides):
    """
    Add DUMMY nuclides to the given ISOTXS library.

    Parameters
    ----------
    lib : obj
        ISOTXS library object

    dummyNuclides: list
        List of DUMMY nuclide objects that will be copied and added to the GAMISO file

    Returns
    -------
    bool
        True if at least one dummy nuclide was added, False otherwise.

    Notes
    -----
    Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to
    provide a consistent set of nuclide-level data across all the nuclides in a
    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
    """
    # guard clauses: nothing to add, or ambiguous multi-XS-ID library
    if not dummyNuclides:
        runLog.important("No dummy nuclide data provided to be added to {}".format(lib))
        return False
    if len(lib.xsIDs) > 1:
        runLog.warning(
            "Cannot add dummy nuclide data to ISOTXS library {} containing data for more than 1 XS ID.".format(lib)
        )
        return False

    xsIdSuffix = lib.xsIDs[0] if len(lib.xsIDs) else ""
    addedAny = False
    for dummy in dummyNuclides:
        containerKey = dummy.nucLabel + xsIdSuffix
        if containerKey in lib:
            # already present; leave existing data untouched
            continue
        newNuclide = xsNuclides.XSNuclide(lib, containerKey)
        newNuclide.micros = dummy.micros
        # Copy isotxs metadata from the isotxs metadata of the given dummy nuclide
        for name, value in dummy.isotxsMetadata.items():
            if name in ("jj", "jband"):
                # band maps are replaced with all-ones placeholders
                newNuclide.isotxsMetadata[name] = {mm: 1 for mm in value}
            else:
                newNuclide.isotxsMetadata[name] = value
        lib[containerKey] = newNuclide
        addedAny = True
    return addedAny
""" _FILE_LABEL = "ISOTXS" def __init__(self, fileName, lib, fileMode, getNuclideFunc): cccc.Stream.__init__(self, fileName, fileMode) self._lib = lib self._metadata = self._getFileMetadata() self._metadata.fileNames.append(fileName) self._getNuclide = getNuclideFunc def _getFileMetadata(self): return self._lib.isotxsMetadata def _getNuclideIO(self): return _IsotxsNuclideIO @classmethod def _read(cls, fileName, fileMode): lib = xsLibraries.IsotxsLibrary() return cls._readWrite( lib, fileName, fileMode, lambda containerKey: xsNuclides.XSNuclide(lib, containerKey), ) @classmethod def _write(cls, lib, fileName, fileMode): return cls._readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey]) @classmethod def _readWrite(cls, lib, fileName, fileMode, getNuclideFunc): with cls(fileName, lib, fileMode, getNuclideFunc) as rw: rw.readWrite() return lib def _rwMessage(self): runLog.debug("{} ISOTXS data {}".format("Reading" if "r" in self._fileMode else "Writing", self)) def _updateFileLabel(self): """ Update the file label when reading in the ISOTXS-like file if it differs from its expected value. Notes ----- This occurs when MC2-3 is preparing GAMISO files. The merging of ISOTXS-like files fail if the labels are not unique (i.e. merging ISOTXS into GAMISO with each file having a file label of `ISOTXS`. """ if self._metadata["label"] != self._FILE_LABEL: runLog.debug( "File label in {} is not the expected type. Updating the label from {} to {}".format( self, self._metadata["label"], self._FILE_LABEL ) ) self._metadata["label"] = self._FILE_LABEL def readWrite(self): """Read and write ISOTSX file. .. impl:: Tool to read and write ISOTXS files. :id: I_ARMI_NUCDATA_ISOTXS :implements: R_ARMI_NUCDATA_ISOTXS Reading and writing ISOTXS files is performed using the general nuclear data I/O functionalities described in :need:`I_ARMI_NUCDATA`. Reading/writing a ISOTXS file is performed through the following steps: #. Read/write file ID record #. 
Read/write file 1D record, which includes: * Number of energy groups (``NGROUP``) * Maximum number of up-scatter groups (``MAXUP``) * Maximum number of down-scatter groups (``MAXDN``) * Maximum scattering order (``MAXORD``) * File-wide specification on fission spectrum type, i.e. vector or matrix (``ICHIST``) * Maximum number of blocks of scattering data (``MSCMAX``) * Subblocking control for scatter matrices (``NSBLOK``) #. Read/write file 2D record, which includes: * Library IDs for each isotope (``HSETID(I)``) * Isotope names (``HISONM(I)``) * Global fission spectrum (``CHI(J)``) if file-wide spectrum is specified (``ICHIST`` = 1) * Energy group structure (``EMAX(J)`` and ``EMIN``) * Locations of each nuclide record in the file (``LOCA(I)``) .. note:: The offset data is not read from the binary file because the ISOTXS reader can dynamically calculate the offset itself. Therefore, during a read operation, this data is ignored. #. Read/write file 4D record for each nuclide, which includes isotope-dependent, group-independent data. #. Read/write file 5D record for each nuclide, which includes principal cross sections. #. Read/write file 6D record for each nuclide, which includes fission spectrum if it is flagged as a matrix (``ICHI`` > 1). #. Read/write file 7D record for each nuclide, which includes the scattering matrices. 
""" self._rwMessage() properties.unlockImmutableProperties(self._lib) try: self._fileID() numNucs = self._rw1DRecord(len(self._lib)) nucNames = self._rw2DRecord(numNucs, self._lib.nuclideLabels) if self._metadata["fileWideChiFlag"] > 1: self._rw3DRecord() for nucLabel in nucNames: # read nuclide name, other global stuff from the ISOTXS library nuc = self._getNuclide(nucLabel) if "r" in self._fileMode: # on add nuclides when reading self._lib[nucLabel] = nuc nuclideIO = self._getNuclideIO()(nuc, self, self._lib) nuclideIO.rwNuclide() except Exception: raise OSError("Failed to read/write {} \n\n\n{}".format(self, traceback.format_exc())) finally: properties.lockImmutableProperties(self._lib) def _fileID(self): with self.createRecord() as record: self._metadata["label"] = record.rwString(self._metadata["label"], 24) self._metadata["fileId"] = record.rwInt(self._metadata["fileId"]) self._updateFileLabel() def _rw1DRecord(self, numNucs): with self.createRecord() as record: self._metadata["numGroups"] = record.rwInt(self._metadata["numGroups"]) numNucs = record.rwInt(numNucs) self._metadata["maxUpScatterGroups"] = record.rwInt(self._metadata["maxUpScatterGroups"]) self._metadata["maxDownScatterGroups"] = record.rwInt(self._metadata["maxDownScatterGroups"]) self._metadata["maxScatteringOrder"] = record.rwInt(self._metadata["maxScatteringOrder"]) self._metadata["fileWideChiFlag"] = record.rwInt(self._metadata["fileWideChiFlag"]) self._metadata["maxScatteringBlocks"] = record.rwInt(self._metadata["maxScatteringBlocks"]) self._metadata["subblockingControl"] = record.rwInt(self._metadata["subblockingControl"]) return numNucs def _rw2DRecord(self, numNucs, nucNames): """ Read 2D ISOTXS record. Notes ----- Contains isotope names, global chi distribution, energy group structure, and locations of each nuclide record in the file """ with self.createRecord() as record: # skip "merger test..." 
string self._metadata["libraryLabel"] = record.rwString(self._metadata["libraryLabel"], 12 * 8) nucNames = record.rwList(nucNames, "string", numNucs, 8) if self._metadata["fileWideChiFlag"] == 1: # file-wide chi distribution vector listed here. self._metadata["chi"] = record.rwMatrix(self._metadata["chi"], self._metadata["numGroups"]) self._rwLibraryEnergies(record) self._metadata["minimumNeutronEnergy"] = record.rwFloat(self._metadata["minimumNeutronEnergy"]) record.rwList(self._computeNuclideRecordOffset(), "int", numNucs) return nucNames def _rwLibraryEnergies(self, record): # neutron velocity (cm/s) self._lib.neutronVelocity = record.rwMatrix(self._lib.neutronVelocity, self._metadata["numGroups"]) # read emax for each group in descending eV. self._lib.neutronEnergyUpperBounds = record.rwMatrix( self._lib.neutronEnergyUpperBounds, self._metadata["numGroups"] ) def _rw3DRecord(self): """Read file-wide chi-distribution matrix.""" raise NotImplementedError def _computeNuclideRecordOffset(self): """ Compute the record offset of each nuclide. Notes ----- The offset data is not read from the binary file because the ISOTXS reader can dynamically calculate the offset itself. Therefore, during a read operation, this data is ignored. """ recordsPerNuclide = [self._computeNumIsotxsRecords(nuc) for nuc in self._lib.nuclides] return [sum(recordsPerNuclide[0:ii]) for ii in range(len(self._lib))] def _computeNumIsotxsRecords(self, nuclide): """Compute the number of ISOTXS records for a specific nuclide.""" numRecords = 2 metadata = self._getNuclideIO()(nuclide, self, self._lib)._getNuclideMetadata() if metadata["chiFlag"] > 1: numRecords += 1 numRecords += sum(1 for _ord in metadata["ords"] if _ord > 0) return numRecords readBinary = IsotxsIO.readBinary readAscii = IsotxsIO.readAscii writeBinary = IsotxsIO.writeBinary writeAscii = IsotxsIO.writeAscii class _IsotxsNuclideIO: """ A reader/writer class for ISOTXS nuclides. 
Notes ----- This is to be used in conjunction with an IsotxsIO object. """ def __init__(self, nuclide, isotxsIO, lib): self._nuclide = nuclide self._metadata = self._getNuclideMetadata() self._isotxsIO = isotxsIO self._lib = lib self._fileWideChiFlag = self._getFileMetadata()["fileWideChiFlag"] self._fileWideChi = self._getFileMetadata()["chi"] self._numGroups = self._getFileMetadata()["numGroups"] self._maxScatteringBlocks = self._getFileMetadata()["maxScatteringBlocks"] self._subblockingControl = self._getFileMetadata()["subblockingControl"] def _getFileMetadata(self): return self._lib.isotxsMetadata def _getNuclideMetadata(self): return self._nuclide.isotxsMetadata def _getMicros(self): return self._nuclide.micros def rwNuclide(self): """Read nuclide name, other global stuff from the ISOTXS library.""" properties.unlockImmutableProperties(self._nuclide) try: self._rw4DRecord() self._nuclide.updateBaseNuclide() self._rw5DRecord() if self._metadata["chiFlag"] > 1: self._rw6DRecord() # get scatter matrix for blockNumIndex in range(self._maxScatteringBlocks): for subBlock in range(self._subblockingControl): if self._metadata["ords"][blockNumIndex] > 0: # ords flag == 1 implies this scatter type of scattering exists on this nuclide. self._rw7DRecord(blockNumIndex, subBlock) finally: properties.lockImmutableProperties(self._nuclide) def _rw4DRecord(self): """ Read 4D ISOTXS record. Notes ----- Read the following individual nuclide XS record. Load data into nuc. This record contains non-mg data like atomic mass, temperature, and some flags. 
""" with self._isotxsIO.createRecord() as nucRecord: # read string data for datum in ["nuclideId", "libName", "isoIdent"]: self._metadata[datum] = nucRecord.rwString(self._metadata[datum], 8) # read float data for datum in ["amass", "efiss", "ecapt", "temp", "sigPot", "adens"]: self._metadata[datum] = nucRecord.rwFloat(self._metadata[datum]) # read integer data for datum in [ "classif", "chiFlag", "fisFlag", "nalph", "np", "n2n", "nd", "nt", "ltot", "ltrn", "strpd", ]: self._metadata[datum] = nucRecord.rwInt(self._metadata[datum]) # defines what kind of scattering block each block is; total, inelastic, elastic, n2n self._metadata["scatFlag"] = nucRecord.rwList(self._metadata["scatFlag"], "int", self._maxScatteringBlocks) # number of scattering orders in this block. if 0, this block isn't present. self._metadata["ords"] = nucRecord.rwList(self._metadata["ords"], "int", self._maxScatteringBlocks) # bandwidth of this block: number of groups that scatter into this group, including this one. jband = self._metadata["jband"] or {} for n in range(self._maxScatteringBlocks): for j in range(self._numGroups): jband[j, n] = nucRecord.rwInt(jband.get((j, n), None)) self._metadata["jband"] = jband # position of in-group scattering for scattering data in group j jj = self._metadata["jj"] or {} # Some mcc**2 cases seem to just have a bunch of 1's listed here. # does this mean we never have upscatter? possibly. 
def _rw5DRecord(self):
    """Read principal microscopic MG XS data for a nuclide.

    The record layout is strictly ordered: transport, total, (n,gamma),
    then optional fission data, chi, the minor reaction cross sections,
    and finally the directional transport XS. Absent data is filled with
    default (zero) cross sections so downstream consumers always see a
    complete set.
    """
    with self._isotxsIO.createRecord() as record:
        micros = self._getMicros()
        nuc = self._nuclide
        numGroups = self._numGroups
        micros.transport = record.rwMatrix(micros.transport, self._metadata["ltrn"], numGroups)
        micros.total = record.rwMatrix(micros.total, self._metadata["ltot"], numGroups)
        micros.nGamma = record.rwMatrix(micros.nGamma, numGroups)
        # fission data is only on the record for fissile nuclides (fisFlag > 0)
        if self._metadata["fisFlag"] > 0:
            micros.fission = record.rwMatrix(micros.fission, numGroups)
            micros.neutronsPerFission = record.rwMatrix(micros.neutronsPerFission, numGroups)
        else:
            micros.fission = micros.getDefaultXs(numGroups)
            micros.neutronsPerFission = micros.getDefaultXs(numGroups)
        # chi comes from this record (vector), the file-wide chi, or defaults
        if self._metadata["chiFlag"] == 1:
            micros.chi = record.rwMatrix(micros.chi, numGroups)
        elif self._metadata["fisFlag"] > 0:
            if self._fileWideChiFlag != 1:
                raise OSError("Fissile nuclide {} in library but no individual or global chi!".format(nuc))
            micros.chi = self._fileWideChi
        else:
            micros.chi = micros.getDefaultXs(numGroups)
        # read some other important XS, if they exist (flags in metadata
        # indicate presence; attribute names match the flag names)
        for xstype in ["nalph", "np", "n2n", "nd", "nt"]:
            if self._metadata[xstype]:
                micros.__dict__[xstype] = record.rwMatrix(micros.__dict__[xstype], numGroups)
            else:
                micros.__dict__[xstype] = micros.getDefaultXs(numGroups)
        # coordinate direction transport cross section (for various coordinate directions)
        if self._metadata["strpd"] > 0:
            micros.strpd = record.rwMatrix(micros.strpd, self._metadata["strpd"], numGroups)
        else:
            micros.strpd = micros.getDefaultXs(numGroups)
Parameters ---------- blockNumIndex : int Index of the scattering block (aka type of scattering) in this nuclide subBlock : int Index-tracking integer. Since neutrons don't scatter to and from all energies, there is a bandwidth defined to save on storage. Notes ----- The data is stored as a giant array, and read in as a CSR matrix. The below matrix is lower triangular, where periods are non-zero. . 0 0 0 0 0 . . 0 0 0 0 . . . 0 0 0 . . . . 0 0 . . . . . 0 . . . . . . The data is read in rows starting at the top and going to the bottom. Per row, there are JBAND non-zero entries. Per row, there are JJ non-zero entries on or beyond the diagonal. . 0 0 0 0 0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Additionally, the data is reversed for whatever reason. So, let's say we are reading the third row in our ficitious matrix. JBAND is 2, JJ is 1. We will read "1" first, and then "2" from the ISOTXS. Since they are backwards, we need to reverse the numbers before putting them into the matrix. . 0 0 0 0 0 . . - - - - . 2 1 - - - - - - - - - - - - - - - - - - - - - However, since we are reading a CSR, we can just add the indices in reverse (this is fast) and read the data in as is (which is a bit slower). Then we will allow the CSR matrix to fix the order later on, if necessary. """ scatter = self._getScatterMatrix(blockNumIndex) if scatter is not None: scatter = scatter.toarray() with self._isotxsIO.createRecord() as record: ng = self._numGroups nsblok = self._subblockingControl m = subBlock + 1 # fix starting at zero problem and use same indices as CCCC specification # be careful with starting indices at 0 here!! 
lordn = self._metadata["ords"][blockNumIndex] # this is basically how many scattering cross sections there are for this scatter type for this nuclide jl = (m - 1) * ((ng - 1) // nsblok + 1) + 1 jup = m * ((ng - 1) // nsblok + 1) ju = min(ng, jup) metadata = self._metadata indptr = [0] indices = [] dataVals = [] for _scatterLoopOrder in range(lordn): for g in range(jl - 1, ju): jup = g + metadata["jj"][g, blockNumIndex] bandWidth = metadata["jband"][g, blockNumIndex] jdown = jup - bandWidth if scatter is None: indptr.append(len(indices) + bandWidth) # add the indices in reverse indices.extend(range(jup - 1, jdown - 1, -1)) # read the data as-is for _ in range(bandWidth): dataVals.append(record.rwFloat(0.0)) else: for xs in reversed(scatter[g, jdown:jup].tolist()): record.rwFloat(xs) if scatter is None: # we're reading. scatter = sparse.csr_matrix((np.array(dataVals), indices, indptr), shape=(ng, ng)) scatter.eliminate_zeros() self._setScatterMatrix(blockNumIndex, scatter) def _getScatterBlockNum(self, scatterType): """ Determine which scattering block is elastic scattering. This information is stored in the scatFlab libparam and is possibly different for each nuclide (e.g. C, B-10, etc.) Parameters ---------- scatterType : int ISOTXS-defined special int flag for a scatter type (100 for elastic, etc.) Returns ------- blockNum : int A index of the scatter matrix. 
""" try: return np.where(self._metadata["scatFlag"] == scatterType)[0][0] except IndexError: return None def _getElasticScatterBlockNumIndex(self, legendreOrder=0): return self._getScatterBlockNum(ELASTIC_SCATTER + legendreOrder) def _getInelasticScatterBlockNumIndex(self): return self._getScatterBlockNum(INELASTIC_SCATTER) def _getN2nScatterBlockNumIndex(self): return self._getScatterBlockNum(N2N_SCATTER) def _getTotalScatterBlockNumIndex(self): return self._getScatterBlockNum(TOTAL_SCATTER) def _setScatterMatrix(self, blockNumIndex, scatterMatrix): """ Sets scatter matrix data to the proper ``scatterMatrix`` for this ``blockNum``. blockNumIndex : int Index of a scattering block. """ if blockNumIndex == self._getElasticScatterBlockNumIndex(): self._getMicros().elasticScatter = scatterMatrix elif blockNumIndex == self._getInelasticScatterBlockNumIndex(): self._getMicros().inelasticScatter = scatterMatrix elif blockNumIndex == self._getN2nScatterBlockNumIndex(): self._getMicros().n2nScatter = scatterMatrix elif blockNumIndex == self._getTotalScatterBlockNumIndex(): self._getMicros().totalScatter = scatterMatrix elif blockNumIndex == self._getElasticScatterBlockNumIndex(1): self._getMicros().elasticScatter1stOrder = scatterMatrix else: self._getMicros().higherOrderScatter[blockNumIndex] = scatterMatrix def _getScatterMatrix(self, blockNumIndex): """ Get the scatter matrix for a particular blockNum. Notes ----- This logic could be combined with _setScatterMatrix. 
""" if blockNumIndex == self._getElasticScatterBlockNumIndex(): scatterMatrix = self._getMicros().elasticScatter elif blockNumIndex == self._getInelasticScatterBlockNumIndex(): scatterMatrix = self._getMicros().inelasticScatter elif blockNumIndex == self._getN2nScatterBlockNumIndex(): scatterMatrix = self._getMicros().n2nScatter elif blockNumIndex == self._getTotalScatterBlockNumIndex(): scatterMatrix = self._getMicros().totalScatter elif blockNumIndex == self._getElasticScatterBlockNumIndex(1): scatterMatrix = self._getMicros().elasticScatter1stOrder else: scatterMatrix = self._getMicros().higherOrderScatter.get(blockNumIndex, None) return scatterMatrix ================================================ FILE: armi/nuclearDataIO/cccc/labels.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Reads and writes region and composition label data from a LABELS interface file. LABELS files are produced by DIF3D/VARIANT. They are very similar in structure and format to CCCC files but are not officially in the CCCC documents. 
The file structure is listed here:: RECORD TYPE PRESENT IF =============================== ================ FILE IDENTIFICATION ALWAYS SPECIFICATIONS ALWAYS LABEL AND AREA DATA ALWAYS FINITE-GEOMETRY TRANSVERSE NHTS1.GT.0 OR DISTANCES NGTS2.GT.0 NUCLIDE SET LABELS NSETS.GT.1 ALIAS ZONE LABELS NALIAS.GT.0 GENERAL CONTROL-ROD MODEL DATA NBANKS.GT.0 ***********(REPEAT FOR ALL BANKS) * CONTROL-ROD BANK DATA NBANKS.GT.0 * * *******(REPEAT FOR ALL RODS IN BANK) * * CONTROL-ROD CHANNEL DATA (LLCHN+LLROD+MMESH).GT.0 ********** BURNUP DEPENDENT CROSS SECTION NVARY.GT.0 SPECIFICATIONS BURNUP DEPENDENT GROUPS MAXBRN.GT.0 BURNUP DEPENDENT FITTING MAXORD.GT.0 COEFFICIENTS Reference: [DIF3D]_. Examples -------- >>> labelData = LabelStream.readBinary("LABELS") """ from armi import runLog from armi.nuclearDataIO import cccc LABELS = "LABELS" FILE_SPEC_1D_KEYS = [ "numZones", "numRegions", "numAreas", "numRegionAreaAssignments", "numHalfHeightsDirection1", "numHalfHeightsDirection2", "numNuclideSets", "numZoneAliases", "numTrianglesPerHex", "numHexagonalRings", "numControlRodChannels", "numControlRodBanks", "numAxialFineMeshBins", "maxControlRodBankTimes", "maxControlRodsPerBank", "maxControlRodsMeshes", "maxControlRodPieces", "maxControlRodChannels", "numBurnupDependentIsotopes", "maxBurnupDependentGroups", "maxBurnupPolynomialOrder", "modelDimensions", ] class LabelsData(cccc.DataContainer): """ Data structure containing various region, zone, area, nuclide labels. This is the data structure that is read from or written to a LABELS file. 
""" def __init__(self): cccc.DataContainer.__init__(self) self.regionLabels = [] self.zoneLabels = [] self.areaLabels = [] self.regionAreaAssignments = [] self.halfHeightsDirection1 = [] self.halfHeightsDirection2 = [] self.extrapolationDistance1 = [] self.extrapolationDistance2 = [] self.nuclideSetLabels = [] self.aliasZoneLabels = [] class LabelsStream(cccc.StreamWithDataContainer): """ Class for reading and writing the LABELS interface file produced by DIF3D/VARIANT. Notes ----- Contains region and composition labels, area data, half heights, nuclide set labels, alias zone labels, control-rod model data, and burnup dependent cross section data. See Also -------- armi.nuclearDataIO.cccc.compxs """ @staticmethod def _getDataContainer() -> LabelsData: return LabelsData() def readWrite(self): runLog.info("{} LABELS data {}".format("Reading" if "r" in self._fileMode else "Writing", self)) self._rwFileID() self._rw1DRecord() self._rw2DRecord() if self._metadata["numHalfHeightsDirection1"] > 0 or self._metadata["numHalfHeightsDirection2"] > 0: self._rw3DRecord() if self._metadata["numNuclideSets"] > 1: self._rw4DRecord() if self._metadata["numZoneAliases"] > 0: self._rw5DRecord() if self._metadata["numControlRodBanks"] > 0: self._rw6DRecord() self._rw7DRecord() self._rw8DRecord() if self._metadata["numBurnupDependentIsotopes"] > 0: self._rw9DRecord() if self._metadata["maxBurnupDependentGroups"] > 0: self._rw10DRecord() if self._metadata["maxBurnupPolynomialOrder"] > 0: self._rw11DRecord() def _rwFileID(self): with self.createRecord() as record: for name in ["hname", "huse", "huse2"]: self._metadata[name] = record.rwString(self._metadata[name], 8) self._metadata["version"] = record.rwInt(self._metadata["version"]) def _rw1DRecord(self): """Read/write the file specification data.""" with self.createRecord() as record: for param in FILE_SPEC_1D_KEYS: self._metadata[param] = record.rwInt(self._metadata[param]) self._metadata["dummy"] = 
record.rwList(self._metadata["dummy"], "int", 2) def _rw2DRecord(self): """Read/write the label and area data.""" with self.createRecord() as record: self._data.zoneLabels = record.rwList(self._data.zoneLabels, "string", self._metadata["numZones"], 8) self._data.regionLabels = record.rwList( self._data.regionLabels, "string", self._metadata["numRegions"], 8, ) self._data.areaLabels = record.rwList(self._data.areaLabels, "string", self._metadata["numAreas"], 8) self._data.regionAreaAssignments = record.rwList( self._data.regionAreaAssignments, "string", self._metadata["numRegionAreaAssignments"], 8, ) def _rw3DRecord(self): """Read/write the finite-geometry transverse distances.""" with self.createRecord() as record: self._data.halfHeightsDirection1 = record.rwList( self._data.halfHeightsDirection1, "float", self._metadata["numHalfHeightsDirection1"], ) self._data.extrapolationDistance1 = record.rwList( self._data.extrapolationDistance1, "float", self._metadata["numHalfHeightsDirection1"], ) self._data.halfHeightsDirection2 = record.rwList( self._data.halfHeightsDirection2, "float", self._metadata["numHalfHeightsDirection2"], ) self._data.extrapolationDistance2 = record.rwList( self._data.extrapolationDistance2, "float", self._metadata["numHalfHeightsDirection2"], ) def _rw4DRecord(self): """Read/write the nuclide labels.""" with self.createRecord() as record: self._data.nuclideSetLabels = record.rwList( self._data.nuclideSetLabels, "string", self._metadata["numNuclideSets"], 8, ) def _rw5DRecord(self): """Read/write the zone aliases.""" with self.createRecord() as record: self._data.aliasZoneLabels = record.rwList( self._data.aliasZoneLabels, "string", self._metadata["numZoneAliases"], 8, ) def _rw6DRecord(self): """Read/write the general control-rod model data.""" raise NotImplementedError("Control rod data not implemented") def _rw7DRecord(self): """Read/write the control-rod bank data.""" raise NotImplementedError("Control rod data not implemented") def 
_rw8DRecord(self): """Read/write the control-rod channel data.""" raise NotImplementedError("Control rod data not implemented") def _rw9DRecord(self): """Read/write the burnup-dependent cross section specifications.""" raise NotImplementedError("BU dependent XS data not implemented") def _rw10DRecord(self): """Read/write the burnup-dependent group data.""" raise NotImplementedError("BU dependent XS data not implemented") def _rw11DRecord(self): """Read/write the burnup-dependent fitting coefficient data.""" raise NotImplementedError("BU dependent XS data not implemented") readBinary = LabelsStream.readBinary readAscii = LabelsStream.readAscii writeBinary = LabelsStream.writeBinary writeAscii = LabelsStream.writeAscii ================================================ FILE: armi/nuclearDataIO/cccc/nhflux.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ NHFLUX is a CCCC interface file that stores flux moments and partial currents from DIF3D-Nodal and DIF3D-VARIANT. 
Examples -------- >>> nhfluxData = NfluxStream.readBinary("NHFLUX") >>> NhfluxStream.writeAscii(nhfluxData, "nhflux.ascii") """ import numpy as np from armi.nuclearDataIO import cccc FILE_SPEC_1D_KEYS = ( "ndim", "ngroup", "ninti", "nintj", "nintk", "iter", "effk", "power", "nSurf", "nMom", "nintxy", "npcxy", "nscoef", "itrord", "iaprx", "ileak", "iaprxz", "ileakz", "iorder", ) FILE_SPEC_1D_KEYS_VARIANT11 = ( "npcbdy", "npcsym", "npcsec", "iwnhfl", "nMoms", ) class NHFLUX(cccc.DataContainer): """ An abstraction of a NHFLUX file. This format is defined in the DIF3D manual. Note that the format for DIF3D-Nodal and DIF3D-VARIANT are not the same. The VARIANT NHFLUX format has recently changed, so this reader is only compatible with files produced by v11.0 of the solver. Attributes ---------- metadata : file control The NHFLUX file control info (sort of global for this library). This is the contents of the 1D data block on the file. incomingPointersToAllAssemblies: 2-D list of floats This is an index map for the "internal surfaces" between DIF3D nodal indexing and DIF3D GEODST indexing. It can be used to process incoming partial currents. This uses the same ordering as the geodstCoordMap attribute. externalCurrentPointers : list of ints This is an index map for the "external surfaces" between DIF3D nodal indexing and DIF3D GEODST indexing. "External surfaces" are important because they contain the INCOMING partial currents from the outer reactor boundary. This uses the same ordering as geodstCoordMap, except that each assembly now has multiple subsequent indices. For example, for a hexagonal core, if hex of index n (0 to N-1) has a surface of index k (0 to 5) that lies on the vacuum boundary, then the index of that surface is N*6 + k + 1. geodstCoordMap : list of ints This is an index map between DIF3D nodal and DIF3D GEODST. It is necessary for interpreting the ordering of flux and partial current data in the NHFLUX file. 
Note that this mapping between DIF3D-Nodal and DIF3D-VARIANT is not the same. outgoingPCSymSeCPointers: list of ints This is an index map for the outpgoing partial currents on the symmetric and sector lateral boundary. It is only present for DIF3D-VARIANT for hexagonal cores. ingoingPCSymSeCPointers: list of ints This is an index map for the ingoing (or incoming) partial currents on the symmetric and sector lateral boundary. It is only present for DIF3D-VARIANT for hexagonal cores. fluxMomentsAll : 4-D list of floats This contains all the flux moments for all core assemblies. The jth planar flux moment of assembly i in group g in axial node k is fluxMoments[i][k][j][g]. The assemblies are ordered according to the geodstCoordMap attribute. For DIF3D-VARIANT, this includes both even and odd parity moments. partialCurrentsHexAll : 5-D list of floats This contains all the OUTGOING partial currents for all core assemblies. The OUTGOING partial current on surface j in assembly i in axial node k in group g is partialCurrentsHex[i][k][j][g][m], where m=0. The assemblies are ordered according to the geodstCoordMap attribute. For DIF3D-VARIANT, higher-order data is available for the m axis. partialCurrentsHex_extAll : 4-D list of floats This contains all the INCOMING partial currents on "external surfaces", which are adjacent to the reactor outer boundary (usually vacuum). Internal reflective surfaces are NOT included in this! These "external surfaces" are ordered according to externalCurrentPointers. For DIF3D-VARIANT, higher-order data is available for the last axis. partialCurrentsZAll : 5-D list of floats This contains all the upward and downward partial currents for all core assemblies. The assemblies are ordered according to the geodstCoordMap attribute. For DIF3D-VARIANT, higher- order data is available for the last axis. Warning ------- DIF3D outputs NHFLUX at every time node, but REBUS outputs NHFLUX only at every cycle. 
See Also -------- [VARIANT-95]_ and [VARIANT-2014]_. .. [VARIANT-95] G. Palmiotti, E. E. Lewis, and C. B. Carrico, VARIANT: VARIational Anisotropic Nodal Transport for Multidimensional Cartesian and Hexagonal Geometry Calculation, ANL-95/40, Argonne National Laboratory, Argonne, IL (October 1995). .. [VARIANT-2014] Smith, M. A., Lewis, E. E., and Shemon, E. R. DIF3D-VARIANT 11.0: A Decade of Updates. United States: N. p., 2014. Web. doi:10.2172/1127298. https://publications.anl.gov/anlpubs/2014/04/78313.pdf """ def __init__(self, fName="NHFLUX", variant=False, numDataSetsToRead=1): """ Initialize the NHFLUX or NAFLUX reader object. Parameters ---------- fName : str, optional Filename of the NHFLUX binary file to be read. variant : bool, optional Whether or not this NHFLUX/NAFLUX file has the DIF3D-VARIANT output format, which is different than the DIF3D-Nodal format. """ cccc.DataContainer.__init__(self) self.metadata["variantFlag"] = variant self.metadata["numDataSetsToRead"] = numDataSetsToRead # Initialize instance array variables self.incomingPointersToAllAssemblies: np.ndarray = np.array([]) self.externalCurrentPointers: np.ndarray = np.array([]) self.geodstCoordMap: np.ndarray = np.array([]) if self.metadata["variantFlag"]: self.outgoingPCSymSecPointers: np.ndarray = np.array([]) self.ingoingPCSymSecPointers: np.ndarray = np.array([]) self.fluxMomentsAll: np.ndarray = np.array([]) self.partialCurrentsHexAll: np.ndarray = np.array([]) self.partialCurrentsHex_extAll: np.ndarray = np.array([]) self.partialCurrentsZAll: np.ndarray = np.array([]) @property def fluxMoments(self): """ For DIF3D-Nodal, this property is equivalent to the attribute `fluxMomentsAll`. For DIF3D-VARIANT, this property represents the even-parity flux moments. Read-only property (there is no setter). 
""" nMom = self.metadata["nMom"] return self.fluxMomentsAll[..., :nMom, :] @property def partialCurrentsHex(self): """ For DIF3D-Nodal, this property is almost always equivalent to the attribute ``partialCurrentsHex``. For DIF3D-VARIANT, this property returns the zeroth-order moment of the outgoing radial currents. Read-only property (there is no setter). """ return self.partialCurrentsHexAll[..., 0] @property def partialCurrentsHex_ext(self): """ For DIF3D-Nodal, this property is almost always equivalent to the attribute `partialCurrentsHex_ext`. For DIF3D-VARIANT, this property returns the zeroth-order moment of the incoming/ingoing radial currents. Read-only property (there is no setter). """ return self.partialCurrentsHex_extAll[..., 0] @property def partialCurrentsZ(self): """ For DIF3D-Nodal, this property is almost always equivalent to the attribute `partialCurrentsZ`. For DIF3D-VARIANT, this property returns the zeroth-order moment of the axial currents. Read-only property (there is no setter). """ return self.partialCurrentsZAll[..., 0] class NhfluxStream(cccc.StreamWithDataContainer): @staticmethod def _getDataContainer() -> NHFLUX: return NHFLUX() def readWrite(self): """ Read everything from the DIF3D binary file NHFLUX. Read all surface-averaged partial currents, all planar moments, and the DIF3D nodal coordinate mapping system. Notes ----- This method should be private but conflicts with ``_readWrite`` so we need a better name. Parameters ---------- numDataSetsToRead : int, optional The number of whole-core flux data sets included in this NHFLUX/NAFLUX file that one wishes to be read. Some NHFLUX/NAFLUX files, such as NAFLUX files written by SASSYS/DIF3D-K, contain more than one flux data set. Each data set overwrites the previous one on the NHFLUX class object, which will contain only the ``numDataSetsToRead-th`` data set. The first numDataSetsToRead-1 data sets are essentially skipped over. 
""" self._rwFileID() self._rwBasicFileData1D() # This control info only exists for VARIANT. We can only process entries with 0 or 1. if self._metadata["variantFlag"] and self._metadata["iwnhfl"] == 2: msg = ( "This reader can only read VARIANT NHFLUX files where 'iwnhfl'=0 (both " "fluxes and currents are present) or 'iwnhfl'=1 (only fluxes are present). " ) raise ValueError(msg) # Read the hex ordering map between DIF3D nodal and DIF3D GEODST. Also read index # pointers to incoming partial currents on outer reactor surface (these don't # belong to any assembly). Incoming partial currents are non-zero due to flux # extrapolation self._rwGeodstCoordMap2D() # Number of energy groups ng = self._metadata["ngroup"] # Number of axial nodes (same for each assembly in DIF3D) nz = self._metadata["nintk"] # Number of XY partial currents on the boundary. Note that for the same model, this # number is not the same between Nodal and VARIANT; VARIANT has more. numPartialCurrentsHex_ext = self._metadata["npcxy"] - self._metadata["nintxy"] * self._metadata["nSurf"] # Typically, flux and current data has units of n/cm^2/s. However, when reading # an NHFLUX file produced by VARPOW (where 'iwnhfl'=1), the flux-only data has units # of W/cc (there is no current data written to the file). 
if self._data.fluxMomentsAll.size == 0: # Initialize using metadata info for reading totalMoments = ( self._metadata["nMom"] if not self._metadata["variantFlag"] else (self._metadata["nMom"] + self._metadata["nMoms"]) ) self._data.fluxMomentsAll = np.zeros((self._metadata["nintxy"], nz, totalMoments, ng)) if self._metadata["iwnhfl"] != 1: self._data.partialCurrentsHexAll = np.zeros( ( self._metadata["nintxy"], nz, self._metadata["nSurf"], ng, self._metadata["nscoef"], ) ) self._data.partialCurrentsHex_extAll = np.zeros( (numPartialCurrentsHex_ext, nz, ng, self._metadata["nscoef"]) ) self._data.partialCurrentsZAll = np.zeros( (self._metadata["nintxy"], nz + 1, 2, ng, self._metadata["nscoef"]) ) for _n in range(self._metadata["numDataSetsToRead"]): # Each record contains nodal data for ONE energy group in ONE axial core slice. # Must loop through all energy groups and all axial core slices. # The axial surface partial currents are indexed by axial surface (NOT by axial node), # so there are nz+1 records for z-surface currents # Loop through all energy groups: high-to-low for forward flux, low-to-high for # adjoint flux for g in range(ng): gEff = self._getEnergyGroupIndex(g) # Loop through axial nodes for z in range(nz): # Process flux moments self._data.fluxMomentsAll[:, z, :, gEff] = self._rwFluxMoments3D( self._data.fluxMomentsAll[:, z, :, gEff] ) # Process currents if self._metadata["iwnhfl"] != 1: # Loop through axial nodes for z in range(nz): ( self._data.partialCurrentsHexAll[:, z, :, gEff, :], self._data.partialCurrentsHex_extAll[:, z, gEff, :], ) = self._rwHexPartialCurrents4D( self._data.partialCurrentsHexAll[:, z, :, gEff, :], self._data.partialCurrentsHex_extAll[:, z, gEff, :], ) # Loop through axial surfaces (NOT axial nodes, because there is a "+1") for z in range(nz + 1): self._data.partialCurrentsZAll[:, z, :, gEff, :] = self._rwZPartialCurrents5D( self._data.partialCurrentsZAll[:, z, :, gEff, :] ) def _getNumOuterSurfacesHex(self): """ The word 
"outer" in the method name means along the outside of the core. Thus, this is the number of lateral hex surfaces on the outer core boundary (usually vacuum...internal reflective boundaries do NOT count). """ # Both Nodal and VARIANT files should return the same number, but they are calculated # differently between the two codes if self._metadata["variantFlag"]: numOuterSurfacesHex = self._metadata["npcbdy"] else: # Nodal does not have an "npcbdy" metadata parameter, so numOuterSurfacesHex # must be calculated differently. Performing the same calculation below in VARIANT, # which is possible to do, can return a different number, so that is why # we cannot use the same calculation for both codes. numOuterSurfacesHex = self._metadata["npcxy"] - self._metadata["nintxy"] * self._metadata["nSurf"] return numOuterSurfacesHex def _rwFileID(self): """ Read/write file id record. Notes ----- The username, version, etc are embedded in this string but it's usually blank. """ with self.createRecord() as record: self._metadata["label"] = record.rwString(self._metadata["label"], 28) def _rwBasicFileData1D(self): """Read basic data parameters (number of energy groups, assemblies, axial nodes, etc.).""" # Dummy values are stored because sometimes they get assigned # unexpected values anyway, and so we still want to preserve those values anyway if self._metadata["variantFlag"]: keys = FILE_SPEC_1D_KEYS + FILE_SPEC_1D_KEYS_VARIANT11 + tuple(f"IDUM{e:>02}" for e in range(1, 7)) else: keys = FILE_SPEC_1D_KEYS + tuple(tuple(f"IDUM{e:>02}" for e in range(1, 12))) with self.createRecord() as record: self._metadata.update(record.rwImplicitlyTypedMap(keys, self._metadata)) def _rwGeodstCoordMap2D(self): """ Read/write core geometry indexing from the NHFLUX 2D block. This reads the 2-D (x,y) indexing for assemblies. geodstCoordMap maps DIF3D nodal hex indexing to DIF3D GEODST indexing. This DIF3D GEODST indexing is different than (but similar to) the MCNP GEODST ordering. 
For Nodal, let N be the number of assemblies. Let M be the number of "external hex surfaces" exposed to the outer reactor boundary (usually vacuum). M does NOT include reflective surfaces! N = self._metadata['nintxy'] M = self._metadata['npcxy'] - self._metadata['nintxy']*6 N*6 + M = self._metadata['npcxy'] For VARIANT in hexagonal geometry, there are two additional datasets for outgoing and ingoing partial currents on the symmetric and sector xy-plane boundary. Examples -------- geodstCoordMap[NodalIndex] = geodstIndex See Also -------- nuclearDataIO.NHFLUX.__init__ nuclearDataIO.NHFLUX._rwHexPartialCurrents4D nuclearDataIO.ISOTXS.read2D nuclearDataIO.SPECTR.read2D """ with self.createRecord() as record: # Number of unique assemblies - this is N in the comments above nAssem = self._metadata["nintxy"] # Number of lateral surfaces per assembly (this is 6 for hexagonal cores) nSurf = self._metadata["nSurf"] numExternalSurfaces = self._getNumOuterSurfacesHex() # Initialize np arrays to store all node ordering (and node surface ordering) # data. We don't actually use incomingPointersToAllAssemblies (basically # equivalent to nearest neighbors indices), but it's here in case someone # needs it in the future. 
# Initialize data size when reading if self._data.incomingPointersToAllAssemblies.size == 0: # Index pointers to INCOMING partial currents on assemblies self._data.incomingPointersToAllAssemblies = np.zeros((nSurf, nAssem), dtype=int) # Index pointers to OUTGOING partial currents on core outer boundary self._data.externalCurrentPointers = np.zeros((numExternalSurfaces), dtype=int) # Index pointers to DIF3D GEODST ordering of each assembly self._data.geodstCoordMap = np.zeros(nAssem, dtype=int) self._data.incomingPointersToAllAssemblies = record.rwIntMatrix( self._data.incomingPointersToAllAssemblies, nAssem, nSurf ) self._data.externalCurrentPointers = record.rwList( self._data.externalCurrentPointers, "int", numExternalSurfaces ) self._data.geodstCoordMap = record.rwList(self._data.geodstCoordMap, "int", nAssem) # There is additional data to process for VARIANT if self._metadata["variantFlag"]: # Number of symmetry and sector surface pointers npcsto = self._metadata["npcsym"] + self._metadata["npcsec"] if self._data.outgoingPCSymSecPointers.size == 0: self._data.outgoingPCSymSecPointers = np.zeros(npcsto, dtype=int) self._data.ingoingPCSymSecPointers = np.zeros(npcsto, dtype=int) self._data.outgoingPCSymSecPointers = record.rwList(self._data.outgoingPCSymSecPointers, "int", npcsto) self._data.ingoingPCSymSecPointers = record.rwList(self._data.ingoingPCSymSecPointers, "int", npcsto) def _rwFluxMoments3D(self, contents): r""" Read/write multigroup flux moments from the NHFLUX 3D block. This reads/writes the planar moments for each DIF3D node on ONE x,y plane. The planar moments for DIF3D nodes on different x,y planes (different axial slices) are in a different 3D record, so this method must be repeatedly executed in order to process them all. Format is ``((FLUX(I,J),I=1,NMOM),J=1,NINTXY)`` so we must pass in ``NINTXY`` as the first item in the shape. 
However, the caller of this method wants the shape to be (nintxy, nMom) so we actually have to transpose it on the way in/out. nMom can also be nMoms when reading/writing for VARIANT. """ nMom = self._metadata["nMom"] with self.createRecord() as record: result = record.rwDoubleMatrix( contents[:, :nMom].T, self._metadata["nintxy"], nMom, ) contents[:, :nMom] = result.T # If we have VARIANT data, then we also need to process the odd-parity moments. if self._metadata["variantFlag"] and self._metadata["nMoms"] > 0: result = record.rwDoubleMatrix( contents[:, nMom:].T, self._metadata["nintxy"], self._metadata["nMoms"], ) contents[:, nMom:] = result.T return contents def _rwHexPartialCurrents4D(self, surfCurrents, externalSurfCurrents): r""" Read/write multigroup lateral partial OUTGOING currents from the NHFLUX 4D block. This reads all OUTGOING partial currents for all assembly block lateral surfaces at a fixed axial position. For a hexagonal core, there are 6 surfaces per assembly axial block. The data for the 2 axial surfaces of each block are in the 5D records. Each 4D record contains all the surface partial currents on ONE x,y plane. The surface data on different x,y planes (different axial slices) are in a different 4D record, so this method must be repeatedly executed in order to process them all. If the reactor contains N assemblies and M exterior surfaces (surfaces adjacent to vacuum boundary), this record will contain N*6 + M partial currents. The N*6 assembly OUTGOING partial currents are listed first, followed by the M INCOMING partial currents from the outer reactor edge. N = self._metadata['nintxy'] M = self._metadata['npcxy'] - self._metadata['nintxy']*6 N*6 + M = self._metadata['npcxy'] Notes ----- These data are harder to read with rwMatrix, though it could be done if we discarded the unwanted data at another level if that is much faster. 
""" with self.createRecord() as record: nAssem = self._metadata["nintxy"] nSurf = self._metadata["nSurf"] # This is equal to one for Nodal diffusion theory, but greater than one for # VARIANT. nscoef = self._metadata["nscoef"] numPartialCurrentsHex_ext = self._metadata["npcxy"] - self._metadata["nintxy"] * self._metadata["nSurf"] # Loop through all lateral surfaces of all assemblies for i in range(nAssem): for j in range(nSurf): for m in range(nscoef): # OUTGOING partial currents on each lateral surface in each assembly. # If m > 0, other NSCOEF options (i.e., half-angle integrated # flux when reading DIF3D-Nodal data, and higher current moments # when reading DIF3D-VARIANT data) are processed. surfCurrents[i, j, m] = record.rwDouble(surfCurrents[i, j, m]) for j in range(numPartialCurrentsHex_ext): for m in range(nscoef): # INCOMING current at each surface of outer core boundary. If m > 0, # other NSCOEF options (i.e., half-angle integrated flux when # reading DIF3D-Nodal data, and higher current moments when reading # DIF3D-VARIANT data) are processed. externalSurfCurrents[j, m] = record.rwDouble(externalSurfCurrents[j, m]) return surfCurrents, externalSurfCurrents def _rwZPartialCurrents5D(self, surfCurrents): """ Read/write multigroup axial partial currents from the NHFLUX 5D block. Most other NHFLUX data is indexed by DIF3D node (each axial core slice in its own record). HOWEVER, "top" and "bottom" surfaces of each DIF3D node are instead indexed by axial surface. If there are Z axial nodes, then there are Z+1 axial surfaces. Thus, there are Z+1 5D records, while there are only Z 3D and Z 4D records. Each 5D record (each axial surface) contains two partial currents for each assembly position. The first is the UPWARD partial current, while the second is the DOWNWARD partial current. Returns ------- surfCurrents : 3-D list of floats This contains all the upward and downward partial currents in all assemblies on ONE whole-core axial slice. 
The assemblies are ordered according to self.geodstCoordMap. See Also -------- nuclearDataIO.NHFLUX._rwBasicFileData1D nuclearDataIO.NHFLUX._rwGeodstCoordMap2D """ with self.createRecord() as record: nAssem = self._metadata["nintxy"] nSurf = 2 nscoef = self._metadata["nscoef"] # Loop through all (up and down) partial currents on all hexes # These loops are in a different order than in the 4D record above!!! # Here we loop through surface FIRST and assemblies SECOND!!! for j in range(nSurf): for i in range(nAssem): for m in range(nscoef): # Outward partial current. For m > 0, other NSCOEF options # (i.e., half-angle integrated flux when reading DIF3D-Nodal # data, and higher current moments when reading DIF3D-VARIANT # data) are processed. surfCurrents[i, j, m] = record.rwDouble(surfCurrents[i, j, m]) return surfCurrents def _getEnergyGroupIndex(self, g): """ Real fluxes stored in NHFLUX have "normal" (or "forward") energy groups. Also see the subclass method NAFLUX.getEnergyGroupIndex(). """ return g class NafluxStream(NhfluxStream): """ NAFLUX is similar in format to the NHFLUX, but contains adjoint flux. It has reversed energy group ordering. """ def _getEnergyGroupIndex(self, g): """Adjoint fluxes stored in NAFLUX have "reversed" (or "backward") energy groups.""" ng = self._metadata["ngroup"] return ng - g - 1 class NhfluxStreamVariant(NhfluxStream): """ Stream for VARIANT version of NHFLUX. Notes ----- Can be deleted after have the NHFLUX data container be the public interface. """ @staticmethod def _getDataContainer() -> NHFLUX: return NHFLUX(variant=True) class NafluxStreamVariant(NafluxStream): """ Stream for VARIANT version of NAFLUX. Notes ----- Can be deleted after have the NHFLUX data container be the public interface. 
""" @staticmethod def _getDataContainer() -> NHFLUX: return NHFLUX(variant=True) def getNhfluxReader(adjointFlag, variantFlag): """ Returns the appropriate DIF3D nodal flux binary file reader class, either NHFLUX (real) or NAFLUX (adjoint). """ if adjointFlag: reader = NafluxStreamVariant if variantFlag else NafluxStream else: reader = NhfluxStreamVariant if variantFlag else NhfluxStream return reader ================================================ FILE: armi/nuclearDataIO/cccc/pmatrx.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module for reading PMATRX files which contain gamma productions from fission reactions. See [GAMSOR]_ and [MC23]_. .. [MC23] Lee, Changho, Jung, Yeon Sang, and Yang, Won Sik. MC2-3: Multigroup Cross Section Generation Code for Fast Reactor Analysis Nuclear. United States: N. p., 2018. Web. doi:10.2172/1483949. (`OSTI <https://www.osti.gov/biblio/1483949-mc2-multigroup-cross-section-generation-code-fast-reactor-analysis-nuclear>`__) """ import traceback from armi import runLog from armi.nuclearDataIO import cccc, xsLibraries, xsNuclides from armi.utils import properties def compare(lib1, lib2): """Compare two XSLibraries, and return True if equal, or False if not.""" equal = True # first check the lib properties (also need to unlock to prevent from getting an exception). 
equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, "neutronEnergyUpperBounds") equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, "gammaEnergyUpperBounds") equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, "neutronDoseConversionFactors") equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, "gammaDoseConversionFactors") # compare the meta data equal &= lib1.pmatrxMetadata.compare(lib2.pmatrxMetadata, lib1, lib2) # check the nuclides for nucName in set(lib1.nuclideLabels + lib2.nuclideLabels): nuc1 = lib1.get(nucName, None) nuc2 = lib2.get(nucName, None) if nuc1 is None or nuc2 is None: continue equal &= compareNuclideXS(nuc1, nuc2) return equal def compareNuclideXS(nuc1, nuc2): equal = nuc1.pmatrxMetadata.compare(nuc2.pmatrxMetadata, nuc1.container, nuc2.container) for attrName in [ "neutronHeating", "neutronDamage", "gammaHeating", "isotropicProduction", "linearAnisotropicProduction", "nOrderProductionMatrix", ]: val1 = getattr(nuc1, attrName) val2 = getattr(nuc2, attrName) if not properties.numpyHackForEqual(val1, val2): runLog.important( "{} and {} have different `{}` attributes:\n{}\n{}".format(nuc1, nuc2, attrName, val1, val2) ) equal &= False return equal def addDummyNuclidesToLibrary(lib, dummyNuclides): """ This method adds DUMMY nuclides to the current PMATRX library. Parameters ---------- lib : obj PMATRX library object dummyNuclides: list List of DUMMY nuclide objects that will be copied and added to the PMATRX file Notes ----- Since MC2-3 does not write DUMMY nuclide information for PMATRX files, this is necessary to provide a consistent set of nuclide-level data across all the nuclides in a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`. 
""" if not dummyNuclides: runLog.important("No dummy nuclide data provided to be added to {}".format(lib)) return False if len(lib.xsIDs) > 1: runLog.warning( "Cannot add dummy nuclide data to PMATRX library {} containing data for more than 1 XS ID.".format(lib) ) return False dummyNuclideKeysAddedToLibrary = [] for dummy in dummyNuclides: dummyKey = dummy.nucLabel + lib.xsIDs[0] if dummyKey in lib: continue runLog.debug("Adding {} nuclide data to {}".format(dummyKey, lib)) newDummy = xsNuclides.XSNuclide(lib, dummyKey) newDummy.pmatrxMetadata["hasNeutronHeatingAndDamage"] = False newDummy.pmatrxMetadata["maxScatteringOrder"] = 0 newDummy.pmatrxMetadata["hasGammaHeating"] = False newDummy.pmatrxMetadata["numberNeutronXS"] = 0 newDummy.pmatrxMetadata["collapsingRegionNumber"] = 0 lib[dummyKey] = newDummy dummyNuclideKeysAddedToLibrary.append(dummyKey) return any(dummyNuclideKeysAddedToLibrary) def readBinary(fileName): """Read a binary PMATRX file into an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object.""" return _read(fileName, "rb") def readAscii(fileName): """Read an ASCII PMATRX file into an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object.""" return _read(fileName, "r") def _read(fileName, fileMode): lib = xsLibraries.IsotxsLibrary() return _readWrite( lib, fileName, fileMode, lambda containerKey: xsNuclides.XSNuclide(lib, containerKey), ) def writeBinary(lib, fileName): """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object to a binary file. """ return _write(lib, fileName, "wb") def writeAscii(lib, fileName): """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object to an ASCII file. 
""" return _write(lib, fileName, "w") def _write(lib, fileName, fileMode): return _readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey]) def _readWrite(lib, fileName, fileMode, getNuclideFunc): with PmatrxIO(fileName, lib, fileMode, getNuclideFunc) as rw: rw.readWrite() return lib class PmatrxIO(cccc.Stream): def __init__(self, fileName, xsLib, fileMode, getNuclideFunc): cccc.Stream.__init__(self, fileName, fileMode) self._lib = xsLib self._metadata = xsLib.pmatrxMetadata self._metadata.fileNames.append(fileName) self._getNuclide = getNuclideFunc self._dummyNuclideKeysAddedToLibrary = [] def _rwMessage(self): runLog.debug("{} PMATRX data {}".format("Reading" if "r" in self._fileMode else "Writing", self)) def readWrite(self): """Read and write PMATRX files. .. impl:: Tool to read and write PMATRX files. :id: I_ARMI_NUCDATA_PMATRX :implements: R_ARMI_NUCDATA_PMATRX Reading and writing PMATRX files is performed using the general nuclear data I/O functionalities described in :need:`I_ARMI_NUCDATA`. Reading/writing a PMATRX file is performed through the following steps: #. Read/write global information including: * Number of gamma energy groups * Number of neutron energy groups * Maximum scattering order * Maximum number of compositions * Maximum number of materials * Maximum number of regions #. Read/write energy group structure for neutrons and gammas #. Read/write dose conversion factors #. Read/write gamma production matrices for each nuclide, as well as other reaction constants related to neutron-gamma production. 
""" self._rwMessage() properties.unlockImmutableProperties(self._lib) try: numNucs = self._rwFileID() self._rwGroupStructure() self._rwDoseConversionFactor() self._rwIsotopes(numNucs) except Exception: runLog.error(traceback.format_exc()) raise OSError("Failed to read/write {}".format(self)) finally: properties.lockImmutableProperties(self._lib) def _rwFileID(self): with self.createRecord() as record: for name in [ "numberCollapsingSpatialRegions", "numGammaGroups", "numNeutronGroups", ]: self._metadata[name] = record.rwInt(self._metadata[name]) self._metadata["hasInPlateData"] = record.rwBool(self._metadata["hasInPlateData"]) numNucs = record.rwInt(len(self._lib)) self._metadata["hasDoseConversionFactor"] = record.rwBool(self._metadata["hasDoseConversionFactor"]) for name in [ "maxScatteringOrder", "maxNumberOfCompositions", "maxMaterials", "maxNumberOfRegions", "maxNumberOfCollapsingRegions", "_dummy1", "_dummy2", ]: self._metadata[name] = record.rwInt(self._metadata[name]) return numNucs def _rwGroupStructure(self): with self.createRecord() as record: self._lib.neutronEnergyUpperBounds = record.rwMatrix( self._lib.neutronEnergyUpperBounds, self._metadata["numNeutronGroups"] ) self._metadata["minimumNeutronEnergy"] = record.rwFloat(self._metadata["minimumNeutronEnergy"]) # The lower bound energy is included in this list. We'll drop it to maintain consistency with other # libs by holding only the upper bounds. 
self._lib.gammaEnergyUpperBounds = record.rwMatrix( self._lib.gammaEnergyUpperBounds, self._metadata["numGammaGroups"] ) self._metadata["minimumGammaEnergy"] = record.rwFloat(self._metadata["minimumGammaEnergy"]) def _rwDoseConversionFactor(self): if self._metadata["hasDoseConversionFactor"]: with self.createRecord() as record: self._lib.neutronDoseConversionFactors = record.rwList( self._lib.neutronDoseConversionFactors, "float", self._metadata["numNeutronGroups"], ) self._lib.gammaDoseConversionFactors = record.rwList( self._lib.gammaDoseConversionFactors, "float", self._metadata["numGammaGroups"], ) def _rwIsotopes(self, numNucs): with self.createRecord() as record: nuclideLabels = record.rwList(self._lib.nuclideLabels, "string", numNucs, 8) record.rwList([1000] * numNucs, "int", numNucs) numNeutronGroups = self._metadata["numNeutronGroups"] numGammaGroups = self._metadata["numGammaGroups"] for nucLabel in nuclideLabels: nuclide = self._getNuclide(nucLabel) nuclide.updateBaseNuclide() nuclideReader = _PmatrxNuclideIO(nuclide, self, numNeutronGroups, numGammaGroups) nuclideReader.rwNuclide() if "r" in self._fileMode: # on add nuclides when reading self._lib[nucLabel] = nuclide def _rwCompositions(self): if self._metadata["hasInPlateData"]: raise NotImplementedError() class _PmatrxNuclideIO: def __init__(self, nuclide, pmatrixIO, numNeutronGroups, numGammaGroups): self._nuclide = nuclide self._metadata = nuclide.pmatrxMetadata self._pmatrixIO = pmatrixIO self._numNeutronGroups = numNeutronGroups self._numGammaGroups = numGammaGroups def rwNuclide(self): self._rwNuclideHeading() self._rwNeutronHeatingAndDamage() self._rwReactionXS() self._rwGammaHeating() self._rwCellAveragedProductionMatrix() def _rwNuclideHeading(self): with self._pmatrixIO.createRecord() as record: self._metadata["hasNeutronHeatingAndDamage"] = record.rwBool(self._metadata["hasNeutronHeatingAndDamage"]) self._metadata["maxScatteringOrder"] = record.rwInt(self._metadata["maxScatteringOrder"]) 
self._metadata["hasGammaHeating"] = record.rwBool(self._metadata["hasGammaHeating"]) self._metadata["numberNeutronXS"] = record.rwInt(self._metadata["numberNeutronXS"]) self._metadata["collapsingRegionNumber"] = record.rwInt(self._metadata["collapsingRegionNumber"]) def _rwNeutronHeatingAndDamage(self): if not self._metadata["hasNeutronHeatingAndDamage"]: return with self._pmatrixIO.createRecord() as record: self._nuclide.neutronHeating = record.rwMatrix(self._nuclide.neutronHeating, self._numNeutronGroups) self._nuclide.neutronDamage = record.rwMatrix(self._nuclide.neutronDamage, self._numNeutronGroups) def _rwReactionXS(self): numActivationXS = self._metadata["numberNeutronXS"] pmatrixParams = self._metadata activationXS = self._metadata["activationXS"] = self._metadata["activationXS"] or [None] * numActivationXS activationMT = self._metadata["activationMT"] = self._metadata["activationMT"] or [None] * numActivationXS activationMTU = self._metadata["activationMTU"] = self._metadata["activationMTU"] or [None] * numActivationXS for xsNum in range(numActivationXS): with self._pmatrixIO.createRecord() as record: pmatrixParams["activationXS"][xsNum] = record.rwList(activationXS[xsNum], self._numNeutronGroups) pmatrixParams["activationMT"][xsNum] = record.rwInt(activationMT[xsNum]) pmatrixParams["activationMTU"][xsNum] = record.rwInt(activationMTU[xsNum]) def _rwGammaHeating(self): if not self._metadata["hasGammaHeating"]: return with self._pmatrixIO.createRecord() as record: self._nuclide.gammaHeating = record.rwMatrix(self._nuclide.gammaHeating, self._numGammaGroups) def _rwCellAveragedProductionMatrix(self): for lrd in range(1, 1 + self._metadata["maxScatteringOrder"]): with self._pmatrixIO.createRecord() as record: prodMatrix = self._getProductionMatrix(lrd) prodMatrix = record.rwMatrix(prodMatrix, self._numNeutronGroups, self._numGammaGroups) self._setProductionMatrix(lrd, prodMatrix) def _getProductionMatrix(self, order): if order == 1: return 
self._nuclide.isotropicProduction elif order == 2: return self._nuclide.linearAnisotropicProduction else: return self._nuclide.nOrderProductionMatrix[order] def _setProductionMatrix(self, order, matrix): if order == 1: self._nuclide.isotropicProduction = matrix elif order == 2: self._nuclide.linearAnisotropicProduction = matrix else: self._nuclide.nOrderProductionMatrix[order] = matrix ================================================ FILE: armi/nuclearDataIO/cccc/pwdint.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Read/write a CCCC PWDINT power density definition file. PWDINT files power density at mesh intervals. File format definition is from [CCCC-IV]_. Examples -------- >>> pwr = pwdint.readBinary("PWDINT") >>> pwdint.writeBinary(pwr, "PWDINT2") """ import numpy as np from armi.nuclearDataIO import cccc PWDINT = "PWDINT" # See CCCC-IV documentation for definitions FILE_SPEC_1D_KEYS = ( "TIME", "POWER", "VOL", "NINTI", "NINTJ", "NINTK", "NCY", "NBLOK", ) class PwdintData(cccc.DataContainer): """ Data representation that can be read from or written to a PWDINT file. This contains a mapping from the i,j,k GEODST mesh to power density in Watts/cm^3. """ def __init__(self): cccc.DataContainer.__init__(self) self.powerDensity = np.array([]) class PwdintStream(cccc.StreamWithDataContainer): """ Stream for reading to/writing from with PWDINT data. 
Parameters ---------- power : PwdintData Data structure fileName: str path to pwdint file fileMode: str string indicating if ``fileName`` is being read or written, and in ascii or binary format """ @staticmethod def _getDataContainer() -> PwdintData: return PwdintData() def readWrite(self): """ Step through the structure of a PWDINT file and read/write it. Logic to control which records will be present is here, which comes directly off the File specification. """ self._rwFileID() self._rw1DRecord() self._rw2DRecord() def _rwFileID(self): with self.createRecord() as record: self._metadata["hname"] = record.rwString(self._metadata["hname"], 8) for name in ["huse", "huse2"]: self._metadata[name] = record.rwString(self._metadata[name], 6) self._metadata["version"] = record.rwInt(self._metadata["version"]) self._metadata["mult"] = record.rwInt(self._metadata["mult"]) def _rw1DRecord(self): """Read/write File specifications on 1D record.""" with self.createRecord() as record: self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata)) def _rw2DRecord(self): """Read/write power density by mesh point.""" imax = self._metadata["NINTI"] jmax = self._metadata["NINTJ"] kmax = self._metadata["NINTK"] nblck = self._metadata["NBLOK"] if self._data.powerDensity.size == 0: # initialize all-zeros here before reading now that we # have the matrix dimension metadata available. 
self._data.powerDensity = np.zeros( (imax, jmax, kmax), dtype=np.float32, ) for ki in range(kmax): for bi in range(nblck): jL, jU = cccc.getBlockBandwidth(bi + 1, jmax, nblck) with self.createRecord() as record: self._data.powerDensity[:, jL : jU + 1, ki] = record.rwMatrix( self._data.powerDensity[:, jL : jU + 1, ki], jU - jL + 1, imax, ) readBinary = PwdintStream.readBinary readAscii = PwdintStream.readAscii writeBinary = PwdintStream.writeBinary writeAscii = PwdintStream.writeAscii ================================================ FILE: armi/nuclearDataIO/cccc/rtflux.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Read and write the Regular Total flux from a RTFLUX CCCC interface file. RTFLUX is a CCCC standard data file for storing multigroup total flux on a mesh of any geometry type. It is defined in [CCCC-IV]_. ATFLUX is in the same format but holds adjoint flux rather than regular flux. 
Examples -------- >>> flux = rtflux.RtfluxStream.readBinary("RTFLUX") >>> rtflux.RtfluxStream.writeBinary(flux, "RTFLUX2") >>> adjointFlux = rtflux.AtfluxStream.readBinary("ATFLUX") See Also -------- NHFLUX Reads/write nodal hex flux moments RZFLUX Reads/writes total fluxes from zones """ import numpy as np from armi.nuclearDataIO import cccc RTFLUX = "RTFLUX" ATFLUX = "ATFLUX" # See CCCC-IV documentation for definitions FILE_SPEC_1D_KEYS = ( "NDIM", "NGROUP", "NINTI", "NINTJ", "NINTK", "ITER", "EFFK", "POWER", "NBLOK", ) class RtfluxData(cccc.DataContainer): """ Multigroup flux as a function of i,j,k and g indices. The metadata also contains the power and k-eff. This is the data structure that is read from or written to a RTFLUX file. """ def __init__(self): cccc.DataContainer.__init__(self) self.groupFluxes: np.ndarray = np.array([]) """Maps i,j,k,g indices to total real or adjoint flux in n/cm^2-s""" class RtfluxStream(cccc.StreamWithDataContainer): """ Stream for reading/writing a RTFLUX or ATFLUX file. Parameters ---------- flux : RtfluxData Data structure fileName: str path to RTFLUX file fileMode: str string indicating if ``fileName`` is being read or written, and in ascii or binary format """ @staticmethod def _getDataContainer() -> RtfluxData: return RtfluxData() def readWrite(self): """Step through the structure of the file and read/write it.""" self._rwFileID() self._rw1DRecord() if self._metadata["NDIM"] == 1: self._rw2DRecord() elif self._metadata["NDIM"] >= 2: self._rw3DRecord() else: raise ValueError(f"Invalid NDIM value {self._metadata['NDIM']} in {self}.") def _rwFileID(self): """ Read/write file id record. Notes ----- The username, version, etc are embedded in this string but it's usually blank. 
""" with self.createRecord() as record: self._metadata["label"] = record.rwString(self._metadata["label"], 28) def _rw1DRecord(self): """Read/write File specifications on 1D record.""" with self.createRecord() as record: self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata)) def _rw2DRecord(self): """Read/write 1-dimensional regular total flux.""" raise NotImplementedError("1-D RTFLUX files are not yet implemented.") def _rw3DRecord(self): """ Read/write multi-dimensional regular total flux. The records contain blocks of values in the i-j planes. """ ng = self._metadata["NGROUP"] imax = self._metadata["NINTI"] jmax = self._metadata["NINTJ"] kmax = self._metadata["NINTK"] nblck = self._metadata["NBLOK"] if self._data.groupFluxes.size == 0: self._data.groupFluxes = np.zeros((imax, jmax, kmax, ng)) for gi in range(ng): gEff = self.getEnergyGroupIndex(gi) for k in range(kmax): # data in i-j plane may be blocked for bi in range(nblck): # compute blocking parameters jLow, jUp = cccc.getBlockBandwidth(bi + 1, jmax, nblck) numZonesInBlock = jUp - jLow + 1 with self.createRecord() as record: # pass in shape in fortran (read) order self._data.groupFluxes[:, jLow : jUp + 1, k, gEff] = record.rwDoubleMatrix( self._data.groupFluxes[:, jLow : jUp + 1, k, gEff], numZonesInBlock, imax, ) def getEnergyGroupIndex(self, g): r""" Real fluxes stored in RTFLUX have "normal" (or "forward") energy groups. Also see the subclass method ATFLUX.getEnergyGroupIndex(). 0 based, so if NG=33 and you want the third group, this return 2. """ return g class AtfluxStream(RtfluxStream): r""" This is a subclass for the ATFLUX file, which is identical in format to the RTFLUX file except that it contains the adjoint flux and has reversed energy group ordering. """ def getEnergyGroupIndex(self, g): r""" Adjoint fluxes stored in ATFLUX have "reversed" (or "backward") energy groups. 0 based, so if NG=33 and you want the third group (g=2), this returns 30. 
""" ng = self._metadata["NGROUP"] return ng - g - 1 def getFDFluxReader(adjointFlag): r""" Returns the appropriate DIF3D FD flux binary file reader class, either RTFLUX (real) or ATFLUX (adjoint). """ if adjointFlag: return AtfluxStream else: return RtfluxStream ================================================ FILE: armi/nuclearDataIO/cccc/rzflux.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module for reading/writing RZFLUX CCCC interface files. RZFLUX contains Regular Zone Flux, or multigroup flux by neutron energy group in each zone. It also can hold some convergence and neutron balance information. The format is defined in [CCCC-IV]_. Examples -------- >>> flux = rzflux.readBinary("RZFLUX") >>> flux.groupFluxes[2, 0] *= 1.1 >>> rzflux.writeBinary(flux, "RZFLUX2") >>> rzflux.writeAscii(flux, "RZFLUX2.ascii") """ from enum import Enum import numpy as np from armi.nuclearDataIO import cccc RZFLUX = "RZFLUX" # See CCCC-IV documentation for definitions FILE_SPEC_1D_KEYS = ( "TIME", "POWER", "VOL", "EFFK", "EIVS", "DKDS", "TNL", "TNA", "TNSL", "TNBL", "TNBAL", "TNCRA", "X1", "X2", "X3", "NBLOK", "ITPS", "NZONE", "NGROUP", "NCY", ) class Convergence(Enum): """Convergence behavior flags for ITPS from RZFLUX file.""" NO_ITERATIONS = 0 CONVERGED = 1 CONVERGING = 2 DIVERGING = 3 class RzfluxData(cccc.DataContainer): """ Data representation that can be read from or written to a RZFLUX file. 
Notes ----- Analogous to a IsotxsLibrary for ISTOXS files. """ def __init__(self): cccc.DataContainer.__init__(self) # 2D data self.groupFluxes = None class RzfluxStream(cccc.StreamWithDataContainer): """ Stream for reading to/writing from with RZFLUX data. Parameters ---------- flux : RzfluxData Data structure fileName: str path to RZFLUX file fileMode: str string indicating if ``fileName`` is being read or written, and in ascii or binary format """ @staticmethod def _getDataContainer() -> RzfluxData: return RzfluxData() def readWrite(self): """Step through the structure of the file and read/write it.""" self._rwFileID() self._rw1DRecord() self._rw2DRecord() def _rwFileID(self): """ Read/write file id record. Notes ----- The username, version, etc are embedded in this string but it's usually blank. The number 28 was actually obtained from a hex editor and may be code specific. """ with self.createRecord() as record: self._metadata["label"] = record.rwString(self._metadata["label"], 28) def _rw1DRecord(self): """Read/write File specifications on 1D record.""" with self.createRecord() as record: vals = record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata) self._metadata.update(vals) def _rw2DRecord(self): """ Read/write the multigroup fluxes (n/cm^2-s) into a NxG matrix. Notes ----- Zones are blocked into multiple records so we have to block or unblock them. rwMatrix reverses the indices into FORTRAN data order so be very careful with the indices. """ nz = self._metadata["NZONE"] ng = self._metadata["NGROUP"] nb = self._metadata["NBLOK"] if self._data.groupFluxes is None: # initialize all-zeros here before reading now that we # have the matrix dimension metadata available. 
self._data.groupFluxes = np.zeros( (ng, nz), dtype=np.float32, ) for bi in range(nb): jLow, jUp = cccc.getBlockBandwidth(bi + 1, nz, nb) numZonesInBlock = jUp - jLow + 1 with self.createRecord() as record: # pass in shape in fortran (read) order self._data.groupFluxes[:, jLow : jUp + 1] = record.rwMatrix( self._data.groupFluxes[:, jLow : jUp + 1], numZonesInBlock, ng, ) readBinary = RzfluxStream.readBinary readAscii = RzfluxStream.readAscii writeBinary = RzfluxStream.writeBinary writeAscii = RzfluxStream.writeAscii ================================================ FILE: armi/nuclearDataIO/cccc/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_cccc.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test CCCC.""" import io import unittest from armi.nuclearDataIO import cccc class CcccIOStreamTests(unittest.TestCase): def test_initWithFileMode(self): self.assertIsInstance(cccc.Stream("some-file", "rb"), cccc.Stream) self.assertIsInstance(cccc.Stream("some-file", "wb"), cccc.Stream) self.assertIsInstance(cccc.Stream("some-file", "r"), cccc.Stream) self.assertIsInstance(cccc.Stream("some-file", "w"), cccc.Stream) with self.assertRaises(KeyError): cccc.Stream("some-file", "bacon") class CcccBinaryRecordTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.writerClass = cccc.BinaryRecordWriter cls.readerClass = cccc.BinaryRecordReader def setUp(self): self.streamCls = io.BytesIO def test_writeAndReadSimpleIntegerRecord(self): value = 42 stream = self.streamCls() with self.writerClass(stream) as writer: writer.rwInt(value) with self.readerClass(self.streamCls(stream.getvalue())) as reader: self.assertEqual(writer.numBytes, reader.numBytes) self.assertEqual(value, reader.rwInt(None)) self.assertEqual(4, writer.numBytes) def test_writeAndReadSimpleFloatRecord(self): stream = self.streamCls() value = -33.322222 with self.writerClass(stream) as writer: writer.rwFloat(value) with self.readerClass(self.streamCls(stream.getvalue())) as reader: self.assertEqual(writer.numBytes, reader.numBytes) self.assertAlmostEqual(value, reader.rwFloat(None), 5) self.assertEqual(4, writer.numBytes) def test_writeAndReadSimpleStringRecord(self): stream = self.streamCls() value = "Howdy, partner!" size = 8 * 8 with self.writerClass(stream) as writer: writer.rwString(value, size) with self.readerClass(self.streamCls(stream.getvalue())) as reader: self.assertEqual(writer.numBytes, reader.numBytes) self.assertEqual(value, reader.rwString(None, size)) self.assertEqual(size, writer.numBytes) def test_readPartialRecord(self): """Not reading an entire record raises an exception.""" # I'm going to create a record with two pieces of data, and only read one... 
stream = self.streamCls() value = 99 with self.writerClass(stream) as writer: writer.rwInt(value) writer.rwInt(value) self.assertEqual(8, writer.numBytes) with self.assertRaises(BufferError): with self.readerClass(self.streamCls(stream.getvalue())) as reader: self.assertEqual(value, reader.rwInt(None)) def test_readingBeyondRecordRaisesException(self): # I'm going to create a record with two pieces of data, and only read one... stream = self.streamCls() value = 77 with self.writerClass(stream) as writer: writer.rwInt(value) self.assertEqual(4, writer.numBytes) with self.assertRaises(BufferError): with self.readerClass(self.streamCls(stream.getvalue())) as reader: self.assertEqual(value, reader.rwInt(None)) self.assertEqual(4, reader.rwInt(None)) class CcccAsciiRecordTests(CcccBinaryRecordTests): """Runs the same tests as CcccBinaryRecordTests, but using ASCII readers and writers.""" @classmethod def setUpClass(cls): cls.writerClass = cccc.AsciiRecordWriter cls.readerClass = cccc.AsciiRecordReader def setUp(self): self.streamCls = io.StringIO ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_compxs.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test the COMPXS reader/writer with a simple problem.""" import os import unittest import numpy as np from scipy.sparse import csc_matrix from armi import nuclearDataIO from armi.nuclearDataIO.cccc import compxs from armi.nuclearDataIO.xsLibraries import CompxsLibrary from armi.tests import COMPXS_PATH from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestCompxs(unittest.TestCase): """Test the compxs reader/writer.""" @property def binaryWritePath(self): return os.path.join(self._testMethodName + "compxs-b") @property def asciiWritePath(self): return os.path.join(self._testMethodName + "compxs-a.txt") @classmethod def setUpClass(cls): try: cls.lib = compxs.readAscii(COMPXS_PATH) except Exception as ee: raise Exception("Failed to load COMPXS ascii.\n{}".format(ee)) cls.fissileRegion = cls.lib.regions[1] cls.numGroups = cls.lib.compxsMetadata["numGroups"] def test_libraryData(self): """Test library data including energy group information and number of compositions.""" self.assertEqual(11, self.numGroups) self.assertEqual(14190675.0, max(self.lib.neutronEnergyUpperBounds)) self.assertAlmostEqual(0.41745778918, min(self.lib.neutronEnergyUpperBounds)) def test_regionPrimaryXS(self): """Test the primary cross sections for the second region - fissile.""" expectedMacros = { "absorption": [ 0.00810444, 0.0049346, 0.00329084, 0.00500318, 0.00919719, 0.01548523, 0.02816499, 0.04592259, 0.09402685, 0.12743879, 0.20865865, ], "fission": [ 0.00720288, 0.00398085, 0.00181345, 0.00236554, 0.00341723, 0.00564286, 0.0110835, 0.0211668, 0.04609869, 0.09673319, 0.16192732, ], "total": [ 0.18858715, 0.18624092, 0.22960965, 0.27634201, 0.33255093, 0.61437815, 0.42582573, 0.48091191, 0.4931102, 0.49976887, 0.58214497, ], "removal": [ 0.07268185, 0.03577923, 0.01127517, 0.01003666, 0.01254067, 0.02686466, 0.02881869, 0.04606618, 0.09605395, 0.13462841, 0.20865865, ], "transport": [ 0.10812569, 0.13096095, 0.18227532, 0.24610402, 0.29647433, 0.55842311, 0.40818328, 
0.45512788, 0.45669781, 0.49153138, 0.55067248, ], "nuSigF": [ 0.02247946, 0.01047702, 0.00449566, 0.00576889, 0.00829842, 0.01373361, 0.02697533, 0.05151573, 0.11224934, 0.23570964, 0.39456832, ], "chi": [ [1.38001099e-01], [6.28044390e-01], [2.04412257e-01], [2.63437497e-02], [2.85959793e-03], [3.03098935e-04], [3.19825784e-05], [3.42715844e-06], [3.00034836e-07], [3.87667231e-08], [2.66151779e-13], ], } for xsName, expectedXS in expectedMacros.items(): actualXS = self.fissileRegion.macros[xsName] self.assertTrue(np.allclose(actualXS, expectedXS)) def test_totalScatterMatrix(self): """ Test the total scattering matrix by comparing the sparse components. Sparse matrices can be constructed from three vectors: data, indices, and indptr. For column matrix, the row indices for column ``j`` are stored in ``indices[indptr[j]:indptr[j + 1]]`` and the corresponding data is stored in ``data[indptr[j]:indptr[j + 1]]``. See Also -------- scipy.sparse.csc_matrix """ expectedSparseData = np.array( [ 1.15905297e-01, 1.50461698e-01, 4.19181830e-02, 2.18334481e-01, 2.66726391e-02, 2.06841438e-02, 2.66305350e-01, 7.93398724e-03, 3.74972053e-03, 2.82068371e-03, 3.20010257e-01, 4.98916288e-03, 4.64327778e-05, 3.62943322e-04, 2.33116653e-04, 5.87513494e-01, 3.33728477e-03, 4.05355062e-05, 3.40557886e-06, 5.05978110e-05, 2.44368007e-05, 3.97007043e-01, 1.13794357e-02, 5.81324838e-06, 3.57958695e-06, 4.21100811e-07, 6.02755319e-06, 3.70765519e-06, 4.34845744e-01, 6.53692627e-04, 3.65838392e-07, 1.91840932e-07, 6.47891881e-08, 4.70903065e-07, 7.53010883e-07, 3.97056267e-01, 1.43584939e-04, 1.69959524e-08, 7.63482393e-09, 1.07996799e-08, 7.79766262e-08, 1.42976480e-07, 3.65140459e-01, 2.02709238e-03, 1.62021799e-09, 1.25812112e-09, 3.39504415e-09, 2.13443401e-06, 7.75326455e-06, 3.73486301e-01, 7.18962870e-03, 4.72605255e-15, 5.11975260e-13, 1.25417930e-08, 4.57563838e-08, ] ) expectedSparseIndices = [ 0, 1, 0, 2, 1, 0, 3, 2, 1, 0, 4, 3, 2, 1, 0, 5, 4, 3, 2, 1, 0, 6, 5, 4, 3, 2, 1, 0, 7, 
6, 4, 3, 2, 1, 0, 8, 7, 4, 3, 2, 1, 0, 9, 8, 4, 3, 2, 1, 0, 10, 9, 4, 2, 1, 0, ] expectedSparseIndptr = [0, 1, 3, 6, 10, 15, 21, 28, 35, 42, 49, 55] actualTotalScatter = self.fissileRegion.macros.totalScatter.toarray() expectedTotalScatter = csc_matrix( (expectedSparseData, expectedSparseIndices, expectedSparseIndptr), actualTotalScatter.shape, ).toarray() self.assertTrue(np.allclose(actualTotalScatter, expectedTotalScatter)) def test_binaryRW(self): """Test to make sure the binary read/writer reads/writes the exact same library.""" with TemporaryDirectoryChanger(): compxs.writeBinary(self.lib, self.binaryWritePath) self.assertTrue(compxs.compare(self.lib, compxs.readBinary(self.binaryWritePath))) def test_asciiRW(self): """Test to make sure the ascii reader/writer reads/writes the exact same library.""" with TemporaryDirectoryChanger(): compxs.writeAscii(self.lib, self.asciiWritePath) self.assertTrue(compxs.compare(self.lib, compxs.readAscii(self.asciiWritePath))) def test_mergeCompxsLibraries(self): """Test to verify the compxs merging returns a library with new regions.""" someLib = CompxsLibrary() someLib.merge(self.lib) self.assertEqual(len(self.lib.regions), len(someLib.regions)) self.assertTrue(self.lib.compxsMetadata.compare(someLib.compxsMetadata, self.lib, someLib)) def test_getCOMPXSFileName(self): self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=0), "COMPXS-c0") self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=1), "COMPXS-c1") self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=23), "COMPXS-c23") self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(), "COMPXS") ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_dif3d.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test reading/writing of DIF3D binary input."""
import os
import unittest

from armi.nuclearDataIO.cccc import dif3d
from armi.utils.directoryChangers import TemporaryDirectoryChanger

THIS_DIR = os.path.dirname(__file__)
SIMPLE_HEXZ_INP = os.path.join(THIS_DIR, "../../tests", "simple_hexz.inp")
SIMPLE_HEXZ_DIF3D = os.path.join(THIS_DIR, "fixtures", "simple_hexz.dif3d")


class TestDif3dSimpleHexz(unittest.TestCase):
    """Record-by-record verification of a known DIF3D binary fixture."""

    @classmethod
    def setUpClass(cls):
        """
        Load DIF3D data from binary file.

        This binary file was generated by running dif3d.exe v11.0r3284 on the
        SIMPLE_HEXZ_INP file above (and renaming the DIF3D binary file to simple_hexz.dif3d).
        """
        cls.df = dif3d.Dif3dStream.readBinary(SIMPLE_HEXZ_DIF3D)

    def test__rwFileID(self):
        """Verify the file identification info.

        .. test:: Test reading DIF3D files.
            :id: T_ARMI_NUCDATA_DIF3D0
            :tests: R_ARMI_NUCDATA_DIF3D
        """
        self.assertEqual(self.df.metadata["HNAME"], "DIF3D")
        self.assertEqual(self.df.metadata["HUSE1"], "")
        self.assertEqual(self.df.metadata["HUSE2"], "")
        self.assertEqual(self.df.metadata["VERSION"], 1)

    def test__rwFile1DRecord(self):
        """Verify the rest of the metadata.

        .. test:: Test reading DIF3D files.
            :id: T_ARMI_NUCDATA_DIF3D1
            :tests: R_ARMI_NUCDATA_DIF3D
        """
        # The title is stored as A6 (6-character) words; unused slots are blank.
        TITLE_A6 = ["3D Hex", "-Z to", "genera", "te NHF", "LUX fi", "le"]
        EXPECTED_TITLE = TITLE_A6 + [""] * 5
        for i in range(dif3d.TITLE_RANGE):
            self.assertEqual(self.df.metadata[f"TITLE{i}"], EXPECTED_TITLE[i])
        self.assertEqual(self.df.metadata["MAXSIZ"], 10000)
        self.assertEqual(self.df.metadata["MAXBLK"], 1800000)
        self.assertEqual(self.df.metadata["IPRINT"], 0)

    def test__rw2DRecord(self):
        """Verify the control parameters."""
        # Reference integer control parameters, in FILE_SPEC_2D_PARAMS order.
        EXPECTED_2D = [
            0,
            0,
            0,
            10000,
            30,
            0,
            1000000000,
            5,
            0,
            0,
            50,
            0,
            1,
            1,
            0,
            0,
            0,
            110,
            10,
            100,
            1,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            10,
            40,
            32,
            0,
            0,
            2,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
        ]
        for i, param in enumerate(dif3d.FILE_SPEC_2D_PARAMS):
            self.assertEqual(self.df.twoD[param], EXPECTED_2D[i])

    def test__rw3DRecord(self):
        """Verify the convergence criteria and other floating point data."""
        # First ten values are meaningful; the remaining 20 slots are zero padding.
        EXPECTED_3D = [
            1e-7,
            1e-5,
            1e-5,
            3.823807613470224e-01,
            1e-3,
            4e-2,
            1e0,
            0e0,
            0e0,
            9.999999747378752e-05,
        ] + [0.0 for i in range(1, 21)]
        for i, param in enumerate(dif3d.FILE_SPEC_3D_PARAMS):
            self.assertEqual(self.df.threeD[param], EXPECTED_3D[i])

    def test__rw4DRecord(self):
        """Verify the optimum overrelaxation factors."""
        # Fixture has NUMORP == 0, so record 4 is absent.
        self.assertEqual(self.df.fourD, None)

    def test__rw5DRecord(self):
        """Verify the axial coarse-mesh rebalance boundaries."""
        # Fixture has NCMRZS == 0, so record 5 is absent.
        self.assertEqual(self.df.fiveD, None)

    def test_writeBinary(self):
        """Verify binary equivalence of written DIF3D file.

        .. test:: Test writing DIF3D files.
            :id: T_ARMI_NUCDATA_DIF3D2
            :tests: R_ARMI_NUCDATA_DIF3D
        """
        with TemporaryDirectoryChanger():
            dif3d.Dif3dStream.writeBinary(self.df, "DIF3D2")
            with open(SIMPLE_HEXZ_DIF3D, "rb") as f1, open("DIF3D2", "rb") as f2:
                expectedData = f1.read()
                actualData = f2.read()
                # NOTE(review): zip stops at the shorter byte string, so trailing extra
                # bytes in either file would not be detected here — confirm intent.
                for expected, actual in zip(expectedData, actualData):
                    self.assertEqual(expected, actual)


class TestDif3dEmptyRecords(unittest.TestCase):
    def test_empty4and5Records(self):
        """Since the inputs results in these being None, get test coverage another way."""
        df = dif3d.Dif3dStream.readBinary(SIMPLE_HEXZ_DIF3D)
        # Hack some values that allow 4 and 5 records to be populated, and then
        # populate them so the write path for those records gets exercised.
        df.twoD["NUMORP"] = 1
        df.twoD["NCMRZS"] = 1
        df.fourD = {"OMEGA1": 1.0}
        df.fiveD = {"ZCMRC1": 1.0, "NZINTS1": 10}
        with TemporaryDirectoryChanger():
            # Write then read a new one
            dif3d.Dif3dStream.writeBinary(df, "DIF3D2")
            df2 = dif3d.Dif3dStream.readBinary("DIF3D2")
            # Kinda a null test, but this coverage caught some code mistakes!
            self.assertEqual(df2.fourD, df.fourD)
            self.assertEqual(df2.fiveD, df.fiveD)


================================================
FILE: armi/nuclearDataIO/cccc/tests/test_fixsrc.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the reading and writing of the DIF3D FIXSRC file format.""" import os import unittest import numpy as np from armi.nuclearDataIO.cccc import fixsrc from armi.utils.directoryChangers import TemporaryDirectoryChanger # ruff: noqa: E501 FIXSRC_ASCII = """0 0 0 0 0 0 0.4008E+10 0.4210E+10 0.4822E+10 0.5154E+10 0.4926E+10 0.4621E+10 0.4246E+10 0.3757E+10 0.3311E+10 0.3479E+10 0.357E+10 0.324E+10 0.2942E+10 0.2903E+10 0.2925E+10 0.2763E+10 0.2414E+10 0.2036E+10 0.1656E+10 0.1477E+10 0.1455E+10 0.1434E+10 0.1297E+10 0.1153E+10 0.101E+10 0.8841E+9 0.7923E+9 0.7266E+9 0.6575E+9 0.589E+9 0.5027E+9 0.4146E+9 0.3474E+9 0.3015E+9 0.2403E+9 0.2356E+9 0.1634E+9 0.1521E+9 0.1258E+9 0.9032E+8 0.6156E+8 0.3983E+8 0.3134E+8 0.303E+8 0.2983E+8 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0""" FIXSRC_ARRAY = np.array(FIXSRC_ASCII.split(), dtype=np.float32).reshape((3, 3, 2, 4)) class TestFixsrc(unittest.TestCase): def test_writeReadBinaryLoop(self): with TemporaryDirectoryChanger() as newDir: fileName = "fixsrc_writeBinary.bin" binaryFilePath = os.path.join(newDir.destination, fileName) fixsrc.writeBinary(binaryFilePath, FIXSRC_ARRAY) self.assertIn(fileName, os.listdir(newDir.destination)) self.assertGreater(os.path.getsize(binaryFilePath), 0) ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_gamiso.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test GAMISO reading and writing.""" import os import unittest from copy import deepcopy from armi.nuclearDataIO import xsLibraries from armi.nuclearDataIO.cccc import gamiso, isotxs from armi.nuclearDataIO.xsNuclides import XSNuclide from armi.utils.directoryChangers import TemporaryDirectoryChanger THIS_DIR = os.path.dirname(__file__) FIXTURE_DIR = os.path.join(THIS_DIR, "..", "..", "tests", "fixtures") GAMISO_AA = os.path.join(FIXTURE_DIR, "AA.gamiso") class TestGamiso(unittest.TestCase): def setUp(self): self.xsLib = xsLibraries.IsotxsLibrary() def test_compare(self): """Compare the input binary GAMISO file. .. test:: Test reading GAMISO files. :id: T_ARMI_NUCDATA_GAMISO0 :tests: R_ARMI_NUCDATA_GAMISO """ gamisoAA = gamiso.readBinary(GAMISO_AA) self.xsLib.merge(deepcopy(gamisoAA)) self.assertTrue(gamiso.compare(self.xsLib, gamisoAA)) def test_writeBinary(self): """Write a binary GAMISO file. .. test:: Test writing GAMISO files. :id: T_ARMI_NUCDATA_GAMISO1 :tests: R_ARMI_NUCDATA_GAMISO """ with TemporaryDirectoryChanger(): data = gamiso.readBinary(GAMISO_AA) binData = gamiso.writeBinary(data, "gamiso.out") self.assertTrue(gamiso.compare(data, binData)) def test_addDummyNuclidesToLibrary(self): dummyNuclides = [XSNuclide(None, "U238AA")] before = self.xsLib.getNuclides("") self.assertEqual(len(self.xsLib.xsIDs), 0) self.assertTrue(gamiso.addDummyNuclidesToLibrary(self.xsLib, dummyNuclides)) self.assertEqual(len(self.xsLib.xsIDs), 1) self.assertEqual(list(self.xsLib.xsIDs)[0], "38") after = self.xsLib.getNuclides("") self.assertGreater(len(after), len(before)) diff = set(after).difference(set(before)) self.assertEqual(len(diff), 1) self.assertEqual(list(diff)[0].xsId, "38") def test_addDummyNuclidesToLibraryNumGroups(self): isoLib = isotxs.readBinary(os.path.join(FIXTURE_DIR, "ISOAA")) gamLib = gamiso.readBinary(GAMISO_AA) gamLib.gamisoMetadata["numGroups"] = 50 dummyNuc = XSNuclide(isoLib, "DMP1AA") dummyNuc.isotxsMetadata = 
isoLib.getNuclides("AA")[0].isotxsMetadata gamiso.addDummyNuclidesToLibrary(gamLib, [dummyNuc]) self.assertEqual(gamLib["DMP1AA"].nucLabel, "DMP1") self.assertEqual(gamLib["DMP1AA"].gamisoMetadata["jband"][(49, 3)], 1) ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_geodst.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test GEODST reading and writing.""" import os import unittest from numpy.testing import assert_equal from armi.nuclearDataIO.cccc import geodst from armi.utils.directoryChangers import TemporaryDirectoryChanger THIS_DIR = os.path.dirname(__file__) SIMPLE_GEODST = os.path.join(THIS_DIR, "fixtures", "simple_hexz.geodst") class TestGeodst(unittest.TestCase): """ Tests the GEODST class. This reads from a GEODST file that was created using DIF3D 11 on a small test hex reactor in 1/3 geometry. """ def test_readGeodst(self): """Ensure we can read a GEODST file. .. test:: Test reading GEODST files. 
:id: T_ARMI_NUCDATA_GEODST0 :tests: R_ARMI_NUCDATA_GEODST """ geo = geodst.readBinary(SIMPLE_GEODST) self.assertEqual(geo.metadata["IGOM"], 18) self.assertAlmostEqual(geo.xmesh[1], 16.79, places=5) # hex pitch self.assertAlmostEqual(geo.zmesh[-1], 448.0, places=5) # top of reactor in cm self.assertEqual(geo.coarseMeshRegions.shape, (10, 10, len(geo.zmesh) - 1)) self.assertEqual(geo.coarseMeshRegions.min(), 0) self.assertEqual(geo.coarseMeshRegions.max(), geo.metadata["NREG"]) def test_writeGeodst(self): """Ensure that we can write a modified GEODST. .. test:: Test writing GEODST files. :id: T_ARMI_NUCDATA_GEODST1 :tests: R_ARMI_NUCDATA_GEODST """ with TemporaryDirectoryChanger(): geo = geodst.readBinary(SIMPLE_GEODST) geo.zmesh[-1] *= 2 geodst.writeBinary(geo, "GEODST2") geo2 = geodst.readBinary("GEODST2") self.assertAlmostEqual(geo2.zmesh[-1], 448.0 * 2, places=5) assert_equal(geo.kintervals, geo2.kintervals) ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_isotxs.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests the workings of the library wrappers.""" import unittest from armi import nuclearDataIO from armi.nucDirectory.nuclideBases import NuclideBases from armi.nuclearDataIO import xsLibraries from armi.nuclearDataIO.cccc import isotxs from armi.tests import ISOAA_PATH from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestIsotxs(unittest.TestCase): """Tests the ISOTXS class.""" @classmethod def setUpClass(cls): # load a library that is in the ARMI tree. This should # be a small library with LFPs, Actinides, structure, and coolant cls.lib = isotxs.readBinary(ISOAA_PATH) def test_writeBinary(self): """Test reading in an ISOTXS file, and then writing it back out again. Now, the library here can't guarantee the output will be the same as the input. But we can guarantee the written file is still valid, by reading it again. .. test:: Write ISOTSX binary files. :id: T_ARMI_NUCDATA_ISOTXS0 :tests: R_ARMI_NUCDATA_ISOTXS """ with TemporaryDirectoryChanger(): origLib = isotxs.readBinary(ISOAA_PATH) fname = self._testMethodName + "temp-aa.isotxs" isotxs.writeBinary(origLib, fname) lib = isotxs.readBinary(fname) # validate the written file is still valid nucs = lib.nuclides self.assertTrue(nucs) self.assertIn("AA", lib.xsIDs) nuc = lib["U235AA"] self.assertIsNotNone(nuc) with self.assertRaises(KeyError): lib.getNuclide("nonexistent", "zz") def test_isotxsGeneralData(self): nucs = self.lib.nuclides self.assertTrue(nucs) self.assertIn("AA", self.lib.xsIDs) nuc = self.lib["U235AA"] self.assertIsNotNone(nuc) with self.assertRaises(KeyError): self.lib.getNuclide("nonexistent", "zz") def test_isotxsDetailedData(self): self.assertEqual(50, len(self.lib.nuclides)) groups = self.lib.neutronEnergyUpperBounds self.assertEqual(33, len(groups)) self.assertEqual(14072911.0, max(groups)) self.assertEqual(0.4139941930770874, min(groups)) # file-wide chi self.assertEqual(33, len(self.lib.isotxsMetadata["chi"])) self.assertEqual(1.0000016745038094, 
sum(self.lib.isotxsMetadata["chi"])) def test_getScatteringWeights(self): self.assertEqual(1650, len(self.lib.getScatterWeights())) refVector = [ 0.0, 0.9924760291647134, 0.007523970835286507, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] for v1, v2 in zip(refVector, self.lib.getScatterWeights()["U235AA", 1].todense().T.tolist()[0]): self.assertAlmostEqual(v1, v2) def test_getNuclide(self): nuclideBases = NuclideBases() self.assertEqual(nuclideBases.byName["U235"], self.lib.getNuclide("U235", "AA")._base) self.assertEqual(nuclideBases.byName["PU239"], self.lib.getNuclide("PU239", "AA")._base) def test_n2nIsReactionBased(self): """ ARMI assumes ISOTXS n2n reactions are all reaction-based. Test this. The alternative is production based. Previous studies show that MC**2-2 is reaction based. """ nuc = self.lib.getNuclide("U235", "AA") fromMatrix = nuc.micros.n2nScatter.sum(axis=0).getA1() # convert to ndarray for base, matrix in zip(fromMatrix, nuc.micros.n2n): self.assertAlmostEqual(base, matrix) def test_getScatterWeights(self): scatWeights = self.lib.getScatterWeights() vals = scatWeights["U235AA", 4] self.assertAlmostEqual(sum(vals), 1.0) def test_getISOTXSFileName(self): self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=0), "ISOTXS-c0") self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=1), "ISOTXS-c1") self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=0, node=1), "ISOTXS-c0n1") self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=23), "ISOTXS-c23") self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(xsID="AA"), "ISOAA") self.assertEqual( nuclearDataIO.getExpectedISOTXSFileName(xsID="AA", suffix="test"), "ISOAA-test", ) self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(), "ISOTXS") with self.assertRaises(ValueError): # Error when over specified nuclearDataIO.getExpectedISOTXSFileName(cycle=10, 
xsID="AA") def test_getGAMISOFileName(self): self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=0), "cycle0.gamiso") self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=1), "cycle1.gamiso") self.assertEqual( nuclearDataIO.getExpectedGAMISOFileName(cycle=1, node=3), "cycle1node3.gamiso", ) self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=23), "cycle23.gamiso") self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(xsID="AA"), "AA.gamiso") self.assertEqual( nuclearDataIO.getExpectedGAMISOFileName(xsID="AA", suffix="test"), "AA-test.gamiso", ) self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(), "GAMISO") with self.assertRaises(ValueError): # Error when over specified nuclearDataIO.getExpectedGAMISOFileName(cycle=10, xsID="AA") class Isotxs_merge_Tests(unittest.TestCase): def test_mergeMccV2FilesRemovesTheFileWideChi(self): """Test merging ISOTXS files. .. test:: Read ISOTXS files. :id: T_ARMI_NUCDATA_ISOTXS1 :tests: R_ARMI_NUCDATA_ISOTXS """ isoaa = isotxs.readBinary(ISOAA_PATH) self.assertAlmostEqual(1.0, sum(isoaa.isotxsMetadata["chi"]), 5) self.assertAlmostEqual(1, isoaa.isotxsMetadata["fileWideChiFlag"]) someIsotxs = xsLibraries.IsotxsLibrary() # semi-copy... someIsotxs.merge(isoaa) self.assertAlmostEqual(1.0, sum(someIsotxs.isotxsMetadata["chi"]), 5) self.assertEqual(1, someIsotxs.isotxsMetadata["fileWideChiFlag"]) # OK, now I need to delete all the nuclides, so we can merge again. for key in someIsotxs.nuclideLabels: del someIsotxs[key] someIsotxs.merge(isotxs.readBinary(ISOAA_PATH)) self.assertEqual(None, someIsotxs.isotxsMetadata["chi"]) ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_labels.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the reading and writing of the DIF3D/VARIANT LABELS interface file."""

import os
import unittest

from armi.nuclearDataIO.cccc import labels
from armi.utils.directoryChangers import TemporaryDirectoryChanger

THIS_DIR = os.path.dirname(__file__)
LABELS_FILE_BIN = os.path.join(THIS_DIR, "fixtures", "labels.binary")
LABELS_FILE_ASCII = os.path.join(THIS_DIR, "fixtures", "labels.ascii")


class TestLabels(unittest.TestCase):
    """Tests for labels."""

    def test_readLabelsBinary(self):
        """Read the binary LABELS fixture and verify its file-control metadata."""
        expectedName = "LABELS"
        expectedTrianglesPerHex = 6
        expectedNumZones = 5800
        expectedNumRegions = 2900
        expectedNumHexagonalRings = 13
        labelsData = labels.readBinary(LABELS_FILE_BIN)
        self.assertEqual(labelsData.metadata["hname"], expectedName)
        self.assertEqual(labelsData.metadata["numTrianglesPerHex"], expectedTrianglesPerHex)
        self.assertEqual(labelsData.metadata["numZones"], expectedNumZones)
        self.assertEqual(labelsData.metadata["numRegions"], expectedNumRegions)
        self.assertEqual(labelsData.metadata["numHexagonalRings"], expectedNumHexagonalRings)
        # one label per region
        self.assertEqual(len(labelsData.regionLabels), expectedNumRegions)

    def test_writeLabelsAscii(self):
        """Round-trip: write the binary fixture to ascii and compare against the ascii fixture."""
        with TemporaryDirectoryChanger():
            labelsData = labels.readBinary(LABELS_FILE_BIN)
            labels.writeAscii(labelsData, self._testMethodName + "labels.ascii")
            with open(self._testMethodName + "labels.ascii", "r") as f:
                actualData = f.read().splitlines()
            with open(LABELS_FILE_ASCII) as f:
                expectedData = f.read().splitlines()
            # zip() silently stops at the shorter sequence, so a truncated (or
            # empty) written file would previously pass; require matching line
            # counts before the line-by-line comparison.
            self.assertEqual(len(expectedData), len(actualData))
            for expected, actual in zip(expectedData, actualData):
                self.assertEqual(expected, actual)
================================================ FILE: armi/nuclearDataIO/cccc/tests/test_nhflux.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test reading/writing of NHFLUX dataset.""" import os import unittest import numpy as np from armi.nuclearDataIO.cccc import nhflux from armi.utils.directoryChangers import TemporaryDirectoryChanger THIS_DIR = os.path.dirname(__file__) SIMPLE_HEXZ_INP = os.path.join(THIS_DIR, "../../tests", "simple_hexz.inp") SIMPLE_HEXZ_NHFLUX = os.path.join(THIS_DIR, "fixtures", "simple_hexz.nhflux") SIMPLE_HEXZ_NHFLUX_VARIANT = os.path.join(THIS_DIR, "fixtures", "simple_hexz.nhflux.variant") class TestNhflux(unittest.TestCase): @classmethod def setUpClass(cls): """Load NHFLUX data from binary file.""" cls.nhf = nhflux.NhfluxStream.readBinary(SIMPLE_HEXZ_NHFLUX) def test_fc(self): """Verify the file control info.""" self.assertEqual(self.nhf.metadata["ndim"], 3) self.assertEqual(self.nhf.metadata["ngroup"], 4) self.assertEqual(self.nhf.metadata["ninti"], 5) self.assertEqual(self.nhf.metadata["nintj"], 5) self.assertEqual(self.nhf.metadata["nintk"], 6) self.assertEqual(self.nhf.metadata["nSurf"], 6) self.assertEqual(self.nhf.metadata["nMom"], 5) self.assertEqual(self.nhf.metadata["nintxy"], 19) self.assertEqual(self.nhf.metadata["npcxy"], 144) self.assertEqual(self.nhf.metadata["iaprx"], 4) self.assertEqual(self.nhf.metadata["iaprxz"], 3) variantControlInfo = 
nhflux.FILE_SPEC_1D_KEYS_VARIANT11 for info in variantControlInfo: self.assertTrue(info not in self.nhf.metadata) def test_fluxMoments(self): """ Verify that the flux moments are properly read. The 5 flux moments values are manually verified for two nodes. The indices are converted to zero based from the original by subtracting one. """ # node 1 (ring=1, position=1), axial=3, group=2 i = 0 # first one in node map (ring=1, position=1) # 13 = 2*5 + 2 + 1 => (i=2, j=2) self.assertEqual(self.nhf.geodstCoordMap[i], 13) iz, ig = 2, 1 # zero based self.assertTrue( np.allclose( self.nhf.fluxMoments[i, iz, :, ig], [1.424926e08, -2.018375e-01, 2.018375e-01, -2.018374e-01, 1.758205e06], ) ) # node 8 (ring=3, position=2), axial=6, group=1 i = 7 # ring=3, position=2 self.assertEqual(self.nhf.geodstCoordMap[i], 20) # 20 = 3*5 + 4 + 1 => (i=4, j=3) iz, ig = 5, 0 # zero based self.assertTrue( np.allclose( self.nhf.fluxMoments[i, iz, :, ig], [7.277324e06, -1.453915e06, -1.453915e06, 2.362100e-02, -8.626439e05], ) ) def test_xyPartialCurrents(self): """ Verify that the XY-directed partial currents can be read. The surface partial currents can be used to reconstruct the surface flux and corner flux values. This test shows that the outgoing current in one hex is identical to the incoming current in the adjacent hex. """ # node 2 (ring=3, position=1), axial=4, group=2, surface=4, outgoing iNode, iSurf, iz, ig = 1, 3, 3, 1 # zero based self.assertEqual(self.nhf.geodstCoordMap[iNode], 15) self.assertAlmostEqual(self.nhf.partialCurrentsHex[iNode, iz, iSurf, ig] / 1.5570424e07, 1.0) # node 14 (ring=2, position=1), axial=4, group=2, surface=1, incoming iNode, iSurf = 13, 0 ipcpnt = self.nhf.incomingPointersToAllAssemblies[iSurf, iNode] iNode1, iSurf1 = divmod(ipcpnt - 1, self.nhf.metadata["nSurf"]) self.assertEqual(iNode1, 1) # node 2 self.assertEqual(iSurf1, 3) # surface 4 def test_zPartialCurrents(self): """ Verify that the Z-directed partial currents can be read. 
The Z-directed partial currents are manually checked for one node surface. """ # node 15 (ring=2, position=3), axial=3, group=3, j=1 (z-plus) iNode, iz, ig, j = 14, 2, 2, 0 self.assertAlmostEqual(self.nhf.partialCurrentsZ[iNode, iz, j, ig] / 1.6928521e06, 1.0) def test_write(self): """Verify binary equivalence of written binary file.""" with TemporaryDirectoryChanger(): nhflux.NhfluxStream.writeBinary(self.nhf, "NHFLUX2") with open(SIMPLE_HEXZ_NHFLUX, "rb") as f1, open("NHFLUX2", "rb") as f2: expectedData = f1.read() actualData = f2.read() for expected, actual in zip(expectedData, actualData): self.assertEqual(expected, actual) class TestNhfluxVariant(unittest.TestCase): @classmethod def setUpClass(cls): """Load NHFLUX data from binary file. This file was produced using VARIANT v11.0.""" cls.nhf = nhflux.NhfluxStreamVariant.readBinary(SIMPLE_HEXZ_NHFLUX_VARIANT) def test_fc(self): """Verify the file control info.""" # These entries exist for both Nodal and VARIANT, but have different values # for the same model print(self.nhf.metadata.items()) self.assertEqual(self.nhf.metadata["nMom"], 35) self.assertEqual(self.nhf.metadata["nscoef"], 3) # These entries are only for VARIANT self.assertEqual(self.nhf.metadata["npcbdy"], 30) self.assertEqual(self.nhf.metadata["npcsym"], 0) self.assertEqual(self.nhf.metadata["npcsec"], 0) self.assertEqual(self.nhf.metadata["iwnhfl"], 0) self.assertEqual(self.nhf.metadata["nMoms"], 0) def test_fluxMoments(self): # node 1 (ring=1, position=1), axial=3, group=2 i = 0 self.assertEqual(self.nhf.geodstCoordMap[i], 13) iz, ig = 2, 1 fluxMoments = self.nhf.fluxMoments[i, iz, :, ig] numZeroFluxMoments = fluxMoments[fluxMoments == 0.0].shape[0] self.assertTrue(numZeroFluxMoments == 23) actualNonzeroFluxMoments = fluxMoments[fluxMoments != 0.0] expectedNonzeroFluxMoments = [ 1.42816534e08, -5.97642574e06, -1.54354423e06, -2.15736929e06, -1.53415481e06, 5.54278533e04, 7.74699855e04, 2.38133712e04, 6.69907176e03, 5.49027950e03, 9.01170812e03, 
1.05852790e04, ] self.assertTrue(np.allclose(actualNonzeroFluxMoments, expectedNonzeroFluxMoments)) def test_write(self): """Verify binary equivalence of written binary file.""" with TemporaryDirectoryChanger(): nhflux.NhfluxStreamVariant.writeBinary(self.nhf, "NHFLUX2") with open(SIMPLE_HEXZ_NHFLUX_VARIANT, "rb") as f1, open("NHFLUX2", "rb") as f2: expectedData = f1.read() actualData = f2.read() for expected, actual in zip(expectedData, actualData): self.assertEqual(expected, actual) ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_pmatrx.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the workings of the library wrappers.""" import filecmp import unittest from armi import nuclearDataIO from armi.nuclearDataIO.cccc import pmatrx from armi.nuclearDataIO.tests import test_xsLibraries from armi.utils import properties from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestPmatrxNuclides(unittest.TestCase): @classmethod def setUpClass(cls): # load a library that is in the ARMI tree. 
This should # be a small library with LFPs, Actinides, structure, and coolant cls.libAA = pmatrx.readBinary(test_xsLibraries.PMATRX_AA) cls.libAB = pmatrx.readBinary(test_xsLibraries.PMATRX_AB) def _nuclideGeneralHelper(self, u235): self.assertEqual(0, len(u235.pmatrxMetadata["activationXS"])) self.assertEqual(0, len(u235.pmatrxMetadata["activationMT"])) self.assertEqual(0, len(u235.pmatrxMetadata["activationMTU"])) self.assertEqual(33, len(u235.neutronHeating)) self.assertEqual(33, len(u235.neutronDamage)) self.assertEqual(21, len(u235.gammaHeating)) # if there are more scattering orders, should add tests for them as well... self.assertEqual(1, u235.pmatrxMetadata["maxScatteringOrder"]) self.assertEqual((21, 33), u235.isotropicProduction.shape) def test_pmatrxNuclideDataAA(self): self._nuclideGeneralHelper(self.libAA["U235AA"]) def test_pmatrxNuclideDataAB(self): self._nuclideGeneralHelper(self.libAB["U235AB"]) def test_nuclideDataIsDifferent(self): aa = self.libAA["U235AA"] ab = self.libAB["U235AB"] self.assertFalse((aa.isotropicProduction == ab.isotropicProduction).all()) def test_getPMATRXFileName(self): self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(cycle=0), "cycle0.pmatrx") self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(cycle=1), "cycle1.pmatrx") self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(cycle=23), "cycle23.pmatrx") self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(xsID="AA"), "AA.pmatrx") self.assertEqual( nuclearDataIO.getExpectedPMATRXFileName(xsID="AA", suffix="test"), "AA-test.pmatrx", ) self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(), "PMATRX") with self.assertRaises(ValueError): # Error when over specified nuclearDataIO.getExpectedPMATRXFileName(cycle=10, xsID="AA") class TestPmatrx(unittest.TestCase): """Tests the Pmatrx gamma production matrix.""" @classmethod def setUpClass(cls): # load a library that is in the ARMI tree. 
This should # be a small library with LFPs, Actinides, structure, and coolant cls.lib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA) def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_pmatrxGammaEnergies(self): energies = [ 20000000.0, 10000000.0, 8000000.0, 7000000.0, 6000000.0, 5000000.0, 4000000.0, 3000000.0, 2500000.0, 2000000.0, 1500000.0, 1000000.0, 700000.0, 450000.0, 300000.0, 150000.0, 100000.0, 74999.8984375, 45000.0, 30000.0, 20000.0, ] self.assertTrue((energies == self.lib.gammaEnergyUpperBounds).all()) def test_pmatrxNeutronEnergies(self): energies = [ 14190675.0, 10000000.0, 6065306.5, 3678794.75, 2231302.0, 1353353.125, 820850.0, 497870.625, 301973.75, 183156.34375, 111089.875, 67379.390625, 40867.66796875, 24787.498046875, 15034.3779296875, 9118.810546875, 5530.8388671875, 3354.624267578125, 2034.6827392578125, 1234.097412109375, 748.5178833007812, 453.9991149902344, 275.36444091796875, 167.01695251464844, 101.30089569091797, 61.44210433959961, 37.26651382446289, 22.6032772064209, 13.709582328796387, 8.31528091430664, 3.9278604984283447, 0.5315780639648438, 0.41745778918266296, ] self.assertTrue((energies == self.lib.neutronEnergyUpperBounds).all()) def test_pmatrxNuclideNames(self): names = [ "U235AA", "U238AA", "PU39AA", "FE54AA", "FE56AA", "FE57AA", "FE58AA", "NA23AA", "ZR90AA", "ZR91AA", "ZR92AA", "ZR93AA", "ZR94AA", "ZR95AA", "ZR96AA", "XE28AA", "XE29AA", "XE30AA", "XE31AA", "XE32AA", "XE33AA", "XE34AA", "XE35AA", "XE36AA", "FP40AA", ] self.assertEqual(names, self.lib.nuclideLabels) def test_pmatrxDoesntHaveDoseConversionFactors(self): with self.assertRaises(properties.ImmutablePropertyError): _bacon = self.lib.neutronDoseConversionFactors with self.assertRaises(properties.ImmutablePropertyError): _turkey = self.lib.gammaDoseConversionFactors # bravo! class TestProdMatrix(TestPmatrx): """ Tests related to reading a PMATRX that was written by ARMI. 
Note that this runs all the tests from TestPmatrx. """ def test_writtenIsIdenticalToOriginal(self): """Make sure our writer produces something identical to the original. .. test:: Test reading and writing PMATRIX files. :id: T_ARMI_NUCDATA_PMATRX :tests: R_ARMI_NUCDATA_PMATRX """ origLib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA) fname = self._testMethodName + "temp-aa.pmatrx" pmatrx.writeBinary(origLib, fname) _lib = pmatrx.readBinary(fname) self.assertTrue(filecmp.cmp(test_xsLibraries.PMATRX_AA, fname)) class TestProdMatrixFromAscii(TestPmatrx): """ Tests that show you can read and write pmatrx files from ascii libraries. Notes ----- This runs all the tests from TestPmatrx. """ @classmethod def setUpClass(cls): cls.origLib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA) def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() self.fname = self._testMethodName + "temp-aa.pmatrx.ascii" lib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA) pmatrx.writeAscii(lib, self.fname) self.lib = pmatrx.readAscii(self.fname) def tearDown(self): self.td.__exit__(None, None, None) ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_pwdint.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test PWDINT reading and writing.""" import os import unittest from armi.nuclearDataIO.cccc import pwdint from armi.utils.directoryChangers import TemporaryDirectoryChanger THIS_DIR = os.path.dirname(__file__) SIMPLE_PWDINT = os.path.join(THIS_DIR, "fixtures", "simple_cartesian.pwdint") class TestGeodst(unittest.TestCase): r""" Tests the PWDINT class. This reads from a PWDINT file that was created using DIF3D 11 on a small test hex reactor in 1/3 geometry. """ def test_readGeodst(self): """Ensure we can read a PWDINT file.""" pwr = pwdint.readBinary(SIMPLE_PWDINT) self.assertGreater(pwr.powerDensity.min(), 0.0) def test_writeGeodst(self): """Ensure that we can write a modified PWDINT.""" with TemporaryDirectoryChanger(): pwr = pwdint.readBinary(SIMPLE_PWDINT) pwdint.writeBinary(pwr, "PWDINT2") pwr2 = pwdint.readBinary("PWDINT2") self.assertTrue((pwr2.powerDensity == pwr.powerDensity).all()) ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_rtflux.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test rtflux reading and writing.""" import os import unittest from armi.nuclearDataIO.cccc import rtflux from armi.utils.directoryChangers import TemporaryDirectoryChanger THIS_DIR = os.path.dirname(__file__) # This rtflux was made by DIF3D 11 in a Cartesian test case. 
SIMPLE_RTFLUX = os.path.join(THIS_DIR, "fixtures", "simple_cartesian.rtflux") class Testrtflux(unittest.TestCase): r"""Tests the rtflux class.""" def test_readrtflux(self): """Ensure we can read a rtflux file.""" flux = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX) self.assertEqual( flux.groupFluxes.shape, ( flux.metadata["NINTI"], flux.metadata["NINTJ"], flux.metadata["NINTK"], flux.metadata["NGROUP"], ), ) def test_writertflux(self): """Ensure that we can write a modified rtflux file.""" with TemporaryDirectoryChanger(): flux = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX) # perturb off-diag item to check row/col ordering flux.groupFluxes[2, 1, 3, 5] *= 1.1 flux.groupFluxes[1, 2, 4, 6] *= 1.2 rtflux.RtfluxStream.writeBinary(flux, "rtflux2") flux2 = rtflux.RtfluxStream.readBinary("rtflux2") self.assertAlmostEqual(flux2.groupFluxes[2, 1, 3, 5], flux.groupFluxes[2, 1, 3, 5]) def test_rwAscii(self): """Ensure that we can read/write in ascii format.""" with TemporaryDirectoryChanger(): flux = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX) rtflux.RtfluxStream.writeAscii(flux, "rtflux.ascii") flux2 = rtflux.RtfluxStream.readAscii("rtflux.ascii") self.assertTrue((flux2.groupFluxes == flux.groupFluxes).all()) def test_adjoint(self): """Ensure adjoint reads energy groups differently.""" real = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX) adjoint = rtflux.AtfluxStream.readBinary(SIMPLE_RTFLUX) self.assertFalse((real.groupFluxes == adjoint.groupFluxes).all()) g = 3 self.assertTrue( (real.groupFluxes[:, :, :, g] == adjoint.groupFluxes[:, :, :, real.metadata["NGROUP"] - g - 1]).all() ) ================================================ FILE: armi/nuclearDataIO/cccc/tests/test_rzflux.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test rzflux reading and writing.""" import os import unittest from armi.nuclearDataIO.cccc import rzflux from armi.utils.directoryChangers import TemporaryDirectoryChanger THIS_DIR = os.path.dirname(__file__) # This RZFLUX was made by DIF3D 11 in a Cartesian test case. SIMPLE_RZFLUX = os.path.join(THIS_DIR, "fixtures", "simple_cartesian.rzflux") class TestRzflux(unittest.TestCase): """Tests the rzflux class.""" def test_readRzflux(self): """Ensure we can read a RZFLUX file.""" flux = rzflux.readBinary(SIMPLE_RZFLUX) self.assertEqual(flux.groupFluxes.shape, (flux.metadata["NGROUP"], flux.metadata["NZONE"])) def test_writeRzflux(self): """Ensure that we can write a modified RZFLUX file.""" with TemporaryDirectoryChanger(): flux = rzflux.readBinary(SIMPLE_RZFLUX) rzflux.writeBinary(flux, "RZFLUX2") self.assertTrue(binaryFilesEqual(SIMPLE_RZFLUX, "RZFLUX2")) # perturb off-diag item to check row/col ordering flux.groupFluxes[2, 10] *= 1.1 flux.groupFluxes[12, 1] *= 1.2 rzflux.writeBinary(flux, "RZFLUX3") flux2 = rzflux.readBinary("RZFLUX3") self.assertAlmostEqual(flux2.groupFluxes[12, 1], flux.groupFluxes[12, 1]) def test_rwAscii(self): """Ensure that we can read/write in ascii format.""" with TemporaryDirectoryChanger(): flux = rzflux.readBinary(SIMPLE_RZFLUX) rzflux.writeAscii(flux, "RZFLUX.ascii") flux2 = rzflux.readAscii("RZFLUX.ascii") self.assertTrue((flux2.groupFluxes == flux.groupFluxes).all()) def binaryFilesEqual(fn1, fn2): """True if two files are bytewise identical.""" with open(fn1, "rb") as f1, open(fn2, "rb") as f2: for byte1, byte2 in zip(f1, f2): if 
byte1 != byte2: return False return True ================================================ FILE: armi/nuclearDataIO/nuclearFileMetadata.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Assists in reconstruction/rewriting nuclear data files. One might refer to the information stored in these files as the scaffolding or blueprints. Some of it can/could be derived based on data within the overall file; however, not all of it could be and it is always necessary to retain this type of data while reading the file. """ from armi import runLog from armi.utils import properties COMPXS_POWER_CONVERSION_FACTORS = ["fissionWattSeconds", "captureWattSeconds"] REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF = [ "powerConvMult", "d1Multiplier", "d1Additive", "d1Multiplier", "d2Additive", "d3Multiplier", "d3Additive", ] class _Metadata: """Simple dictionary wrapper, that returns :code:`None` if the key does not exist. Notes ----- Cannot use a dictionary directly because it is difficult to subclass and broadcast them with MPI. 
""" def __init__(self): self._data = {} def __getitem__(self, key): return self._data.get(key, None) def __setitem__(self, key, value): self._data[key] = value def __iter__(self): return iter(self._data) def items(self): """Returns items similar to the dict implementation.""" return self._data.items() def __len__(self): return len(self._data) def keys(self): """Returns keys similar to the dict implementation.""" return self._data.keys() def values(self): return self._data.values() def update(self, other): """Updates the underlying dictionary, similar to the dict implementation.""" self._data.update(other._data) def merge(self, other, selfContainer, otherContainer, fileType, exceptionClass): """ Merge the contents of two metadata instances. Parameters ---------- other: Similar Metadata class as self Metadata to be compared against selfContainer: class otherContainer: class Objects that hold the two metadata instances fileType: str File type that created this metadata. Examples: ``'ISOTXS', 'GAMISO', 'COMPXS'``` exceptionClass: Exception Type of exception to raise in the event of dissimilar metadata values Returns ------- mergedData: Metadata Returns a metadata instance of similar type as ``self`` and ``other`` containing the correctly merged data of the two """ mergedData = self.__class__() if not (any(self.keys()) and any(other.keys())): mergedData.update(self) mergedData.update(other) return mergedData self._mergeLibrarySpecificData(other, selfContainer, otherContainer, mergedData) skippedKeys = self._getSkippedKeys(other, selfContainer, otherContainer, mergedData) for key in set(list(self.keys()) + list(other.keys())) - skippedKeys: selfVal = self[key] otherVal = other[key] mergedVal = None if not properties.numpyHackForEqual(selfVal, otherVal): exceptionMsg = ( "{libType} {key} metadata differs between {lib1} and {lib2}; Cannot Merge\n" "{key} has values of {val1} and {val2}" ) raise exceptionClass( exceptionMsg.format( libType=fileType, lib1=selfContainer, 
lib2=otherContainer, key=key, val1=selfVal, val2=otherVal, ) ) else: mergedVal = selfVal mergedData[key] = mergedVal return mergedData def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData): return set() def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData): pass def compare(self, other, selfContainer, otherContainer, tolerance=0.0): """ Compare the metadata for two libraries. Parameters ---------- other: Similar Metadata class as self Metadata to be compared against selfContainer: class otherContainer: class Objects that hold the two metadata instances tolerance: float Acceptable difference between two metadata values Returns ------- equal: bool If the metadata are equal or not. """ equal = True for propName in set(list(self.keys()) + list(other.keys())): selfVal = self[propName] otherVal = other[propName] if not properties.areEqual(selfVal, otherVal, tolerance): runLog.important( "{} and {} {} have different {}:\n{}\n{}".format( selfContainer, otherContainer, self.__class__.__name__, propName, selfVal, otherVal, ) ) equal = False return equal class FileMetadata(_Metadata): """ Metadata description for a file. Attributes ---------- fileNames : list string list of file names """ def __init__(self): _Metadata.__init__(self) self.fileNames = [] def update(self, other): """Update this metadata with metadata from another file.""" _Metadata.update(self, other) self.fileNames += other.fileNames def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData): mergedData.fileNames = self.fileNames + other.fileNames class NuclideXSMetadata(FileMetadata): """Metadata for library files containing nuclide cross sections, e.g. 
``ISOTXS``.""" def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData): skippedKeys = set(["chi", "libraryLabel"]) if self["chi"] is not None or other["chi"] is not None: runLog.warning( "File-wide chi is removed merging libraries {lib1} and {lib2}.\n" "This should not impact the calculation, as the file-wide chi is used as" " the nuclide-specific chi.\n The nuclides in {lib2} may be modified as well.".format( lib1=selfContainer, lib2=otherContainer ) ) mergedData["fileWideChiFlag"] = 0 skippedKeys.add("fileWideChiFlag") mergedData["chi"] = None for nuc in [nn for nn in selfContainer.nuclides + otherContainer.nuclides]: if nuc.isotxsMetadata["fisFlag"] > 0: nuc.isotxsMetadata["chiFlag"] = 1 return skippedKeys def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData): FileMetadata._mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData) mergedData["libraryLabel"] = self["libraryLabel"] or other["libraryLabel"] class RegionXSMetadata(FileMetadata): """Metadata for library files containing region cross sections, e.g. 
``COMPXS``.""" def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData): FileMetadata._mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData) for datum in COMPXS_POWER_CONVERSION_FACTORS: mergedData[datum] = self[datum] + other[datum] mergedData["compFamiliesWithPrecursors"] = ( self["compFamiliesWithPrecursors"] + other["compFamiliesWithPrecursors"] ) mergedData["numFissComps"] = self["numFissComps"] + other["numFissComps"] def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData): return set(["numComps", "compFamiliesWithPrecursors", "numFissComps"] + COMPXS_POWER_CONVERSION_FACTORS) class NuclideMetadata(_Metadata): """Simple dictionary for providing metadata about how to read/write a nuclide to/from a file.""" ================================================ FILE: armi/nuclearDataIO/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/nuclearDataIO/tests/library-file-generation/combine-AA-AB.inp ================================================ $control c_isotxs_conversion = bin2asc / $material t_composition(:,1) = U235_7 "U235AA" 1.00000E-03 873.000 ! Fuel U238_7 "U238AA" 1.00000E-03 873.000 ! Fuel PU2397 "PU39AA" 1.00000E-04 873.000 ! Fuel FE54_7 "FE54AA" 1.00000E-03 743.000 ! Structure FE56_7 "FE56AA" 1.00000E-02 743.000 ! 
Structure FE57_7 "FE57AA" 1.00000E-04 743.000 ! Structure FE58_7 "FE58AA" 1.00000E-05 743.000 ! Structure NA23_7 "NA23AA" 1.00000E-03 738.000 ! Coolant ZR90_7 "ZR90AA" 1.00000E-03 873.000 ! Composite fission product ZR91_7 "ZR91AA" 1.00000E-04 873.000 ! Composite fission product ZR92_7 "ZR92AA" 1.00000E-04 873.000 ! Composite fission product ZR93_7 "ZR93AA" 1.00000E-15 873.000 ! Fission product ZR94_7 "ZR94AA" 1.00000E-04 873.000 ! Composite fission product ZR95_7 "ZR95AA" 1.00000E-15 873.000 ! Fission product ZR96_7 "ZR96AA" 1.00000E-05 873.000 ! Composite fission product XE1287 "XE28AA" 1.00000E-15 873.000 ! Fission product XE1297 "XE29AA" 1.00000E-15 873.000 ! Fission product XE1307 "XE30AA" 1.00000E-15 873.000 ! Fission product XE1317 "XE31AA" 1.00000E-15 873.000 ! Fission product XE1327 "XE32AA" 1.00000E-15 873.000 ! Fission product XE1337 "XE33AA" 1.00000E-15 873.000 ! Fission product XE1347 "XE34AA" 1.00000E-15 873.000 ! Fission product XE1357 "XE35AA" 1.00000E-15 873.000 ! Fission product XE1367 "XE36AA" 1.00000E-15 873.000 ! Fission product FP40AA FP40AA 1.0 873.0 U235_7 "U235AB" 1.10000E-03 873.000 ! Fuel U238_7 "U238AB" 1.10000E-03 873.000 ! Fuel PU2397 "PU39AB" 1.10000E-04 873.000 ! Fuel FE54_7 "FE54AB" 1.10000E-03 743.000 ! Structure FE56_7 "FE56AB" 1.10000E-02 743.000 ! Structure FE57_7 "FE57AB" 1.10000E-04 743.000 ! Structure FE58_7 "FE58AB" 1.10000E-05 743.000 ! Structure NA23_7 "NA23AB" 1.10000E-03 738.000 ! Coolant ZR90_7 "ZR90AB" 1.10000E-03 873.000 ! Composite fission product ZR91_7 "ZR91AB" 1.10000E-04 873.000 ! Composite fission product ZR92_7 "ZR92AB" 1.10000E-04 873.000 ! Composite fission product ZR93_7 "ZR93AB" 1.10000E-15 873.000 ! Fission product ZR94_7 "ZR94AB" 1.10000E-04 873.000 ! Composite fission product ZR95_7 "ZR95AB" 1.10000E-15 873.000 ! Fission product ZR96_7 "ZR96AB" 1.10000E-05 873.000 ! Composite fission product XE1287 "XE28AB" 1.10000E-15 873.000 ! Fission product XE1297 "XE29AB" 1.10000E-15 873.000 ! 
Fission product XE1307 "XE30AB" 1.10000E-15 873.000 ! Fission product XE1317 "XE31AB" 1.10000E-15 873.000 ! Fission product XE1327 "XE32AB" 1.10000E-15 873.000 ! Fission product XE1337 "XE33AB" 1.10000E-15 873.000 ! Fission product XE1347 "XE34AB" 1.10000E-15 873.000 ! Fission product XE1357 "XE35AB" 1.10000E-15 873.000 ! Fission product XE1367 "XE36AB" 1.10000E-15 873.000 ! Fission product FP40AB FP40AB 1.0 873.0 / $output c_isotxs_file = "../mc2v3-AA.isotxs" "../mc2v3-AB.isotxs" / ================================================ FILE: armi/nuclearDataIO/tests/library-file-generation/combine-and-lump-AA-AB.inp ================================================ $control c_isotxs_conversion = bin2asc / $material t_composition(:,1) = U235_7 "U235AA" 1.00000E-03 873.000 ! Fuel U238_7 "U238AA" 1.00000E-03 873.000 ! Fuel PU2397 "PU39AA" 1.00000E-04 873.000 ! Fuel FE54_7 "FE54AA" 1.00000E-03 743.000 ! Structure FE56_7 "FE56AA" 1.00000E-02 743.000 ! Structure FE57_7 "FE57AA" 1.00000E-04 743.000 ! Structure FE58_7 "FE58AA" 1.00000E-05 743.000 ! Structure NA23_7 "NA23AA" 1.00000E-03 738.000 ! Coolant ZR90_7 "ZR90AA" 1.00000E-03 873.000 ! Composite fission product ZR91_7 "ZR91AA" 1.00000E-04 873.000 ! Composite fission product ZR92_7 "ZR92AA" 1.00000E-04 873.000 ! Composite fission product ZR94_7 "ZR94AA" 1.00000E-04 873.000 ! Composite fission product ZR96_7 "ZR96AA" 1.00000E-05 873.000 ! Composite fission product FP40AA FP40AA 11.0 873.0 U235_7 "U235AB" 1.10000E-03 873.000 ! Fuel U238_7 "U238AB" 1.10000E-03 873.000 ! Fuel PU2397 "PU39AB" 1.10000E-04 873.000 ! Fuel FE54_7 "FE54AB" 1.10000E-03 743.000 ! Structure FE56_7 "FE56AB" 1.10000E-02 743.000 ! Structure FE57_7 "FE57AB" 1.10000E-04 743.000 ! Structure FE58_7 "FE58AB" 1.10000E-05 743.000 ! Structure NA23_7 "NA23AB" 1.10000E-03 738.000 ! Coolant ZR90_7 "ZR90AB" 1.10000E-03 873.000 ! Composite fission product ZR91_7 "ZR91AB" 1.10000E-04 873.000 ! Composite fission product ZR92_7 "ZR92AB" 1.10000E-04 873.000 ! 
Composite fission product ZR94_7 "ZR94AB" 1.10000E-04 873.000 ! Composite fission product ZR96_7 "ZR96AB" 1.10000E-05 873.000 ! Composite fission product FP40AB FP40AB 1.0 873.0 / $output c_isotxs_file = "../mc2v3-AA.isotxs" "../mc2v3-AB.isotxs" c_lump_name( 1) = FP35AA t_lump_isotope(:, 1) = ZR90_7 1.00000E-03 ZR91_7 1.00000E-02 ZR92_7 1.00000E-02 ZR94_7 1.00000E-02 ZR96_7 1.00000E-02 XE1287 1.00000E-05 XE1297 1.00000E-07 XE1307 1.00000E-04 XE1317 1.00000E-02 XE1327 1.00000E-02 XE1347 1.00000E-02 XE1367 1.00000E-02 ZR93_7 1.00000E-02 ZR95_7 1.00000E-03 XE1357 1.00000E-05 XE1337 1.00000E-04 c_lump_name( 2) = FP38AA t_lump_isotope(:, 2) = ZR90_7 1.00000E-03 ZR91_7 1.00000E-02 ZR92_7 1.00000E-02 ZR94_7 1.00000E-02 ZR96_7 1.00000E-02 XE1287 1.00000E-05 XE1297 1.00000E-07 XE1307 1.00000E-05 XE1317 1.00000E-02 XE1327 1.00000E-02 XE1347 1.00000E-02 XE1367 1.00000E-02 ZR93_7 1.00000E-02 ZR95_7 1.00000E-03 XE1357 1.00000E-05 XE1337 1.00000E-04 c_lump_name( 3) = FP39AA t_lump_isotope(:, 3) = ZR90_7 1.00000E-04 ZR91_7 1.00000E-02 ZR92_7 1.00000E-02 ZR94_7 1.00000E-02 ZR96_7 1.00000E-02 XE1287 1.00000E-04 XE1297 1.00000E-07 XE1307 1.00000E-04 XE1317 1.00000E-02 XE1327 1.00000E-02 XE1347 1.00000E-02 XE1367 1.00000E-02 ZR93_7 1.00000E-02 ZR95_7 1.00000E-03 XE1357 1.00000E-05 XE1337 1.00000E-04 c_lump_name( 4) = FP40AA t_lump_isotope(:, 4) = ZR90_7 1.00000E-04 ZR91_7 1.00000E-02 ZR92_7 1.00000E-02 ZR94_7 1.00000E-02 ZR96_7 1.00000E-02 XE1287 1.00000E-05 XE1297 1.00000E-07 XE1307 1.00000E-04 XE1317 1.00000E-02 XE1327 1.00000E-02 XE1347 1.00000E-02 XE1367 1.00000E-02 ZR93_7 1.00000E-02 ZR95_7 1.00000E-03 XE1357 1.00000E-05 XE1337 1.00000E-04 c_lump_name( 5) = FP41AA t_lump_isotope(:, 5) = ZR90_7 1.00000E-04 ZR91_7 1.00000E-02 ZR92_7 1.00000E-02 ZR94_7 1.00000E-02 ZR96_7 1.00000E-02 XE1287 1.00000E-05 XE1297 1.00000E-07 XE1307 1.00000E-04 XE1317 1.00000E-02 XE1327 1.00000E-02 XE1347 1.00000E-02 XE1367 1.00000E-02 ZR93_7 1.00000E-02 ZR95_7 1.00000E-03 XE1357 1.00000E-05 XE1337 
1.00000E-04 / ================================================ FILE: armi/nuclearDataIO/tests/library-file-generation/mc2v3-AA.inp ================================================ $control c_group_structure = ANL33 i_number_region = 1 l_external_inelasticpn = F c_geometry_type = mixture l_buckling_search = T r_eps_buckling = 0.00001 l_gamma = T / $library c_mcclibdir ="\\path\to\mc2\3.2.2\libraries\endfb-vii.0\lib.mcc.e70" c_gammalibdir = "\\path\to\mc2\3.2.2\libraries\endfb-vii.0\lib.gamma.e70" / $material t_composition(:,1) = U235_7 "U235AA" 1.00000E-03 873.000 ! Fuel U238_7 "U238AA" 1.00000E-03 873.000 ! Fuel PU2397 "PU39AA" 1.00000E-04 873.000 ! Fuel FE54_7 "FE54AA" 1.00000E-03 743.000 ! Structure FE56_7 "FE56AA" 1.00000E-02 743.000 ! Structure FE57_7 "FE57AA" 1.00000E-04 743.000 ! Structure FE58_7 "FE58AA" 1.00000E-05 743.000 ! Structure NA23_7 "NA23AA" 1.00000E-03 738.000 ! Coolant ZR90_7 "ZR90AA" 1.00000E-03 873.000 ! Composite fission product ZR91_7 "ZR91AA" 1.00000E-04 873.000 ! Composite fission product ZR92_7 "ZR92AA" 1.00000E-04 873.000 ! Composite fission product ZR93_7 "ZR93AA" 1.00000E-15 873.000 ! Fission product ZR94_7 "ZR94AA" 1.00000E-04 873.000 ! Composite fission product ZR95_7 "ZR95AA" 1.00000E-15 873.000 ! Fission product ZR96_7 "ZR96AA" 1.00000E-05 873.000 ! Composite fission product XE1287 "XE28AA" 1.00000E-15 873.000 ! Fission product XE1297 "XE29AA" 1.00000E-15 873.000 ! Fission product XE1307 "XE30AA" 1.00000E-15 873.000 ! Fission product XE1317 "XE31AA" 1.00000E-15 873.000 ! Fission product XE1327 "XE32AA" 1.00000E-15 873.000 ! Fission product XE1337 "XE33AA" 1.00000E-15 873.000 ! Fission product XE1347 "XE34AA" 1.00000E-15 873.000 ! Fission product XE1357 "XE35AA" 1.00000E-15 873.000 ! Fission product XE1367 "XE36AA" 1.00000E-15 873.000 ! 
Fission product / $output l_edit_flux = T c_check_memory = "long" c_lump_name( 1) = FP40AA t_lump_isotope(:, 1) = ZR90_7 0.090 ZR91_7 0.091 ZR92_7 0.092 ZR93_7 0.093 ZR94_7 0.094 ZR95_7 0.095 ZR96_7 0.096 XE1287 0.128 XE1297 0.129 XE1307 0.130 XE1317 0.131 XE1327 0.132 XE1337 0.133 XE1347 0.134 XE1357 0.135 XE1367 0.136 / ================================================ FILE: armi/nuclearDataIO/tests/library-file-generation/mc2v3-AB.inp ================================================ $control c_group_structure = ANL33 i_number_region = 1 l_external_inelasticpn = F c_geometry_type = mixture l_buckling_search = T r_eps_buckling = 0.00001 l_gamma = T / $library c_mcclibdir ="\\path\to\mc2\3.2.2\libraries\endfb-vii.0\lib.mcc.e70" c_gammalibdir = "\\path\to\mc2\3.2.2\libraries\endfb-vii.0\lib.gamma.e70" / $material t_composition(:,1) = U235_7 "U235AB" 1.10000E-03 873.000 ! Fuel U238_7 "U238AB" 1.10000E-03 873.000 ! Fuel PU2397 "PU39AB" 1.10000E-04 873.000 ! Fuel FE54_7 "FE54AB" 1.10000E-03 743.000 ! Structure FE56_7 "FE56AB" 1.10000E-02 743.000 ! Structure FE57_7 "FE57AB" 1.10000E-04 743.000 ! Structure FE58_7 "FE58AB" 1.10000E-05 743.000 ! Structure NA23_7 "NA23AB" 1.10000E-03 738.000 ! Coolant ZR90_7 "ZR90AB" 1.10000E-03 873.000 ! Composite fission product ZR91_7 "ZR91AB" 1.10000E-04 873.000 ! Composite fission product ZR92_7 "ZR92AB" 1.10000E-04 873.000 ! Composite fission product ZR93_7 "ZR93AB" 1.10000E-15 873.000 ! Fission product ZR94_7 "ZR94AB" 1.10000E-04 873.000 ! Composite fission product ZR95_7 "ZR95AB" 1.10000E-15 873.000 ! Fission product ZR96_7 "ZR96AB" 1.10000E-05 873.000 ! Composite fission product XE1287 "XE28AB" 1.10000E-15 873.000 ! Fission product XE1297 "XE29AB" 1.10000E-15 873.000 ! Fission product XE1307 "XE30AB" 1.10000E-15 873.000 ! Fission product XE1317 "XE31AB" 1.10000E-15 873.000 ! Fission product XE1327 "XE32AB" 1.10000E-15 873.000 ! Fission product XE1337 "XE33AB" 1.10000E-15 873.000 ! 
Fission product XE1347 "XE34AB" 1.10000E-15 873.000 ! Fission product XE1357 "XE35AB" 1.10000E-15 873.000 ! Fission product XE1367 "XE36AB" 1.10000E-15 873.000 ! Fission product / $output c_check_memory = "long" c_lump_name( 1) = FP40AB t_lump_isotope(:, 1) = ZR90_7 0.090 ZR91_7 0.091 ZR92_7 0.092 ZR93_7 0.093 ZR94_7 0.094 ZR95_7 0.095 ZR96_7 0.096 XE1287 0.128 XE1297 0.129 XE1307 0.130 XE1317 0.131 XE1327 0.132 XE1337 0.133 XE1347 0.134 XE1357 0.135 XE1367 0.136 / ================================================ FILE: armi/nuclearDataIO/tests/simple_hexz.inp ================================================ BLOCK=STP021,3 UNFORM=A.DIF3D 01 3D Hex-Z to generate NHFLUX file 02 10000 1800000 03 0 0 04 1 0 0 00 110 10 100 1 05 1.0E-7 1.0E-5 1.0E-5 06 1.0 0.001 0.04 1.0 UNFORM=A.NIP3 01 3D Hex-Z core 02 0 1 03 120 $ full core in plane 04 4 4 4 4 4 4 09 Z 1 12.0 09 Z 4 60.0 09 Z 1 72.0 14 M1 I1 1.0 14 M4 I4 1.0 15 M1 IC 15 M4 AB 29 12.0 30 AB 1 0 0 0.0 72.0 30 IC 1 0 0 12.0 60.0 30 AB 2 0 0 0.0 72.0 30 IC 2 0 0 12.0 60.0 30 AB 3 0 0 0.0 72.0 30 IC 3 0 0 12.0 60.0 NOSORT=A.ISO 0V ISOTXS *GFK 3D BNCH * 1 1D 4 6 0 3 0 1 1 1 2D *NA COOLED FBR BENCHMARK FOUR GROUP CROSS SECTIONS * * * I1 I2 I3 I4 I5 I6 0.768 0.232 0.0 0.0 1.72336E+09 4.02463E+08 7.97003E+07 3.15946E+07 1.05 E+07 8.00 E+05 10000. 1000. 0.0 0 3 6 9 12 15 4D I1 GFK 1 100. 0.0 0.0 0.0 0.0 0.0 0 0 1 0 0 0 0 0 1 1 0 200 1 1 2 3 4 1 1 1 1 5D .11587 .21220 .46137 .34571 .11587 .21220 .46137 .34571 .69059 E-03 1.83076E-03 .92948 E-02 .17305 E-01 .39123 E-02 .18286 E-02 .36334 E-02 .92415 E-02 3.03607 2.91217 2.88187 2.87951 7D 0.0 0.0 .023597 0.0 .16153 E-02 .40791 E-05 0.0 .46838 E-02 .42309 E-07 .44493 E-07 4D I2 GFK 1 100. 
0.0 0.0 0.0 0.0 0.0 0 0 1 0 0 0 0 0 1 1 0 200 1 1 2 3 4 1 1 1 1 5D .11588 .21213 .46770 .35349 .11588 .21213 .46770 .35349 .66221 E-031.83956 E-03 1.00354E-02 .20476 E-01 .48531 E-02 .26377 E-02 .51332 E-02 .13238 E-01 3.07906 2.91493 2.88495 2.88254 7D 0.0 0.0 .023262 0.0 .15718 E-02 .46451 E-05 0.0 .43414 E-02 .40724 E-07 .49968 E-07 4D I3 GFK 1 100. 0.0 0.0 0.0 0.0 0.0 0 0 1 0 0 0 0 0 1 1 0 200 1 1 2 3 4 1 1 1 1 5D .14584 .28443 .52703 .40732 .14584 .28443 .52703 .40732 1.11527E-03 3.06346E-03 1.00212E-02 .129995E-01 .27688 E-02 .44347 E-04 .12274 E-03 .34952 E-03 2.796410 2.44098 2.42317 2.42295 7D 0.0 0.0 .032071 0.0 .27776 E-02 .38880 E-05 0.0 .58971 E-02 .90018 E-07 .45039 E-07 4D I4 GFK 1 100. 0.0 0.0 0.0 0.0 0.0 0 0 1 0 0 0 0 0 1 1 0 200 1 1 2 3 4 1 1 1 1 5D .12270 .23133 .46274 .33749 .12270 .23133 .46274 .33749 8.2278 E-04 2.17087E-03 7.64083E-03 .97185 E-02 .19453 E-02 .31065 E-04 .87566 E-04 .23769 E-03 2.79026 2.441880 2.42309 2.42299 7D 0.0 0.0 .026322 0.0 .22889 E-02 .28907 E-05 0.0 .53536 E-02 .62133 E-07 .33248 E-07 4D I5 GFK 1 100. 0.0 0.0 0.0 0.0 0.0 0 0 0 0 0 0 0 0 1 1 0 200 1 1 2 3 4 1 1 1 1 5D .13317 .25355 .58044 .54168 .13317 .25355 .58044 .54168 .186696E-02 .126433E-01 .634405E-01 .16868 7D 0.0 0.0 .022946 0.0 .37687 E-02 .10320 E-05 0.0 .86815 E-02 .70361 E-11 .10489 E-07 4D I6 GFK 1 100. 0.0 0.0 0.0 0.0 0.0 0 0 0 0 0 0 0 0 1 1 0 200 1 1 2 3 4 1 1 1 1 5D .072206 .11487 .32642 .19272 .072206 .11487 .32642 .19272 .216305E-03 .16880 E-03 .11468 E-02 .78660 E-03 7D 0.0 0.0 .012942 0.0 .12871 E-02 .68780 E-06 0.0 .34533 E-02 .43633 E-11 .69903 E-08 ================================================ FILE: armi/nuclearDataIO/tests/test_xsCollections.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that tests methods within xsCollections."""

import os
import unittest

from armi import settings
from armi.nuclearDataIO import isotxs, xsCollections
from armi.reactor.blocks import HexBlock
from armi.tests import ISOAA_PATH
from armi.utils.directoryChangers import TemporaryDirectoryChanger
from armi.utils.plotting import plotNucXs


class TestXsCollections(unittest.TestCase):
    """Tests of cross-section collections built from the shared ISOTXS test library."""

    @classmethod
    def setUpClass(cls):
        # read the binary ISOTXS fixture once for the whole class
        cls.microLib = isotxs.readBinary(ISOAA_PATH)

    def setUp(self):
        self.mc = xsCollections.MacroscopicCrossSectionCreator(minimumNuclideDensity=1e-13)
        self.block = MockBlock()
        self.block.setNumberDensity("U235", 0.02)
        self.block.setNumberDensity("FE", 0.01)

    def test_genTotScatteringMatrix(self):
        """Generates the total scattering matrix by summing elastic, inelastic, and n2n scattering matrices."""
        nuc = self.microLib.nuclides[0]
        totalScatter = nuc.micros.getTotalScatterMatrix()
        # n2n carries a factor of 2.0 in the total-scatter sum
        self.assertAlmostEqual(
            totalScatter[0, 0],
            (nuc.micros.elasticScatter[0, 0] + nuc.micros.inelasticScatter[0, 0] + 2.0 * nuc.micros.n2nScatter[0, 0]),
        )

    def test_totalScatteringMatrixWithMissingData(self):
        """
        Generates the total scattering matrix by summing elastic and n2n scattering matrices.

        Notes
        -----
        This tests that the total scattering matrix can be produced when the inelastic scattering
        matrix is not defined.
        """
        nuc = self.microLib.nuclides[0]
        # NOTE(review): this mutates the class-level microLib; later tests must not
        # depend on inelasticScatter of nuclides[0]
        nuc.micros.inelasticScatter = None
        totalScatter = nuc.micros.getTotalScatterMatrix()
        self.assertAlmostEqual(
            totalScatter[0, 0],
            (nuc.micros.elasticScatter[0, 0] + 2.0 * nuc.micros.n2nScatter[0, 0]),
        )

    def test_plotNucXs(self):
        """Testing this plotting method here because we need a XS library to run the test."""
        fName = "test_plotNucXs.png"
        with TemporaryDirectoryChanger():
            plotNucXs(self.microLib, "U235AA", "fission", fName=fName)
            self.assertTrue(os.path.exists(fName))

    def test_createMacrosFromMicros(self):
        """Test calculating macroscopic cross sections from microscopic cross sections.

        .. test:: Compute macroscopic cross sections from microscopic cross sections and number densities.
            :id: T_ARMI_NUCDATA_MACRO
            :tests: R_ARMI_NUCDATA_MACRO
        """
        self.assertEqual(self.mc.minimumNuclideDensity, 1e-13)
        self.mc.createMacrosFromMicros(self.microLib, self.block)
        totalMacroFissionXs = 0.0
        totalMacroAbsXs = 0.0
        # re-accumulate the macro XS by hand: sum over groups of micro XS * number density
        for nuc, density in self.mc.densities.items():
            nuclideXS = self.mc.microLibrary.getNuclide(nuc, "AA")
            for microXs in nuclideXS.micros.fission:
                totalMacroFissionXs += microXs * density
            for microXsName in xsCollections.ABSORPTION_XS:
                for microXs in getattr(nuclideXS.micros, microXsName):
                    totalMacroAbsXs += microXs * density
        self.assertAlmostEqual(sum(self.mc.macros.fission), totalMacroFissionXs)
        self.assertAlmostEqual(sum(self.mc.macros.absorption), totalMacroAbsXs)

    def test_collapseCrossSection(self):
        """
        Tests cross section collapsing.

        Notes
        -----
        The expected 1 group cross section was generated by running the collapse cross section
        method. This tests that this method has not been modified to produce a different result.
        """
        # regression value; see Notes above
        expected1gXs = 2.35725262208
        micros = self.microLib["U235AA"].micros
        flux = list(reversed(range(33)))
        self.assertAlmostEqual(micros.collapseCrossSection(micros.nGamma, flux), expected1gXs)


class MockReactor:
    """Minimal reactor stand-in providing only blueprints and spatialGrid."""

    def __init__(self):
        self.blueprints = MockBlueprints()
        self.spatialGrid = None


class MockBlueprints:
    # this is only needed for allNuclidesInProblem and attributes were acting funky, so this was made.
    def __getattribute__(self, *args, **kwargs):
        # every attribute access returns the same fixed nuclide-name list
        return ["U235", "U235", "FE", "NA23"]


class MockBlock(HexBlock):
    """HexBlock stand-in whose composition is backed by a simple {nucName: density} dict."""

    def __init__(self, name=None, cs=None):
        # density must exist before HexBlock.__init__ runs
        self.density = {}
        HexBlock.__init__(self, name or "MockBlock", cs or settings.Settings())
        self.r = MockReactor()

    @property
    def r(self):
        return self._r

    @r.setter
    def r(self, r):
        self._r = r

    def getVolume(self, *args, **kwargs):
        """Return the volume of a block."""
        return 1.0

    def getNuclideNumberDensities(self, nucNames):
        """Return a list of number densities in atoms/barn-cm for the nuc names requested."""
        return [self.density.get(nucName, 0.0) for nucName in nucNames]

    def _getNdensHelper(self):
        return {nucName: density for nucName, density in self.density.items()}

    def setNumberDensity(self, key, val, *args, **kwargs):
        """Set the number density of this nuclide to this value."""
        self.density[key] = val

    def getNuclides(self):
        """Determine which nuclides are present in this armi block."""
        return self.density.keys()


================================================
FILE: armi/nuclearDataIO/tests/test_xsLibraries.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for xsLibraries.IsotxsLibrary.""" import copy import filecmp import os import pickle import traceback import unittest from time import sleep import numpy as np from armi.nucDirectory.nuclideBases import NuclideBases from armi.nuclearDataIO import xsLibraries from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx from armi.tests import mockRunLogs from armi.utils import properties from armi.utils.directoryChangers import TemporaryDirectoryChanger # test input pathing THIS_DIR = os.path.dirname(__file__) RUN_DIR = os.path.join(THIS_DIR, "library-file-generation") FIXTURE_DIR = os.path.join(THIS_DIR, "fixtures") # specific tests files GAMISO_AA = os.path.join(FIXTURE_DIR, "AA.gamiso") GAMISO_AA_AB = os.path.join(FIXTURE_DIR, "combined-AA-AB.gamiso") GAMISO_AB = os.path.join(FIXTURE_DIR, "AB.gamiso") GAMISO_LUMPED = os.path.join(FIXTURE_DIR, "combined-and-lumped-AA-AB.gamiso") ISOTXS_AA = os.path.join(FIXTURE_DIR, "ISOAA") ISOTXS_AA_AB = os.path.join(FIXTURE_DIR, "combined-AA-AB.isotxs") ISOTXS_AB = os.path.join(FIXTURE_DIR, "ISOAB") ISOTXS_LUMPED = os.path.join(FIXTURE_DIR, "combined-and-lumped-AA-AB.isotxs") PMATRX_AA = os.path.join(FIXTURE_DIR, "AA.pmatrx") PMATRX_AA_AB = os.path.join(FIXTURE_DIR, "combined-AA-AB.pmatrx") PMATRX_AB = os.path.join(FIXTURE_DIR, "AB.pmatrx") PMATRX_LUMPED = os.path.join(FIXTURE_DIR, "combined-and-lumped-AA-AB.pmatrx") UFG_FLUX_EDIT = os.path.join(FIXTURE_DIR, "mc2v3-AA.flux_ufg") # CCCC fixtures are less fancy than these merging ones. 
# Directory holding the binary CCCC-format test fixtures shipped with the isotxs package.
FIXTURE_DIR_CCCC = os.path.join(os.path.dirname(isotxs.__file__), "tests", "fixtures")
# MC2-v3 delayed-neutron (DLAYXS) fixture file.
DLAYXS_MCC3 = os.path.join(FIXTURE_DIR_CCCC, "mc2v3.dlayxs")


class TempFileMixin:
    """A helpful test tooling; creating temporary directories and nucdata test file path."""

    def setUp(self):
        # Enter a temporary working directory for the duration of each test.
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    @property
    def testFileName(self):
        # Unique per-class, per-test output path inside the temporary directory.
        return os.path.join(
            self.td.destination,
            f"{self.__class__.__name__}-{self._testMethodName}.nucdata",
        )


class TestXSLibrary(TempFileMixin, unittest.TestCase):
    """Tests for reading, pickling, comparing, and merging ISOTXS/GAMISO/PMATRX libraries."""

    @classmethod
    def setUpClass(cls):
        cls.isotxsAA = isotxs.readBinary(ISOTXS_AA)
        cls.gamisoAA = gamiso.readBinary(GAMISO_AA)
        cls.pmatrxAA = pmatrx.readBinary(PMATRX_AA)
        cls.xsLib = xsLibraries.IsotxsLibrary()
        # If building the combined library fails, stash the traceback so tests that
        # rely on cls.xsLib can report a useful failure message instead of an error here.
        cls.xsLibGenerationErrorStack = None
        try:
            cls.xsLib.merge(copy.deepcopy(cls.isotxsAA))
            cls.xsLib.merge(copy.deepcopy(cls.gamisoAA))
            cls.xsLib.merge(copy.deepcopy(cls.pmatrxAA))
        except Exception:
            cls.xsLibGenerationErrorStack = traceback.format_exc()

    def test_canPickleAndUnpickleISOTXS(self):
        pikAA = pickle.loads(pickle.dumps(self.isotxsAA))
        self.assertTrue(xsLibraries.compare(pikAA, self.isotxsAA))

    def test_canPickleAndUnpickleGAMISO(self):
        pikAA = pickle.loads(pickle.dumps(self.gamisoAA))
        self.assertTrue(xsLibraries.compare(pikAA, self.gamisoAA))

    def test_canPickleAndUnpicklePMATRX(self):
        pikAA = pickle.loads(pickle.dumps(self.pmatrxAA))
        self.assertTrue(xsLibraries.compare(pikAA, self.pmatrxAA))

    def test_compareWorks(self):
        self.assertTrue(xsLibraries.compare(self.isotxsAA, self.isotxsAA))
        self.assertTrue(xsLibraries.compare(self.pmatrxAA, self.pmatrxAA))
        # Removing one nuclide must make the libraries compare unequal.
        aa = isotxs.readBinary(ISOTXS_AA)
        del aa[aa.nuclideLabels[0]]
        self.assertFalse(xsLibraries.compare(aa, self.isotxsAA))

    def test_compareComponentsOfXSLibrary(self):
        """Compare different components of a XS library."""
        # NOTE(review): this duplicates test_compareWorks exactly — candidate for removal.
        self.assertTrue(xsLibraries.compare(self.isotxsAA, self.isotxsAA))
        self.assertTrue(xsLibraries.compare(self.pmatrxAA, self.pmatrxAA))
        aa = isotxs.readBinary(ISOTXS_AA)
        del aa[aa.nuclideLabels[0]]
        self.assertFalse(xsLibraries.compare(aa, self.isotxsAA))

    def test_mergeFailsWithNonIsotxsFiles(self):
        # A file whose name matches the ISO* pattern but has garbage contents must
        # cause the merge to fail and be mentioned in the log.
        dummyFileName = "ISOSOMEFILE"
        with open(dummyFileName, "w") as someFile:
            someFile.write("hi")
        try:
            with mockRunLogs.BufferLog() as log:
                lib = xsLibraries.IsotxsLibrary()
                with self.assertRaises(OSError):
                    xsLibraries.mergeXSLibrariesInWorkingDirectory(lib, "ISOTXS", "")
                self.assertIn(dummyFileName, log.getStdout())
        finally:
            os.remove(dummyFileName)

        # A file name that breaks the ISO* regular-expression search should be
        # skipped with a log message rather than crashing the merge.
        with TemporaryDirectoryChanger():
            dummyFileName = "ISO[]"
            with open(dummyFileName, "w") as file:
                file.write(
                    "This is a file that starts with the letters 'ISO' but will break the regular expression search."
                )
            try:
                with mockRunLogs.BufferLog() as log:
                    lib = xsLibraries.IsotxsLibrary()
                    xsLibraries.mergeXSLibrariesInWorkingDirectory(lib)
                    self.assertIn(f"{dummyFileName} in the merging of ISOXX files", log.getStdout())
            finally:
                # NOTE(review): this `finally: pass` is a no-op; the TemporaryDirectoryChanger
                # handles cleanup, so the try/finally could be dropped.
                pass

    def _xsLibraryAttributeHelper(
        self,
        lib,
        neutronEnergyLength,
        neutronVelLength,
        gammaEnergyLength,
        neutronDoseLength,
        gammaDoseLength,
    ):
        # For each attribute: a positive expected length means the attribute must exist
        # with that length; zero means accessing it must raise ImmutablePropertyError.
        for attrName, listLength in [
            ("neutronEnergyUpperBounds", neutronEnergyLength),
            ("neutronVelocity", neutronVelLength),
            ("gammaEnergyUpperBounds", gammaEnergyLength),
            ("neutronDoseConversionFactors", neutronDoseLength),
            ("gammaDoseConversionFactors", gammaDoseLength),
        ]:
            if listLength > 0:
                self.assertEqual(listLength, len(getattr(lib, attrName)))
            else:
                with self.assertRaises(properties.ImmutablePropertyError):
                    print(f"Getting the value {attrName}")
                    print(getattr(lib, attrName))

    def test_isotxsLibraryAttributes(self):
        self._xsLibraryAttributeHelper(
            self.isotxsAA,
            neutronEnergyLength=33,
            neutronVelLength=33,
            gammaEnergyLength=0,
            neutronDoseLength=0,
            gammaDoseLength=0,
        )

    def test_gamisoLibraryAttributes(self):
        self._xsLibraryAttributeHelper(
            self.gamisoAA,
            neutronEnergyLength=0,
            neutronVelLength=0,
            gammaEnergyLength=21,
            neutronDoseLength=0,
            gammaDoseLength=0,
        )

    def test_pmatrxLibraryAttributes(self):
        self._xsLibraryAttributeHelper(
            self.pmatrxAA,
            neutronEnergyLength=33,
            neutronVelLength=0,
            gammaEnergyLength=21,
            neutronDoseLength=0,
            gammaDoseLength=0,
        )

    def test_mergeXSLibrariesWithDifferentDataWorks(self):
        if self.xsLibGenerationErrorStack is not None:
            print(self.xsLibGenerationErrorStack)
            raise Exception("see stdout for stack trace")
        # check to make sure the labels overlap, or are actually the same
        labels = set(self.xsLib.nuclideLabels)
        self.assertEqual(labels, set(self.isotxsAA.nuclideLabels))
        self.assertEqual(labels, set(self.gamisoAA.nuclideLabels))
        self.assertEqual(labels, set(self.pmatrxAA.nuclideLabels))
        # the whole thing is different from the sum of its components
        self.assertFalse(xsLibraries.compare(self.xsLib, self.isotxsAA))
        self.assertFalse(xsLibraries.compare(self.xsLib, self.gamisoAA))
        self.assertFalse(xsLibraries.compare(self.xsLib, self.pmatrxAA))
        # individual components are the same
        self.assertTrue(isotxs.compare(self.xsLib, self.isotxsAA))
        self.assertTrue(gamiso.compare(self.xsLib, self.gamisoAA))
        self.assertTrue(pmatrx.compare(self.xsLib, self.pmatrxAA))

    def test_canWriteIsotxsFromCombinedXSLibrary(self):
        self._canWritefromCombined(isotxs, ISOTXS_AA)

    def test_canWriteGamisoFromCombinedXSLibrary(self):
        self._canWritefromCombined(gamiso, GAMISO_AA)

    def test_canWritePmatrxFromCombinedXSLibrary(self):
        self._canWritefromCombined(pmatrx, PMATRX_AA)

    def _canWritefromCombined(self, writer, refFile):
        """Write one format back out of the merged library and diff against the reference file."""
        if self.xsLibGenerationErrorStack is not None:
            print(self.xsLibGenerationErrorStack)
            raise Exception("See stdout for stack trace")
        # check to make sure the labels overlap, or are actually the same
        writer.writeBinary(self.xsLib, self.testFileName)
        self.assertTrue(filecmp.cmp(refFile, self.testFileName))


class TestGetISOTXSFilesWorkDir(unittest.TestCase):
    """Tests for selecting which ISO* files in a directory should be merged."""

    def test_getISOTXSFilesWithoutLibrarySuffix(self):
        shouldBeThere = ["ISOAA", "ISOBA", os.path.join("file-path", "ISOCA")]
        shouldNotBeThere = [
            "ISOBA-n2",
            "ISOTXS",
            "ISOTXS-c2",
            "dummyISOTXS",
            "ISOTXS.BCD",
            "ISOAA.BCD",
        ]
        filesInDirectory = shouldBeThere + shouldNotBeThere
        toMerge = xsLibraries.getISOTXSLibrariesToMerge("", filesInDirectory)
        self.assert_contains_only(toMerge, shouldBeThere, shouldNotBeThere)

    def test_getISOTXSFilesWithLibrarySuffix(self):
        shouldBeThere = [
            "ISOAA-n23",
            "ISOAAF-n23",
            "ISOBA-n23",
            "ISODA",
            os.path.join("file-path", "ISOCA-n23"),
        ]
        shouldNotBeThere = [
            "ISOAA",
            "ISOAA-n24",
            "ISOBA-ISO",
            "ISOBA-n2",
            "ISOBA",
            "ISOTXS",
            "ISOTXS-c2",
            "dummyISOTXS",
            "ISOTXS.BCD",
            "ISOAA.BCD",
            "ISOCA-doppler",
            "ISOSA-void",
            os.path.join("file-path", "ISOCA"),
        ]
        filesInDirectory = shouldBeThere + shouldNotBeThere
        toMerge = xsLibraries.getISOTXSLibrariesToMerge("-n23", filesInDirectory)
        self.assert_contains_only(toMerge, shouldBeThere, shouldNotBeThere)

    def assert_contains_only(self, container, shouldBeThere, shouldNotBeThere):
        """
        Utility method for saying what things contain.

        This could just check the contents and length, but the error produced from
        shouldNotBeThere is much nicer.
        """
        container = set(container)
        self.assertEqual(container, set(shouldBeThere))
        self.assertEqual(set(), container & set(shouldNotBeThere))


class AbstractTestXSlibraryMerging(TempFileMixin):
    """
    A shared class that defines tests that should be true for all IsotxsLibrary merging.

    Notes
    -----
    This is a base class; it is not run directly.
    """

    def _readFileAttempts(self, path):
        """Run the file read a few times, because sometimes GitHub CI is flaky with these tests."""
        maxAttempts = 5
        for a in range(maxAttempts):
            try:
                return self.getReadFunc()(path)
            except OSError as e:
                # Re-raise only on the final attempt; otherwise back off and retry.
                if a >= (maxAttempts - 1):
                    raise e
                sleep(1)

    def setUp(self):
        TempFileMixin.setUp(self)
        # Load a library in the ARMI tree. This should be a small library with LFPs, Actinides, structure, and coolant.
        self.libAA = self._readFileAttempts(self.getLibAAPath())
        self.libAB = self._readFileAttempts(self.getLibABPath())
        self.libCombined = self._readFileAttempts(self.getLibAA_ABPath())
        self.libLumped = self._readFileAttempts(self.getLibLumpedPath())
        self.nuclideBases = NuclideBases()

    # Abstract hooks: concrete subclasses supply the format-specific reader/writer
    # and fixture paths.
    def getErrorType(self):
        raise NotImplementedError()

    def getReadFunc(self):
        raise NotImplementedError()

    def getWriteFunc(self):
        raise NotImplementedError()

    def getLibAAPath(self):
        raise NotImplementedError()

    def getLibABPath(self):
        raise NotImplementedError()

    def getLibAA_ABPath(self):
        raise NotImplementedError()

    def getLibLumpedPath(self):
        raise NotImplementedError()

    def test_mergeXSLibSameNucNames(self):
        """Cannot merge XS libraries with the same nuclide names."""
        with self.assertRaises(AttributeError):
            self.libAA.merge(self.libCombined)
        with self.assertRaises(AttributeError):
            self.libAA.merge(self.libAA)
        with self.assertRaises(AttributeError):
            self.libAA.merge(self.libCombined)
        with self.assertRaises(AttributeError):
            self.libCombined.merge(self.libAA)

    def test_mergeXSLibxDiffGroupStructure(self):
        """Cannot merge XS libraries with different group structure."""
        dummyXsLib = xsLibraries.IsotxsLibrary()
        dummyXsLib.neutronEnergyUpperBounds = np.array([1, 2, 3])
        dummyXsLib.gammaEnergyUpperBounds = np.array([1, 2, 3])
        with self.assertRaises(properties.ImmutablePropertyError):
            dummyXsLib.merge(self.libCombined)

    def test_mergeEmptyXSLibWithClones(self):
        """Merge empty XS libraries with clones of others."""
        emptyXSLib = xsLibraries.IsotxsLibrary()
        emptyXSLib.merge(self.libAA)
        self.libAA = None
        self.getWriteFunc()(emptyXSLib, self.testFileName)
        sleep(1)  # presumably guards against slow filesystem flushes on CI — TODO confirm
        self.assertTrue(os.path.exists(self.testFileName))
        self.assertGreater(os.path.getsize(self.testFileName), 0)
        self.assertTrue(filecmp.cmp(self.getLibAAPath(), self.testFileName))

    def test_mergeTwoXSLibFiles(self):
        """Merging AA and AB must reproduce the reference combined library byte-for-byte."""
        emptyXSLib = xsLibraries.IsotxsLibrary()
        emptyXSLib.merge(self.libAA)
        self.libAA = None
        emptyXSLib.merge(self.libAB)
        self.libAB = None
        self.assertEqual(set(self.libCombined.nuclideLabels), set(emptyXSLib.nuclideLabels))
        self.assertTrue(xsLibraries.compare(emptyXSLib, self.libCombined))
        self.getWriteFunc()(emptyXSLib, self.testFileName)
        sleep(1)
        self.assertTrue(os.path.exists(self.testFileName))
        self.assertGreater(os.path.getsize(self.testFileName), 0)
        self.assertTrue(filecmp.cmp(self.getLibAA_ABPath(), self.testFileName))


class TestPmatrxMerge(AbstractTestXSlibraryMerging, unittest.TestCase):
    """PMATRX-format concrete merging tests."""

    def getErrorType(self):
        # NOTE(review): this raises the OSError *class* rather than returning it;
        # the other getters return values — confirm whether `return OSError` was intended.
        raise OSError

    def getReadFunc(self):
        return pmatrx.readBinary

    def getWriteFunc(self):
        return pmatrx.writeBinary

    def getLibAAPath(self):
        return PMATRX_AA

    def getLibABPath(self):
        return PMATRX_AB

    def getLibAA_ABPath(self):
        return PMATRX_AA_AB

    def getLibLumpedPath(self):
        return PMATRX_LUMPED

    def test_cannotMergeXSLibsWithDiffGammaGroups(self):
        """Test that we cannot merge XS Libs with different Gamma Group Structures."""
        dummyXsLib = xsLibraries.IsotxsLibrary()
        dummyXsLib.gammaEnergyUpperBounds = np.array([1, 2, 3])
        with self.assertRaises(properties.ImmutablePropertyError):
            dummyXsLib.merge(self.libCombined)


class TestIsotxsMerge(AbstractTestXSlibraryMerging, unittest.TestCase):
    """ISOTXS-format concrete merging tests."""

    def getErrorType(self):
        # NOTE(review): raises the class instead of returning it — see TestPmatrxMerge.
        raise OSError

    def getReadFunc(self):
        return isotxs.readBinary

    def getWriteFunc(self):
        return isotxs.writeBinary

    def getLibAAPath(self):
        return ISOTXS_AA

    def getLibABPath(self):
        return ISOTXS_AB

    def getLibAA_ABPath(self):
        return ISOTXS_AA_AB

    def getLibLumpedPath(self):
        return ISOTXS_LUMPED

    def test_canRemoveIsotopes(self):
        """Deleting individual isotopes from a merged library matches the lumped reference."""
        emptyXSLib = xsLibraries.IsotxsLibrary()
        emptyXSLib.merge(self.libAA)
        self.libAA = None
        emptyXSLib.merge(self.libAB)
        self.libAB = None
        for nucId in [
            "ZR93_7",
            "ZR95_7",
            "XE1287",
            "XE1297",
            "XE1307",
            "XE1317",
            "XE1327",
            "XE1337",
            "XE1347",
            "XE1357",
            "XE1367",
        ]:
            # Remove the nuclide from both XS-type suffixes ("AA" and "AB").
            nucLabel = self.nuclideBases.byMcc3Id[nucId].label
            del emptyXSLib[nucLabel + "AA"]
            del emptyXSLib[nucLabel + "AB"]
        self.assertEqual(set(self.libLumped.nuclideLabels), set(emptyXSLib.nuclideLabels))
        self.getWriteFunc()(emptyXSLib, self.testFileName)
        self.assertTrue(filecmp.cmp(self.getLibLumpedPath(), self.testFileName))


class TestGamisoMerge(AbstractTestXSlibraryMerging, unittest.TestCase):
    """GAMISO-format concrete merging tests."""

    def getErrorType(self):
        # NOTE(review): raises the class instead of returning it — see TestPmatrxMerge.
        raise OSError

    def getReadFunc(self):
        return gamiso.readBinary

    def getWriteFunc(self):
        return gamiso.writeBinary

    def getLibAAPath(self):
        return GAMISO_AA

    def getLibABPath(self):
        return GAMISO_AB

    def getLibAA_ABPath(self):
        return GAMISO_AA_AB

    def getLibLumpedPath(self):
        return GAMISO_LUMPED

    def test_canRemoveIsotopes(self):
        """Deleting individual isotopes from a merged library matches the lumped reference."""
        emptyXSLib = xsLibraries.IsotxsLibrary()
        emptyXSLib.merge(self.libAA)
        self.libAA = None
        emptyXSLib.merge(self.libAB)
        self.libAB = None
        for nucId in [
            "ZR93_7",
            "ZR95_7",
            "XE1287",
            "XE1297",
            "XE1307",
            "XE1317",
            "XE1327",
            "XE1337",
            "XE1347",
            "XE1357",
            "XE1367",
        ]:
            nucLabel = self.nuclideBases.byMcc3Id[nucId].label
            del emptyXSLib[nucLabel + "AA"]
            del emptyXSLib[nucLabel + "AB"]
        self.assertEqual(set(self.libLumped.nuclideLabels), set(emptyXSLib.nuclideLabels))
        self.getWriteFunc()(emptyXSLib, self.testFileName)
        self.assertTrue(filecmp.cmp(self.getLibLumpedPath(), self.testFileName))


class TestCombinedMerge(unittest.TestCase):
    """Tests merging all three formats (ISOTXS/GAMISO/PMATRX) from the working directory."""

    def setUp(self):
        # Load a library in the ARMI tree. This should be a small library with LFPs, Actinides, structure, and coolant.
self.isotxsAA = isotxs.readBinary(ISOTXS_AA) self.gamisoAA = gamiso.readBinary(GAMISO_AA) self.pmatrxAA = pmatrx.readBinary(PMATRX_AA) self.isotxsAB = isotxs.readBinary(ISOTXS_AB) self.gamisoAB = gamiso.readBinary(GAMISO_AB) self.pmatrxAB = pmatrx.readBinary(PMATRX_AB) self.libCombined = isotxs.readBinary(ISOTXS_AA_AB) def test_mergeAllXSLibFiles(self): lib = xsLibraries.IsotxsLibrary() xsLibraries.mergeXSLibrariesInWorkingDirectory( lib, xsLibrarySuffix="", mergeGammaLibs=True, alternateDirectory=FIXTURE_DIR ) self.assertEqual(set(lib.nuclideLabels), set(self.libCombined.nuclideLabels)) ================================================ FILE: armi/nuclearDataIO/tests/test_xsNuclides.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test for xs nuclides.""" import unittest from armi.nucDirectory import nuclideBases from armi.nuclearDataIO import isotxs, xsLibraries, xsNuclides from armi.tests import ISOAA_PATH, mockRunLogs class NuclideTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.lib = isotxs.readBinary(ISOAA_PATH) def test_badNameFailure(self): """Creating nuclide from label fails on bad name.""" nuc = xsNuclides.XSNuclide(None, "BACONAA") nuc.isotxsMetadata["nuclideId"] = "BACN87" with self.assertRaises(OSError): nuc.updateBaseNuclide() def test_creatingNucNoSideEffects(self): """Creating nuclide does not mes with underlying nuclide dictionary.""" nuc = nuclideBases.byName["U238"] self.assertFalse(hasattr(nuc, "xsId")) nrAA = xsNuclides.XSNuclide(None, "U238AA") nrAA.isotxsMetadata["nuclideId"] = nuc.name nrAA.updateBaseNuclide() self.assertEqual("AA", nrAA.xsId) self.assertFalse(hasattr(nuc, "xsId")) def test_odifyingNucAttrUpdatesIsotxs(self): """Modifying nuclide attribute updates the ISOTXS nuclide data.""" lib = xsLibraries.IsotxsLibrary() nuc = nuclideBases.byName["FE"] nrAA = xsNuclides.XSNuclide(lib, "FEAA") lib["FEAA"] = nrAA nrAA.isotxsMetadata["nuclideId"] = nuc.name nrAA.updateBaseNuclide() self.assertEqual(len(nuc.trans), len(nrAA.trans)) nuc.trans.append("whatever") self.assertEqual(len(nuc.trans), len(nrAA.trans)) self.assertEqual("whatever", nuc.trans[-1]) self.assertEqual("whatever", nrAA.trans[-1]) # We have modified the underlying nuclide; need to reset. 
nuc.trans.pop() def test_moLabelsNoWarnings(self): """New nuclide labels do not cause warnings.""" with mockRunLogs.BufferLog() as logCapture: self.assertEqual("", logCapture.getStdout()) fe = nuclideBases.byName["FE"] feNuc = xsNuclides.XSNuclide(None, "FEAA") feNuc.isotxsMetadata["nuclideId"] = fe.name feNuc.updateBaseNuclide() self.assertEqual(fe, feNuc._base) self.assertEqual("", logCapture.getStdout()) def test_nuclide_oldLabelsCauseWarnings(self): with mockRunLogs.BufferLog() as logCapture: self.assertEqual("", logCapture.getStdout()) pu = nuclideBases.byName["PU239"] puNuc = xsNuclides.XSNuclide(None, "PLUTAA") puNuc.isotxsMetadata["nuclideId"] = pu.name puNuc.updateBaseNuclide() self.assertEqual(pu, puNuc._base) length = len(logCapture.getStdout()) self.assertGreater(length, 15) # now get it with a legitimate same label, length should not change puNuc = xsNuclides.XSNuclide(None, "PLUTAB") puNuc.isotxsMetadata["nuclideId"] = pu.name puNuc.updateBaseNuclide() self.assertEqual(pu, puNuc._base) self.assertEqual(length, len(logCapture.getStdout())) def test_nuclideBaseMethodsNoFail(self): """Nuclide base method should not fail.""" for nuc in self.lib.nuclides: self.assertIsInstance(nuc.getDatabaseName(), str) self.assertIsInstance(nuc.getMcc3Id(), str) def test_nuclideIsoaaDetails(self): nuc = self.lib["U235AA"] self.assertEqual(935.9793848991394, sum(nuc.micros.fission)) self.assertEqual(1.0000000956962505, sum(nuc.micros.chi)) nuc = self.lib["B10AA"] self.assertEqual(0.7499475518734471, sum(nuc.micros.nGamma)) nuc = self.lib["B11AA"] self.assertEqual(0.0008645406924188137, sum(nuc.micros.n2n)) self.assertEqual(0.008091875669521187, sum(nuc.micros.nGamma)) def test_2dDataCoords(self): """Manually compare some 2d XS data to ensure the correct coordinates.""" u235 = self.lib["U235AA"] self.assertAlmostEqual(5.76494979858, u235.micros.total[0, 0]) self.assertAlmostEqual(6.5928812027, u235.micros.total[1, 0]) self.assertAlmostEqual(113.00479126, 
u235.micros.total[31, 0]) self.assertAlmostEqual(606.100097656, u235.micros.total[32, 0]) self.assertAlmostEqual(5.7647356987, u235.micros.total[0, 1]) self.assertAlmostEqual(6.58178663254, u235.micros.total[1, 1]) self.assertAlmostEqual(112.154449463, u235.micros.total[31, 1]) self.assertAlmostEqual(606.100097656, u235.micros.total[32, 1]) pu239 = self.lib["PU39AA"] self.assertAlmostEqual(5.83128976821, pu239.micros.total[0, 0]) self.assertAlmostEqual(6.64091205597, pu239.micros.total[1, 0]) self.assertAlmostEqual(394.632354736, pu239.micros.total[31, 0]) self.assertAlmostEqual(973.399902343, pu239.micros.total[32, 0]) self.assertAlmostEqual(5.83086299896, pu239.micros.total[0, 1]) self.assertAlmostEqual(6.63103675842, pu239.micros.total[1, 1]) self.assertAlmostEqual(383.891998291, pu239.micros.total[31, 1]) self.assertAlmostEqual(973.399902343, pu239.micros.total[32, 1]) def test_scatterXSdataCoords(self): """Manually compare scatter XS data to ensure the correct coordinates.""" u235 = self.lib["U235AA"] elasticScatter = u235.micros.elasticScatter n2nScatter = u235.micros.n2nScatter inelasticScatter = u235.micros.inelasticScatter self.assertAlmostEqual(0.0304658822715, elasticScatter[(2, 1)]) self.assertAlmostEqual(0.0331721678376, inelasticScatter[(2, 0)]) self.assertAlmostEqual(0.0310171917081, inelasticScatter[(2, 1)]) self.assertAlmostEqual(0.0893433615565, inelasticScatter[(2, 2)]) self.assertAlmostEqual(8.41606015456e-05, inelasticScatter[(16, 2)]) self.assertAlmostEqual(3.23279074621e-08, inelasticScatter[(17, 2)]) self.assertAlmostEqual(1.96078691062e-08, inelasticScatter[(18, 2)]) self.assertAlmostEqual(1.18927703241e-08, inelasticScatter[(19, 2)]) self.assertAlmostEqual(7.21333170972e-09, inelasticScatter[(20, 2)]) self.assertAlmostEqual(3.66581343059e-09, inelasticScatter[(21, 2)]) self.assertAlmostEqual(3.81337583732e-09, inelasticScatter[(22, 2)]) self.assertAlmostEqual(1.35068589646e-09, inelasticScatter[(23, 2)]) 
self.assertAlmostEqual(3.96180976914e-10, inelasticScatter[(24, 2)]) self.assertAlmostEqual(4.85626551381e-05, n2nScatter[(1, 0)]) self.assertAlmostEqual(4.61509245042e-07, n2nScatter[(1, 1)]) self.assertAlmostEqual(9.67319720075e-05, n2nScatter[(2, 1)]) self.assertAlmostEqual(3.39554608217e-05, n2nScatter[(16, 1)]) self.assertAlmostEqual(1.12633460958e-05, n2nScatter[(17, 1)]) self.assertAlmostEqual(6.964501722e-07, n2nScatter[(18, 1)]) pu239 = self.lib["PU39AA"] elasticScatter = pu239.micros.elasticScatter inelasticScatter = pu239.micros.inelasticScatter n2nScatter = pu239.micros.n2nScatter self.assertAlmostEqual(1.7445316189e-05, n2nScatter[(1, 0)]) self.assertAlmostEqual(4.12698773289e-06, n2nScatter[(17, 1)]) self.assertAlmostEqual(6.80282767007e-07, n2nScatter[(1, 1)]) self.assertAlmostEqual(1.56137302838e-05, n2nScatter[(16, 1)]) self.assertAlmostEqual(9.7953477507e-07, n2nScatter[(18, 1)]) self.assertAlmostEqual(0.000104939324956, n2nScatter[(2, 1)]) self.assertAlmostEqual(0.0206335708499, elasticScatter[(2, 1)]) self.assertAlmostEqual(0.000585122266784, inelasticScatter[(2, 0)]) self.assertAlmostEqual(0.0352461636066, inelasticScatter[(2, 1)]) self.assertAlmostEqual(0.457990020514, inelasticScatter[(2, 2)]) self.assertAlmostEqual(1.16550609164e-07, n2nScatter[(19, 1)]) self.assertAlmostEqual(5.22556074429e-05, inelasticScatter[(16, 2)]) # the code below is very useful for generating the above test information """ for key, xs in pu239Scatter.items(): mk = max(key[1:]) if len(key) == 5 and 1 in key and 2 in key and (mk <= 2 or mk > 15): print ('self.assertAlmostEqual({}, pu239.micros[{}])' .format(xs, key)) """ def test_getMicroXS(self): """Check whether getMicroXS method returns the correct cross sections for the input nuclide.""" u235Nuc = self.lib["U235AA"] for i in range(self.lib.numGroups): refFissionXS = u235Nuc.micros.fission[i] curFissionXS = u235Nuc.getMicroXS("fission", i) self.assertAlmostEqual(refFissionXS, curFissionXS) # error raised if you 
attempt a bad group index with self.assertRaises(IndexError): u235Nuc.getMicroXS("fission", -999) # zero returned if you try to grab a non-existent interaction self.assertEqual(u235Nuc.getMicroXS("fake", 1), 0) def test_getXS(self): u235Nuc = self.lib["U235AA"] refFission = u235Nuc.micros.fission curFission = u235Nuc.getXS("fission") self.assertAlmostEqual(len(refFission), len(curFission)) self.assertAlmostEqual(refFission[0], curFission[0]) self.assertAlmostEqual(refFission[1], curFission[1]) ================================================ FILE: armi/nuclearDataIO/xsCollections.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Cross section collections contain cross sections for a single nuclide or region. Specifically, they are used as attributes of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`, which then are combined as a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`. These may represent microscopic or macroscopic neutron or photon cross sections. When they are macroscopic, they generally represent a whole region with many nuclides, though this is not required. See Also -------- armi.nuclearDataIO.xsCollection.XSCollection : object that gets created. Examples -------- # creating a MicroscopicXSCollection by loading one from ISOTXS. 
microLib = armi.nuclearDataIO.ISOTXS('ISOTXS')
micros = myLib.nuclides['U235AA'].micros

# creating macroscopic XS:
mc = MacroscopicCrossSectionCreator()
macroCollection = mc.createMacrosFromMicros(microLib, block)
blocksWithMacros = mc.createMacrosOnBlocklist(microLib, blocks)
"""

import numpy as np
from scipy import sparse

from armi import runLog
from armi.utils import properties, units

# Basic cross-section types that are represented by a 1-D vector in the multigroup approximation
# No one is particularly proud of these names...we can claim
# they have some origin in the ISOTXS file format card 04 definition
# fmt: off
NGAMMA = "nGamma"  # radiative capture
NALPHA = "nalph"  # (n, alpha)
NP = "np"  # (n, proton)
ND = "nd"  # (n, deuteron)
NT = "nt"  # (n, triton)
FISSION_XS = "fission"  # (n, fission)
N2N_XS = "n2n"  # (n,2n)
NUSIGF = "nuSigF"
NU = "neutronsPerFission"
# fmt: on

CAPTURE_XS = [NGAMMA, NALPHA, NP, ND, NT]

# Cross section types that are represented by 2-D matrices in the multigroup approximation
BASIC_SCAT_MATRIX = ["elasticScatter", "inelasticScatter", "n2nScatter"]
OTHER_SCAT_MATRIX = ["totalScatter", "elasticScatter1stOrder"]
HIGHORDER_SCATTER = "higherOrderScatter"

# Subset of vector xs used to evaluate absorption cross-section
ABSORPTION_XS = CAPTURE_XS + [FISSION_XS, N2N_XS]

# Subset of vector xs evaluated by _convertBasicXS
BASIC_XS = ABSORPTION_XS + [NUSIGF]

# Subset vector xs that are derived from basic cross sections
DERIVED_XS = ["absorption", "removal"]

# Total and transport are treated differently since they are 2D (can have multiple moments)
TOTAL_XS = ["total", "transport"]

# Subset of all basic cross sections that include removal and scattering
ALL_XS = BASIC_XS + BASIC_SCAT_MATRIX + OTHER_SCAT_MATRIX + DERIVED_XS + TOTAL_XS

# All xs collection data
ALL_COLLECTION_DATA = ALL_XS + [
    "chi",
    NU,
    "strpd",
    HIGHORDER_SCATTER,
    "diffusionConstants",
]

E_CAPTURE = "ecapt"
E_FISSION = "efiss"


class XSCollection:
    """A cross section collection."""

    _zeroes = {}
    """
    A dict of numpy arrays set to the size of XSLibrary.numGroups.

    This is used to initialize cross sections which may not exist for the specific nuclide.
    Consequently, there should never be a situation where a cross section does not exist.
    In addition, they are all pointers to the same array, so we're not generating too much
    unnecessary data.

    Notes
    -----
    This is a dict so that it can store multiple 0_g "matrices", i.e. vectors.
    Realistically, during any given run there will only be a set of groups, e.g. 33.
    """

    @classmethod
    def getDefaultXs(cls, numGroups):
        """Return (and cache) a shared all-zeros vector of length ``numGroups``."""
        default = cls._zeroes.get(numGroups, None)
        if default is None:
            default = np.zeros(numGroups)
            cls._zeroes[numGroups] = default
        return default

    def __init__(self, parent):
        """
        Construct a NuclideCollection.

        Parameters
        ----------
        parent : object
            The parent container, which may be a region, a nuclide, a block, etc.
        """
        self.numGroups = None
        self.transport = None
        self.total = None
        self.nGamma = None
        self.fission = None
        self.neutronsPerFission = None
        self.chi = None
        self.nalph = None
        self.np = None
        self.n2n = None
        self.nd = None
        self.nt = None
        self.strpd = None
        self.elasticScatter = None
        self.inelasticScatter = None
        self.n2nScatter = None
        self.elasticScatter1stOrder = None
        self.totalScatter = None
        self.absorption = None
        self.diffusionConstants = None
        self.removal = None
        self.nuSigF = None
        self.higherOrderScatter = {}
        # human-readable origin label used in comparison/merge error messages
        self.source = "{}".format(parent)

    def __getitem__(self, key):
        """
        Access cross sections by key string (e.g. micros['fission'] = micros.fission.

        Notes
        -----
        These containers were originally dicts, but upgraded to objects with numpy values
        as specialization was needed. This access method could/should be phased out.
        """
        return self.__dict__[key]

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def get(self, key, default):
        """Dict-style get with a fallback default."""
        try:
            return self[key]
        except (IndexError, KeyError, TypeError):
            return default

    def getAbsorptionXS(self):
        """Return total absorption XS, which is the sum of capture + fission + others."""
        absXS = [
            self.nGamma,
            self.fission,
            self.nalph,
            self.np,
            self.nd,
            self.nt,
            self.n2n,
        ]
        return absXS

    def getTotalScatterMatrix(self):
        """
        Sum up scatter matrices to produce total scatter matrix.

        Multiply reaction-based n2n scatter matrix by 2.0 to convert to production-based.

        .. warning:: Not all lattice codes store (n,2n) matrices consistently. Some are
            production-based and some are absorption-based. If you use an absorption-based
            one, your scatter matrix will be off, generally leading to about a percent error
            in your neutron balance.

        Notes
        -----
        The total scattering matrix is produced by summing the elastic, inelastic, and n2n
        scattering matrices. If a specific scattering matrix does not exist for a composition
        (nuclide or region) then it is skipped and a warning is displayed stating that the
        scattering reaction is not available and is not included in the total scattering
        matrix. Example: When producing macroscopic cross sections in MC2-3 the code
        internally merges the elastic and inelastic scattering matrices into a single elastic
        scattering matrix.
        """
        scatters = []
        totalScatterComponents = {
            "elastic": self.elasticScatter,
            "inelastic": self.inelasticScatter,
            "n2n": self.n2nScatter * 2.0,
        }
        for sType, sMatrix in totalScatterComponents.items():
            if sMatrix is not None:
                scatters.append(sMatrix)
            else:
                runLog.warning(
                    "{} scattering matrix in {} is not defined. Generating total scattering matrix"
                    " without this data".format(sType.title(), self),
                    single=True,
                )
        return sum(scatters)

    def clear(self):
        """Zero out all the cross sections; this is useful for creating dummy cross sections."""
        for xsAttr in ALL_XS:
            value = getattr(self, xsAttr)
            # it should either be a list, a numpy array, or a sparse matrix
            if isinstance(value, list):
                value = [0.0] * len(value)
            elif isinstance(value, np.ndarray):
                value = np.zeros(value.shape)
            elif value is None:
                # assume it is scipy.sparse
                pass
            elif value.nnz >= 0:
                value = sparse.csr_matrix(value.shape)
            setattr(self, xsAttr, value)
        # need to do the same thing for the higherOrderScatter
        for kk, currentMatrix in self.higherOrderScatter.items():
            self.higherOrderScatter[kk] = sparse.csr_matrix(currentMatrix.shape)

    @staticmethod
    def collapseCrossSection(crossSection, weights):
        r"""
        Collapse a cross section into 1-group.

        This is extremely useful for many analyses such as doing a shielding efficacy survey
        or computing one-group reaction rates.

        .. math::
            \bar{\sigma} = \frac{\sum_g{\sigma_g \phi_g}}{\sum_g{\phi_g}}

        Parameters
        ----------
        crossSection : list
            Multigroup cross section values
        weights : list
            energy group weights to apply (usually the multigroup flux)

        Returns
        -------
        oneGroupXS : float
            The one group cross section in the same units as the input cross section.
        """
        mult = np.array(crossSection) * np.array(weights)
        return sum(mult) / sum(weights)

    def compare(self, other, flux, relativeTolerance=0, verbose=False, nucName=""):
        """Compare the cross sections between two XSCollections objects."""
        nuclideIDMsg = f"Nuclide {nucName} " if nucName else ""
        equal = True
        for xsName in ALL_COLLECTION_DATA:
            myXsData = self.__dict__[xsName]
            theirXsData = other.__dict__[xsName]
            if xsName == HIGHORDER_SCATTER:
                # higher-order scatter is a dict of matrices; compare element-wise
                for actualList, expectedList in zip(myXsData, theirXsData):
                    if actualList != expectedList:
                        equal = False
                        runLog.important(
                            " {}{} {:<30} cross section is different.".format(
                                nuclideIDMsg,
                                self.source,
                                xsName,
                            )
                        )
            elif sparse.issparse(myXsData) and sparse.issparse(theirXsData):
                # sparse matrices must be densified for an allclose comparison
                if not np.allclose(
                    myXsData.todense(),
                    theirXsData.todense(),
                    rtol=relativeTolerance,
                    atol=0.0,
                ):
                    verboseData = "" if not verbose else "\n{},\n\n{}".format(myXsData, theirXsData)
                    runLog.important(
                        " {}{} {:<30} cross section is different.{}".format(
                            nuclideIDMsg, self.source, xsName, verboseData
                        )
                    )
                    equal = False
            elif isinstance(myXsData, dict) and myXsData != theirXsData:
                # there are no dicts currently so code is untested
                raise NotImplementedError("there are no dicts")
            elif not properties.areEqual(myXsData, theirXsData, relativeTolerance):
                verboseData = "" if not verbose else "\n{},\n\n{}".format(myXsData, theirXsData)
                runLog.important(
                    " {}{} {:<30} cross section is different.{}".format(nuclideIDMsg, self.source, xsName, verboseData)
                )
                equal = False
        return equal

    def merge(self, other):
        """
        Merge the cross sections of two collections.

        Notes
        -----
        1. This can only merge if one hasn't been assigned at all, because it doesn't try to
           figure out how to account for overlapping cross sections.
        2. Update the current library (self) with values from the other library if all
           attributes in the library except ones in `attributesToIgnore` are None.
        3. Libraries are already merged if all attributes in the other library are None
           (This is nothing to merge!).
""" attributesToIgnore = ["source", HIGHORDER_SCATTER] if all(v is None for k, v in self.__dict__.items() if k not in attributesToIgnore): self.__dict__.update(other.__dict__) # See note 2 elif all(v is None for k, v in other.__dict__.items() if k not in attributesToIgnore): pass # See note 3 else: overlappingAttrs = set(k for k, v in self.__dict__.items() if v is not None and k != "source") overlappingAttrs &= set(k for k, v in other.__dict__.items() if v is not None and k != "source") raise AttributeError( "Cannot merge {} and {}.\n Cross sections overlap in attributes: {}.".format( self.source, other.source, ", ".join(overlappingAttrs) ) ) class MacroscopicCrossSectionCreator: """ Create macroscopic cross sections from micros and number density. Object encapsulating all high-level methods related to the creation of macroscopic cross sections. """ def __init__(self, buildScatterMatrix=True, minimumNuclideDensity=0.0): self.densities = None self.macros = None self.micros = None self.minimumNuclideDensity = minimumNuclideDensity self.buildScatterMatrix = buildScatterMatrix self.block = None def createMacrosOnBlocklist(self, microLibrary, blockList, nucNames=None, libType="micros"): """Create macroscopic cross sections for a list of blocks.""" for block in blockList: block.macros = self.createMacrosFromMicros(microLibrary, block, nucNames, libType=libType) return blockList def createMacrosFromMicros(self, microLibrary, block, nucNames=None, libType="micros"): """ Creates a macroscopic cross section set based on a microscopic XS library using a block object. Micro libraries have lots of nuclides, but macros only have 1. Parameters ---------- microLibrary : xsCollection.XSCollection Input micros block : Block Object whose number densities should be used to generate macros nucNames : list, optional List of nuclides to include in the macros. Defaults to all in block. 
libType : str, optional The block attribute containing the desired microscopic XS for this block: either "micros" for neutron XS or "gammaXS" for gamma XS. Returns ------- macros : xsCollection.XSCollection A new XSCollection full of macroscopic cross sections """ runLog.debug("Building macroscopic cross sections for {0}".format(block)) if nucNames is None: nucNames = block.getNuclides() self.microLibrary = microLibrary self.block = block self.xsSuffix = block.getMicroSuffix() self.macros = XSCollection(parent=block) self.densities = dict( filter( lambda x: x[1] > self.minimumNuclideDensity, zip(nucNames, block.getNuclideNumberDensities(nucNames)), ) ) self.ng = getattr(self.microLibrary, "numGroups" + _getLibTypeSuffix(libType)) self._initializeMacros() self._convertBasicXS(libType=libType) self._computeAbsorptionXS() self._convertScatterMatrices(libType=libType) self._computeDiffusionConstants() self._buildTotalScatterMatrix() self._computeRemovalXS() self.macros.chi = computeBlockAverageChi(b=self.block, isotxsLib=self.microLibrary) return self.macros def _initializeMacros(self): m = self.macros for xsName in BASIC_XS + DERIVED_XS: setattr(m, xsName, np.zeros(self.ng)) for matrixName in BASIC_SCAT_MATRIX: # lil_matrices are good for indexing but bad for certain math operations. # use csr for faster math setattr(m, matrixName, sparse.csr_matrix((self.ng, self.ng))) def _convertBasicXS(self, libType="micros"): """ Converts basic XS such as fission, nGamma, etc. Parameters ---------- libType : str, optional The block attribute containing the desired microscopic XS for this block: either "micros" for neutron XS or "gammaXS" for gamma XS. 
""" reactions = BASIC_XS + TOTAL_XS if NUSIGF in reactions: reactions.remove(NUSIGF) self.macros[NUSIGF] = computeMacroscopicGroupConstants( FISSION_XS, self.densities, self.microLibrary, self.xsSuffix, libType=libType, multConstant=NU, ) for reaction in reactions: self.macros[reaction] = computeMacroscopicGroupConstants( reaction, self.densities, self.microLibrary, self.xsSuffix, libType=libType, ) def _convertScatterMatrices(self, libType="micros"): """ Build macroscopic scatter matrices. Parameters ---------- libType : str, optional The block attribute containing the desired microscopic XS for this block: either "micros" for neutron XS or "gammaXS" for gamma XS. """ if not self.buildScatterMatrix: return for nuclide in self.microLibrary.getNuclides(self.xsSuffix): microCollection = getattr(nuclide, libType) nDens = self.densities.get(nuclide.name, 0.0) if microCollection.elasticScatter is not None: self.macros.elasticScatter += microCollection.elasticScatter * nDens if microCollection.inelasticScatter is not None: self.macros.inelasticScatter += microCollection.inelasticScatter * nDens if microCollection.n2nScatter is not None: self.macros.n2nScatter += microCollection.n2nScatter * nDens def _computeAbsorptionXS(self): """ Absorption = sum of all absorption reactions. Must be called after :py:meth:`_convertBasicXS`. """ for absXS in self.macros.getAbsorptionXS(): self.macros.absorption += absXS def _computeDiffusionConstants(self): self.macros.diffusionConstants = 1.0 / (3.0 * self.macros.transport) def _buildTotalScatterMatrix(self): self.macros.totalScatter = self.macros.getTotalScatterMatrix() def _computeRemovalXS(self): """ Compute removal cross section (things that remove a neutron from this phase space). This includes all absorptions and outscattering. Outscattering is represented by columns of the total scatter matrix. Self-scattering (e.g. when g' == g) is not be included. This can be handled by summing the columns and then subtracting the diagonal. 
within-group n2n is accounted for by simply not including n2n in the removal xs. """ self.macros.removal = self.macros.absorption - self.macros.n2n columnSum = self.macros.totalScatter.sum(axis=0).getA1() # convert to ndarray diags = self.macros.totalScatter.diagonal() self.macros.removal += columnSum - diags # ruff: noqa: E501 def computeBlockAverageChi(b, isotxsLib): r""" Return the block average total chi vector based on isotope chi vectors. This is defined by eq 3.4b in DIF3D manual [DIF3D]_, which corresponds to 1 in A.HMG4C card. .. math:: \chi_g = \frac{\sum_{n} \chi_{g,n} N_n V \sum_{g'}(\nu_{g'}*\sigma_{f,g'})}{\sum_n N_n V \sum_{g'}(\nu_{g'}*\sigma_{f,g'} )} To evaluate efficiently, assume that if :math:`\chi_{g,n}=0`, there will be no contributions Volume is not used b/c it is already homogenized in the block. Parameters ---------- b : object Block object isotxsLib : object ISOTXS library object Notes ----- This methodology is based on option 1 in the HMG4C utility (named total fission source weighting). """ numGroups = isotxsLib.numGroups numerator = np.zeros(numGroups) denominator = 0.0 numberDensities = b.getNumberDensities() for nucObj in isotxsLib.getNuclides(b.getMicroSuffix()): nucMicroXS = nucObj.micros nucNDens = numberDensities.get(nucObj.name, 0.0) nuFissionTotal = sum(nucMicroXS.neutronsPerFission * nucMicroXS.fission) numerator += nucMicroXS.chi * nucNDens * nuFissionTotal denominator += nucNDens * nuFissionTotal if denominator != 0.0: return numerator / denominator else: return np.zeros(numGroups) def _getLibTypeSuffix(libType): if libType == "micros": libTypeSuffix = "" elif libType == "gammaXS": libTypeSuffix = "Gamma" else: libTypeSuffix = None runLog.warning( 'ARMI currently supports only micro XS libraries of types "micros" (neutron) and "gammaXS" (gamma).' ) return libTypeSuffix def computeNeutronEnergyDepositionConstants(numberDensities, lib, microSuffix): """ Compute the macroscopic neutron energy deposition group constants. 
These group constants can be multiplied by the flux to obtain energy deposition rates. Parameters ---------- numberDensities : dict nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method. lib : library object Microscopic cross section library. microSuffix : str Microscopic library suffix (e.g. 'AB') for this composite. See composite `getMicroSuffix` method. Returns ------- energyDepositionConsts : np.ndarray Neutron energy deposition group constants. (J/cm) Notes ----- PMATRX documentation says units will be eV/s when multiplied by flux but it's eV/s/cm^3. (eV/s/cm^3 = eV-bn * 1/cm^2/s * 1/bn-cm.) Converted here to obtain J/cm (eV-bn * 1/bn-cm * J / eV) """ return computeMacroscopicGroupConstants("neutronHeating", numberDensities, lib, microSuffix) * units.JOULES_PER_eV def computeGammaEnergyDepositionConstants(numberDensities, lib, microSuffix): """ Compute the macroscopic gamma energy deposition group constants. These group constants can be multiplied by the flux to obtain energy deposition rates. Parameters ---------- numberDensities : dict nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method. lib : library object Microscopic cross section library. microSuffix : str Microscopic library suffix (e.g. 'AB') for this composite. See composite `getMicroSuffix` method. Returns ------- energyDepositionConsts : np.ndarray gamma energy deposition group constants. (J/cm) Notes ----- PMATRX documentation says units will be eV/s when multiplied by flux but it's eV/s/cm^3. (eV/s/cm^3 = eV-bn * 1/cm^2/s * 1/bn-cm.) 
Convert here to obtain J/cm (eV-bn * 1/bn-cm * J / eV) """ return computeMacroscopicGroupConstants("gammaHeating", numberDensities, lib, microSuffix) * units.JOULES_PER_eV def computeFissionEnergyGenerationConstants(numberDensities, lib, microSuffix): r""" Get the fission energy generation group constant of a block. .. math:: E_{generation_fission} = \kappa_f \Sigma_f Power comes from fission and capture reactions. Parameters ---------- numberDensities : dict nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method. lib : library object Microscopic cross section library. microSuffix : str Microscopic library suffix (e.g. 'AB') for this composite. See composite `getMicroSuffix` method. Returns ------- fissionEnergyFactor: np.ndarray Fission energy generation group constants (in Joules/cm) """ fissionEnergyFactor = computeMacroscopicGroupConstants( FISSION_XS, numberDensities, lib, microSuffix, libType="micros", multConstant=E_FISSION, ) return fissionEnergyFactor def computeCaptureEnergyGenerationConstants(numberDensities, lib, microSuffix): r""" Get the energy generation group constant of a block. .. math:: E_{generation capture} = \kappa_c \Sigma_c Typically, one only cares about the flux* this XS (to find total power), but the XS itself is required in some sensitivity studies. Power comes from fission and capture reactions. Parameters ---------- numberDensities : dict nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which the macroscopic group constants are computed. See composite `getNumberDensities` method. lib : library object Microscopic cross section library. microSuffix : str Microscopic library suffix (e.g. 'AB') for this composite. See composite `getMicroSuffix` method. 
Returns ------- captureEnergyFactor: np.ndarray Capture energy generation group constants (in Joules/cm) """ captureEnergyFactor = None for xs in CAPTURE_XS: if captureEnergyFactor is None: captureEnergyFactor = np.zeros( np.shape(computeMacroscopicGroupConstants(xs, numberDensities, lib, microSuffix, libType="micros")) ) captureEnergyFactor += computeMacroscopicGroupConstants( xs, numberDensities, lib, microSuffix, libType="micros", multConstant=E_CAPTURE, ) return captureEnergyFactor def computeMacroscopicGroupConstants( constantName, numberDensities, lib, microSuffix, libType=None, multConstant=None, multLib=None, ): r""" Compute any macroscopic group constants given number densities and a microscopic library. .. impl:: Compute macroscopic cross sections from microscopic cross sections and number densities. :id: I_ARMI_NUCDATA_MACRO :implements: R_ARMI_NUCDATA_MACRO This function computes the macroscopic cross sections of a specified reaction type from inputted microscopic cross sections and number densities. The ``constantName`` parameter specifies what type of reaction is requested. The ``numberDensities`` parameter is a dictionary mapping the nuclide to its number density. The ``lib`` parameter is a library object like :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` or :py:class:`~armi.nuclearDataIO.xsLibraries.CompxsLibrary` that holds the microscopic cross-section data. The ``microSuffix`` parameter specifies from which part of the library the microscopic cross sections are gathered; this is typically gathered from a components ``getMicroSuffix`` method like :py:meth:`Block.getMicroSuffix <armi.reactor.blocks.Block.getMicroSuffix>`. ``libType`` is an optional parameter specifying whether the reaction is for neutrons or gammas. 
This function also has the optional parameters ``multConstant`` and ``multLib``, which allows another constant from the library, such as neutrons per fission (nu) or energy per fission (kappa), to be multiplied to the primary one. The macroscopic cross sections are then computed as: .. math:: \Sigma_{g} = \sum_{n} N_n \sigma_{n,g}\nu_n \quad g=1,...,G where :math:`n` is the isotope index, :math:`g` is the energy group index, :math:`\sigma` is the microscopic cross section, and :math:`\nu` is the scalar multiplier. If the library (``lib``) with suffix ``microSuffix`` is missing a cross section for the ``constantName`` reaction for one or more of the nuclides in ``numberDensities`` an error is raised; but if ``multConstant`` is missing that cross section, then those nuclides are printed as a warning. Parameters ---------- constantName : str Name of the reaction for which to obtain the group constants. This name should match a cross section name or an attribute in the collection. numberDensities : dict nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method. lib : library object Microscopic cross section library. microSuffix : str Microscopic library suffix (e.g. 'AB') for this composite. See composite `getMicroSuffix` method. libType : str, optional The block attribute containing the desired microscopic XS for this block: either "micros" for neutron XS or "gammaXS" for gamma XS. multConstant : str, optional Name of constant by which the group constants will be multiplied. This name should match a cross section name or an attribute in the collection. multLib : library object, optional Microscopic cross section nuclide library to obtain the multiplier from. If None, same library as base cross section is used. Returns ------- macroGroupConstant : np.ndarray Macroscopic group constants for the requested reaction. 
""" skippedNuclides = [] skippedMultNuclides = [] macroGroupConstants = None # sort the numberDensities because a summation is being performed that may result in slight # differences based on the order. for nuclideName, numberDensity in sorted(numberDensities.items()): if not numberDensity: continue try: libNuclide = lib.getNuclide(nuclideName, microSuffix) multLibNuclide = libNuclide except KeyError: skippedNuclides.append(nuclideName) # Nuclide does not exist in the library continue if multLib: try: multLibNuclide = multLib.getNuclide(nuclideName, microSuffix) except KeyError: skippedMultNuclides.append(nuclideName) # Nuclide does not exist in the library continue microGroupConstants = _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType) multiplierVal = _getXsMultiplier(multLibNuclide, multConstant, libType) if macroGroupConstants is None: macroGroupConstants = np.zeros(microGroupConstants.shape) if microGroupConstants.shape != macroGroupConstants.shape and not microGroupConstants.any(): microGroupConstants = np.zeros(macroGroupConstants.shape) macroGroupConstants += np.asarray(numberDensity) * microGroupConstants * multiplierVal if skippedNuclides: msg = "The following nuclides are not in microscopic library {}: {}".format(lib, skippedNuclides) runLog.error(msg, single=True) raise ValueError(msg) if skippedMultNuclides: runLog.debug( "The following nuclides are not in multiplier library {}: {}".format(multLib, skippedMultNuclides), single=True, ) return macroGroupConstants def _getXsMultiplier(libNuclide, multiplier, libType): if multiplier: try: microCollection = getattr(libNuclide, libType) multiplierVal = getattr(microCollection, multiplier) except Exception: multiplierVal = libNuclide.isotxsMetadata[multiplier] else: multiplierVal = 1.0 return np.asarray(multiplierVal) def _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType): if libType: microCollection = getattr(libNuclide, libType) else: microCollection = libNuclide 
microGroupConstants = np.asarray(getattr(microCollection, constantName)) if not microGroupConstants.any(): runLog.debug( "Nuclide {} does not have {} microscopic group constants.".format(nuclideName, constantName), single=True, ) return microGroupConstants ================================================ FILE: armi/nuclearDataIO/xsLibraries.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Cross section library objects. Cross section libraries, currently, contain neutron and/or gamma cross sections, but are not necessarily intended to be only neutron and gamma data. 
""" import glob import os import re from armi import runLog from armi.nucDirectory import nuclideBases from armi.nuclearDataIO.nuclearFileMetadata import NuclideXSMetadata, RegionXSMetadata from armi.utils import properties _ISOTXS_EXT = "ISO" def compare(lib1, lib2): """Compare two XSLibraries, and return True if equal, or False if not.""" from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx equal = True # check the nuclides equal &= _checkLabels(lib1, lib2) equal &= _checkLabels(lib2, lib1) equal &= isotxs.compare(lib1, lib2) equal &= gamiso.compare(lib1, lib2) equal &= pmatrx.compare(lib1, lib2) return equal def _checkLabels(llib1, llib2): mismatch = set(llib1.nuclideLabels) - set(llib2.nuclideLabels) if any(mismatch): runLog.important("{} has nuclides that are not in {}: {}".format(llib1, llib2, mismatch)) return False return True def compareXSLibraryAttribute(lib1, lib2, attributeName, tolerance=0.0): """Compare the values of an attribute in two libraries.""" val1 = getattr(lib1, "_" + attributeName, None) val2 = getattr(lib2, "_" + attributeName, None) if not properties.areEqual(val1, val2, tolerance): runLog.important( "{} and {} have different `{}` attributes:\n{}\n{}".format(lib1, lib2, attributeName, val1, val2) ) return False return True def compareLibraryNeutronEnergies(lib1, lib2, tolerance=0.0): """Compare the neutron velocities and energy upper bounds for two libraries.""" equals = True equals &= compareXSLibraryAttribute(lib1, lib2, "neutronEnergyUpperBounds", tolerance) equals &= compareXSLibraryAttribute(lib1, lib2, "neutronVelocities", tolerance) return equals def getSuffixFromNuclideLabel(nucLabel): """ Return the xs suffix for the nuclide label. 
Parameters ---------- nucLabel: str A string representing the nuclide and xs suffix, eg, "U235AA" Returns ------- suffix: str The suffix of this string """ return nucLabel[-2:] def getISOTXSLibrariesToMerge(xsLibrarySuffix, xsLibFileNames): """ Find ISOTXS libraries out of a list that should be merged based on the provided ``xsLibrarySuffix``. Parameters ---------- xsLibrarySuffix : str XS library suffix is used to determine which ISOTXS files should be merged together. This can be an empty string or be something like `-doppler`. xsLibFileNames : list A list of library file paths like ISOAA, ISOBA, ISOCA, etc. Can be a standalone file name or a full path. Notes ----- Files that exist: ISOAA-n1, ISOAB-n1, ISOAA-n2, ISOAB-n2, ISOAA, ISOAB, ISODA, ISOBA. xsLibrarySuffix: 'n2' Results: ISOAA-n2, ISOAB-n2 """ isosToMerge = [ iso for iso in xsLibFileNames if "ISOTXS" not in iso # Skip merged ISOTXS file and ".ascii" not in iso # Skip BCD/ascii files and "BCD" not in iso ] # Skip BCD/ascii files if xsLibrarySuffix != "": isosWithSuffix = [iso for iso in isosToMerge if re.match(f".*ISO[A-Za-z]{{2}}F?{xsLibrarySuffix}$", iso)] isosToMerge = [ iso for iso in isosToMerge if "-" not in os.path.basename(iso) and not any(os.path.basename(iso) == os.path.basename(iws).split("-")[0] for iws in isosWithSuffix) ] isosToMerge += isosWithSuffix else: isosToMerge = [iso for iso in isosToMerge if "-" not in os.path.basename(iso)] return isosToMerge def mergeXSLibrariesInWorkingDirectory( lib, xsLibrarySuffix="", mergeGammaLibs=False, alternateDirectory=None, ): """ Merge neutron (ISOTXS) and gamma (GAMISO/PMATRX) library data into the provided library. Notes ----- Convention is for fuel XS id to come first alphabetically (A, B, C, etc.) and then be followed by non-fuel. This should allow `referenceDummyNuclide` to be defined before it is needed by a non-fuel cross section, but if the convention is not followed then this could cause an issue. 
Parameters ---------- lib : obj ISOTXS library object xsLibrarySuffix : str, optional XS library suffix used to determine which ISOTXS files are merged together, typically something like `-doppler`. If empty string, will merge everything without suffix (indicated by a `-`). mergeGammaLibs : bool, optional If True, the GAMISO and PMATRX files that correspond to the ISOTXS library will be merged. Note: if these files do not exist this will fail. alternateDirectory : str, optional An alternate directory in which to search for files other than the working directory. The main purpose of this is for testing, but it could also be useful to users. """ from armi import nuclearDataIO from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx baseDir = alternateDirectory or os.getcwd() globPath = os.path.join(baseDir, _ISOTXS_EXT + "*") xsLibFiles = getISOTXSLibrariesToMerge(xsLibrarySuffix, [iso for iso in glob.glob(globPath)]) librariesToMerge = [] neutronVelocities = {} # Dictionary of neutron velocities from each ISOTXS file referenceDummyNuclides = None for xsLibFilePath in sorted(xsLibFiles): try: # get XS ID from the cross section library name xsID = re.search("ISO([A-Z0-9a-z]{2})", xsLibFilePath).group(1) except AttributeError: # if glob has matched something that is not actually an ISOXX file, # the .group() call will fail runLog.debug(f"Ignoring file {xsLibFilePath} in the merging of ISOXX files") continue xsFileTypes = "ISOTXS" if not mergeGammaLibs else "ISOTXS, GAMISO, and PMATRX" runLog.info("Retrieving {} data for XS ID {}{}".format(xsFileTypes, xsID, xsLibrarySuffix)) if xsLibFilePath in lib.isotxsMetadata.fileNames: runLog.extra("Skipping merge of {} because data already exists in the library".format(xsLibFilePath)) continue neutronLibrary = isotxs.readBinary(xsLibFilePath) neutronVelocities[xsID] = neutronLibrary.neutronVelocity dummyNuclidesInNeutron = [ nuc for nuc in neutronLibrary.nuclides if isinstance(nuc._base, nuclideBases.DummyNuclideBase) ] if not 
dummyNuclidesInNeutron: runLog.info(f"Adding dummy nuclides to library {xsID}") addedDummyData = isotxs.addDummyNuclidesToLibrary( neutronLibrary, referenceDummyNuclides ) # Add DUMMY nuclide data not produced by MC2-3 isotxsLibraryPath = os.path.join( baseDir, nuclearDataIO.getExpectedISOTXSFileName(suffix=xsLibrarySuffix, xsID=xsID), ) isotxsDummyPath = isotxsLibraryPath isotxs.writeBinary(neutronLibrary, isotxsDummyPath) neutronLibraryDummyData = isotxs.readBinary(isotxsDummyPath) librariesToMerge.append(neutronLibraryDummyData) dummyNuclidesInNeutron = referenceDummyNuclides else: librariesToMerge.append(neutronLibrary) if not referenceDummyNuclides: referenceDummyNuclides = dummyNuclidesInNeutron if mergeGammaLibs: gamisoLibraryPath = os.path.join( baseDir, nuclearDataIO.getExpectedGAMISOFileName(suffix=xsLibrarySuffix, xsID=xsID), ) pmatrxLibraryPath = os.path.join( baseDir, nuclearDataIO.getExpectedPMATRXFileName(suffix=xsLibrarySuffix, xsID=xsID), ) # Check if the gamiso and pmatrx data paths exist with the xs library suffix so that # these are merged in. If they don't both exist then that is OK and we can just # revert back to expecting the files just based on the XS ID. if not (os.path.exists(gamisoLibraryPath) and os.path.exists(pmatrxLibraryPath)): runLog.warning( "One of GAMISO or PMATRX data exist for " f"XS ID {xsID} with suffix {xsLibrarySuffix}. " "Attempting to find GAMISO/PMATRX data with " f"only XS ID {xsID} instead." 
) gamisoLibraryPath = os.path.join(baseDir, nuclearDataIO.getExpectedGAMISOFileName(xsID=xsID)) pmatrxLibraryPath = os.path.join(baseDir, nuclearDataIO.getExpectedPMATRXFileName(xsID=xsID)) # GAMISO data gammaLibrary = gamiso.readBinary(gamisoLibraryPath) addedDummyData = gamiso.addDummyNuclidesToLibrary( gammaLibrary, dummyNuclidesInNeutron ) # Add DUMMY nuclide data not produced by MC2-3 if addedDummyData: gamisoDummyPath = gamisoLibraryPath gamiso.writeBinary(gammaLibrary, gamisoDummyPath) gammaLibraryDummyData = gamiso.readBinary(gamisoDummyPath) librariesToMerge.append(gammaLibraryDummyData) else: librariesToMerge.append(gammaLibrary) # PMATRX data pmatrxLibrary = pmatrx.readBinary(pmatrxLibraryPath) addedDummyData = pmatrx.addDummyNuclidesToLibrary( pmatrxLibrary, dummyNuclidesInNeutron ) # Add DUMMY nuclide data not produced by MC2-3 if addedDummyData: pmatrxDummyPath = pmatrxLibraryPath pmatrx.writeBinary(pmatrxLibrary, pmatrxDummyPath) pmatrxLibraryDummyData = pmatrx.readBinary(pmatrxDummyPath) librariesToMerge.append(pmatrxLibraryDummyData) else: librariesToMerge.append(pmatrxLibrary) for library in librariesToMerge: lib.merge(library) return neutronVelocities class _XSLibrary: """Parent class for Isotxs and Compxs library objects.""" neutronEnergyUpperBounds = properties.createImmutableProperty( "neutronEnergyUpperBounds", "an ISOTXS", "Get or set the neutron energy groups." ) neutronVelocity = properties.createImmutableProperty( "neutronVelocity", "an ISOTXS", "Get or set the mean neutron velocity in cm/s." 
) def __init__(self): # each element is a string such as U235AA self._orderedNuclideLabels = [] def __contains__(self, key): return key in self._orderedNuclideLabels def __setitem__(self, key, value): if key in self._orderedNuclideLabels: raise AttributeError("{} already contains {}".format(self, key)) value.container = self self._orderedNuclideLabels.append(key) def __getitem__(self, key): raise NotImplementedError def __delitem__(self, key): self._orderedNuclideLabels.remove(key) def merge(self, other): raise NotImplementedError def __len__(self): return len(self._orderedNuclideLabels) def _mergeNeutronEnergies(self, other): self.neutronEnergyUpperBounds = other.neutronEnergyUpperBounds # neutron velocity changes, but just use the first one. if not hasattr(self, "_neutronVelocity"): self.neutronVelocity = other.neutronVelocity def items(self): for key in self._orderedNuclideLabels: yield (key, self[key]) class IsotxsLibrary(_XSLibrary): """ IsotxsLibrary objects are a collection of cross sections (XS) for both neutron and gamma reactions. IsotxsLibrary objects must be initialized with data through one of the read methods within this package See Also -------- :py:func:`armi.nuclearDataIO.cccc.isotxs.readBinary` :py:func:`armi.nuclearDataIO.cccc.gamiso.readBinary` :py:func:`armi.nuclearDataIO.cccc.pmatrx.readBinary` :py:class:`CompxsLibrary` Examples -------- >>> lib = xsLibraries.IsotxsLibrary() >>> # this doesn't have any information yet, we can read ISOTXS information >>> libIsotxs = isotxs.readBinary("ISOAA") >>> # any number of XSLibraries can be merged >>> lib.merge(libIsotxs) # now the `lib` contains the ISOAA information. 
""" def __init__(self): _XSLibrary.__init__(self) self.pmatrxMetadata = NuclideXSMetadata() self.isotxsMetadata = NuclideXSMetadata() self.gamisoMetadata = NuclideXSMetadata() # keys are nuclide labels such as U235AA # vals are XSNuclide objects self._nuclides = {} self._scatterWeights = {} gammaEnergyUpperBounds = properties.createImmutableProperty( "gammaEnergyUpperBounds", "a PMATRX or GAMISO", "Get or set the gamma energy groups.", ) neutronDoseConversionFactors = properties.createImmutableProperty( "neutronDoseConversionFactors", "a PMATRX", "Get or set the neutron dose conversion factors.", ) gammaDoseConversionFactors = properties.createImmutableProperty( "gammaDoseConversionFactors", "a PMATRX", "Get or set the gamma does conversion factors.", ) @property def numGroups(self): """Get the number of neutron energy groups.""" # This unlocks the immutable property so that it can be # read prior to not being set to check the number of groups # that are defined. If the property is not unlocked before # accessing when it has not yet been defined then an exception # is thrown. properties.unlockImmutableProperties(self) if self.neutronEnergyUpperBounds is not None: energyBounds = self.neutronEnergyUpperBounds else: energyBounds = [] # Make sure to re-lock the properties after we are done. properties.lockImmutableProperties(self) return len(energyBounds) @property def numGroupsGamma(self): """Get the number of gamma energy groups.""" # This unlocks the immutable property so that it can be # read prior to not being set to check the number of groups # that are defined. If the property is not unlocked before # accessing when it has not yet been defined then an exception # is thrown. properties.unlockImmutableProperties(self) if self.gammaEnergyUpperBounds is not None: energyBounds = self.gammaEnergyUpperBounds else: energyBounds = [] # Make sure to re-lock the properties after we are done. 
properties.lockImmutableProperties(self) return len(energyBounds) @property def xsIDs(self): """ Get the XS ID's present in this library. Assumes the suffixes are the last 2 letters in the nucNames """ return list(set(getSuffixFromNuclideLabel(name) for name in self.nuclideLabels)) def __repr__(self): isotxs = bool(self.isotxsMetadata.keys()) pmatrx = bool(self.pmatrxMetadata.keys()) gamiso = bool(self.gamisoMetadata.keys()) groups = "" if self.numGroups: groups += f"Neutron groups: {self.numGroups}, " if self.numGroupsGamma: groups += f"Gamma groups: {self.numGroupsGamma}," return ( f"<IsotxsLibrary (id:{id(self)}), " f"ISOTXS: {isotxs}, PMATRX: {pmatrx}, GAMISO: {gamiso}, " f"{groups} containing {len(self)} nuclides with " f"XS IDs: {sorted(self.xsIDs)}>" ) def __setitem__(self, key, value): _XSLibrary.__setitem__(self, key, value) self._nuclides[key] = value def __getitem__(self, key): return self._nuclides[key] def get(self, nuclideLabel, default): return self._nuclides.get(nuclideLabel, default) def getNuclide(self, nucName, suffix): """ Get a nuclide object from the XS library. Parameters ---------- nucName : str ARMI nuclide name, e.g. 'U235', 'PU239' suffix : str Restrict to a specific nuclide lib suffix e.g. 'AA' Returns ------- nuclide : Nuclide object A nuclide from the library or None """ libLabel = nuclideBases.byName[nucName].label + suffix try: return self[libLabel] except KeyError: runLog.error("Error in {}.\nSee stderr.".format(self)) raise def __delitem__(self, key): _XSLibrary.__delitem__(self, key) del self._nuclides[key] @property def nuclideLabels(self): """Get the nuclide Names.""" # need to create a new list so the _orderedNuclideLabels does not get modified. return list(self._orderedNuclideLabels) @property def nuclides(self): return [self[name] for name in self._orderedNuclideLabels] def getNuclides(self, suffix): """Returns a list of the nuclide objects in the library.""" nucs = [] # nucName is U235IA, etc.. 
        # nuc.name is U235, etc
        for nucLabel, nuc in self.items():
            # `in` used below for support of >26 xs groups
            if not suffix or suffix in getSuffixFromNuclideLabel(nucLabel):
                # accept things with the suffix if one is given
                if nuc not in nucs:
                    nucs.append(nuc)
        return nucs

    def merge(self, other):
        """
        Merge two XSLibraries.

        On success, ``other`` is cannibalized: its ``__dict__`` is emptied and its
        nuclides/metadata are absorbed into ``self``.
        """
        runLog.debug("Merging XS library {} into XS library {}".format(other, self))
        self._mergeProperties(other)
        # merging meta data may raise an exception before knowing anything about the contained nuclides
        # if it raises an exception, nothing has been modified in two objects
        isotxsMeta, pmatrxMeta, gamisoMeta = self._mergeMetadata(other)
        self._mergeNuclides(other)
        # only vampire the __dict__ if successful
        other.__dict__ = {}
        # only reassign metadata if successful
        self.isotxsMetadata = isotxsMeta
        self.pmatrxMetadata = pmatrxMeta
        self.gamisoMetadata = gamisoMeta

    def _mergeProperties(self, other):
        # immutable properties must be unlocked before assignment; re-lock even on failure
        properties.unlockImmutableProperties(other)
        try:
            self.neutronDoseConversionFactors = other.neutronDoseConversionFactors
            self._mergeNeutronEnergies(other)
            self.gammaEnergyUpperBounds = other.gammaEnergyUpperBounds
            self.gammaDoseConversionFactors = other.gammaDoseConversionFactors
        finally:
            properties.lockImmutableProperties(other)

    def _mergeMetadata(self, other):
        # Merge the three per-format metadata collections; OSError is the failure type
        # raised by the metadata merge on conflict.
        isotxsMeta = self.isotxsMetadata.merge(other.isotxsMetadata, self, other, "ISOTXS", OSError)
        pmatrxMeta = self.pmatrxMetadata.merge(other.pmatrxMetadata, self, other, "PMATRX", OSError)
        gamisoMeta = self.gamisoMetadata.merge(other.gamisoMetadata, self, other, "GAMISO", OSError)
        return isotxsMeta, pmatrxMeta, gamisoMeta

    def _mergeNuclides(self, other):
        # these must be different
        for nuclideKey, nuclide in other.items():
            if nuclideKey in self:
                # nuclide exists in both libraries: merge the per-nuclide data
                self[nuclideKey].merge(nuclide)
            else:
                # nuclide only exists in `other`: adopt it directly
                self[nuclideKey] = nuclide

    def resetScatterWeights(self):
        # Drop the cached scatter-weight tables so they get rebuilt on next access.
        self._scatterWeights = {}

    def getScatterWeights(self, scatterMatrixKey="elasticScatter"):
        """
        Build or retrieve pre-built scatter weight data.

        This acts like a cache for _buildScatterWeights

        See Also
        --------
        _buildScatterWeights
        """
        if not self._scatterWeights.get(scatterMatrixKey):
            self._scatterWeights[scatterMatrixKey] = self._buildScatterWeights(scatterMatrixKey)
        return self._scatterWeights[scatterMatrixKey]

    def _buildScatterWeights(self, scatterMatrixKey):
        r"""
        Build a scatter-weight lookup table for the scatter matrix.

        Scatter "weights" are needed for sensitivity studies when deriviatives wrt the scatter XS
        are required. They are defined like:

        .. math::
            w_{g^{\prime} \leftarrow g} = \frac{\sigma_{s,g^{\prime} \leftarrow g}}
            {\sum_{g^{\prime\prime}=1}^G \sigma_{s, g^{\prime\prime} \leftarrow g}}

        Returns
        -------
        scatterWeights : dict
            (xsID, fromGroup) : weight column (sparse Gx1)
        """
        runLog.info("Building {0} weights on cross section library".format(scatterMatrixKey))
        scatterWeights = {}
        for nucName, nuc in self.items():
            # each nuclide normalizes its own scatter columns; key by (nuclide, from-group)
            nucScatterWeights = nuc.buildNormalizedScatterColumns(scatterMatrixKey)
            for fromG, scatterColumn in nucScatterWeights.items():
                scatterWeights[nucName, fromG] = scatterColumn
        return scatterWeights

    def purgeFissionProducts(self, r):
        """
        Purge the fission products based on the active nuclides within the reactor.

        Parameters
        ----------
        r : py:class:`armi.reactors.reactor.Reactor`
            a reactor, or None

        .. warning:: Sometimes worker nodes do not have a reactor, fission products will not be purged.
        """
        runLog.info("Purging detailed fission products from {}".format(self))
        modeledNucs = r.blueprints.allNuclidesInProblem
        # iterate over a copy since we delete entries while looping
        for key, nuc in list(self.items()):
            if nuc.name not in modeledNucs:
                del self[key]


class CompxsLibrary(_XSLibrary):
    """
    Library object used in reading/writing COMPXS files.

    Contains macroscopic cross sections for homogenized regions.

    See Also
    --------
    :py:class:`IsotxsLibrary`
    :py:func:`armi.nuclearDataIO.cccc.compxs.readBinary`

    Examples
    --------
    >>> lib = compxs.readBinary("COMPXS")
    >>> lib.regions
    """

    def __init__(self):
        _XSLibrary.__init__(self)
        # COMPXS stores homogenized regions rather than individual nuclides
        self._regions = {}
        self.compxsMetadata = RegionXSMetadata()

    def __setitem__(self, key, value):
        # keep the base-class bookkeeping (ordering/labels) in sync with the region map
        _XSLibrary.__setitem__(self, key, value)
        self._regions[key] = value

    def __getitem__(self, key):
        return self._regions[key]

    def __delitem__(self, key):
        _XSLibrary.__delitem__(self, key)
        del self._regions[key]

    @property
    def regions(self):
        """Regions in the insertion/label order tracked by the base class."""
        return [self[name] for name in self._orderedNuclideLabels]

    @property
    def regionLabels(self):
        """Copy of the ordered region labels."""
        return list(self._orderedNuclideLabels)

    def merge(self, other):
        """Merge two ``COMPXS`` libraries."""
        self._mergeProperties(other)
        self.compxsMetadata = self.compxsMetadata.merge(other.compxsMetadata, self, other, "COMPXS", OSError)
        self._appendRegions(other)

    def _mergeProperties(self, other):
        # only the neutron energy structure is shared for COMPXS; re-lock even on failure
        properties.unlockImmutableProperties(other)
        try:
            self._mergeNeutronEnergies(other)
        finally:
            properties.lockImmutableProperties(other)

    def _appendRegions(self, other):
        # renumber the incoming regions so they follow the existing ones
        offset = len(self.regions)
        for region in other.regions:
            newNumber = region.regionNumber + offset
            self[newNumber] = region
        self.compxsMetadata["numComps"] = len(self.regions)



================================================
FILE: armi/nuclearDataIO/xsNuclides.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r""" This module contains cross section nuclides, which are a wrapper around the :py:class:`~armi.nucDirectory.nuclideBases.INuclide` objects. The cross section nuclide objects contain cross section information from a specific calculation (e.g. neutron, or gamma cross sections). :py:class:`XSNuclide` objects also contain meta data from the original file, so that another file can be reconstructed. Warning ------- :py:class:`XSNuclide` objects should only be created by reading data into :py:class:`~armi.nuclearDataIO.xsLibrary.XSLibrary` objects, and then retrieving them through their label index (i.e. "PU39AA"). """ from armi.nucDirectory import nuclideBases from armi.nuclearDataIO import nuclearFileMetadata, xsCollections, xsLibraries from armi.utils.customExceptions import warn_when_root from armi.utils.plotting import plotScatterMatrix # noqa: F401 @warn_when_root def NuclideLabelDoesNotMatchNuclideLabel(nuclide, label, xsID): return "The label {} (xsID:{}) for nuclide {}, does not match the nucDirectory label.".format(label, xsID, nuclide) class XSNuclide(nuclideBases.NuclideWrapper): """ A nuclide object for a specific library. XSNuclide objects can contain GAMISO, ISOTXS, and PMATRX data all on a single instance. """ def __init__(self, xsCollection, xsCollectionKey): nuclideBases.NuclideWrapper.__init__(self, xsCollection, xsCollectionKey) self.xsId = xsLibraries.getSuffixFromNuclideLabel(xsCollectionKey) self.source = 0.0 # 2D record... 
nucNames # 4D record self.isotxsMetadata = nuclearFileMetadata.NuclideMetadata() self.gamisoMetadata = nuclearFileMetadata.NuclideMetadata() self.pmatrxMetadata = nuclearFileMetadata.NuclideMetadata() # 5D and 7D records self.micros = xsCollections.XSCollection(parent=self) self.gammaXS = xsCollections.XSCollection(parent=self) self.neutronHeating = None self.neutronDamage = None self.gammaHeating = None self.isotropicProduction = None self.linearAnisotropicProduction = None self.nOrderProductionMatrix = {} def updateBaseNuclide(self): """ Update which nuclide base this :py:class:`XSNuclide` points to. Notes ----- During instantiation, not everything is available, only they user-supplied nuclide label, i.e. :py:class:`~armi.nucDirectory.nuclideBases.NuclideWrapper.containerKey`. During the read operation, """ if self._base is not None: return # most nuclides have the correct NuclideBase ID nuclideId = self.isotxsMetadata["nuclideId"] nuclideBase = nuclideBases.byName.get(nuclideId, None) if nuclideBase is None or isinstance(nuclideBase, nuclideBases.DummyNuclideBase): # FP, DUMMY, DUMP nuclideBase = nuclideBases.byLabel.get(self.nucLabel, None) if nuclideBase is None: raise OSError("Could not determine NuclideBase for label {}".format(self.nucLabel)) if self.nucLabel != nuclideBase.label: NuclideLabelDoesNotMatchNuclideLabel(nuclideBase, self.nucLabel, self.xsId) nuclideBases.changeLabel(nuclideBase, self.nucLabel) self._base = nuclideBase def getMicroXS(self, interaction, group): """Returns the microscopic xs as the ISOTXS value if it exists or a 0 since it doesn't.""" if interaction in self.micros.__dict__: try: return self.micros[interaction][group] except IndexError: raise IndexError( "Group {0} not found in interaction {1} of nuclide {2}".format(group, interaction, self.name) ) else: return 0 def getXS(self, interaction): """Get the cross section of a particular interaction. 
See Also -------- armi.nucDirectory.homogRegion.getXS """ return self.micros[interaction] def buildNormalizedScatterColumns(self, scatterMatrixKey): """ Build normalized columns of a scatter matrix. the vectors represent all scattering out of each group. The rows of the scatter matrix represent in-scatter and the columns represent out-scatter. So this sums up the columns. Returns ------- scatterWeights : dict keys are fromG indices, values are sparse matrix columns (size: Gx1) containing normalized columns of the scatter matrix. """ scatter = self.micros[scatterMatrixKey] scatterWeights = {} if scatter is None: return scatterWeights for fromG in range(self.container.numGroups): outScatter = scatter[:, fromG] # fromG column of scatter matrix. total = outScatter.sum() if total != 0.0: normalizedOutScatter = outScatter / total else: normalizedOutScatter = outScatter scatterWeights[fromG] = normalizedOutScatter return scatterWeights @property def trans(self): """Get the transmutations for this nuclide. Notes ----- This is a property wrapper around the base nuclide's :code:`trans` attribute """ return self._base.trans @property def decays(self): """Get the decays for this nuclide. Notes ----- This is a property wrapper around the base nuclide's :code:`decays` attribute """ return self._base.decays def merge(self, other): """ Merge the attributes of two XSNuclides. Parameters ---------- other : armi.nuclearDataIO.xsNuclides.XSNuclide The other nuclide to merge information. Notes ----- The merge is really more like "cannibalize" in that the object performing the merge takes on the attributes of the :code:`other`. It isn't necessary to create new objects for the newly merged attributes, because the 99% usage is only used during runtime, where the second XSNuclide, and it's container (e.g. ISTOXS, GAMISO, etc.) are discarded after the merge. 
""" self.isotxsMetadata = self.isotxsMetadata.merge(other.isotxsMetadata, self, other, "ISOTXS", AttributeError) self.gamisoMetadata = self.gamisoMetadata.merge(other.gamisoMetadata, self, other, "GAMISO", AttributeError) self.pmatrxMetadata = self.pmatrxMetadata.merge(other.pmatrxMetadata, self, other, "PMATRX", AttributeError) self.micros.merge(other.micros) self.gammaXS.merge(other.gammaXS) self.neutronHeating = _mergeAttributes(self, other, "neutronHeating") self.neutronDamage = _mergeAttributes(self, other, "neutronDamage") self.gammaHeating = _mergeAttributes(self, other, "gammaHeating") self.isotropicProduction = _mergeAttributes(self, other, "isotropicProduction") self.linearAnisotropicProduction = _mergeAttributes(self, other, "linearAnisotropicProduction") # this is lazy, but should work, because the n-order wouldn't be set without the others being set first. self.nOrderProductionMatrix = self.nOrderProductionMatrix or other.nOrderProductionMatrix def _mergeAttributes(this, other, attrName): """Function for merging XSNuclide attributes. Notes ----- This function checks to see that the attribute has only been assigned for a single instance, and then uses uses the one that has been assigned. Returns ------- The proper value for the attribute. """ attr1 = getattr(this, attrName) attr2 = getattr(other, attrName) if attr1 is not None and attr2 is not None: raise AttributeError( "Cannot merge {} and {}, the attribute `{}` has been assigned on bothinstances.".format( this, other, attrName ) ) return attr1 if attr1 is not None else attr2 ================================================ FILE: armi/operators/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Operators build and hold the ARMI reactor model and perform operations on it. Different operators may perform different calculation loops upon the reactor model. Operators can be thought of as schedulers for the interactions between the various ARMI physics packages and the reactor object(s). Operators are generally created by a :py:mod:`armi.cases` object and are chosen by the ``runType`` setting. Custom operators may be introduced via the :py:mod:`armi.plugins` system. The ARMI framework comes with two general-purpose Operators, which can be used for very real analysis given a proper set of plugins. The :py:class:`~armi.operators.operator.Operator` is the Standard operator, which loops over cycles and timenodes. The :py:class:`~armi.operators.snapshots.OperatorSnapshots` is the Snapshots operator, which loops over specific point in time from a previous Standard run and performs additional analysis (e.g. for detailed follow-on analysis/transients). 
See Also -------- armi.cases : Builds operators armi.reactor : The reactor model that the operator operates upon armi.interfaces : Code that operators schedule to perform the real analysis or math on the reactor model """ # ruff: noqa: I001 from armi import context, getPluginManagerOrFail from armi.operators.runTypes import RunTypes from armi.operators.operator import Operator from armi.operators.operatorMPI import OperatorMPI from armi.operators.snapshots import OperatorSnapshots def factory(cs): """Choose an operator subclass and instantiate it object based on settings.""" return getOperatorClassFromSettings(cs)(cs) def getOperatorClassFromSettings(cs): """Choose a operator class based on user settings (possibly from plugin). Parameters ---------- cs : Settings Returns ------- Operator : Operator Raises ------ ValueError If the Operator class cannot be determined from the settings. """ runType = cs["runType"] if runType == RunTypes.STANDARD: if context.MPI_SIZE == 1: return Operator else: return OperatorMPI elif runType == RunTypes.SNAPSHOTS: return OperatorSnapshots plugInOperator = None for potentialOperator in getPluginManagerOrFail().hook.getOperatorClassFromRunType(runType=runType): if plugInOperator: raise ValueError( "More than one Operator class was " f"recognized for runType `{runType}`: " f"{plugInOperator} and {potentialOperator}. " "This is not allowed. Please adjust plugin config." ) plugInOperator = potentialOperator if plugInOperator: return plugInOperator raise ValueError( f"No valid operator was found for runType: `{runType}`. Please adjust settings or plugin configuration." ) ================================================ FILE: armi/operators/operator.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The standard ARMI operator.

This builds and maintains the interface stack and loops through it for a certain number of
cycles with a certain number of timenodes per cycle.

This is analogous to a real reactor operating over some period of time, often from initial
startup, through the various cycles, and out to the end of plant life.
"""

import collections
import os
import re
import time
from typing import Optional, Tuple

# NOTE(review): several of these names (re, time, Tuple, CONF_SHUFFLE_LOGIC,
# GlobalFluxInterfaceUsingExecuters, CONF_DEFERRED_*, CONF_TIGHT_COUPLING, pathTools) are not
# used in this visible portion of the file — presumably used further down; verify before removing.
from armi import context, interfaces, runLog
from armi.bookkeeping import db, memoryProfiler
from armi.bookkeeping.report import reportingUtils
from armi.operators.runTypes import RunTypes
from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC
from armi.physics.neutronics.globalFlux.globalFluxInterface import (
    GlobalFluxInterfaceUsingExecuters,
)
from armi.settings import settingsValidation
from armi.settings.fwSettings.globalSettings import (
    CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION,
    CONF_DEFERRED_INTERFACE_NAMES,
    CONF_DEFERRED_INTERFACES_CYCLE,
    CONF_TIGHT_COUPLING,
    CONF_TIGHT_COUPLING_MAX_ITERS,
)
from armi.utils import (
    codeTiming,
    getAvailabilityFactors,
    getBurnSteps,
    getCycleLengths,
    getCycleNames,
    getMaxBurnSteps,
    getPowerFractions,
    getPreviousTimeNode,
    getStepLengths,
    pathTools,
    units,
)


class Operator:
    """
    Orchestrate an ARMI run, building all the pieces, looping through the interfaces, and
    manipulating the reactor.

    This Operator loops over a user-input number of cycles, each with a user-input number of
    subcycles (called time nodes). It calls a series of interaction hooks on each of the
    :py:class:`~armi.interfaces.Interface` in the Interface Stack.

    .. figure:: /.static/armi_general_flowchart.png
        :align: center

        **Figure 1.** The computational flow of the interface hooks in a Standard Operator

    .. note:: The :doc:`/developer/guide` has some additional narrative on this topic.

    .. impl:: An operator will have a reactor object to communicate between plugins.
        :id: I_ARMI_OPERATOR_COMM
        :implements: R_ARMI_OPERATOR_COMM

        A major design feature of ARMI is that the Operator orchestrates the simulation, and as
        part of that, the Operator has access to the Reactor data model. In code, this just means
        the reactor object is a mandatory attribute of an instance of the Operator. But
        conceptually, this means that while the Operator drives the simulation of the reactor,
        all code has access to the same copy of the reactor data model. This is a crucial idea
        that allows disparate external nuclear models to interact; they interact with the ARMI
        reactor data model.

    .. impl:: An operator is built from user settings.
        :id: I_ARMI_OPERATOR_SETTINGS
        :implements: R_ARMI_OPERATOR_SETTINGS

        A major design feature of ARMI is that a run is built from user settings. In code, this
        means that a case ``Settings`` object is passed into this class to initialize an
        Operator. Conceptually, this means that the Operator that controls a reactor simulation
        is defined by user settings. Because developers can create their own settings, the user
        can control an ARMI simulation with arbitrary granularity in this way. In practice,
        settings common control things like: how many cycles a reactor is being modeled for, how
        many timesteps are to be modeled per time node, the verbosity of the logging of the run,
        and which modeling steps will be run.

    .. impl:: The operator shall advance the reactor through time.
        :id: I_ARMI_DB_TIME2
        :implements: R_ARMI_DB_TIME

        A major design feature of any scientific model is time evolution of the physical system.
        The operator is in charge of driving the reactor through time. It sets various
        parameters that define the temporal position of the reactor: cycle, node, timeNode, and
        time. This information is then stored in the output database.

    Attributes
    ----------
    cs : Settings
        Global settings that define the run.

    cycleNames : list of str
        The name of each cycle. Cycles without a name are `None`.

    stepLengths : list of list of float
        A two-tiered list, where primary indices correspond to cycle and secondary indices
        correspond to the length of each intra-cycle step (in days).

    cycleLengths : list of float
        The duration of each individual cycle in a run (in days). This is the entire cycle, from
        startup to startup and includes outage time.

    burnSteps : list of int
        The number of sub-cycles in each cycle.

    availabilityFactors : list of float
        The fraction of time in a cycle that the plant is producing power. Note that capacity
        factor is always less than or equal to this, depending on the power fraction achieved
        during each cycle. Note that this is not a two-tiered list like stepLengths or
        powerFractions, because each cycle can have only one availabilityFactor.

    powerFractions : list of list of float
        A two-tiered list, where primary indices correspond to cycles and secondary indices
        correspond to the fraction of full rated capacity that the plant achieves during that
        step of the cycle. Zero power fraction can indicate decay-only cycles.

    interfaces : list
        The Interface objects that will operate upon the reactor
    """

    inspector = settingsValidation.Inspector

    def __init__(self, cs):
        """
        Constructor for operator.

        Parameters
        ----------
        cs : Settings
            Global settings that define the run.

        Raises
        ------
        OSError
            If unable to create the FAST_PATH directory.
""" self.r = None self.cs = cs runLog.LOG.startLog(self.cs.caseTitle) self.timer = codeTiming.MasterTimer.getMasterTimer() self.interfaces = [] self.restartData = [] self.loadedRestartData = [] self._cycleNames = None self._stepLengths = None self._cycleLengths = None self._burnSteps = None self._maxBurnSteps = None self._powerFractions = None self._availabilityFactors = None self._convergenceSummary = None # Create the welcome headers for the case (case, input, machine, and some basic reactor information) reportingUtils.writeWelcomeHeaders(self, cs) self._initFastPath() @property def burnSteps(self): if not self._burnSteps: self._burnSteps = getBurnSteps(self.cs) if self._burnSteps == [] and self.cs["nCycles"] == 1: # it is possible for there to be one cycle with zero burn up, in which case burnSteps is an empty list pass else: self._checkReactorCycleAttrs({"burnSteps": self._burnSteps}) return self._burnSteps @property def maxBurnSteps(self): if not self._maxBurnSteps: self._maxBurnSteps = getMaxBurnSteps(self.cs) return self._maxBurnSteps @property def stepLengths(self): """ Calculate step lengths. .. impl:: Calculate step lengths from cycles and burn steps. :id: I_ARMI_FW_HISTORY :implements: R_ARMI_FW_HISTORY In all computational modeling of physical systems, it is necessary to break time into discrete chunks. In reactor modeling, it is common to first break the time a reactor is simulated for into the practical cycles the reactor runs. And then those cycles are broken down into smaller chunks called burn steps. The final step lengths this method returns is a two-tiered list, where primary indices correspond to the cycle and secondary indices correspond to the length of each intra-cycle step (in days). 
""" if not self._stepLengths: self._stepLengths = getStepLengths(self.cs) if self._stepLengths == [] and self.cs["nCycles"] == 1: # it is possible for there to be one cycle with zero burn up, in which case stepLengths is an empty list pass else: self._checkReactorCycleAttrs({"Step lengths": self._stepLengths}) self._consistentPowerFractionsAndStepLengths() return self._stepLengths @property def cycleLengths(self): if not self._cycleLengths: self._cycleLengths = getCycleLengths(self.cs) self._checkReactorCycleAttrs({"cycleLengths": self._cycleLengths}) return self._cycleLengths @property def powerFractions(self): if not self._powerFractions: self._powerFractions = getPowerFractions(self.cs) self._checkReactorCycleAttrs({"powerFractions": self._powerFractions}) self._consistentPowerFractionsAndStepLengths() return self._powerFractions @property def availabilityFactors(self): if not self._availabilityFactors: self._availabilityFactors = getAvailabilityFactors(self.cs) self._checkReactorCycleAttrs({"availabilityFactors": self._availabilityFactors}) return self._availabilityFactors @property def cycleNames(self): if not self._cycleNames: self._cycleNames = getCycleNames(self.cs) self._checkReactorCycleAttrs({"Cycle names": self._cycleNames}) return self._cycleNames @staticmethod def _initFastPath(): """ Create the FAST_PATH directory for fast local operations. Notes ----- The FAST_PATH was once created at import-time in order to support modules that use FAST_PATH without operators (e.g. Database). However, we decided to leave FAST_PATH as the CWD in INTERACTIVE mode, so this should not be a problem anymore, and we can safely move FAST_PATH creation back into the Operator. If the operator is being used interactively (e.g. at a prompt) we will still use a temporary local fast path (in case the user is working on a slow network path). 
""" context.activateLocalFastPath() try: os.makedirs(context.getFastPath()) except OSError: # If FAST_PATH exists already that generally should be an error because different processes will be stepping # on each other. The exception to this rule is in cases that instantiate multiple operators in one process # (e.g. unit tests that loadTestReactor). Since the FAST_PATH is set at import, these will use the same path # multiple times. We pass here for that reason. if not os.path.exists(context.getFastPath()): # if it actually doesn't exist, that's an actual error. Raise raise def _checkReactorCycleAttrs(self, attrsDict): """Check that the list has nCycles number of elements.""" for name, param in attrsDict.items(): if len(param) != self.cs["nCycles"]: raise ValueError( "The `{}` setting did not have a length consistent with the number of cycles.\n" "Expected {} value(s), but only had {} defined.\n" "Current input: {}".format(name, self.cs["nCycles"], len(param), param) ) def _consistentPowerFractionsAndStepLengths(self): """Check that the internally-resolved _powerFractions and _stepLengths have consistent shapes, if they exist.""" if self._powerFractions and self._stepLengths: for cycleIdx in range(len(self._powerFractions)): if len(self._powerFractions[cycleIdx]) != len(self._stepLengths[cycleIdx]): raise ValueError( "The number of entries in lists for subcycle power fractions and sub-steps are inconsistent in " f"cycle {cycleIdx}" ) @property def atEOL(self): """ Return whether we are approaching EOL. For the standard operator, this will return true when the current cycle is the last cycle (cs["nCycles"] - 1). Other operators may need to impose different logic. """ return self.r.p.cycle == self.cs["nCycles"] - 1 def initializeInterfaces(self, r): """ Attach the reactor to the operator and initialize all interfaces. 
This does not occur in `__init__` so that the ARMI operator can be initialized before a reactor is created, which is useful for summarizing the case information quickly. Parameters ---------- r : Reactor The Reactor object to attach to this Operator. """ self.r = r r.o = self with self.timer.getTimer("Interface Creation"): self.createInterfaces() self._processInterfaceDependencies() if context.MPI_RANK == 0: runLog.header("=========== Interface Stack Summary ===========") runLog.info(reportingUtils.getInterfaceStackSummary(self)) self.interactAllInit() else: self._attachInterfaces() self._loadRestartData() def __repr__(self): return "<{} {} {}>".format(self.__class__.__name__, self.cs["runType"], self.cs) def __enter__(self): """Context manager to enable interface-level error handling hooks.""" return self def __exit__(self, exception_type, exception_value, stacktrace): if any([exception_type, exception_value, stacktrace]): runLog.error(r"{}\n{}\{}".format(exception_type, exception_value, stacktrace)) self.interactAllError() def operate(self): """ Run the operation loop. See Also -------- mainOperator : run the operator loop on the primary MPI node (for parallel runs) workerOperate : run the operator loop for the worker MPI nodes """ self._mainOperate() def _mainOperate(self): """Main loop for a standard ARMI run. Steps through time interacting with the interfaces.""" dbi = self.getInterface("database") if dbi is not None and dbi.enabled(): dbi.initDB() if self.cs["loadStyle"] != "fromInput" and self.cs["runType"] != RunTypes.SNAPSHOTS: self.interactAllRestart(dbi) self.interactAllBOL() startingCycle = self.r.p.cycle # may be starting at t != 0 in restarts for cycle in range(startingCycle, self.cs["nCycles"]): keepGoing = self._cycleLoop(cycle, startingCycle) if not keepGoing: break self.interactAllEOL() def interactAllRestart(self, dbi: Optional[db.DatabaseInterface]): """Prepare for a restart simulation. 
Some steps are necessary to be taken after interfaces are constructed but before we start the real simulation. Crucially, we need to load the previous time point from the database. The previous time node is chosen because that is the last point where we are certain we have valid data and can safely recover. If restarting at BOC, trigger the EOC actions from the previous cycle. This is necessary to perform any fuel management operations that would have happened at the end of the previous cycle. """ startCycle = self.cs["startCycle"] startNode = self.cs["startNode"] prevTimeNode = getPreviousTimeNode(startCycle, startNode, self.cs) if dbi is not None: dbi.prepRestartRun() else: raise ValueError("No database interface means nothing is responsible for restarting from DB") activeInterfaces = self.getActiveInterfaces("Restart", excludedInterfaceNames=("database",)) self._interactAll("Restart", activeInterfaces, (startCycle, startNode), prevTimeNode) if startNode == 0: runLog.important("Calling `o.interactAllEOC` due to loading the last time node of the previous cycle.") self.interactAllEOC(prevTimeNode[0]) # advance time time since we loaded the previous time step self.r.p.cycle = startCycle self.r.p.timeNode = startNode def _cycleLoop(self, cycle, startingCycle): """Run the portion of the main loop that happens each cycle.""" self.r.p.cycleLength = self.cycleLengths[cycle] self.r.p.availabilityFactor = self.availabilityFactors[cycle] self.r.p.cycle = cycle self.r.core.p.coupledIteration = 0 if cycle == startingCycle: startingNode = self.r.p.timeNode else: startingNode = 0 self.r.p.timeNode = startingNode halt = self.interactAllBOC(self.r.p.cycle) if halt: return False # read total core power from settings (power or powerDensity) basicPower = self.cs["power"] or (self.cs["powerDensity"] * self.r.core.getHMMass()) for timeNode in range(startingNode, int(self.burnSteps[cycle])): self.r.core.p.power = self.powerFractions[cycle][timeNode] * basicPower self.r.p.capacityFactor 
= self.r.p.availabilityFactor * self.powerFractions[cycle][timeNode] self.r.p.stepLength = self.stepLengths[cycle][timeNode] self._timeNodeLoop(cycle, timeNode) else: # do one last node at the end using the same power as the previous node timeNode = self.burnSteps[cycle] if self.burnSteps[cycle] == 0: # this is a zero-burnup case powFrac = 1 else: powFrac = self.powerFractions[cycle][timeNode - 1] self.r.core.p.power = powFrac * basicPower self._timeNodeLoop(cycle, timeNode) self.interactAllEOC(self.r.p.cycle) return True def _timeNodeLoop(self, cycle, timeNode): """Run the portion of the main loop that happens each subcycle.""" self.r.p.timeNode = timeNode if timeNode == 0: dt = 0 else: dt = self.r.o.stepLengths[cycle][timeNode - 1] / units.DAYS_PER_YEAR self.r.p.time = self.r.p.time + dt self.interactAllEveryNode(cycle, timeNode) self._performTightCoupling(cycle, timeNode) def _performTightCoupling(self, cycle: int, timeNode: int, writeDB: bool = True): """If requested, perform tight coupling and write out database. Notes ----- writeDB is False for OperatorSnapshots as the DB gets written at EOL. """ if not self.couplingIsActive(): # no coupling was requested return skipCycles = tuple(int(val) for val in self.cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION]) if cycle in skipCycles: runLog.warning( f"interactAllCoupled disabled this cycle ({self.r.p.cycle}) due to " "`cyclesSkipTightCouplingInteraction` setting." ) else: self._convergenceSummary = collections.defaultdict(list) for coupledIteration in range(self.cs[CONF_TIGHT_COUPLING_MAX_ITERS]): self.r.core.p.coupledIteration = coupledIteration + 1 converged = self.interactAllCoupled(coupledIteration) if converged: runLog.important(f"Tight coupling iterations for c{cycle:02d}n{timeNode:02d} have converged!") break if not converged: runLog.warning( f"Tight coupling iterations for c{cycle:02d}n{timeNode:02d} have not converged!" 
f" The maximum number of iterations, {self.cs[CONF_TIGHT_COUPLING_MAX_ITERS]}, was reached." ) if writeDB: # database has not yet been written, so we need to write it. dbi = self.getInterface("database") dbi.writeDBEveryNode() def _interactAll(self, interactionName, activeInterfaces, *args): """ Loop over the supplied activeInterfaces and perform the supplied interaction on each. Notes ----- This is the base method for the other ``interactAll`` methods. """ interactMethodName = "interact{}".format(interactionName) printMemUsage = self.cs["verbosity"] == "debug" and self.cs["debugMem"] halt = False cycleNodeTag = self._expandCycleAndTimeNodeArgs(interactionName) runLog.header("=========== Triggering {} Event ===========".format(interactionName + cycleNodeTag)) for statePointIndex, interface in enumerate(activeInterfaces, start=1): self.printInterfaceSummary(interface, interactionName, statePointIndex) # maybe make this a context manager if printMemUsage: memBefore = memoryProfiler.PrintSystemMemoryUsageAction() memBefore.broadcast() memBefore.invoke(self, self.r, self.cs) interactionMessage = f"{interface.name}.{interactionName}" with self.timer.getTimer(interactionMessage): interactMethod = getattr(interface, interactMethodName) halt = halt or interactMethod(*args) if printMemUsage: memAfter = memoryProfiler.PrintSystemMemoryUsageAction() memAfter.broadcast() memAfter.invoke(self, self.r, self.cs) memAfter -= memBefore memAfter.printUsage("after {:25s} {:15s} interaction".format(interface.name, interactionName)) # Allow inherited classes to clean up things after an interaction self._finalizeInteract() runLog.header("=========== Completed {} Event ===========\n".format(interactionName + cycleNodeTag)) return halt def _finalizeInteract(self): """Member called after each interface has completed its interaction. Useful for cleaning up data. 
""" pass def printInterfaceSummary(self, interface, interactionName, statePointIndex): """ Log which interaction point is about to be executed. This looks better as multiple lines but it's a lot easier to grep as one line. We leverage newlines instead of long banners to save disk space. """ nodeInfo = self._expandCycleAndTimeNodeArgs(interactionName) line = "=========== {:02d} - {:30s} {:15s} ===========".format( statePointIndex, interface.name, interactionName + nodeInfo ) runLog.header(line) def _expandCycleAndTimeNodeArgs(self, interactionName): """Return text annotating information for current run event. Notes ----- - Init, BOL, EOL: empty - Everynode: cycle, time node - BOC, EOC: cycle number - Coupled: cycle, time node, iteration number """ if interactionName == "Coupled": cycleNodeInfo = ( f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, " f"year {'{0:.2f}'.format(self.r.p.time)} - iteration " f"{self.r.core.p.coupledIteration}" ) elif interactionName in ("BOC", "EOC"): cycleNodeInfo = f" - timestep: cycle {self.r.p.cycle}" # - timestep: cycle 2 elif interactionName in ("Init", "BOL", "EOL"): cycleNodeInfo = "" else: cycleNodeInfo = ( f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year {'{0:.2f}'.format(self.r.p.time)}" ) return cycleNodeInfo def interactAllInit(self): """Call interactInit on all interfaces in the stack after they are initialized.""" self._interactAll("Init", self.getInterfaces()) def interactAllBOL(self, excludedInterfaceNames=()): """ Call interactBOL for all interfaces in the interface stack at beginning-of-life. All enabled or bolForce interfaces will be called excluding interfaces with excludedInterfaceNames. 
""" activeInterfaces = self.getActiveInterfaces("BOL", excludedInterfaceNames) self._interactAll("BOL", activeInterfaces) def interactAllBOC(self, cycle): """Interact at beginning of cycle of all enabled interfaces.""" activeInterfaces = self.getActiveInterfaces("BOC", cycle=cycle) return self._interactAll("BOC", activeInterfaces, cycle) def interactAllEveryNode(self, cycle, tn, excludedInterfaceNames=()): """ Call the interactEveryNode hook for all enabled interfaces. All enabled interfaces will be called excluding interfaces with excludedInterfaceNames. Parameters ---------- cycle : int The cycle that is currently being run. Starts at 0 tn : int The time node that is currently being run (0 for BOC, etc.) excludedInterfaceNames : list, optional Names of interface names that will not be interacted with. """ activeInterfaces = self.getActiveInterfaces("EveryNode", excludedInterfaceNames) self._interactAll("EveryNode", activeInterfaces, cycle, tn) def interactAllEOC(self, cycle, excludedInterfaceNames=()): """Interact end of cycle for all enabled interfaces.""" self.r.p.time += self.r.p.cycleLength * (1 - self.r.p.availabilityFactor) / units.DAYS_PER_YEAR activeInterfaces = self.getActiveInterfaces("EOC", excludedInterfaceNames) self._interactAll("EOC", activeInterfaces, cycle) def interactAllEOL(self, excludedInterfaceNames=()): """ Run interactEOL for all enabled interfaces. Notes ----- If the interfaces are flagged to be reversed at EOL, they are separated from the main stack and appended at the end in reverse order. This allows, for example, an interface that must run first to also run last. """ activeInterfaces = self.getActiveInterfaces("EOL", excludedInterfaceNames) self._interactAll("EOL", activeInterfaces) def interactAllCoupled(self, coupledIteration): """ Run all interfaces that are involved in tight physics coupling. .. impl:: Physics coupling is driven from Operator. 
            :id: I_ARMI_OPERATOR_PHYSICS1
            :implements: R_ARMI_OPERATOR_PHYSICS

            This method runs all the interfaces that are defined as part of the tight physics
            coupling of the reactor. Then it returns if the coupling has converged or not.

            Tight coupling implies the operator has split iterations between two or more physics
            solvers at the same solution point in simulated time. For example, a flux solution
            might be computed, then a temperature solution, and then another flux solution based
            on updated temperatures (which updates densities, dimensions, and Doppler).

            This is distinct from loose coupling, which simply uses the temperature values from
            the previous timestep in the current flux solution. It's also distinct from full
            coupling where all fields are solved simultaneously. ARMI supports tight and loose
            coupling.
        """
        activeInterfaces = self.getActiveInterfaces("Coupled")

        # Store the previous iteration values before calling interactAllCoupled for each interface.
        for interface in activeInterfaces:
            if interface.coupler is not None:
                interface.coupler.storePreviousIterationValue(interface.getTightCouplingValue())

        self._interactAll("Coupled", activeInterfaces, coupledIteration)

        return self._checkTightCouplingConvergence(activeInterfaces)

    def _checkTightCouplingConvergence(self, activeInterfaces: list):
        """Check if interfaces are converged.

        Parameters
        ----------
        activeInterfaces : list
            the list of active interfaces on the operator

        Notes
        -----
        This is split off from self.interactAllCoupled to accommodate testing.

        Interfaces without a coupler are skipped; if no interface has a coupler,
        ``all([])`` is True and the coupling is reported converged.
        """
        # Summarize the coupled results and the convergence status.
        converged = []
        for interface in activeInterfaces:
            coupler = interface.coupler
            if coupler is not None:
                key = f"{interface.name}: {coupler.parameter}"
                converged.append(coupler.isConverged(interface.getTightCouplingValue()))
                self._convergenceSummary[key].append(coupler.eps)

        reportingUtils.writeTightCouplingConvergenceSummary(self._convergenceSummary)
        return all(converged)

    def interactAllError(self):
        """Interact when an error is raised by any other interface. Provides a wrap-up option on the way to a crash."""
        for i in self.interfaces:
            runLog.extra("Error-interacting with {0}".format(i.name))
            i.interactError()

    def createInterfaces(self):
        """
        Dynamically discover all available interfaces and call their factories, potentially adding
        them to the stack.

        An operator contains an ordered list of interfaces. These communicate between the core ARMI
        structure and auxiliary computational modules and/or external codes. At specified
        interaction points in a run, the list of interfaces is executed.

        Each interface optionally defines interaction "hooks" for each of the interaction points.
        The normal interaction points are BOL, BOC, every node, EOC, and EOL. If an interface
        defines an interactBOL method, that will run at BOL, and so on.

        The majority of ARMI capabilities lie within interfaces, and this architecture provides
        much of the flexibility of ARMI.

        See Also
        --------
        addInterface : Adds a particular interface to the interface stack.
        armi.interfaces.STACK_ORDER : A system to determine the required order of interfaces.
        armi.interfaces.getActiveInterfaceInfo : Collects the interface classes from relevant packages.
        """
        runLog.header("=========== Creating Interfaces ===========")
        interfaceList = interfaces.getActiveInterfaceInfo(self.cs)

        for klass, kwargs in interfaceList:
            self.addInterface(klass(self.r, self.cs), **kwargs)

    def addInterface(
        self,
        interface,
        index=None,
        reverseAtEOL=False,
        enabled=True,
        bolForce=False,
    ):
        """
        Attach an interface to this operator.
        Notes
        -----
        Order matters.

        Parameters
        ----------
        interface : Interface
            the interface to add
        index : int, optional.
            Will insert the interface at this index rather than appending it to the end of the list
        reverseAtEOL : bool, optional.
            The interactEOL hooks will run in reverse order if True. All interfaces with this flag
            will be run as a group after all other interfaces. This allows something to run first
            at BOL and last at EOL, etc.
        enabled : bool, optional
            If enabled, will run at all hooks. If not, won't run any (with possible exception at
            BOL, see bolForce). Whenever possible, Interfaces that are needed during runtime for
            some peripheral operation but not during the main loop should be instantiated by the
            part of the code that actually needs the interface.
        bolForce: bool, optional
            If true, will run at BOL hook even if disabled. This is often a sign that the interface
            in question should be ephemerally instantiated on demand rather than added to the
            interface stack at all.

        Raises
        ------
        RuntimeError
            If an interface of the same name or purpose is already attached to the Operator.
        """
        if self.getInterface(interface.name):
            raise RuntimeError(f"An interface with name {interface.name} is already attached.")

        # Check for a purpose collision: keep whichever of the two is the more derived class.
        iFunc = self.getInterface(purpose=interface.purpose)

        if iFunc:
            if issubclass(type(iFunc), type(interface)):
                runLog.info(
                    "Ignoring Interface {newFunc} because existing interface {old} already more specific".format(
                        newFunc=interface, old=iFunc
                    )
                )
                return
            elif issubclass(type(interface), type(iFunc)):
                self.removeInterface(iFunc)
                runLog.info(
                    "Will Insert Interface {newFunc} because it is a subclass of {old} interface and "
                    " more derived".format(newFunc=interface, old=iFunc)
                )
            else:
                raise RuntimeError(
                    "Cannot add {0}; the {1} already is designated "
                    "as the {2} interface. Multiple interfaces of the same "
                    "purpose is not supported.".format(interface, iFunc, interface.purpose)
                )

        runLog.debug("Adding {0}".format(interface))

        if index is None:
            self.interfaces.append(interface)
        else:
            self.interfaces.insert(index, interface)

        if reverseAtEOL:
            interface.reverseAtEOL = True

        if not enabled:
            interface.enabled(False)

        interface.bolForce(bolForce)
        interface.attachReactor(self, self.r)

    def _processInterfaceDependencies(self):
        """
        Check all interfaces' dependencies and adds missing ones.

        Notes
        -----
        Order does not matter here because the interfaces added here are disabled and playing
        supporting role so it is not intended to run on the interface stack. They will be called
        by other interfaces.

        As mentioned in :py:meth:`addInterface`, it may be better to just instantiate utility
        code when it's needed rather than rely on this system.
        """
        # Make multiple passes in case there's one added that depends on another.
        for _dependencyPass in range(5):
            numInterfaces = len(self.interfaces)
            # manipulation friendly, so it's ok to add additional things to the stack
            for i in self.getInterfaces():
                for dependency in i.getDependencies(self.cs):
                    name = dependency.name
                    purpose = dependency.purpose
                    klass = dependency

                    if not self.getInterface(name, purpose=purpose):
                        runLog.extra(
                            "Attaching {} interface (disabled, BOL forced) due to dependency in {}".format(
                                klass.name, i.name
                            )
                        )
                        self.addInterface(klass(r=self.r, cs=self.cs), enabled=False, bolForce=True)
            if len(self.interfaces) == numInterfaces:
                # No new interfaces were added this pass: the stack is stable.
                break
        else:
            # for/else: 5 passes kept adding interfaces — something is cyclic or unbounded.
            raise RuntimeError("Interface dependency resolution did not converge.")

    def removeAllInterfaces(self):
        """Removes all of the interfaces."""
        for interface in self.interfaces:
            interface.detachReactor()
        self.interfaces = []

    def removeInterface(self, interface=None, interfaceName=None):
        """
        Remove a single interface from the interface stack.

        Parameters
        ----------
        interface : Interface, optional
            An actual interface object to remove.
        interfaceName : str, optional
            The name of the interface to remove.

        Returns
        -------
        success : boolean
            True if the interface was removed
            False if it was not (because it wasn't there to be removed)
        """
        if interfaceName:
            # Name takes precedence: look the object up, overwriting any passed-in interface.
            interface = self.getInterface(interfaceName)

        if interface and interface in self.interfaces:
            self.interfaces.remove(interface)
            interface.detachReactor()
            return True
        else:
            runLog.warning("Cannot remove interface {0} because it is not in the interface stack.".format(interface))
            return False

    def getInterface(self, name=None, purpose=None):
        """
        Returns a specific interface from the stack by its name or more generic purpose.

        Parameters
        ----------
        name : str, optional
            Interface name
        purpose : str
            Interface purpose (general, like 'globalFlux','th',etc.). This is useful when you need
            the ___ solver (e.g. globalFlux) but don't care which particular one is active
            (e.g. SERPENT vs. DIF3D)

        Raises
        ------
        RuntimeError
            If there are more than one interfaces of the given name or purpose.
        """
        candidateI = None
        for i in self.interfaces:
            if (name and i.name == name) or (purpose and i.purpose == purpose):
                if candidateI is None:
                    candidateI = i
                else:
                    # A second match means the request is ambiguous.
                    raise RuntimeError(
                        "Cannot retrieve a single interface as there are multiple "
                        "interfaces with name {} or purpose {} attached. ".format(name, purpose)
                    )

        return candidateI

    def interfaceIsActive(self, name):
        """True if named interface exists and is enabled.

        Notes
        -----
        This logic is significantly simpler than getActiveInterfaces. This logic only touches the
        enabled() flag, but doesn't take into account the case settings.
        """
        i = self.getInterface(name)
        return i and i.enabled()

    def getInterfaces(self):
        """
        Get list of interfaces in interface stack.

        .. impl:: An operator will expose an ordered list of interfaces.
            :id: I_ARMI_OPERATOR_INTERFACES
            :implements: R_ARMI_OPERATOR_INTERFACES

            This method returns an ordered list of instances of the Interface class. This list is
            useful because at any time node in the reactor simulation, these interfaces will be
            called in sequence to perform various types of calculations.

            It is important to note that this Operator instance has a list of Plugins, and each of
            those Plugins potentially defines multiple Interfaces. And these Interfaces define
            their own order, separate from the ordering of the Plugins.

        Notes
        -----
        Returns a copy so you can manipulate the list in an interface, like dependencies.
        """
        return self.interfaces[:]

    def getActiveInterfaces(
        self,
        interactState: str,
        excludedInterfaceNames: Tuple[str] = (),
        cycle: int = 0,
    ):
        """Retrieve the interfaces which are active for a given interaction state.

        Parameters
        ----------
        interactState: str
            A string dictating which interaction state the interfaces should be pulled for.
        excludedInterfaceNames: Tuple[str]
            A tuple of strings dictating which interfaces should be manually skipped.
        cycle: int
            The given cycle. 0 by default.

        Returns
        -------
        activeInterfaces: List[Interfaces]
            The interfaces deemed active for the given interactState.
        """
        # Validate the inputs
        if excludedInterfaceNames is None:
            excludedInterfaceNames = ()

        if interactState not in ("BOL", "BOC", "EveryNode", "EOC", "EOL", "Coupled", "Restart"):
            raise ValueError(f"{interactState} is an unknown interaction state!")

        # Ensure the interface is enabled.
        enabled = lambda i: i.enabled()
        if interactState == "BOL":
            # bolForce lets a disabled interface still run its BOL hook (see addInterface).
            enabled = lambda i: i.enabled() or i.bolForce()

        # Ensure the name of the interface isn't in some exclusion list.
        nameCheck = lambda i: True
        if interactState in ("EveryNode", "EOC", "EOL"):
            nameCheck = lambda i: i.name not in excludedInterfaceNames
        elif interactState == "BOC" and cycle < self.cs[CONF_DEFERRED_INTERFACES_CYCLE]:
            # Deferred interfaces are held out of early cycles per the case settings.
            nameCheck = lambda i: i.name not in self.cs[CONF_DEFERRED_INTERFACE_NAMES]
        elif interactState == "BOL":
            nameCheck = (
                lambda i: i.name not in self.cs[CONF_DEFERRED_INTERFACE_NAMES] and i.name not in excludedInterfaceNames
            )

        # Finally, find the active interfaces.
        activeInterfaces = [i for i in self.interfaces if enabled(i) and nameCheck(i)]

        # Special Case: At EOL we reverse the order of some interfaces.
        if interactState == "EOL":
            actInts = [ii for ii in activeInterfaces if not ii.reverseAtEOL]
            actInts.extend(reversed([ii for ii in activeInterfaces if ii.reverseAtEOL]))
            activeInterfaces = actInts

        return activeInterfaces

    def reattach(self, r, cs=None):
        """Add links to globally-shared objects to this operator and all interfaces.

        Notes
        -----
        Could be a good opportunity for weakrefs.
        """
        self.r = r
        self.r.o = self
        if cs is not None:
            self.cs = cs
        for i in self.interfaces:
            i.r = r
            i.o = self
            if cs is not None:
                i.cs = cs

    def detach(self):
        """
        Break links to globally-shared objects to this operator and all interfaces.

        May be required prior to copying these objects over the network.

        Notes
        -----
        Could be a good opportunity for weakrefs.
        """
        if self.r:
            self.r.o = None
            # Sever child->parent backrefs so the reactor tree can be garbage collected/pickled.
            for comp in self.r:
                comp.parent = None
        self.r = None
        for i in self.interfaces:
            i.o = None
            i.r = None
            i.cs = None

    def _attachInterfaces(self):
        """
        Links all the interfaces in the interface stack to the operator, reactor, and cs.

        See Also
        --------
        createInterfaces : creates all interfaces
        addInterface : adds a single interface to the stack
        """
        for i in self.interfaces:
            i.attachReactor(self, self.r)

    def _loadRestartData(self):
        """
        Read a restart.dat file which contains all the fuel management factorLists and cycle lengths.
        Notes
        -----
        This allows the ARMI to do the same shuffles that it did last time, assuming fuel
        management logic has not changed.

        Note, it would be better if the moves were just read from a table in the database.
        """
        restartName = self.cs.caseTitle + ".restart.dat"
        if not os.path.exists(restartName):
            # No restart file is a normal case; silently skip.
            return
        else:
            runLog.info(f"Loading restart data from {restartName}")
        with open(restartName, "r") as restart:
            for line in restart:
                # Each data line looks like: cycle=C time=T factorList=[...] or {...}
                match = re.search(
                    r"cycle=(\d+)\s+time=(\d+\.\d+[Ee+-]+\d+)\s+factorList=[\[\{](.+?)[\]\}]",
                    line,
                )
                if match:
                    newStyle = re.findall(r"'(\w+)':\s*(\d*\.?\d*)", line)
                    if newStyle:
                        # key-based factorList. load a dictionary.
                        factorList = {}
                        for key, val in newStyle:
                            factorList[key] = float(val)
                    else:
                        # list based factorList. Load a list. (old style, backward compat)
                        try:
                            factorList = [float(item) for item in match.group(3).split(",")]
                        except ValueError:
                            # Non-numeric entries: keep the raw strings.
                            factorList = match.group(3).split(",")
                    runLog.debug("loaded restart data for cycle %d" % float(match.group(1)))
                    self.restartData.append((float(match.group(1)), float(match.group(2)), factorList))
        runLog.info("loaded restart data for {0} cycles".format(len(self.restartData)))

    def loadState(self, cycle, timeNode, timeStepName="", fileName=None, updateMassFractions=None):
        """
        Convenience method reroute to the database interface state reload method.

        See Also
        --------
        armi.bookkeeping.db.loadOperator:
            A method for loading an operator given a database. loadOperator does not require an
            operator prior to loading the state of the reactor. loadState does, and therefore
            armi.init must be called which requires access to the blueprints, settings, and
            geometry files. These files are stored implicitly on the database, so loadOperator
            creates the reactor first, and then attaches it to the operator. loadState should be
            used if you are in the middle of an ARMI calculation and need load a different time
            step.
            If you are loading from a fresh ARMI session, either method is sufficient if you have
            access to all the input files.
        """
        dbi = self.getInterface("database")
        if not dbi:
            raise RuntimeError("Cannot load from snapshot without a database interface")

        if updateMassFractions is not None:
            runLog.warning("deprecated: updateMassFractions is no longer a valid option for loadState")

        dbi.loadState(cycle, timeNode, timeStepName, fileName)

    def snapshotRequest(self, cycle, node, iteration=None):
        """
        Process a snapshot request at this time.

        This copies various physics input and output files to a special folder that follow-on
        analysis be executed upon later.

        Notes
        -----
        This was originally used to produce MC2/DIF3D inputs for external parties (who didn't have
        ARMI) to review. Since then, the concept of snapshots has evolved with respect to the
        :py:class:`~armi.operators.snapshots.OperatorSnapshots`.
        """
        from armi.physics.neutronics.settings import CONF_LOADING_FILE

        runLog.info(f"Producing snapshot for cycle {cycle} node {node}")
        self.r.core.zones.summary()

        newFolder = f"snapShot{cycle}_{node}"
        if os.path.exists(newFolder):
            runLog.important(f"Deleting existing snapshot data in {newFolder}")
            pathTools.cleanPath(newFolder, forceClean=True)  # careful with cleanPath!
            # give it a minute.
            time.sleep(1)

        if os.path.exists(newFolder):
            runLog.warning(f"Deleting existing snapshot data in {newFolder} failed")
        else:
            os.mkdir(newFolder)

        # Moving the cross section files to a snapshot directory is a reasonable requirement, but
        # these hard-coded names are not desirable. This is legacy and should be updated to be more
        # robust for users.
        for fileName in os.listdir("."):
            if "mcc" in fileName and re.search(r"[A-Z]AF?\d?.inp", fileName):
                base, ext = os.path.splitext(fileName)
                if iteration is not None:
                    newFile = "{0}_{1:03d}_{2:d}_{4}{3}".format(base, cycle, node, ext, iteration)
                else:
                    newFile = "{0}_{1:03d}_{2:d}{3}".format(base, cycle, node, ext)
                # add the cycle and timenode to the XS input file names so that a rx-coeff case that
                # runs in here won't overwrite them.
                pathTools.copyOrWarn(fileName, fileName, os.path.join(newFolder, newFile))
            if "rzmflx" in fileName:
                pathTools.copyOrWarn("rzmflx for snapshot", fileName, newFolder)

        # Prefer the most specific ISOTXS name that exists; fall back to the cycle-level file.
        fileNamePossibilities = [f"ISOTXS-c{cycle}n{node}", f"ISOTXS-c{cycle}"]
        if iteration is not None:
            fileNamePossibilities = [f"ISOTXS-c{cycle}n{node}i{iteration}"] + fileNamePossibilities

        for isoFName in fileNamePossibilities:
            if os.path.exists(isoFName):
                break
        # NOTE(review): if none of the candidates exist, the last candidate is passed on and
        # copyOrWarn presumably emits a warning — confirm against pathTools.copyOrWarn.
        pathTools.copyOrWarn("ISOTXS for snapshot", isoFName, pathTools.armiAbsPath(newFolder, "ISOTXS"))

        globalFluxLabel = GlobalFluxInterfaceUsingExecuters.getLabel(self.cs.caseTitle, cycle, node, iteration)
        globalFluxInput = globalFluxLabel + ".inp"
        globalFluxOutput = globalFluxLabel + ".out"
        pathTools.copyOrWarn("DIF3D input for snapshot", globalFluxInput, newFolder)
        pathTools.copyOrWarn("DIF3D output for snapshot", globalFluxOutput, newFolder)
        pathTools.copyOrWarn("Shuffle logic for snapshot", self.cs[CONF_SHUFFLE_LOGIC], newFolder)
        pathTools.copyOrWarn("Loading definition for snapshot", self.cs[CONF_LOADING_FILE], newFolder)

    @staticmethod
    def setStateToDefault(cs):
        """Update the state of ARMI to fit the kind of run this operator manages."""
        return cs.modified(newSettings={"runType": RunTypes.STANDARD})

    def couplingIsActive(self):
        """True if any kind of physics coupling is active."""
        return self.cs[CONF_TIGHT_COUPLING]


================================================
FILE: armi/operators/operatorMPI.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License,
Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The MPI-aware variant of the standard ARMI operator. .. impl:: There is an MPI-aware variant of the ARMI Operator. :id: I_ARMI_OPERATOR_MPI :implements: R_ARMI_OPERATOR_MPI This sets up the main Operator on the primary MPI node and initializes worker processes on all other MPI nodes. At certain points in the run, particular interfaces might call into action all the workers. For example, a depletion or subchannel T/H module may ask the MPI pool to perform a few hundred independent physics calculations in parallel. In many cases, this can speed up the overall execution of an analysis, if a big enough computer or computing cluster is available. See :py:class:`~armi.operators.operator.Operator` for the parent class. Notes ----- This is not *yet* smart enough to use shared memory when the MPI tasks are on the same machine. Everything goes through MPI. This can be optimized as needed. """ import gc import os import re import time import traceback from armi import context, getPluginManager, mpiActions, runLog from armi.operators.operator import Operator from armi.reactor import reactors class OperatorMPI(Operator): """MPI-aware Operator.""" def __init__(self, cs): try: Operator.__init__(self, cs) except: # kill the workers too so everything dies. runLog.important("Primary node failed on init. Quitting.") if context.MPI_COMM: # else it's a single cpu case. context.MPI_COMM.bcast("quit", root=0) raise def operate(self): """ Operate method for all nodes. 
Calls _mainOperate or workerOperate depending on which MPI rank we are, and handles errors. """ runLog.debug("OperatorMPI.operate") if context.MPI_RANK == 0: # this is the primary try: # run the regular old operate method Operator.operate(self) runLog.important(time.ctime()) except Exception as ee: runLog.error("Error in Primary Node. Check STDERR for a traceback.\n{}".format(ee)) raise finally: # If there are other processes, tell them to stop if context.MPI_SIZE > 1: runLog.important("Stopping all MPI worker nodes and cleaning temps.") # send the quit command to the workers. context.MPI_COMM.bcast("quit", root=0) runLog.debug("Waiting for all nodes to close down") # wait until they're done cleaning up. context.MPI_COMM.bcast("finished", root=0) runLog.important("All worker nodes stopped.") # even though we waited, still need more time to close stdout. time.sleep(1) runLog.debug("Main operate finished") runLog.close() # concatenate all logs. else: try: self.workerOperate() except: # grab the final command runLog.warning("An error has occurred in one of the worker nodes. See STDERR for traceback.") # bcasting quit won't work if the main is sitting around waiting for a different bcast or gather. traceback.print_exc() runLog.debug("Worker failed") runLog.close() raise def workerOperate(self): """ The main loop on any worker MPI nodes. Notes ----- This method is what worker nodes are in while they wait for instructions from the primary node in a parallel run. The nodes will sit, waiting for a "worker command". When this comes (from a bcast from the primary), a set of if statements are evaluated, with specific behaviors defined for each command. If the operator doesn't understand the command, it loops through the interface stack to see if any of the interfaces understand it. Originally, "magic strings" were broadcast, which were handled either here or in one of the interfaces' ``workerOperate`` methods. 
        Since then, the :py:mod:`~armi.mpiActions` system has been devised which just broadcasts
        ``MpiAction`` objects. Both methods are still supported.

        See Also
        --------
        armi.mpiActions : MpiAction information
        armi.interfaces.workerOperate : interface-level handling of worker commands.
        """
        while True:
            # sit around waiting for a command from the primary
            runLog.extra("Node {0} ready and waiting".format(context.MPI_RANK))
            cmd = context.MPI_COMM.bcast(None, root=0)
            runLog.extra("worker received command {0}".format(cmd))

            # got a command. go use it.
            if isinstance(cmd, mpiActions.MpiAction):
                cmd.invoke(self, self.r, self.cs)
            elif cmd == "quit":
                self.workerQuit()
                break  # If this break is removed, the program will remain in the while loop forever.
            elif cmd == "finished":
                runLog.warning(
                    "Received unexpected FINISHED command. Usually a QUIT command precedes this. "
                    "Skipping cleanup of temporary files."
                )
                break
            elif cmd == "sync":
                # wait around for a sync
                runLog.debug("Worker syncing")
                note = context.MPI_COMM.bcast("wait", root=0)
                if note != "wait":
                    raise RuntimeError(f'did not get "wait". Got {note}')
            elif cmd == "reset":
                runLog.extra("Workers are being reset.")
            else:
                # We don't understand the command on our own. Check the interfaces: this allows all
                # interfaces to have their own custom operation code.
                handled = False
                for i in self.interfaces:
                    handled = i.workerOperate(cmd)
                    if handled:
                        break
                if not handled:
                    if context.MPI_RANK == 0:
                        print("Interfaces" + str(self.interfaces))
                    runLog.error(
                        "No interface understood worker command {0}\n check stdout for err\n"
                        "available interfaces:\n {1}".format(
                            cmd,
                            "\n ".join(f"name:{i.name} typeName:{i.purpose} {i}" for i in self.interfaces),
                        )
                    )
                    raise RuntimeError(f"Failed to delegate worker command {cmd} to an interface.")

            pm = getPluginManager()
            resetFlags = pm.hook.mpiActionRequiresReset(cmd=cmd)
            # only reset if all the plugins agree to reset
            if all(resetFlags) or cmd == "reset":
                self._resetWorker()

            # might be an mpi action which has a reactor and everything, preventing garbage collection
            del cmd
            gc.collect()

    def _finalizeInteract(self):
        """Inherited member called after each interface has completed its interact.

        This will force all the workers to clear their reactor data so that it isn't carried around
        to the next interact.

        Notes
        -----
        This is only called on the root processor. Worker processors will know what to do with the
        "reset" broadcast.
        """
        if context.MPI_SIZE > 1:
            context.MPI_COMM.bcast("reset", root=0)
            runLog.extra("Workers have been reset.")

    def _resetWorker(self):
        """
        Clear out the reactor on the workers to start anew.

        Notes
        -----
        This was made to help minimize the amount of RAM that is used during some gigantic
        long-running cases. Resetting after building copies of reactors or transforming their
        geometry is one approach. We hope to implement more efficient solutions in the future.

        Warning
        -------
        This should build empty non-core systems too.
""" # Nothing to do if we never had anything if self.r is None: return cs = self.cs bp = self.r.blueprints spatialGrid = self.r.core.spatialGrid spatialGrid.armiObject = None xsGroups = self.getInterface("xsGroups") if xsGroups: xsGroups.clearRepresentativeBlocks() self.detach() self.r = reactors.Reactor(cs.caseTitle, bp) core = reactors.Core("Core") self.r.add(core) core.spatialGrid = spatialGrid core.spatialGrid.armiObject = core self.reattach(self.r, cs) @staticmethod def workerQuit(): runLog.debug("Worker ending") runLog.close() # no more messages. # wait until all workers are closed so we can delete them. context.MPI_COMM.bcast("finished", root=0) def collapseAllStderrs(self): """Takes all the individual stderr files from each processor and arranges them nicely into one file.""" stderrFiles = [] for fName in os.listdir("."): match = re.search(r"_(\d\d\d\d)\.stderr", fName) if match: stderrFiles.append((match.group(1), fName)) stderrFiles.sort() stderr = open("{0}w.stderr".format(self.cs.caseTitle), "w") for cpu, fName in stderrFiles: f = open(fName) stderr.write("Processor {0}\n".format(cpu)) stderr.write(f.read()) stderr.write("\n") f.close() stderr.close() ================================================ FILE: armi/operators/runTypes.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Constants defining the different supported run types. 
These were moved here to better structure the dependencies within this package. Dependencies should
be organized in a tree-like structure, with ``__init__.py`` living at the top.

These will likely need to be extended by plugins in the near future.
"""


class RunTypes:
    """All available values of the ``runType`` setting that determine which Operator to use."""

    # Standard cycle-by-cycle run (see operator.Operator).
    STANDARD = "Standard"
    # Restart-style run over requested snapshots (see snapshots.OperatorSnapshots).
    SNAPSHOTS = "Snapshots"
    # Equilibrium-cycle search run.
    EQUILIBRIUM = "Equilibrium"


================================================
FILE: armi/operators/snapshots.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Snapshot Operator."""

from armi import runLog
from armi.operators import operatorMPI


class OperatorSnapshots(operatorMPI.OperatorMPI):
    """
    This operator just loops over the requested snapshots and computes at them.

    These may add CR worth curves, rx coefficients, transient runs etc at these snapshots. This
    operator can be run as a restart, adding new physics to a previous run.
""" def __init__(self, cs): super().__init__(cs) # disable fuel management and optimization # disable depletion because we don't want to change number densities for tn's >0 (or any) self.disabledInterfaces = ["depletion", "fuelHandler", "optimize"] def createInterfaces(self): operatorMPI.OperatorMPI.createInterfaces(self) for toDisable in self.disabledInterfaces: i = self.getInterface(name=toDisable, purpose=toDisable) if i: i.enabled(False) def _mainOperate(self): """ General main loop for ARMI snapshot case. Instead of going through all cycles, this goes through just the snapshots. See Also -------- Operator._mainOperate : The primary ARMI loop for non-restart cases. """ runLog.important("---- Beginning Snapshot (restart) ARMI Operator Loop ------") # run things that happen before a calculation. # setups, etc. self.interactAllBOL() # figure out which snapshots to run in. Parse the CCCNNN settings snapshots = [(int(i[:3]), int(i[3:])) for i in self.cs["dumpSnapshot"]] # update the snapshot requests if the user chose to load from a specific cycle/node dbi = self.getInterface("database") # database is excluded since SS writes by itself excludeDB = ("database",) for ssCycle, ssNode in snapshots: runLog.important("Beginning snapshot ({0:02d}, {1:02d})".format(ssCycle, ssNode)) dbi.loadState(ssCycle, ssNode) # need to update reactor power after the database load # this is normally handled in operator._cycleLoop self.r.core.p.power = self.cs["power"] self.r.core.p.powerDensity = self.cs["powerDensity"] halt = self.interactAllBOC(self.r.p.cycle) if halt: break # database is excluded since it writes after coupled self.interactAllEveryNode(ssCycle, ssNode, excludedInterfaceNames=excludeDB) self._performTightCoupling(ssCycle, ssNode, writeDB=False) # tight coupling is done, now write to DB dbi.writeDBEveryNode() self.interactAllEOC(self.r.p.cycle) # run things that happen at EOL, like reports, plotters, etc. 
self.interactAllEOL(excludedInterfaceNames=excludeDB) dbi.closeDB() # dump the database to file runLog.important("Done with ARMI snapshots case.") @staticmethod def setStateToDefault(cs): """Update the state of ARMI to fit the kind of run this operator manages.""" from armi.operators.runTypes import RunTypes return cs.modified(newSettings={"runType": RunTypes.STANDARD}) @property def atEOL(self): """ Notes ----- This operator's atEOL method behaves very differently than other operators. The idea is that snapshots don't really have an EOL since they are independent of chrological order and may or may not contain the last time node from the load database. """ return False ================================================ FILE: armi/operators/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for built-in operators.""" ================================================ FILE: armi/operators/tests/test_operatorSnapshots.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for operator snapshots."""

import unittest
from pathlib import Path
from unittest.mock import Mock

from armi import settings
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.operators import getOperatorClassFromSettings
from armi.operators.runTypes import RunTypes
from armi.operators.snapshots import OperatorSnapshots
from armi.settings.fwSettings.globalSettings import CONF_GROW_TO_FULL_CORE_AFTER_LOAD
from armi.testing import TESTING_ROOT, loadTestReactor
from armi.utils.directoryChangers import TemporaryDirectoryChanger


class TestOperatorSnapshots(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Build a snapshot operator around a small test reactor, with most interfaces disabled."""
        newSettings = {}
        newSettings["axialExpansion"] = False
        newSettings["db"] = True
        newSettings["genReports"] = False
        newSettings["summarizeAssemDesign"] = False
        newSettings["runType"] = "Standard"
        newSettings["verbosity"] = "error"
        newSettings["branchVerbosity"] = "error"
        newSettings["nCycles"] = 1
        newSettings["dumpSnapshot"] = ["000000", "008000", "016005"]

        o1, cls.r = loadTestReactor(
            customSettings=newSettings,
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
        )
        cls.o = OperatorSnapshots(o1.cs)
        cls.o.r = cls.r

        # let's disable all the interfaces, to save time
        allInterfaces = [
            "database",
            "fissionProducts",
            "fuelHandler",
            "history",
            "main",
            "memoryProfiler",
            "snapshot",
            "xsGroups",
        ]
        for i in allInterfaces:
            cls.o.disabledInterfaces.append(i)

        # mock a Database Interface: stub out the methods _mainOperate calls so no real
        # HDF5 I/O happens during the tests
        cls.dbi = DatabaseInterface(cls.r, o1.cs)
        cls.dbi.loadState = lambda c, n: None
        cls.dbi.writeDBEveryNode = lambda: None
        cls.dbi.closeDB = lambda: None

        cls.o.createInterfaces()

    def test_atEOL(self):
        """Snapshot operators never report being at EOL."""
        self.assertFalse(self.o.atEOL)

    def test_setStateToDefault(self):
        """setStateToDefault should reset the runType back to Standard."""
        cs0 = self.o.cs.modified(newSettings={"runType": RunTypes.SNAPSHOTS})
        self.assertEqual(cs0["runType"], RunTypes.SNAPSHOTS)
        cs = self.o.setStateToDefault(cs0)
        self.assertEqual(cs["runType"], RunTypes.STANDARD)

    def test_mainOperate(self):
        """Run the snapshot main loop and verify it restores core power from settings."""
        # Mock some tooling that we aren't testing
        self.o.interactBOL = lambda: None
        self.o.getInterface = lambda s: (self.dbi if s == "database" else super().getInterface(s))

        self.assertEqual(self.r.core.p.power, 0.0)
        self.o._mainOperate()
        self.assertEqual(self.r.core.p.power, 1000000.0)

    def test_createInterfacesDisabled(self):
        # If someone adds an interface, we don't want this test to break, so let's do >6
        self.assertGreater(len(self.o.interfaces), 6)

        for i in self.o.interfaces:
            self.assertFalse(i.enabled())


class TestOperatorSnapshotsSettings(unittest.TestCase):
    def test_getOperatorClassFromSettings(self):
        """The Snapshots runType setting should map to the OperatorSnapshots class."""
        cs = settings.Settings()
        cs = cs.modified(newSettings={"runType": RunTypes.SNAPSHOTS})
        o = getOperatorClassFromSettings(cs)
        self.assertEqual(o, OperatorSnapshots)


class TestSnapshotFullCoreExpan(unittest.TestCase):
    """Test that a snapshot operator can do full core analysis with a 1/3 core DB."""

    DB_PATH = Path("test_operator_snapshot_full_core_expansion.h5")

    @classmethod
    def setUpClass(cls):
        # Write a single-statepoint database from a third-core reactor so the
        # snapshot operator has something to reload from.
        cls.td = TemporaryDirectoryChanger()
        cls.td.__enter__()
        o, cls.symmetricReactor = loadTestReactor(
            inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml"
        )
        dbi: DatabaseInterface = next(filter(lambda i: isinstance(i, DatabaseInterface), o.interfaces))
        dbi.initDB(cls.DB_PATH)
        dbi.writeDBEveryNode()
        dbi.closeDB()
        cls.snapshotSettings: settings.Settings = o.cs.modified(
            newSettings={"runType": RunTypes.SNAPSHOTS, "reloadDBName": str(cls.DB_PATH)}
        )

    @classmethod
    def tearDownClass(cls):
        cls.DB_PATH.unlink()
        cls.td.__exit__(None, None, None)

    def test_fullCoreFromThirdCore(self):
        """Load a third-core DB with grow-to-full-core enabled and verify the core expands."""
        self.assertFalse(self.symmetricReactor.core.isFullCore)
        cs = self.snapshotSettings.modified(
            newSettings={CONF_GROW_TO_FULL_CORE_AFTER_LOAD: True, "dumpSnapshot": ["0000"]}
        )
        o = getOperatorClassFromSettings(cs)(cs)
        self.assertIsInstance(o, OperatorSnapshots)
        o.r = self.symmetricReactor
        # Just want Database interface not history tracker not reporting not etc.
        o.addInterface(DatabaseInterface(o.r, o.cs))
        # Mock interactAllBOC so we don't interact every node.
        # We just want to trigger the re-attachment of the loaded reactor
        o.interactAllBOC = Mock(return_value=True)
        o.operate()
        self.assertTrue(o.r.core.isFullCore)


================================================
FILE: armi/operators/tests/test_operators.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for operators."""

import collections
import io
import os
import sys
import unittest
from contextlib import contextmanager
from unittest.mock import patch

from armi import settings
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.interfaces import Interface, TightCoupler
from armi.operators.operator import Operator
from armi.physics.neutronics.globalFlux.globalFluxInterface import (
    GlobalFluxInterfaceUsingExecuters,
)
from armi.reactor.reactors import Core, Reactor
from armi.reactor.tests import test_reactors
from armi.settings.caseSettings import Settings
from armi.settings.fwSettings.globalSettings import (
    CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION,
    CONF_DEFERRED_INTERFACE_NAMES,
    CONF_DEFERRED_INTERFACES_CYCLE,
    CONF_RUN_TYPE,
    CONF_TIGHT_COUPLING,
    CONF_TIGHT_COUPLING_SETTINGS,
)
from armi.tests import mockRunLogs
from armi.utils import directoryChangers
from armi.utils.directoryChangers import TemporaryDirectoryChanger


# Dummy interface fixtures used by the subclass-collision tests below.
class InterfaceA(Interface):
    purpose = "A"
    name = "First"


class InterfaceB(InterfaceA):
    """Dummy Interface that extends A."""

    purpose = "A"
    name = "Second"


class InterfaceC(Interface):
    purpose = "A"
    name = "Third"


class OperatorTests(unittest.TestCase):
    def setUp(self):
        self.o, self.r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        self.activeInterfaces = [ii for ii in self.o.interfaces if ii.enabled()]

    def test_operatorData(self):
        """Test that the operator has input data, a reactor model.

        .. test:: The Operator includes input data and the reactor data model.
            :id: T_ARMI_OPERATOR_COMM
            :tests: R_ARMI_OPERATOR_COMM
        """
        self.assertEqual(self.o.r, self.r)
        self.assertEqual(type(self.o.cs), settings.Settings)

    @patch("armi.operators.Operator._interactAll")
    def test_orderedInterfaces(self, interactAll):
        """Test the default interfaces are in an ordered list, looped over at each time step.

        .. test:: An ordered list of interfaces are run at each time step.
            :id: T_ARMI_OPERATOR_INTERFACES
            :tests: R_ARMI_OPERATOR_INTERFACES

        .. test:: Interfaces are run at BOC, EOC, and at time points between.
            :id: T_ARMI_INTERFACE
            :tests: R_ARMI_INTERFACE

        .. test:: When users set the time discretization, it is enforced.
            :id: T_ARMI_FW_HISTORY2
            :tests: R_ARMI_FW_HISTORY
        """
        # an ordered list of interfaces
        self.assertGreater(len(self.o.interfaces), 0)
        for i in self.o.interfaces:
            self.assertTrue(isinstance(i, Interface))

        # make sure we only iterate one time step
        self.o.cs = self.o.cs.modified(newSettings={"nCycles": 2})
        self.r.p.cycle = 1

        # mock some stdout logging of what's happening when
        def sideEffect(node, activeInts, *args, **kwargs):
            print(node)
            print(activeInts)

        interactAll.side_effect = sideEffect

        # run the operator through one cycle, capturing stdout
        origout = sys.stdout
        try:
            out = io.StringIO()
            sys.stdout = out
            self.o.operate()
        finally:
            sys.stdout = origout

        # grab the log data
        log = out.getvalue()

        # verify we have some common interfaces listed
        self.assertIn("main", log)
        self.assertIn("fuelHandler", log)
        self.assertIn("fissionProducts", log)
        self.assertIn("history", log)
        self.assertIn("snapshot", log)

        # At the first time step, we get one ordered list of interfaces
        interfaces = log.split("BOL")[1].split("EOL")[0].split(",")
        self.assertGreater(len(interfaces), 0)
        for i in interfaces:
            self.assertIn("Interface", i)

        # verify the various time nodes are hit in order; consume the log as we go
        timeNodes = ["BOL", "BOC"] + ["EveryNode"] * 3 + ["EOC", "EOL"]
        for node in timeNodes:
            self.assertIn(node, log)
            log = node.join(log.split(node)[1:])

    def test_addInterfaceSubclassCollision(self):
        cs = settings.Settings()
        interfaceA = InterfaceA(self.r, cs)
        interfaceB = InterfaceB(self.r, cs)
        self.o.addInterface(interfaceA)

        # 1) Adds B and gets rid of A
        self.o.addInterface(interfaceB)
        self.assertEqual(self.o.getInterface("Second"), interfaceB)
        self.assertEqual(self.o.getInterface("First"), None)

        # 2) Now we have B which is a subclass of A,
        # we want to not add A (but also not have an error)
        self.o.addInterface(interfaceA)
        self.assertEqual(self.o.getInterface("Second"), interfaceB)
        self.assertEqual(self.o.getInterface("First"), None)

        # 3) Also if another class not a subclass has the same purpose,
        # raise an error
        interfaceC = InterfaceC(self.r, cs)
        self.assertRaises(RuntimeError, self.o.addInterface, interfaceC)

        # 4) Check adding a different purpose Interface
        interfaceC.purpose = "C"
        self.o.addInterface(interfaceC)
        self.assertEqual(self.o.getInterface("Second"), interfaceB)
        self.assertEqual(self.o.getInterface("Third"), interfaceC)

    def test_interfaceIsActive(self):
        self.o, _r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        self.assertTrue(self.o.interfaceIsActive("main"))
        self.assertFalse(self.o.interfaceIsActive("Fake-o"))

    def test_getActiveInterfaces(self):
        """Ensure that the right interfaces are returned for a given interaction state."""
        self.o.cs[CONF_DEFERRED_INTERFACES_CYCLE] = 1
        self.o.cs[CONF_DEFERRED_INTERFACE_NAMES] = ["history"]

        # Test invalid inputs.
        with self.assertRaises(ValueError):
            self.o.getActiveInterfaces("notAnInterface")

        # Test BOL
        # NOTE(review): ("xsGroups") is a str, not a 1-tuple; this only works because
        # substring containment matches — consider ("xsGroups",) for clarity.
        interfaces = self.o.getActiveInterfaces("BOL", excludedInterfaceNames=("xsGroups"))
        interfaceNames = [interface.name for interface in interfaces]
        self.assertNotIn("xsGroups", interfaceNames)
        self.assertNotIn("history", interfaceNames)

        # Test BOC
        interfaces = self.o.getActiveInterfaces("BOC", cycle=0)
        interfaceNames = [interface.name for interface in interfaces]
        self.assertNotIn("history", interfaceNames)

        # Test EveryNode and EOC
        interfaces = self.o.getActiveInterfaces("EveryNode", excludedInterfaceNames=("xsGroups"))
        interfaceNames = [interface.name for interface in interfaces]
        self.assertIn("history", interfaceNames)
        self.assertNotIn("xsGroups", interfaceNames)

        # Test Coupled
        interfaces = self.o.getActiveInterfaces("Coupled")
        for test, ref in zip(interfaces, self.activeInterfaces):
            self.assertEqual(test.name, ref.name)

        # Test EOL
        interfaces = self.o.getActiveInterfaces("EOL")
        self.assertEqual(interfaces[-1].name, "main")

        # Test excludedInterfaceNames
        excludedInterfaceNames = ["fissionProducts", "fuelHandler", "xsGroups"]
        interfaces = self.o.getActiveInterfaces("EOL", excludedInterfaceNames=excludedInterfaceNames)
        interfaceNames = [ii.name for ii in interfaces]
        self.assertIn("history", interfaceNames)
        self.assertIn("main", interfaceNames)
        self.assertIn("snapshot", interfaceNames)
        self.assertNotIn("fissionProducts", interfaceNames)
        self.assertNotIn("fuelHandler", interfaceNames)
        self.assertNotIn("xsGroups", interfaceNames)

    def test_loadStateError(self):
        """The ``loadTestReactor()`` test tool does not have any history in the DB to load from."""
        # a first, simple test that this method fails correctly
        with self.assertRaises(RuntimeError):
            self.o.loadState(0, 1)

    def test_setStateToDefault(self):
        # reset the runType for testing
        self.assertEqual(self.o.cs[CONF_RUN_TYPE], "Standard")
        self.o.cs = self.o.cs.modified(newSettings={"runType": "fake"})
        self.assertEqual(self.o.cs[CONF_RUN_TYPE], "fake")

        # validate the method works
        cs = self.o.setStateToDefault(self.o.cs)
        self.assertEqual(cs[CONF_RUN_TYPE], "Standard")

    @patch("shutil.copy")
    @patch("os.listdir")
    def test_snapshotRequest(self, fakeDirList, fakeCopy):
        fakeDirList.return_value = ["mccAA.inp"]
        with TemporaryDirectoryChanger():
            with mockRunLogs.BufferLog() as mock:
                self.o.snapshotRequest(0, 1)
                self.assertIn("ISOTXS-c0", mock.getStdout())
                self.assertIn("DIF3D input for snapshot", mock.getStdout())
                self.assertIn("Shuffle logic for snapshot", mock.getStdout())
                self.assertIn("Loading definition for snapshot", mock.getStdout())
                self.assertTrue(os.path.exists("snapShot0_1"))

        with TemporaryDirectoryChanger():
            with mockRunLogs.BufferLog() as mock:
                self.o.snapshotRequest(0, 2, iteration=1)
                self.assertIn("ISOTXS-c0", mock.getStdout())
                self.assertIn("DIF3D input for snapshot", mock.getStdout())
                self.assertIn("Shuffle logic for snapshot", mock.getStdout())
                self.assertIn("Loading definition for snapshot", mock.getStdout())
                self.assertTrue(os.path.exists("snapShot0_2"))


class TestCreateOperator(unittest.TestCase):
    def test_createOperator(self):
        """Test that an operator can be created from settings.

        .. test:: Create an operator from settings.
            :id: T_ARMI_OPERATOR_SETTINGS
            :tests: R_ARMI_OPERATOR_SETTINGS
        """
        cs = settings.Settings()
        o = Operator(cs)

        # high-level items
        self.assertTrue(isinstance(o, Operator))
        self.assertTrue(isinstance(o.cs, settings.Settings))

        # validate some more nitty-gritty operator details come from settings
        burnStepsSetting = cs["burnSteps"]
        if type(burnStepsSetting) is not list:
            burnStepsSetting = [burnStepsSetting]
        self.assertEqual(o.burnSteps, burnStepsSetting)
        self.assertEqual(o.maxBurnSteps, max(burnStepsSetting))

        powerFracsSetting = cs["powerFractions"]
        if powerFracsSetting:
            self.assertEqual(o.powerFractions, powerFracsSetting)
        else:
            self.assertEqual(o.powerFractions, [[1] * cs["burnSteps"]])


class TestTightCoupling(unittest.TestCase):
    def setUp(self):
        # A bare operator with an empty reactor: enough to exercise the
        # tight-coupling plumbing without a full reactor model.
        self.cs = settings.Settings()
        self.cs[CONF_TIGHT_COUPLING] = True
        self.o = Operator(self.cs)
        self.o.r = Reactor("empty", None)
        self.o.r.core = Core("empty")

    def test_getStepLengths(self):
        """Test the step lengths are correctly calculated, based on settings.

        .. test:: Users can control time discretization of the simulation through settings.
            :id: T_ARMI_FW_HISTORY0
            :tests: R_ARMI_FW_HISTORY
        """
        self.assertEqual(self.cs["nCycles"], 1)
        self.assertAlmostEqual(self.cs["cycleLength"], 365.242199)
        self.assertEqual(self.cs["burnSteps"], 4)
        self.assertEqual(len(self.o.stepLengths), 1)
        self.assertEqual(len(self.o.stepLengths[0]), 4)

    def test_couplingIsActive(self):
        """Ensure that ``cs[CONF_TIGHT_COUPLING]`` controls ``couplingIsActive``."""
        self.assertTrue(self.o.couplingIsActive())
        self.o.cs[CONF_TIGHT_COUPLING] = False
        self.assertFalse(self.o.couplingIsActive())

    def test_performTightCoupling_Inactive(self):
        """Ensures no action by ``_performTightCoupling`` if ``cs[CONF_TIGHT_COUPLING] = false``."""
        self.o.cs[CONF_TIGHT_COUPLING] = False
        self.o._performTightCoupling(0, 0, writeDB=False)
        self.assertEqual(self.o.r.core.p.coupledIteration, 0)

    def test_performTightCoupling_skip(self):
        """Ensure that cycles within ``cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION]`` are skipped."""
        self.o.cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION] = [1]
        with mockRunLogs.BufferLog() as mock:
            self.o._performTightCoupling(1, 0, writeDB=False)
            self.assertIn("interactAllCoupled disabled this cycle", mock.getStdout())
            self.assertEqual(self.o.r.core.p.coupledIteration, 0)

    def test_performTightCoupling_notConverged(self):
        """Ensure that the appropriate ``runLog.warning`` is addressed in tight coupling reaches max num of iters.

        .. test:: The tight coupling logic can fail if there is no convergence.
            :id: T_ARMI_OPERATOR_PHYSICS0
            :tests: R_ARMI_OPERATOR_PHYSICS
        """

        # A coupler that never converges, so the max-iteration warning must fire.
        class NoConverge(TightCoupler):
            def isConverged(self, _val: TightCoupler._SUPPORTED_TYPES) -> bool:
                return False

        class InterfaceNoConverge(Interface):
            name = "NoConverge"

            def __init__(self, r, cs):
                super().__init__(r, cs)
                self.coupler = NoConverge(param="dummy", tolerance=None, maxIters=1)

            def getTightCouplingValue(self):
                return 0.0

        self.o.addInterface(InterfaceNoConverge(None, self.o.cs))
        with mockRunLogs.BufferLog() as mock:
            self.o._performTightCoupling(0, 0, writeDB=False)
            self.assertIn("have not converged! The maximum number of iterations", mock.getStdout())

    def test_performTightCoupling_WriteDB(self):
        """Ensure a tight coupling iteration occurs and that a DB WILL be written if requested."""
        hasCouplingInteraction = 1
        with directoryChangers.TemporaryDirectoryChanger():
            with mockRunLogs.BufferLog() as mock:
                self.dbWriteForCoupling(writeDB=True)
                self.assertIn("Writing to database for statepoint:", mock.getStdout())
                self.assertEqual(self.o.r.core.p.coupledIteration, hasCouplingInteraction)

    def test_performTightCoupling_NoWriteDB(self):
        """Ensure a tight coupling iteration occurs and that a DB WILL NOT be written if requested."""
        hasCouplingInteraction = 1
        with directoryChangers.TemporaryDirectoryChanger():
            with mockRunLogs.BufferLog() as mock:
                self.dbWriteForCoupling(writeDB=False)
                self.assertNotIn("Writing to database for statepoint:", mock.getStdout())
                self.assertEqual(self.o.r.core.p.coupledIteration, hasCouplingInteraction)

    def dbWriteForCoupling(self, writeDB: bool):
        # Shared helper (not a test): run one coupling pass against a fresh DB
        # and assert whether anything was written to it.
        self.o.removeAllInterfaces()
        dbi = DatabaseInterface(self.o.r, self.o.cs)
        dbi.initDB(fName=self._testMethodName + ".h5")
        self.o.addInterface(dbi)
        self.o._performTightCoupling(0, 0, writeDB=writeDB)
        h5Contents = list(dbi.database.getH5Group(dbi.r).items())
        if writeDB:
            self.assertTrue(h5Contents)
        else:
            self.assertFalse(h5Contents)
        dbi.database.close()

    def test_computeTightCouplingConvergence(self):
        """Ensure that tight coupling convergence can be computed and checked.

        Notes
        -----
        - Assertion #1: ensure that the convergence of Keff, eps, is greater than 1e-5
          (the prescribed convergence criteria)
        - Assertion #2: ensure that eps is (prevIterKeff - currIterKeff)
        """
        prevIterKeff = 0.9
        currIterKeff = 1.0
        self.o.cs[CONF_TIGHT_COUPLING_SETTINGS] = {"globalFlux": {"parameter": "keff", "convergence": 1e-05}}
        globalFlux = GlobalFluxInterfaceUsingExecuters(self.o.r, self.o.cs)
        globalFlux.coupler.storePreviousIterationValue(prevIterKeff)
        self.o.addInterface(globalFlux)
        # set keff to some new value and compute tight coupling convergence
        self.o.r.core.p.keff = currIterKeff
        self.o._convergenceSummary = collections.defaultdict(list)
        self.assertFalse(self.o._checkTightCouplingConvergence([globalFlux]))
        self.assertAlmostEqual(
            globalFlux.coupler.eps,
            currIterKeff - prevIterKeff,
        )


class CyclesSettingsTests(unittest.TestCase):
    """Check that we can correctly access the various cycle settings from the operator."""

    detailedCyclesSettings = """
metadata:
  version: uncontrolled
settings:
  power: 1000000000.0
  nCycles: 3
  cycles:
    - name: startup sequence
      cumulative days: [1, 2, 3]
      power fractions: [0.1, 0.2, 0.3]
      availability factor: 0.1
    - cycle length: 10
      burn steps: 5
      power fractions: [0.2, 0.2, 0.2, 0.2, 0]
      availability factor: 0.5
    - name: prepare for shutdown
      step days: [3, R4]
      power fractions: [0.3, R4]
  runType: Standard
"""

    def setUp(self):
        self.standaloneDetailedCS = Settings()
        self.standaloneDetailedCS.loadFromString(self.detailedCyclesSettings)
        self.detailedOperator = Operator(self.standaloneDetailedCS)

    def test_getPowerFractions(self):
        """Test that the power fractions are calculated correctly.

        .. test:: Test the powerFractions are retrieved correctly for multiple cycles.
            :id: T_ARMI_SETTINGS_POWER1
            :tests: R_ARMI_SETTINGS_POWER
        """
        powerFractionsSolution = [
            [0.1, 0.2, 0.3],
            [0.2, 0.2, 0.2, 0.2, 0],
            [0.3, 0.3, 0.3, 0.3, 0.3],
        ]

        self.assertEqual(self.detailedOperator.powerFractions, powerFractionsSolution)

        # cached value should be recomputed identically after being cleared
        self.detailedOperator._powerFractions = None
        self.assertEqual(self.detailedOperator.powerFractions, powerFractionsSolution)

    def test_getCycleNames(self):
        cycleNamesSolution = ["startup sequence", None, "prepare for shutdown"]
        self.assertEqual(self.detailedOperator.cycleNames, cycleNamesSolution)

        self.detailedOperator._cycleNames = None
        self.assertEqual(self.detailedOperator.cycleNames, cycleNamesSolution)

    def test_getAvailabilityFactors(self):
        """Check that the "availability factor" is correctly set from the "cycles" setting.

        .. test:: Users can manually control time discretization of the simulation.
            :id: R_ARMI_FW_HISTORY3
            :tests: R_ARMI_FW_HISTORY
        """
        availabilityFactorsSolution = [0.1, 0.5, 1]
        self.assertEqual(self.detailedOperator.availabilityFactors, availabilityFactorsSolution)

        self.detailedOperator._availabilityFactors = None
        self.assertEqual(self.detailedOperator.availabilityFactors, availabilityFactorsSolution)

    def test_getStepLengths(self):
        """Test that the manually-set, detailed time steps are retrievable.

        .. test:: Users can manually control time discretization of the simulation.
            :id: T_ARMI_FW_HISTORY1
            :tests: R_ARMI_FW_HISTORY
        """
        stepLengthsSolution = [
            [1, 1, 1],
            [10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5],
            [3, 3, 3, 3, 3],
        ]
        # detailed step lengths can be set manually
        self.assertEqual(self.detailedOperator.stepLengths, stepLengthsSolution)

        self.detailedOperator._stepLength = None
        self.assertEqual(self.detailedOperator.stepLengths, stepLengthsSolution)

        # when doing detailed step information, we don't get step information from settings
        cs = self.detailedOperator.cs
        self.assertEqual(cs["nCycles"], 3)
        with self.assertRaises(ValueError):
            cs["cycleLength"]
        with self.assertRaises(ValueError):
            cs["burnSteps"]

    def test_getCycleLengths(self):
        """Check that the "cycle length" is correctly set from the "cycles" setting.

        .. test:: Users can manually control time discretization of the simulation.
            :id: R_ARMI_FW_HISTORY4
            :tests: R_ARMI_FW_HISTORY
        """
        cycleLengthsSolution = [30, 10, 15]
        self.assertEqual(self.detailedOperator.cycleLengths, cycleLengthsSolution)

        self.detailedOperator._cycleLengths = None
        self.assertEqual(self.detailedOperator.cycleLengths, cycleLengthsSolution)

    def test_getBurnSteps(self):
        """Check that the "burn steps" is correctly set from the "cycles" setting.

        .. test:: Users can manually control time discretization of the simulation.
            :id: R_ARMI_FW_HISTORY5
            :tests: R_ARMI_FW_HISTORY
        """
        burnStepsSolution = [3, 5, 5]
        self.assertEqual(self.detailedOperator.burnSteps, burnStepsSolution)

        self.detailedOperator._burnSteps = None
        self.assertEqual(self.detailedOperator.burnSteps, burnStepsSolution)

    def test_getMaxBurnSteps(self):
        """Check that the max of the "burn steps" is correctly set from the "cycles" setting.

        .. test:: Users can manually control time discretization of the simulation.
            :id: R_ARMI_FW_HISTORY6
            :tests: R_ARMI_FW_HISTORY
        """
        maxBurnStepsSolution = 5
        self.assertEqual(self.detailedOperator.maxBurnSteps, maxBurnStepsSolution)

        self.detailedOperator._maxBurnSteps = None
        self.assertEqual(self.detailedOperator.maxBurnSteps, maxBurnStepsSolution)


class TestInterfaceAndEventHeaders(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Pin the reactor to a known cycle/node/time so the expected header
        # strings below are deterministic.
        cls.o, cls.r = test_reactors.loadTestReactor(
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
            customSettings={CONF_TIGHT_COUPLING: True},
        )
        cls.r.p.cycle = 0
        cls.r.p.timeNode = 1
        cls.r.p.time = 11.01
        cls.r.core.p.coupledIteration = 7

    def test_expandCycleAndTimeNodeArgs_Empty(self):
        """When cycleNodeInfo should be an empty string."""
        for task in ["Init", "BOL", "EOL"]:
            self.assertEqual(self.o._expandCycleAndTimeNodeArgs(interactionName=task), "")

    def test_expandCycleAndTimeNodeArgs_Cycle(self):
        """When cycleNodeInfo should return only the cycle."""
        for task in ["BOC", "EOC"]:
            self.assertEqual(
                self.o._expandCycleAndTimeNodeArgs(interactionName=task),
                f" - timestep: cycle {self.r.p.cycle}",
            )

    def test_expandCycleAndTimeNodeArgs_EveryNode(self):
        """When cycleNodeInfo should return the cycle and node."""
        self.assertEqual(
            self.o._expandCycleAndTimeNodeArgs(interactionName="EveryNode"),
            f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year {'{0:.2f}'.format(self.r.p.time)}",
        )

    def test_expandCycleAndTimeNodeArgs_Coupled(self):
        """When cycleNodeInfo should return the cycle, node, and iteration number."""
        self.assertEqual(
            self.o._expandCycleAndTimeNodeArgs(interactionName="Coupled"),
            (
                f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year "
                f"{'{0:.2f}'.format(self.r.p.time)} - iteration {self.r.core.p.coupledIteration}"
            ),
        )


class OperatorRestartTests(unittest.TestCase):
    """Tests on the behavior of the interactAllRestart hook."""

    @classmethod
    def setUpClass(cls):
        cls.START_CYCLE = 4
        cls.START_NODE = 2
        cls.o, cls.r = test_reactors.loadTestReactor(
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
            customSettings={
                "loadStyle": "fromDB",
                "startCycle": cls.START_CYCLE,
                "startNode": cls.START_NODE,
                # Need more cycles than we're restarting
                "nCycles": cls.START_CYCLE + 3,
            },
        )

    def setUp(self):
        self.dbi: DatabaseInterface = self.o.getInterface("database")
        self.assertIsNotNone(self.dbi, msg="Database interface required for test.")

    def test_nominalRestart(self):
        """Make sure the database interface is uniquely called and the interactRestart is not called for DB.

        We need to make sure the database interface loads the reactor before every other
        interface goes first. But then, when all the interfaces get their chance to
        ``interactRestart``, the database interface does not. Since it did its work already.
        """
        mainInterface: Interface = self.o.getInterface(name="main")
        self.assertIsNotNone(mainInterface)
        with (
            patch.object(self.dbi, "interactRestart") as dbInteractRestart,
            patch.object(self.dbi, "prepRestartRun") as dbPrepRestart,
            patch.object(mainInterface, "interactRestart") as mainIfcRestart,
        ):
            self.o.interactAllRestart(self.dbi)
        dbPrepRestart.assert_called_once()
        # Skip DatabaseInterface.interactRestart since we jumped ahead and "restarted" with prepRestartRun
        dbInteractRestart.assert_not_called()
        # Ensure we called other interfaces restarts at the previous node
        mainIfcRestart.assert_called_once_with(
            (self.START_CYCLE, self.START_NODE), (self.START_CYCLE, self.START_NODE - 1)
        )
        self.assertEqual(self.o.r.p.cycle, self.START_CYCLE)
        self.assertEqual(self.o.r.p.timeNode, self.START_NODE)

    @contextmanager
    def patchCS(self, **kwargs):
        """Patch the case settings, restoring at the end of the context block.

        Kwargs are key: value pairs for settings to be modified.

        Can't use ``patch.dict`` because case settings don't have at least a ``.copy``
        method that ``patch.dict`` expects.
        """
        cs = self.o.cs
        old = {k: cs[k] for k in kwargs}
        for k, v in kwargs.items():
            cs[k] = v
        yield
        # restore the original settings on exit
        for k, v in old.items():
            cs[k] = v

    def test_callPreviousEOC(self):
        """When restarting at the start of the cycle, make sure we call the previous interactEOC for all interfaces."""
        with (
            self.patchCS(startNode=0),
            patch.object(self.o, "interactAllEOC") as patchEOC,
            # Don't want to attempt to load a fictitious DB
            patch.object(self.dbi, "prepRestartRun"),
        ):
            self.o.interactAllRestart(self.dbi)
        patchEOC.assert_called_once_with(self.START_CYCLE - 1)

    def test_noDatabaseNoRestart(self):
        """Ensure there must be a database interface responsible for loading from database."""
        with self.assertRaisesRegex(ValueError, "No database interface"):
            self.o.interactAllRestart(None)


================================================
FILE: armi/physics/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Physics Packages are where the magic of physical simulation happens in an ARMI run.

.. tip:: The vast majority of physics capabilities are provided by :py:mod:`Plugins <armi.plugins>`.

Thus, this package contains some fairly generic physics-related code that belongs in a reactor
analysis framework. Besides providing some generic physics-related capabilities, this package
also provides a recommended *physics namespace* for all ARMI plugins to follow.
The physics namespaces we've come up with is as follows: fuelCycle Fuel management, fabrication, reprocessing, mass flow, etc. neutronics Radiation transport, nuclear depletion, nuclear cross sections, reactivity coefficients, kinetics, etc. safety Systems analysis in accident scenarios, source term, dose conversion, etc. fuelPerformance Changes in fuel systems vs. burnup and time, including thermophysical modeling of fuel, cladding, fuel salt, etc. thermalHydraulics Heat transfer, fluid flow, pressure drop, power cycles, you name it. economics Economic modeling and cost estimation. .. important:: Yeah, we know that it is kind of a stretch to call economics a kind of physics. We have found it very useful to use `Python namespace packages <https://packaging.python.org/guides/packaging-namespace-packages/>`_ to mirror this exact namespace in physics plugins that are outside of the ARMI framework. Thus, there can be two totally separate plugins:: IAEA/ physics/ neutronics/ superSourceTerm/ __init__.py plugin.py and:: IAEA/ physics/ economics/ magwoodsbrain/ __init__.py plugin.py And then the associated ARMI-based app could import both ``IAEA.physics.neutronics.superSourceTerm`` and ``IAEA.physics.economics.magwoodsbrain``. Having a consistency in namespace along these lines is quite nice. """ ================================================ FILE: armi/physics/constants.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some constants."""

DPA_CROSS_SECTIONS = {}
"""Multigroup dpa cross sections.

Displacements per atom are correlated to material damage.

Notes
-----
This data structure can be updated by plugins with design-specific dpa data.

:meta hide-value:
"""

# The following are multigroup DPA XS for EBR II. They were generated using an ultra hard MCC spectrum
# that calculated buckling and had an initial keff of 2. Even so, Inc600/625/X750 33 group dpa XS values are less than
# 5% for all but 5 energy groups. The maximum deviation is 18% in INC625 between .192 and .331 MeV.
# NOTE(review): each list below holds 33 group-wise values; the units and group ordering
# (presumably fast-to-thermal, matching the generating MCC model) are not stated here — confirm
# against the lattice-physics group structure before reuse.
DPA_CROSS_SECTIONS["dpa_EBRII_HT9"] = [
    2.34569e03,
    1.92004e03,
    1.58640e03,
    1.25670e03,
    8.24006e02,
    5.20750e02,
    3.96146e02,
    3.28749e02,
    2.06149e02,
    1.42452e02,
    1.15189e02,
    6.60183e01,
    8.23281e01,
    1.31771e01,
    1.94552e01,
    3.33861e01,
    1.27099e01,
    6.20510e00,
    3.58651e00,
    3.74080e00,
    4.52607e-01,
    1.62650e-01,
    1.24318e-01,
    1.56210e-01,
    1.89583e-01,
    2.36694e-01,
    2.97445e-01,
    3.92136e-01,
    5.07320e-01,
    6.81782e-01,
    1.07978e00,
    2.43258e00,
    4.35563e00,
]
DPA_CROSS_SECTIONS["dpa_EBRII_INC600"] = [
    2.57204e03,
    2.11682e03,
    1.64031e03,
    1.21591e03,
    8.69816e02,
    6.47128e02,
    4.25248e02,
    3.59778e02,
    2.89208e02,
    1.89443e02,
    1.55667e02,
    1.22460e02,
    8.25721e01,
    1.15026e02,
    9.90510e01,
    2.42252e01,
    1.73504e01,
    9.34915e00,
    5.67409e00,
    3.13557e00,
    5.95081e-01,
    1.95832e-01,
    1.93791e-01,
    2.52465e-01,
    3.11159e-01,
    3.71897e-01,
    4.95951e-01,
    6.50177e-01,
    8.39344e-01,
    1.12626e00,
    1.78500e00,
    4.02021e00,
    7.19616e00,
]
DPA_CROSS_SECTIONS["dpa_EBRII_INC625"] = [
    2.49791e03,
    2.05899e03,
    1.60441e03,
    1.20292e03,
    8.68237e02,
    6.39219e02,
    4.16975e02,
    3.50177e02,
    2.74491e02,
    1.89846e02,
    1.53178e02,
    1.16379e02,
    7.35708e01,
    1.05281e02,
    8.96142e01,
    2.58537e01,
    1.91218e01,
    8.44318e00,
    5.16493e00,
    2.67000e00,
    5.66731e-01,
    2.20242e-01,
    1.92435e-01,
    3.31226e-01,
    3.69475e-01,
    5.24326e-01,
    4.78120e-01,
    6.22211e-01,
    8.15999e-01,
    1.07725e00,
    1.70732e00,
    3.84540e00,
    6.88285e00,
]
DPA_CROSS_SECTIONS["dpa_EBRII_INCX750"] = [
    2.59270e03,
    2.13361e03,
    1.65837e03,
    1.23739e03,
    8.86458e02,
    6.51012e02,
    4.27294e02,
    3.58449e02,
    2.88178e02,
    1.88428e02,
    1.56886e02,
    1.27132e02,
    8.89576e01,
    1.31703e02,
    1.04350e02,
    2.55248e01,
    1.77532e01,
    9.43101e00,
    5.60558e00,
    3.06838e00,
    5.85632e-01,
    1.90347e-01,
    1.89737e-01,
    2.50070e-01,
    3.08765e-01,
    3.69079e-01,
    4.92257e-01,
    6.45369e-01,
    8.33181e-01,
    1.11802e00,
    1.77196e00,
    3.98945e00,
    7.13947e00,
]
DPA_CROSS_SECTIONS["dpa_EBRII_PE16"] = [
    2.47895e03,
    2.03583e03,
    1.61943e03,
    1.23864e03,
    8.58439e02,
    5.95879e02,
    4.10632e02,
    3.42948e02,
    2.49940e02,
    1.69919e02,
    1.39511e02,
    1.00171e02,
    8.21254e01,
    7.94117e01,
    6.73353e01,
    2.84413e01,
    1.61127e01,
    7.13145e00,
    4.59314e00,
    3.12973e00,
    5.17916e-01,
    1.51560e-01,
    1.56357e-01,
    2.37675e-01,
    2.81173e-01,
    3.65433e-01,
    4.12907e-01,
    5.40601e-01,
    7.03084e-01,
    9.37963e-01,
    1.48726e00,
    3.34954e00,
    5.99536e00,
]


================================================
FILE: armi/physics/executers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Executors are useful for having a standard way to run physics calculations.

They may involve external codes (with inputs/execution/output) or in-memory data pathways.
"""

import hashlib
import os

from armi import runLog
from armi.context import MPI_RANK, getFastPath
from armi.utils import directoryChangers, pathTools


class ExecutionOptions:
    """
    A data structure representing all options needed for a physics kernel.

    .. impl:: Options for executing external calculations.
        :id: I_ARMI_EX0
        :implements: R_ARMI_EX

        Implements a basic container to hold and report options to be used in the execution of
        an external code (see :need:`I_ARMI_EX1`). Options are stored as instance attributes and
        can be dumped as a string using
        :py:meth:`~armi.physics.executers.ExecutionOptions.describe`, which will include the name
        and value of all public attributes of the instance.

        Also facilitates the ability to execute parallel instances of a code by providing the
        ability to resolve a ``runDir`` that is aware of the executing MPI rank. This is done via
        :py:meth:`~armi.physics.executers.ExecutionOptions.setRunDirFromCaseTitle`, where the
        user passes in a ``caseTitle`` string, which is hashed and combined with the MPI rank to
        provide a unique directory name to be used by each parallel instance.

    Attributes
    ----------
    inputFile : str
        Name of main input file. Often passed to stdin of external code.
    outputFile : str
        Name of main output file. Often the stdout of external code.
    extraInputFiles : list of tuples
        (sourceName, destName) pairs of file names that will be brought from the working dir
        into the runDir. Allows renames while in transit.
    extraOutputFiles : list of tuples
        (sourceName, destName) pairs of file names that will be extracted from the runDir to the
        working dir
    executablePath : str
        Path to external executable to run (if external code is used)
    runDir : str
        Path on running system where the run will take place. This is often used to ensure
        external codes that use hard-drive disk space run on a local disk rather than a shared
        network drive
    workingDir : str
        Path on system where results will be placed after the run. This is often a shared
        network location. Auto-applied during execution by default.
    label : str
        A name for the run that may be used as a prefix for input/output files generated.
    interfaceName : str
        A name for the interface calling the Executer that may be used to organize the
        input/output files generated within sub-folders under the working directory.
    savePhysicsFiles : bool
        Dump the physics kernel I/O files from the execution to a dedicated directory that will
        not be overwritten so they will be available after the run.
    copyOutput : bool
        Copy the output from running the executable back to the working directory.
    applyResultsToReactor : bool
        Update the in-memory reactor model with results upon completion. Set to False when
        information from a run is needed for auxiliary purposes rather than progressing the
        reactor model.
    """

    def __init__(self, label=None):
        # main I/O files; both optional depending on the kernel being wrapped
        self.inputFile = None
        self.outputFile = None
        # (source, dest) pairs moved into/out of the run directory
        self.extraInputFiles = []
        self.extraOutputFiles = []
        self.executablePath = None
        self.runDir = None
        self.workingDir = None
        self.label = label
        self.interfaceName = None
        self.applyResultsToReactor = True
        # NOTE(review): semantics of paramsToScaleSubset are not defined in this module;
        # presumably consumed by subclasses/plugins — confirm before relying on it.
        self.paramsToScaleSubset = None
        self.savePhysicsFiles = False
        self.copyOutput = True

    def __repr__(self):
        return f"<{self.__class__.__name__}: {self.label}>"

    def fromUserSettings(self, cs):
        """Set options from a particular Settings object."""
        raise NotImplementedError()

    def fromReactor(self, reactor):
        """Set options from a particular reactor object."""
        raise NotImplementedError()

    def resolveDerivedOptions(self):
        """Called by executers right before executing."""

    def setRunDirFromCaseTitle(self, caseTitle: str) -> None:
        """
        Set run directory derived from case title and label.

        This is optional (you can set runDir to whatever you want). If you use this, you will
        get a relatively consistent naming convention for your fast-path folders.
        """
        # This creates a hash of the case title plus the label
        # to shorten the running directory and to avoid path length
        # limitations on the OS.
        caseString = f"{caseTitle}-{str(self.label)}".encode("utf-8")
        caseTitleHash = str(hashlib.sha1(caseString).hexdigest())[:8]
        # suffix with MPI rank so parallel instances get unique directories
        self.runDir = os.path.join(getFastPath(), f"{caseTitleHash}-{MPI_RANK}")

    def describe(self) -> str:
        """Make a string summary of all options (public attributes only)."""
        lines = ["Options summary:", "----------------"]
        for key, val in sorted(self.__dict__.items()):
            if not key.startswith("_"):
                lines.append(f"  {key:40s}{str(val)[:80]:80s}")
        return "\n".join(lines)


class Executer:
    """
    Short-lived object that coordinates a calculation step and updates a reactor.

    Notes
    -----
    This is deliberately **not** a :py:class:`~mpiActions.MpiAction`. Thus, Executers can run as
    potentially multiple steps in a parent (parallelizable ) MpiAction or in other flexible
    ways. This is intended to maximize reusability.
    """

    def __init__(self, options, reactor):
        self.options = options
        self.r = reactor
        # directory-changer class used to enter/exit the run directory; subclasses may override
        self.dcType = directoryChangers.TemporaryDirectoryChanger

    def run(self):
        """
        Run the executer steps.

        This should use the current state of the reactor as input, perform some kind of
        calculation, and update the reactor with the output.
        """
        raise NotImplementedError()


class DefaultExecuter(Executer):
    """
    An Executer that uses a common run sequence.

    This sequence has been found to be relatively common in many externally-executed physics
    codes. It is here for convenience but is not required. The sequence look like:

    * Choose modeling options (either from the global run settings input or dictated
      programmatically)
    * Apply geometry transformations to the ARMI Reactor as needed
    * Build run-specific working directory
    * Write input file(s)
    * Put specific input files and libs in run directory
    * Run the analysis (external execution, or not)
    * Process output while still in run directory
    * Check error conditions
    * Move desired output files back to main working directory
    * Clean up run directory
    * Un-apply geometry transformations as needed
    * Update ARMI data model as desired

    .. impl:: Default tool for executing external calculations.
        :id: I_ARMI_EX1
        :implements: R_ARMI_EX

        Facilitates the execution of external calculations by accepting ``options`` (an
        :py:class:`~armi.physics.executers.ExecutionOptions` object) and providing methods that
        build run directories and execute a code based on the values in ``options``.

        The :py:meth:`~armi.physics.executers.DefaultExecuter.run` method will first resolve any
        derived options in the ``options`` object and check if the specified ``executablePath``
        option is valid, raising an error if not. If it is, preparation work for executing the
        code is performed, such as performing any geometry transformations specified in
        subclasses or building the directories needed to save input and output files. Once the
        temporary working directory is created, the executer moves into it and runs the external
        code, applying any results from the run as specified in subclasses. Finally, any
        geometry perturbations that were performed are undone.
    """

    def run(self):
        """
        Run the executer steps.

        .. warning::
            If a calculation requires anything different from what this method does, do not
            update this method with new complexity! Instead, simply make your own run sequence
            and/or class. This pattern is useful only in that it is fairly simple. By all means,
            do use ``DirectoryChanger`` and ``ExecuterOptions`` and other utilities.
        """
        self.options.resolveDerivedOptions()
        runLog.debug(self.options.describe())
        if self.options.executablePath and not os.path.exists(self.options.executablePath):
            raise IOError(f"Required executable `{self.options.executablePath}` not found for {self}")
        self._performGeometryTransformations()
        inputs, outputs = self._collectInputsAndOutputs()
        # cXnY tag identifies the cycle/time node for optional per-state output archiving
        state = f"c{self.r.p.cycle}n{self.r.p.timeNode}"
        dirName = self.options.interfaceName or self.options.label
        if self.options.savePhysicsFiles:
            outputDir = os.path.join(pathTools.armiAbsPath(os.getcwd()), state, dirName)
        else:
            outputDir = pathTools.armiAbsPath(os.getcwd())
        # must either write input to CWD for analysis and then copy to runDir
        # or not list it in inputs (for optimization)
        self.writeInput()
        with self.dcType(
            self.options.runDir,
            filesToMove=inputs,
            filesToRetrieve=outputs,
            outputPath=outputDir,
        ) as dc:
            self.options.workingDir = dc.initial
            self._updateRunDir(dc.destination)
            self._execute()
            output = self._readOutput()
            if self.options.applyResultsToReactor:
                output.apply(self.r)
        self._undoGeometryTransformations()
        self._updateAdditionalParameters()
        return output

    def _updateRunDir(self, directory):
        """
        If a ``TemporaryDirectoryChanger`` is used, the ``runDir`` needs to be updated.

        If a ForcedCreationDirectoryChanger is used instead, nothing needs to be done.

        Parameters
        ----------
        directory : str
            New path for runDir
        """
        if self.dcType == directoryChangers.TemporaryDirectoryChanger:
            self.options.runDir = directory

    def _collectInputsAndOutputs(self):
        """
        Get total lists of input and output files.

        If self.options.copyOutput is false, don't copy the main `outputFile` back from the
        working directory. In some ARMI runs, the executer can be run hundreds or thousands of
        times and generate many output files that aren't strictly necessary to keep around. One
        can save space by choosing not to copy the outputs back in these special cases.

        ``extraOutputFiles`` are typically controlled by the subclass, so the copyOutput option
        only affects the main ``outputFile``.
        """
        inputs = [self.options.inputFile] if self.options.inputFile else []
        inputs.extend(self.options.extraInputFiles)
        if self.options.outputFile and self.options.copyOutput:
            outputs = [self.options.outputFile]
        else:
            outputs = []
        outputs.extend(self.options.extraOutputFiles)
        return inputs, outputs

    def _execute(self) -> bool:
        # base implementation only logs; subclasses perform the actual external invocation
        runLog.extra(
            f"Executing {self.options.executablePath}\n"
            f"\tInput: {self.options.inputFile}\n"
            f"\tOutput: {self.options.outputFile}\n"
            f"\tWorking dir: {self.options.runDir}"
        )
        return True

    def writeInput(self):
        # hook: subclasses write kernel input files into the CWD before the dir change
        pass

    def _readOutput(self):
        raise NotImplementedError()

    def _applyOutputToDataModel(self, output):
        # hook: subclasses map kernel output onto the ARMI reactor model
        pass

    def _performGeometryTransformations(self):
        # hook: subclasses may perturb geometry before the run
        pass

    def _undoGeometryTransformations(self):
        # hook: inverse of _performGeometryTransformations
        pass

    def _updateAdditionalParameters(self):
        # hook: subclasses may set extra reactor parameters after the run
        pass


================================================
FILE: armi/physics/fuelCycle/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The fuel cycle package analyzes the various elements of nuclear fuel cycles from mining to disposal.

Fuel cycle code can include things like:

* In- and ex-core fuel management
* Fuel chemistry
* Fuel processing
* Fuel fabrication
* Fuel mass flow scenarios
* And so on

There is one included fuel cycle plugin: The Fuel Handler.
The fuel handler plugin moves fuel around in a reactor.
"""

from armi import interfaces, operators, plugins
from armi.operators import RunTypes
from armi.physics.fuelCycle import fuelHandlers, settings

ORDER = interfaces.STACK_ORDER.FUEL_MANAGEMENT


class FuelHandlerPlugin(plugins.ArmiPlugin):
    """The built-in ARMI fuel management plugin."""

    @staticmethod
    @plugins.HOOKIMPL
    def exposeInterfaces(cs):
        """
        Implementation of the exposeInterfaces plugin hookspec.

        Notes
        -----
        The interface may import user input modules to customize the actual fuel management.
        """
        from armi.physics.neutronics.settings import CONF_NEUTRONICS_KERNEL

        # active if a fuel handler or shuffle sequence was requested, or for
        # direct-equilibrium standard runs
        fuelHandlerNeedsToBeActive = (
            cs[settings.CONF_FUEL_HANDLER_NAME]
            or cs[settings.CONF_SHUFFLE_SEQUENCE_FILE]
            or (cs["eqDirect"] and cs["runType"].lower() == RunTypes.STANDARD.lower())
        )
        if not fuelHandlerNeedsToBeActive or "MCNP" in cs[CONF_NEUTRONICS_KERNEL]:
            return []
        else:
            # snapshots runs re-examine existing states, so no shuffling occurs
            enabled = cs["runType"] != operators.RunTypes.SNAPSHOTS
            return [interfaces.InterfaceInfo(ORDER, fuelHandlers.FuelHandlerInterface, {"enabled": enabled})]

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define settings for the plugin."""
        return settings.getFuelCycleSettings()

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettingsValidators(inspector):
        """Implementation of settings inspections for fuel cycle settings."""
        return settings.getFuelCycleSettingValidators(inspector)


================================================
FILE: armi/physics/fuelCycle/assemblyRotationAlgorithms.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithms used to rotate hex assemblies in a reactor core.

Notes
-----
These algorithms are defined in assemblyRotationAlgorithms.py, but they are used in:
``FuelHandler.outage()``.

.. warning:: Nothing should go in this file, but rotation algorithms.
"""

import math
from collections import defaultdict

from armi import runLog
from armi.physics.fuelCycle.hexAssemblyFuelMgmtUtils import (
    getOptimalAssemblyOrientation,
)
from armi.physics.fuelCycle.settings import CONF_ASSEM_ROTATION_STATIONARY
from armi.physics.fuelCycle.utils import (
    assemblyHasFuelPinBurnup,
    assemblyHasFuelPinPowers,
)
from armi.reactor.assemblies import Assembly


def _rotationNumberToRadians(rot: int) -> float:
    """Convert a rotation number to radians, assuming a HexAssembly."""
    # one rotation step on a hex assembly is 60 degrees == pi/3 radians
    return rot * math.pi / 3


def buReducingAssemblyRotation(fh):
    """
    Rotates all detail assemblies to put the highest bu pin in the lowest power orientation.

    Parameters
    ----------
    fh : FuelHandler object
        A fully initialized FuelHandler object.

    See Also
    --------
    simpleAssemblyRotation : an alternative rotation algorithm
    """
    runLog.info("Algorithmically rotating assemblies to minimize burnup")
    # Store how we should rotate each assembly but don't perform the rotation just yet
    # Consider assembly A is shuffled to a new location and rotated.
    # Now, assembly B is shuffled to where assembly A used to be. We need to consider the
    # power profile of A prior to it's rotation to understand the power profile B may see.
    rotations: dict[int, list[Assembly]] = defaultdict(list)
    for aPrev in fh.moved:
        # If the assembly was out of the core, it will not have pin powers.
        # No rotation information to be gained.
        if aPrev.lastLocationLabel in Assembly.NOT_IN_CORE:
            continue
        aNow = fh.r.core.getAssemblyWithStringLocation(aPrev.lastLocationLabel)
        # An assembly in the SFP could have burnup but if it's coming from the load
        # queue it's totally fresh. Skip a check over all pins in the model
        if aNow.lastLocationLabel == Assembly.LOAD_QUEUE:
            continue
        # no point in rotation if there's no pin detail
        if assemblyHasFuelPinPowers(aPrev) and assemblyHasFuelPinBurnup(aNow):
            rot = getOptimalAssemblyOrientation(aNow, aPrev)
            rotations[rot].append(aNow)

    if fh.cs[CONF_ASSEM_ROTATION_STATIONARY]:
        # also consider assemblies that stayed put, if the user requested it
        for a in filter(
            lambda asm: asm not in fh.moved and assemblyHasFuelPinPowers(asm) and assemblyHasFuelPinBurnup(asm),
            fh.r.core,
        ):
            rot = getOptimalAssemblyOrientation(a, a)
            rotations[rot].append(a)

    nRotations = 0
    # filter skips rotation number 0 (no rotation needed)
    for rot, assems in filter(lambda item: item[0], rotations.items()):
        # Radians used for the actual rotation. But a neater degrees print out is nice for logs
        radians = _rotationNumberToRadians(rot)
        degrees = round(math.degrees(radians), 3)
        for a in assems:
            runLog.important(f"Rotating assembly {a} {degrees} CCW.")
            a.rotate(radians)
            nRotations += 1

    runLog.info(f"Rotated {nRotations} assemblies.")


def simpleAssemblyRotation(fh):
    """
    Rotate all pin-detail assemblies that were just shuffled by 60 degrees.

    Parameters
    ----------
    fh : FuelHandler object
        A fully initialized FuelHandler object.

    Notes
    -----
    Also, optionally rotate stationary (non-shuffled) assemblies if the setting is set.
    Obviously, only pin-detail assemblies can be rotated, because homogenized assemblies are
    isotropic.
Examples -------- >>> simpleAssemblyRotation(fh) See Also -------- FuelHandler.outage : calls this method based on a user setting """ runLog.info("Rotating assemblies by 60 degrees") numRotated = 0 hist = fh.o.getInterface("history") rot = math.radians(60) for a in hist.getDetailAssemblies(): if a in fh.moved or fh.cs[CONF_ASSEM_ROTATION_STATIONARY]: a.rotate(rot) numRotated += 1 ring, pos = a.spatialLocator.getRingPos() runLog.extra("Rotating Assembly ({0},{1}) to Orientation {2}".format(ring, pos, 1)) runLog.extra("Rotated {0} assemblies".format(numRotated)) ================================================ FILE: armi/physics/fuelCycle/fuelHandlerFactory.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """factory for the FuelHandler.""" import importlib from pathlib import Path from armi.physics.fuelCycle import fuelHandlers from armi.physics.fuelCycle.settings import CONF_FUEL_HANDLER_NAME, CONF_SHUFFLE_LOGIC from armi.utils import directoryChangers, pathTools def fuelHandlerFactory(operator): """ Return an instantiated FuelHandler object based on user settings. The FuelHandler is expected to be a short-lived object that only lives for the cycle upon which it acts. At the next cycle, this factory will be called again to instantiate a new FuelHandler. 
""" cs = operator.cs fuelHandlerClassName = cs[CONF_FUEL_HANDLER_NAME] fuelHandlerModulePath = cs[CONF_SHUFFLE_LOGIC] if not fuelHandlerClassName: # give the default FuelHandler. This does not have an implemented outage, but # still offers moving capabilities. Useful when you just need to make explicit # moves but do not have a fully-defined fuel management input. return fuelHandlers.FuelHandler(operator) # User did request a custom fuel handler. We must go find and import it # from the input directory. with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False): try: modulePath = Path(fuelHandlerModulePath) if modulePath.exists() and modulePath.suffix == ".py": module = pathTools.importCustomPyModule(modulePath) else: module = importlib.import_module(fuelHandlerModulePath) if not hasattr(module, fuelHandlerClassName): raise KeyError( "The requested fuel handler object {0} is not " "found in the fuel management input file {1} from CWD {2}. " "Check input" "".format(fuelHandlerClassName, fuelHandlerModulePath, cs.inputDirectory) ) # instantiate the custom object fuelHandlerCls = getattr(module, fuelHandlerClassName) fuelHandler = fuelHandlerCls(operator) # also get getFactorList function from module level if it's there. # This is a legacy input option, getFactorList should now generally # be an method of the FuelHandler object if hasattr(module, "getFactorList"): # staticmethod binds the provided getFactorList function to the # fuelHandler object without passing the implicit self argument. # The __get__ pulls the actual function out from the descriptor. fuelHandler.getFactorList = staticmethod(module.getFactorList).__get__(fuelHandlerCls) except (IOError, ImportError): raise ValueError( "Either the file specified in the `shuffleLogic` setting ({}) or the " "fuel handler class name specified in the `fuelHandlerName` setting ({}) " "cannot be found. CWD is: {}. 
Update input.".format( fuelHandlerModulePath, fuelHandlerClassName, cs.inputDirectory ) ) return fuelHandler ================================================ FILE: armi/physics/fuelCycle/fuelHandlerInterface.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A place for the FuelHandler's Interface.""" from armi import interfaces, runLog from armi.physics.fuelCycle import fuelHandlerFactory, fuelHandlers from armi.physics.fuelCycle.settings import ( CONF_PLOT_SHUFFLE_ARROWS, CONF_RUN_LATTICE_BEFORE_SHUFFLING, CONF_SHUFFLE_LOGIC, CONF_SHUFFLE_SEQUENCE_FILE, ) from armi.utils import plotting class FuelHandlerInterface(interfaces.Interface): """ Moves and/or processes fuel in a Standard Operator. Fuel management traditionally runs at the beginning of a cycle, before power or temperatures have been updated. This allows pre-run fuel management steps for highly customized fuel loadings. In typical runs, no fuel management occurs at the beginning of the first cycle and the as-input state is left as is. .. impl:: ARMI provides a shuffle logic interface. :id: I_ARMI_SHUFFLE :implements: R_ARMI_SHUFFLE This interface allows for a user to define custom shuffle logic that modifies to the core model. Being based on the :py:class:`~armi.interfaces.Interface` class, it has direct access to the current core model. 
User logic is able to be executed from within the :py:meth:`~armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel` method, which will use the :py:meth:`~armi.physics.fuelCycle.fuelHandlerFactory.fuelHandlerFactory` to search for a Python file or importable module specified by the case setting ``shuffleLogic``. If it exists, the fuel handler with name specified by the user via the ``fuelHandlerName`` case setting will be imported, and any actions in its ``outage`` method will be executed at the :py:meth:`~armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.interactBOC` hook. If no class with the name specified by the ``fuelHandlerName`` setting is found in the module or file specified by ``shuffleLogic``, an error is returned. See the user manual for how the custom shuffle logic module or file should be constructed. """ name = "fuelHandler" def __init__(self, r, cs): interfaces.Interface.__init__(self, r, cs) # assembly name key, (x, y) values. used for making shuffle arrows. self.oldLocations = {} # need order due to nature of moves but with fast membership tests self.moved = [] self.cycle = 0 @staticmethod def specifyInputs(cs): files = { cs.getSetting(settingName): [ cs[settingName], ] for settingName in [CONF_SHUFFLE_LOGIC, "explicitRepeatShuffles", CONF_SHUFFLE_SEQUENCE_FILE] if cs[settingName] } return files def interactBOC(self, cycle=None): """ Move and/or process fuel. Also, if requested, first have the lattice physics system update XS. """ # if lattice physics is requested, compute it here instead of after fuel management. # This enables XS to exist for branch searching, etc. mc2 = self.o.getInterface(purpose="latticePhysics") xsgm = self.o.getInterface("xsGroups") if mc2 and self.cs[CONF_RUN_LATTICE_BEFORE_SHUFFLING]: runLog.extra( f'Running {mc2} lattice physics before fuel management due to the "{CONF_RUN_LATTICE_BEFORE_SHUFFLING}"' " setting being activated." 
) xsgm.interactBOC(cycle=cycle) mc2.interactBOC(cycle=cycle) if self.enabled() and ( self.cs["loadStyle"] != "fromDB" or self.cs["startNode"] == 0 or (self.cs["startCycle"] != cycle) ): # in restart cases, only do this if restarting at BOC to avoid duplicating shuffles # the logic to accomplish this is a bit long because we don't pass the # timeNode into interactBOC hooks. Otherwise it would be much easier # to determine when to call this or not self.manageFuel(cycle) def interactEOC(self, cycle=None): if self.r.excore.get("sfp") is not None: runLog.extra(f"There are {len(self.r.excore['sfp'])} assemblies in the Spent Fuel Pool") def interactEOL(self): """Make reports at EOL.""" self.makeShuffleReport() def manageFuel(self, cycle): """Perform the fuel management for this cycle.""" fh = fuelHandlerFactory.fuelHandlerFactory(self.o) fh.prepCore() fh.prepShuffleMap() # take note of where each assembly is located before the outage # for mapping after the outage self.r.core.locateAllAssemblies() shuffleFactors, _ = fh.getFactorList(cycle) fh.outage(shuffleFactors) # move the assemblies around if self.cs[CONF_PLOT_SHUFFLE_ARROWS]: arrows = fh.makeShuffleArrows() plotting.plotFaceMap( self.r.core, "percentBu", labelFmt=None, fName="{}.shuffles_{}.png".format(self.cs.caseTitle, self.r.p.cycle), shuffleArrows=arrows, ) def makeShuffleReport(self): """ Create a data file listing all the shuffles that occurred in a case. This can be used to export shuffling to an external code or to perform explicit repeat shuffling in a restart. It creates a ``*SHUFFLES.txt`` file based on the Reactor.moves structure See Also -------- readMoves : reads this file and parses it. """ fname = self.cs.caseTitle + "-SHUFFLES.txt" out = open(fname, "w") for cycle in range(self.cs["nCycles"]): # do cycle+1 because cycle 0 at t=0 isn't usually interesting # remember, we put cycle 0 in so we could do BOL branch searches. # This also syncs cycles up with external physics kernel cycles. 
out.write("Before cycle {0}:\n".format(cycle)) movesThisCycle = self.r.core.moves.get(cycle) if movesThisCycle is not None: for move in movesThisCycle: enrichLine = " ".join(["{0:.8f}".format(enrich) for enrich in move.enrichList]) if move.fromLoc in fuelHandlers.FuelHandler.DISCHARGE_LOCS: # this is a re-entering assembly. Give extra info so repeat shuffles can handle it out.write( "{0} moved to {1} with assembly type {2} ringPosCycle={4} with enrich list: {3}\n".format( move.fromLoc, move.toLoc, move.assemType, enrichLine, move.ringPosCycle, ) ) else: # skip extra info. regular expression in readMoves will handle it just fine. out.write( "{0} moved to {1} with assembly type {2} with enrich list: {3}\n".format( move.fromLoc, move.toLoc, move.assemType, enrichLine ) ) out.write("\n") out.close() def workerOperate(self, cmd): """Delegate mpi command to the fuel handler object.""" fh = fuelHandlerFactory.fuelHandlerFactory(self.o) return fh.workerOperate(cmd) ================================================ FILE: armi/physics/fuelCycle/fuelHandlers.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module handles fuel management operations such as shuffling, rotation, and fuel processing (in fluid systems). The :py:class:`FuelHandlerInterface` instantiates a ``FuelHandler``, which is typically a user-defined subclass the :py:class:`FuelHandler` object in custom shuffle-logic input files. 
@dataclass(eq=True)
class AssemblyMove:
    """Description of an individual shuffle move.

    Parameters
    ----------
    fromLoc : str
        Original location label.
    toLoc : str
        Destination location label.
    enrichList : list[float]
        Axial U235 weight percent enrichment values for each block.
    assemType : str, optional
        Type of assembly that is moving.
    ringPosCycle : list[int], optional
        List of ints of length 3. For assembly retrieval from SFP. (ring, position, cycle)
        specifies the desired assembly resided at (ring, position) during specified cycle number.
    rotation : float, optional
        Degrees of manual rotation to apply after shuffling.
    """

    fromLoc: str
    toLoc: str
    enrichList: List[float] = field(default_factory=list)
    assemType: Optional[str] = None
    # typing.List used for consistency with the other annotations in this module
    ringPosCycle: Optional[List[int]] = None
    rotation: Optional[float] = None

    def __post_init__(self):
        """Validate the optional (ring, position, cycle) retrieval specification."""
        if self.ringPosCycle is None:
            return
        errorMsg = (
            "invalid (ring, position, cycle) specified for assembly retrieval from SFP\n"
            f"expected: list of ints, len=3\nreceived: {self.ringPosCycle}"
        )
        if not isinstance(self.ringPosCycle, list):
            raise TypeError(errorMsg)
        if len(self.ringPosCycle) != 3:
            raise ValueError(errorMsg)
        for val in self.ringPosCycle:
            # bool is a subclass of int; reject it explicitly so True/False
            # are not silently accepted as ring/position/cycle numbers.
            if isinstance(val, bool) or not isinstance(val, int):
                raise TypeError(errorMsg)


@dataclass
class ProcessMoveListResult:
    """Container for the results of :meth:`FuelHandler.processMoveList`."""

    loadChains: List[List[str]]
    loopChains: List[List[str]]
    enriches: List[List[float]]
    loadChargeTypes: List[Optional[str]]
    ringPosCycles: List[Optional[List[int]]]
    dischargeDests: List[str]
    rotations: List[Tuple[str, float]]
    alreadyDone: List[str]
    def outage(self, factor=1.0):
        """
        Simulates a reactor reload outage. Moves and tracks fuel.

        This sets the moveList structure.

        Parameters
        ----------
        factor : float, optional
            Shuffle factor passed through to :meth:`chooseSwaps` for user-defined
            shuffle logic. Ignored when a repeat-shuffle or YAML sequence file is used.

        Returns
        -------
        list
            The assemblies that moved during this outage.

        Raises
        ------
        ValueError
            If this handler instance has already performed an outage.
        FileNotFoundError
            If the requested YAML shuffle sequence file does not exist.
        RuntimeError
            If the repeat-shuffle file is missing or the configured rotation
            algorithm name does not exist on the rotation module.
        """
        # one outage per handler instance: self.moved doubles as the "already ran" flag
        if self.moved:
            raise ValueError("Cannot perform two outages with same FuelHandler instance.")

        # determine if a repeat shuffle is occurring or a new shuffle pattern
        if self.cs[CONF_SHUFFLE_SEQUENCE_FILE]:
            if not os.path.exists(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]):
                raise FileNotFoundError(
                    "Requested shuffle sequence file {0} does not exist. Cannot perform shuffling. ".format(
                        self.cs[CONF_SHUFFLE_SEQUENCE_FILE]
                    )
                )
            runLog.important("Applying shuffle sequence from {}".format(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]))
            # location hist params updated within performShuffle
            self.performShuffle(self.cs[CONF_SHUFFLE_SEQUENCE_FILE], yaml=True)
        elif self.cs["explicitRepeatShuffles"]:
            # repeated shuffle from a previous run's SHUFFLES.txt report
            if not os.path.exists(self.cs["explicitRepeatShuffles"]):
                raise RuntimeError(
                    "Requested repeat shuffle file {0} does not exist. Cannot perform shuffling. ".format(
                        self.cs["explicitRepeatShuffles"]
                    )
                )
            runLog.important("Repeating a shuffling pattern from {}".format(self.cs["explicitRepeatShuffles"]))
            # location hist params updated within performShuffle
            self.performShuffle(self.cs["explicitRepeatShuffles"])
        else:
            # Normal shuffle from user-provided shuffle logic input
            self.chooseSwaps(factor)
            self.updateAllLocationHistParams(self.cycle)

        # do rotations if pin-level details are available (requires fluxRecon plugin)
        if self.cs["fluxRecon"] and self.cs[CONF_ASSEMBLY_ROTATION_ALG]:
            # Rotate assemblies ONLY IF at least some assemblies have pin detail
            # The user can choose the algorithm method name directly in the settings
            if hasattr(rotAlgos, self.cs[CONF_ASSEMBLY_ROTATION_ALG]):
                rotationMethod = getattr(rotAlgos, self.cs[CONF_ASSEMBLY_ROTATION_ALG])
                rotationMethod(self)
            else:
                raise RuntimeError(
                    "FuelHandler {0} does not have a rotation algorithm called {1}.\nChange your {2} setting".format(
                        rotAlgos,
                        self.cs[CONF_ASSEMBLY_ROTATION_ALG],
                        CONF_ASSEMBLY_ROTATION_ALG,
                    )
                )

        # manual per-location rotations queued by a YAML shuffle file (see performShuffle)
        for loc, deg in self.pendingRotations:
            assem = self.r.core.getAssemblyWithStringLocation(loc)
            if assem is None:
                runLog.warning(f"No assembly found at {loc} for manual rotation")
                continue
            runLog.important(f"Rotating assembly {assem} in {loc} by {deg} degrees CCW from shuffle file")
            assem.rotate(math.radians(deg))
        self.pendingRotations = []

        # inform the reactor of how many moves occurred so it can put the number in the database.
        if self.moved:
            numMoved = len(self.moved) * self.r.core.powerMultiplier

            # tell the reactor which assemblies moved where
            # also tell enrichments of each block in case there's some autoboosting going on.
            # This is also essential for repeating shuffles in later restart runs.
            for a in self.moved:
                try:
                    ringPosCycle = None
                    # grab first (ring, pos) at cycle info which can be used to identify this assembly if it goes to SFP
                    if a.p.ringPosHist:
                        for cycleNum, rp in enumerate(a.p.ringPosHist):
                            if isinstance(rp, tuple) and rp[0] not in a.NOT_IN_CORE:
                                ringPosCycle = [int(rp[0]), int(rp[1]), cycleNum]
                                break
                        else:
                            # for/else: no in-core entry found. Redundant since
                            # ringPosCycle starts as None, but kept for clarity.
                            ringPosCycle = None
                    self.r.core.setMoveList(
                        self.cycle,
                        a.lastLocationLabel,
                        a.getLocation(),
                        [b.getUraniumMassEnrich() for b in a],
                        a.getType(),
                        ringPosCycle,
                    )
                # NOTE(review): bare "except:" also catches BaseException; it re-raises
                # after logging so nothing is swallowed, but "except Exception:" would
                # be safer style — confirm before changing.
                except:
                    runLog.important("A fuel management error has occurred. ")
                    runLog.important("Trying operation on assembly {}".format(a))
                    runLog.important("The moved list is {}".format(self.moved))
                    raise
        else:
            numMoved = 0

        self.o.r.core.p.numMoves = numMoved

        self.o.r.core.setBlockMassParams()

        runLog.important("Fuel handler performed {0} assembly shuffles.".format(numMoved))

        # now wipe out the self.moved version so it doesn't transmit the assemblies during distributeState
        moved = self.moved[:]
        self.moved = []
        return moved
trim trailing data if len(a.p.ringPosHist) > cycle: a.p.ringPosHist = a.p.ringPosHist[:cycle] return a def _updateAssemLocationHistParam(self, a, cycle): """ Update assembly location history parameter with current assembly location for specified cycle number. Index of a.p.ringPosHist corresponds to the cycle number BOC assembly location e.g. i=0 is the initial position, i=1 is the position at BOC1, etc. """ a = self._preconditionLocationHistParam(a, cycle) # assem param should now be the correct len. append data at correct index. if a.getLocation() in a.NOT_IN_CORE: a.p.ringPosHist.append((a.getLocation(), a.getLocation())) else: ring, pos, _ = grids.locatorLabelToIndices(a.getLocation()) a.p.ringPosHist.append((ring, pos)) def updateAllLocationHistParams(self, cycle): """ Update location history param for all assemblies with current assembly locations for specified cycle number Index of a.p.ringPosHist corresponds to the cycle number BOC assembly location e.g. i=0 is the initial position, i=1 is the position at BOC1, etc. """ for a in self.r.core: self._updateAssemLocationHistParam(a, cycle) for a in list(self.r.excore["sfp"]): self._updateAssemLocationHistParam(a, cycle) def chooseSwaps(self, shuffleFactors=None): """Moves the fuel around or otherwise processes it between cycles.""" raise NotImplementedError @staticmethod def getFactorList(cycle, cs=None, fallBack=False): """ Return factors between 0 and 1 that control fuel management. This is the default shuffle control function. Usually you would override this with your own in a custom shuffleLogic.py file. For more details about how this works, refer to :ref:`fuel-management-input`. This will get bound to the default FuelHandler as a static method below. This is done to allow a user to mix and match FuelHandler class implementations and getFactorList implementations at run time. Notes ----- Ultimately, this approach will likely get replaced using the plugin framework, but we aren't there yet. 
""" # prefer to keep these 0 through 1 since this is what the branch search can do. defaultFactorList = {"eqShuffles": 1} factorSearchFlags = [] return defaultFactorList, factorSearchFlags def prepCore(self): """Aux function to run before XS generation (do moderation, etc).""" pass @staticmethod def _compareAssem(candidate, current): """Check whether the candidate assembly should replace the current ideal assembly. Given a candidate tuple (diff1, a1) and current tuple (diff2, a2), decide whether the candidate is better than the current ideal. This first compares the diff1 and diff2 values. If diff1 is sufficiently less than diff2, a1 wins, returning True. Otherwise, False. If diff1 and diff2 are sufficiently close, the assembly with the lesser assemNum wins. This should result in a more stable comparison than on floating-point comparisons alone. """ if np.isclose(candidate[0], current[0], rtol=1e-8, atol=1e-8): return candidate[1].p.assemNum < current[1].p.assemNum else: return candidate[0] < current[0] @staticmethod def _getParamMax(a, paramName, blockLevelMax=True): """Get assembly/block-level maximum parameter value in assembly.""" multiplier = a.getSymmetryFactor() if multiplier != 1: # handle special case: volume-integrated parameters where symmetry factor is not 1 if blockLevelMax: paramCollection = a[0].p else: paramCollection = a.p isVolumeIntegrated = paramCollection.paramDefs[paramName].location == ParamLocation.VOLUME_INTEGRATED multiplier = a.getSymmetryFactor() if isVolumeIntegrated else 1.0 if blockLevelMax: return a.getChildParamValues(paramName).max() * multiplier else: return a.p[paramName] * multiplier def findAssembly( self, targetRing=None, width=(0, 0), param=None, compareTo=None, forceSide=None, exclusions=None, typeSpec=None, mandatoryLocations=None, zoneList=None, excludedLocations=None, minParam=None, minVal=None, maxParam=None, maxVal=None, findMany=False, coords=None, exactType=False, acceptFirstCandidateRing=False, blockLevelMax=False, 
    def findAssembly(
        self,
        targetRing=None,
        width=(0, 0),
        param=None,
        compareTo=None,
        forceSide=None,
        exclusions=None,
        typeSpec=None,
        mandatoryLocations=None,
        zoneList=None,
        excludedLocations=None,
        minParam=None,
        minVal=None,
        maxParam=None,
        maxVal=None,
        findMany=False,
        coords=None,
        exactType=False,
        acceptFirstCandidateRing=False,
        blockLevelMax=False,
        findFromSfp=False,
        maxNumAssems=None,
        circularRingFlag=False,
    ):
        r"""
        Search reactor for assemblies with various criterion. Primarily for shuffling.

        Parameters
        ----------
        targetRing : int, optional
            The ring in which to search

        width : tuple of integers
            A (size, side) tuple where size is the number of rings on either side to also check.
            side=1: only look in higher, -1: only look lower, 0: both sides

        param : string, optional
            A block (if blockLevelMax) or assem level param name such as 'power' or 'percentBu'
            (requires compareTo).

        compareTo : float or Assembly instance
            an assembly to be compared to. Alternatively, a floating point number to compare to.
            Even more alternatively, an (assembly,mult) or (float,mult) tuple where mult is a
            multiplier. For example, if you wanted an assembly that had a bu close to half of
            assembly bob, you'd give param='percentBu', compareTo=(bob,0.5) If you want one with
            a bu close to 0.3, you'd do param='percentBu',compareTo=0.3. Yes, if you give a
            (float, multiplier) tuple the code will still work as expected.

        forceSide : bool, optional
            requires the found assembly to have either 1: higher, -1: lower, None: any param
            than compareTo

        exclusions : list, optional
            List of assemblies that will be excluded from the search

        minParam : float or list, optional
            a parameter to compare to minVal for setting lower bounds. If list, must correspond
            to parameters in minVal in order.

        maxParam : float or list, optional
            a parameter to compare to maxVal for setting upper bounds of acceptable assemblies.
            If list, must correspond to parameters in maxVal in order.

        minVal : float or list, optional
            a value or a (parameter, multiplier) tuple for setting lower bounds

            For instance, if minParam='timeToLimit' and minVal=10, only assemblies with
            timeToLimit higher than 10 will be returned. (Of course, there is also maxParam and
            maxVal)

        maxVal : float or list, optional
            a value or a (parameter, multiplier) tuple for setting upper bounds

        mandatoryLocations : list, optional
            A list of string-representations of locations in the core for limiting the search to
            several places.

            Any locations also included in `excludedLocations` will be excluded.

        excludedLocations : list, optional
            a list of string-representations of locations in the core that will be excluded from
            the search

        zoneList : list, optional
            name of a zone defined in settings.py that will be picked from. Under development

        findMany : bool, optional
            If True, will return a list of assemblies that match. Don't give a param.

        typeSpec : Flags or list of Flags, optional
            only assemblies with this type list will be returned. If none, only fuel will be
            found.

        coords : tuple, optional
            x,y tuple in cm. the fuel handler will try to find an assembly with a center closest
            to that point

        exactType : bool, optional
            require type to be exactly equal to what's in the type list. So
            Flags.IGNITER | Flags.FUEL is not Flags.INNER | Flags.IGNITER | Flags.FUEL

        acceptFirstCandidateRing : bool, optional
            takes the first assembly found in the earliest ring (without searching all rings for
            a maxBu, for example) So if the candidate rings are 1-10 and we're looking for
            igniter fuel with a maxBurnup, we don't get the max burnup in all those rings, but
            rather the igniter with the max burnup in the ring closest to 1. If there are no
            igniters until ring 4, you will get an igniter in ring 4.

        blockLevelMax : bool, optional
            If true, the param to search for will be built as the maximum block-level param of
            this name instead of the assembly param. This avoids the need to assign assembly
            level params sometimes. default: false.

        findFromSfp : bool, optional
            If true, will look in the spent-fuel pool instead of in the core.

        maxNumAssems : int, optional
            The maximum number of assemblies to return. Only relevant if findMany==True

        circularRingFlag : bool, optional
            Toggle using rings that are based on distance from the center of the reactor

        Notes
        -----
        The call signature on this method may have gotten slightly out of hand as valuable
        capabilities were added in fuel management studies. For additional expansion, it may be
        worth reconsidering the design of these query operations.

        Returns
        -------
        Assembly instance or assemList of assembly instances that match criteria, or None if
        none match

        Examples
        --------
        This returns the feed fuel assembly in ring 4 that has a burnup closest to 100% (the
        highest burnup assembly)::

            feed = self.findAssembly(
                targetRing=4, width=(0, 0), param="maxPercentBu", compareTo=100,
                typeSpec=Flags.FEED | Flags.FUEL
            )
        """
        # list for storing multiple results if findMany is true.
        assemList = []

        # process input arguments
        if targetRing is None:
            # look through the full core
            targetRing = 0
            width = (100, 0)

        if exclusions is None:
            exclusions = []

        if isinstance(minVal, list):
            # list given with multiple mins
            minVals = minVal
            minParams = minParam
        else:
            minVals = [minVal]
            minParams = [minParam]

        if isinstance(maxVal, list):
            maxVals = maxVal
            maxParams = maxParam
        else:
            # just one given. put it in a list so the below machinery can handle it.
            maxVals = [maxVal]
            maxParams = [maxParam]

        if typeSpec is None:
            # restrict motions to fuel only
            # not really necessary. take this default out if you want to move control rods, etc.
            typeSpec = Flags.FUEL

        # running best match: (smallest diff seen so far, its assembly)
        minDiff = (1e60, None)

        # compareTo can either be a tuple, a value, or an assembly
        # if it's a tuple, it can either be an int/float and a multiplier, or an assembly and a multiplier
        # if it's not a tuple, the multiplier will be assumed to be 1.0
        mult = 1.0  # if no mult brought in, just assume 1.0
        if isinstance(compareTo, tuple):
            # tuple (assem or int/float, multiplier) brought in.
            # separate it
            compareTo, mult = compareTo

        if isinstance(compareTo, (float, int)):
            # floating point or int.
            compVal = compareTo * mult
        elif param:
            # assume compareTo is an assembly
            # NOTE(review): if param is given but compareTo is None, this raises on the
            # attribute access inside _getParamMax — callers must supply compareTo with param.
            compVal = FuelHandler._getParamMax(compareTo, param, blockLevelMax) * mult

        if coords:
            # find the assembly closest to xt,yt if coords are given without considering params.
            aTarg = None
            minD = 1e10
            xt, yt = coords  # assume (x,y) tuple
            for a in self.r.core:
                x, y, _ = a.spatialLocator.getLocalCoordinates()
                d = (y - yt) ** 2 + (x - xt) ** 2
                if d < minD:
                    minD = d
                    aTarg = a
            return aTarg

        if findFromSfp:
            # hack to enable SFP searching.
            candidateRings = ["SFP"]
        else:
            # set up candidateRings based on targetRing and width. The target rings comes first b/c it is preferred.
            candidateRings = [targetRing]
            if width[1] <= 0:
                # 0 or -1 implies that the inner rings can be added.
                for inner in range(width[0]):
                    candidateRings.append(targetRing - inner - 1)  # +1 to get 1,2,3 instead of 0,1,2
            if width[1] >= 0:
                # if 1, add in the outer rings
                for outer in range(width[0]):
                    candidateRings.append(targetRing + outer + 1)

        # get lists of assemblies in each candidate ring. Do it in this order in case we prefer ones in the first.
        # scan through all assemblies and find the one (or more) that best fits the criteria
        for ringI, assemsInRings in enumerate(
            self._getAssembliesInRings(candidateRings, typeSpec, exactType, exclusions, circularRingFlag)
        ):
            for a in assemsInRings:
                innocent = True

                # Check that this assembly's minParam is > the minimum for each minParam
                for minIndex, minVal in enumerate(minVals):
                    minParam = minParams[minIndex]
                    if minParam:
                        # a minimum was specified. Check to see if we're ok
                        if isinstance(minVal, tuple):
                            # tuple turned in. it's a multiplier and a param
                            realMinVal = FuelHandler._getParamMax(a, minVal[0], blockLevelMax) * minVal[1]
                        else:
                            realMinVal = minVal

                        if FuelHandler._getParamMax(a, minParam, blockLevelMax) < realMinVal:
                            # this assembly does not meet the minVal specifications. Skip it.
                            innocent = False
                            break  # for speed (not a big deal here)

                if not innocent:
                    continue

                # Check upper bounds, to make sure this assembly doesn't have maxParams>maxVals
                for maxIndex, maxVal in enumerate(maxVals):
                    maxParam = maxParams[maxIndex]
                    if maxParam:
                        if isinstance(maxVal, tuple):
                            # tuple turned in. it's a multiplier and a param
                            realMaxVal = FuelHandler._getParamMax(a, maxVal[0], blockLevelMax) * maxVal[1]
                        else:
                            realMaxVal = maxVal

                        if FuelHandler._getParamMax(a, maxParam, blockLevelMax) > realMaxVal:
                            # this assembly has a maxParam that's higher than maxVal and therefore
                            # doesn't qualify. skip it.
                            innocent = False
                            break

                if not innocent:
                    continue

                # Check to see if this assembly is in the list of candidate locations. if not, skip it.
                if mandatoryLocations:
                    if a.getLocation() not in mandatoryLocations:
                        continue

                if excludedLocations:
                    if a.getLocation() in excludedLocations:
                        # this assembly is in the excluded location list. skip it.
                        continue

                # only process of the Assembly is in a Zone
                if not self.isAssemblyInAZone(zoneList, a):
                    continue

                # Now find the assembly with the param closest to the target val.
                if param:
                    diff = abs(FuelHandler._getParamMax(a, param, blockLevelMax) - compVal)

                    if (
                        forceSide == 1
                        and FuelHandler._getParamMax(a, param, blockLevelMax) > compVal
                        and FuelHandler._compareAssem((diff, a), minDiff)
                    ):
                        # forceSide=1, so that means look in rings further out
                        minDiff = (diff, a)
                    elif (
                        forceSide == -1
                        and FuelHandler._getParamMax(a, param, blockLevelMax) < compVal
                        and FuelHandler._compareAssem((diff, a), minDiff)
                    ):
                        # forceSide=-1, so that means look in rings closer in from the targetRing
                        minDiff = (diff, a)
                    elif FuelHandler._compareAssem((diff, a), minDiff):
                        # no preference of which side, just take the one with the closest param.
                        minDiff = (diff, a)
                else:
                    # no param specified. Just return one closest to the target ring
                    diff = None
                    if a.spatialLocator.getRingPos()[0] == targetRing:
                        # short circuit the search
                        if findMany:
                            assemList.append((diff, a))
                            continue
                        else:
                            return a
                    elif abs(a.spatialLocator.getRingPos()[0] - targetRing) < minDiff[0]:
                        minDiff = (
                            abs(a.spatialLocator.getRingPos()[0] - targetRing),
                            a,
                        )

                if findMany:
                    # returning many assemblies. If there's a param, we'd like it to be honored by
                    # ordering this list from smallest diff to largest diff.
                    # NOTE(review): when no param is given every diff is None, so the later
                    # sort() compares the assemblies themselves — confirm Assembly ordering
                    # is defined before relying on the resulting order.
                    assemList.append((diff, a))

            if ringI == 0 and acceptFirstCandidateRing and minDiff[1]:
                # an acceptable assembly was found in the targetRing (ringI==0)
                # and the user requested this to be returned. Therefore, return it without
                # scanning through the additional rings.
                return minDiff[1]

        if not minDiff[1]:
            # can't find assembly in targetRing with close param to compareTo
            pass

        if findMany:
            assemList.sort()  # prefer items that have params that are the closest to the value.
            # extract the assemblies.
            assemsInRings = [a for diff, a in assemList]
            if maxNumAssems:
                return assemsInRings[:maxNumAssems]
            else:
                return assemsInRings
        else:
            return minDiff[1]
Optionally, a string specifying a special location like the SFP (spent fuel pool) typeSpec : Flags or iterable of Flags, optional Flag types to restrict assemblies to exactType : bool, optional Match the type in typelist exactly exclusions : list of Assemblies, optional exclude these assemblies from the results circularRingFlag : bool A flag to toggle on using rings that are based on distance from the center of the reactor Returns ------- assemblyList : list List of assemblies in each ring of the ringList. [[a1,a2,a3],[a4,a5,a6,a7],...] """ if "SFP" in ringList and self.r.excore.get("sfp") is None: sfpAssems = [] runLog.warning( f"{self} can't pull from SFP; no SFP is attached to the reactor {self.r}." "To get assemblies from an SFP, you must add an SFP system to the blueprints" f"or otherwise instantiate a SpentFuelPool object as r.excore['sfp']" ) else: sfpAssems = list(self.r.excore["sfp"]) assemblyList = [[] for _i in range(len(ringList))] # empty lists for each ring if exclusions is None: exclusions = [] exclusions = set(exclusions) if circularRingFlag: assemListTmp = [] assemListTmp2 = [] if ringList[0] == "SFP": # kind of a hack for now. Need the capability. assemblyList = sfpAssems else: for i, ringNumber in enumerate(ringList): assemListTmp = self.r.core.getAssembliesInCircularRing(ringNumber, typeSpec, exactType, exclusions) for a in assemListTmp: if a in exclusions: continue if not a.hasFlags(typeSpec, exact=exactType): continue # save only the assemblies not in the exclusions and with the proper type assemListTmp2.append(a) # make the list of lists of assemblies assemblyList[i] = assemListTmp2 else: if ringList[0] == "SFP": # kind of a hack for now. Need the capability. 
assemList = sfpAssems else: assemList = self.r.core.getAssemblies() for a in assemList: if a in exclusions: continue if not a.hasFlags(typeSpec, exact=exactType): continue if a.getLocation() == "SFP": ring = "SFP" else: ring = a.spatialLocator.getRingPos()[0] if ring in ringList: # keep it in the right order assemblyList[ringList.index(ring)].append(a) return assemblyList def swapAssemblies(self, a1, a2): """Moves a whole assembly from one place to another. .. impl:: User-specified blocks can be left in place during within-core swaps. :id: I_ARMI_SHUFFLE_STATIONARY0 :implements: R_ARMI_SHUFFLE_STATIONARY Before assemblies are moved, the ``_transferStationaryBlocks`` class method is called to check if there are any block types specified by the user as stationary via the ``stationaryBlockFlags`` case setting. Using these flags, blocks are gathered from each assembly which should remain stationary and checked to make sure that both assemblies have the same number and same height of stationary blocks. If not, return an error. If all checks pass, the :py:meth:`~armi.reactor.assemblies.Assembly.remove` and :py:meth:`~armi.reactor.assemblies.Assembly.insert` methods are used to swap the stationary blocks between the two assemblies. Once this process is complete, the actual assembly movement can take place. Through this process, the stationary blocks remain in the same core location. Parameters ---------- a1 : :py:class:`Assembly <armi.reactor.assemblies.Assembly>` The first assembly a2 : :py:class:`Assembly <armi.reactor.assemblies.Assembly>` The second assembly See Also -------- dischargeSwap : swap assemblies where one is outside the core and the other is inside """ if a1 is None or a2 is None: runLog.warning("Cannot swap None assemblies. Check your findAssembly results. 
Skipping swap") return runLog.extra("Swapping {} with {}.".format(a1, a2)) # add assemblies into the moved location for a in [a1, a2]: if a not in self.moved: self.moved.append(a) oldA1Location = a1.spatialLocator self._transferStationaryBlocks(a1, a2) a1.moveTo(a2.spatialLocator) a2.moveTo(oldA1Location) def _transferStationaryBlocks(self, assembly1, assembly2): """ Exchange the stationary blocks (e.g. grid plate) between the moving assemblies. These blocks in effect are not moved at all. """ # grab stationary block flags sBFList = self.r.core.stationaryBlockFlagsList # identify stationary blocks for assembly 1 a1StationaryBlocks = [ [block, block.spatialLocator.k] for block in assembly1 if any(block.hasFlags(sbf) for sbf in sBFList) ] # identify stationary blocks for assembly 2 a2StationaryBlocks = [ [block, block.spatialLocator.k] for block in assembly2 if any(block.hasFlags(sbf) for sbf in sBFList) ] # check for any inconsistencies in stationary blocks and ensure alignment if [block[1] for block in a1StationaryBlocks] != [block[1] for block in a2StationaryBlocks]: raise ValueError( """Different number and/or locations of stationary blocks between {} (Stationary Blocks: {}) and {} (Stationary Blocks: {}).""".format( assembly1, a1StationaryBlocks, assembly2, a2StationaryBlocks ) ) if a1StationaryBlocks and a2StationaryBlocks: if a1StationaryBlocks[-1][0].p.ztop != a2StationaryBlocks[-1][0].p.ztop: runLog.warning( """Difference in top elevation of stationary blocks between {} (Stationary Blocks: {}, Elevation at top of stationary blocks {}) and {} (Stationary Blocks: {}, Elevation at top of stationary blocks {}))""".format( assembly1, a1StationaryBlocks, a1StationaryBlocks[-1][0].p.ztop, assembly2, a2StationaryBlocks, a2StationaryBlocks[-1][0].p.ztop, ) ) # swap stationary blocks for (assem1Block, assem1BlockIndex), (assem2Block, assem2BlockIndex) in zip( a1StationaryBlocks, a2StationaryBlocks ): # remove stationary blocks assembly1.remove(assem1Block) 
assembly2.remove(assem2Block) # insert stationary blocks assembly1.insert(assem1BlockIndex, assem2Block) assembly2.insert(assem2BlockIndex, assem1Block) @staticmethod def validateLoc(loc, cycle): """Validate a location label from a shuffle YAML file. Parameters ---------- loc : str Location label to validate. cycle : int Cycle currently being processed, used for context in error messages. """ if loc in FuelHandler.DISCHARGE_LOCS: return try: grids.locatorLabelToIndices(loc) except Exception: raise InputError( f"Invalid location label {loc!r} in cycle {cycle} in shuffle YAML. " "Location labels must be non-empty and contain integers." ) def dischargeSwap(self, incoming, outgoing, toSfp=False): """Removes one assembly from the core and replace it with another assembly. .. impl:: User-specified blocks can be left in place for the discharge swap. :id: I_ARMI_SHUFFLE_STATIONARY1 :implements: R_ARMI_SHUFFLE_STATIONARY Before assemblies are moved, the ``_transferStationaryBlocks`` class method is called to check if there are any block types specified by the user as stationary via the ``stationaryBlockFlags`` case setting. Using these flags, blocks are gathered from each assembly which should remain stationary and checked to make sure that both assemblies have the same number and same height of stationary blocks. If not, return an error. If all checks pass, the :py:meth:`~armi.reactor.assemblies.Assembly.remove` and :py:meth:`~armi.reactor.assemblies.Assembly.insert` methods are used to swap the stationary blocks between the two assemblies. Once this process is complete, the actual assembly movement can take place. Through this process, the stationary blocks from the outgoing assembly remain in the original core position, while the stationary blocks from the incoming assembly are discharged with the outgoing assembly. Parameters ---------- incoming : :py:class:`Assembly <armi.reactor.assemblies.Assembly>` The assembly getting swapped into the core. 
outgoing : :py:class:`Assembly <armi.reactor.assemblies.Assembly>` The assembly getting discharged out the core. toSfp : bool, optional If True, store the discharged assembly in the SFP regardless of the ``trackAssems`` setting. See Also -------- swapAssemblies : swaps assemblies that are already in the core """ runLog.debug("Discharge swapping {} for {}.".format(incoming, outgoing)) if incoming is None or outgoing is None: runLog.warning("Cannot discharge swap None assemblies. Check your findAssembly calls. Skipping") return # add assemblies into the moved location # keep it unique so we don't get artificially inflated numMoves for a in [incoming, outgoing]: if a not in self.moved: self.moved.append(a) self._transferStationaryBlocks(incoming, outgoing) # replace the goingOut guy. loc = outgoing.spatialLocator # say it happened at the end of the previous cycle by sending cycle-1 # to removeAssembly, which will look up EOC of last cycle, # which, coincidentally is the same time we're at right now at BOC. self.r.core.removeAssembly(outgoing, addToSFP=toSfp) # adjust the assembly multiplicity so that it does not forget how many it really # represents. This allows us to discharge an assembly from any location in # fractional-core models where the central location may only be one assembly, # whereas other locations are more, and keep proper track of things. In the # future, this mechanism may be used to handle symmetry in general. outgoing.p.multiplicity = len(loc.getSymmetricEquivalents()) + 1 if self.r.excore.get("sfp") is not None: if incoming in self.r.excore["sfp"].getChildren(): # pull it out of the sfp if it's in there. runLog.extra("removing {0} from the sfp".format(incoming)) self.r.excore["sfp"].remove(incoming) incoming.p.multiplicity = 1 self.r.core.add(incoming, loc) def swapCascade(self, assemList): """ Perform swaps on a list of assemblies. Parameters ---------- assemList: list A list of assemblies to be shuffled. 
Notes ----- [goingOut,inter1,inter2,goingIn] will go to [inter1, inter2, goingIn, goingOut] in terms of positions or, in ASCII art:: >---------------v | | [A <- B <- C <- D] """ # first check for duplicates for assem in assemList: if assemList.count(assem) != 1: runLog.warning(f"{assem} is in the cascade more than once.") # now swap levels = len(assemList) for level in range(levels - 1): if not assemList[level + 1]: runLog.info( f"Skipping level {level + 1} in the cascade because it is None. Be careful, " "this might cause an unexpected shuffling order." ) continue self.swapAssemblies(assemList[0], assemList[level + 1]) def performShuffle(self, shuffleFile, yaml=False): """ Execute shuffling instructions from a previous run or YAML file. Parameters ---------- shuffleFile : str Path to the shuffle sequence file. yaml : bool, optional If True, interpret ``shuffleFile`` as a YAML shuffle sequence. Returns ------- moved : list List of assemblies that moved this cycle. Notes ----- Typically the shuffle file from a previous run will be ``caseTitle``-"SHUFFLES.txt". 
See Also -------- doRepeatShuffle : Performs moves as processed by this method processMoveList : Converts a stored list of moves into a functional list of assemblies to swap makeShuffleReport : Creates the file that is processed here """ # read moves file cycle = self.r.p.cycle if cycle == 0: # if cycle is 0, we are at the beginning of the first cycle # this is a special case where we don't have any moves # so we return an empty list return [] if yaml: moves, swaps = self.readMovesYaml(shuffleFile) else: moves = self.readMoves(shuffleFile) swaps = {} # setup the load and loop chains to be run per cycle moveList = moves[cycle] swapList = swaps.get(cycle, []) moveData = self.processMoveList(moveList) # Now have the move locations moved = self.doRepeatShuffle( moveData.loadChains, moveData.loopChains, moveData.enriches, moveData.loadChargeTypes, moveData.ringPosCycles, moveData.dischargeDests, ) # Apply any swaps after performing cascades for loc1, loc2 in swapList: a1 = self.r.core.getAssemblyWithStringLocation(loc1) a2 = self.r.core.getAssemblyWithStringLocation(loc2) if a1 is None or a2 is None: runLog.warning(f"Could not perform swap between {loc1} and {loc2}") continue self.swapAssemblies(a1, a2) moved.extend([a1, a2]) self.pendingRotations = moveData.rotations return moved @staticmethod def readMoves(fname): r""" Reads a shuffle output file and sets up the moves dictionary. Parameters ---------- fname : str The shuffles file to read Returns ------- moves : dict A dictionary of all the moves. Keys are the cycle number. Values are a list of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects, one for each individual move that happened in the cycle. ``oldLoc`` and ``newLoc`` are string representations of the locations and ``enrichList`` is a list of mass enrichments from bottom to top. See Also -------- performShuffle : reads this file and executes the shuffling outage : creates the moveList in the first place. 
makeShuffleReport : writes the file that is read here. """ try: f = open(fname) except OSError: raise RuntimeError( "Could not find/open repeat shuffle file {} in working directory {}".format(fname, os.getcwd()) ) moves = {} numMoves = 0 for line in f: if "ycle " in line: # Used to say "Cycle 1 at 0.0 years". Now says: "Before cycle 1 at 0.0 years" to be more specific. # This RE allows backwards compatibility. # Later, we removed the at x years m = re.search(r"ycle (\d+)", line) cycle = int(m.group(1)) moves[cycle] = [] elif "assembly" in line: # this is the new load style where an actual assembly type is written to the shuffle logic # due to legacy reasons, the assembly type will be put into group 4 pat = ( r"([A-Za-z0-9!\-]+) moved to ([A-Za-z0-9!\-]+) with assembly type " + r"([A-Za-z0-9!\s]+)\s*(ringPosCycle=\[.*\])?\s*with enrich list: (.+)" ) m = re.search(pat, line) if not m: raise InputError('Failed to parse line "{0}" in shuffle file'.format(line)) oldLoc = m.group(1) newLoc = m.group(2) assemType = m.group(3).strip() # take off any possible trailing whitespace ringPosCycle = m.group(4) # will be None for legacy shuffleLogic files. (pre 2013-08) if ringPosCycle: ringPosCycle = eval(ringPosCycle.split("=")[1]) # extract the assembly ring, position and cycle. enrichList = [float(i) for i in m.group(5).split()] moves[cycle].append(AssemblyMove(oldLoc, newLoc, enrichList, assemType, ringPosCycle)) numMoves += 1 elif "moved" in line: # very old shuffleLogic file. 
runLog.warning( "Using old *.SHUFFLES.txt loading file", single=True, label="Using old shuffles file", ) m = re.search( "([A-Za-z0-9!]+) moved to ([A-Za-z0-9!]+) with enrich list: (.+)", line, ) if not m: raise InputError('Failed to parse line "{0}" in shuffle file'.format(line)) oldLoc = m.group(1) newLoc = m.group(2) enrichList = [float(i) for i in m.group(3).split()] # old loading style, just assume that there is a booster as our surrogate moves[cycle].append(AssemblyMove(oldLoc, newLoc, enrichList)) numMoves += 1 f.close() runLog.info("Read {0} moves over {1} cycles".format(numMoves, len(moves.keys()))) return moves @staticmethod def readMovesYaml(fname): r""" Read a shuffle file in YAML format. A cascade with no explicit final location deletes the assembly by default. Parameters ---------- fname : str Path to the YAML-formatted shuffle file. Returns ------- moves : dict Mapping of cycle numbers to lists of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects that describe the shuffle sequence. swaps : dict Mapping of cycle numbers to lists of location-pair tuples describing assemblies to be swapped. """ # 1. load YAML file try: with open(fname, "r") as stream: yaml = YAML(typ="safe") data = yaml.load(stream) except DuplicateKeyError as e: raise InputError(str(e)) from e except OSError as ee: raise RuntimeError( f"Could not find/open repeat shuffle file {fname!r} in working directory {os.getcwd()}: {ee}" ) from ee # 2. 
perform various validation tests on the YAML data if "sequence" not in data: raise InputError("Shuffle YAML missing required 'sequence' mapping") moves = {} swaps = defaultdict(list) # cycles may be provided in any order; verify only that there are no gaps cycleNums = {int(c) for c in data["sequence"].keys()} if cycleNums: expected = set(range(min(cycleNums), max(cycleNums) + 1)) missing = sorted(expected - cycleNums) if missing: if len(missing) == 1: raise InputError(f"Missing cycle {missing[0]} in shuffle sequence") raise InputError(f"Missing cycles {missing} in shuffle sequence") # 3. parse YAML file into shuffle data for cycleKey, actions in data["sequence"].items(): cycle = int(cycleKey) moves[cycle] = [] seenLocs = set() if actions is None and cycle != 0: runLog.warning(f"Cycle {cycleKey} has no shuffle actions defined, skipping.") continue elif cycle == 0: raise InputError( "Cycle 0 is not allowed in shuffle YAML. " "This cycle is reserved for the initial core loading." "Shuffling is available at the beginning of cycle 1" ) for action in actions: allowed = {"cascade", "fuelEnrichment", "extraRotations", "swap", "ringPosCycle"} unknown = set(action) - allowed if unknown: raise InputError(f"Unknown action keys {unknown} in shuffle YAML") if "cascade" in action: chain = list(action["cascade"]) if len(chain) < 2: raise InputError("cascade must contain at least two entries") if any(not isinstance(item, str) for item in chain): raise InputError("cascade entries must be strings") if chain[0] == "SFP": # move an assembly from the SFP into the Core assemType = None locs = chain if len(locs) < 2: raise InputError("cascade starting with SFP must include a destination location") else: # move an assembly around the Core assemType = chain[0] locs = chain[1:] if not locs: raise InputError("cascade must contain at least one location after the assembly type") for loc in locs: FuelHandler.validateLoc(loc, cycle) if loc not in FuelHandler.DISCHARGE_LOCS and loc in seenLocs: 
raise InputError(f"Location {loc} appears in multiple cascades in cycle {cycle}") seenLocs.add(loc) enrich = [] enrichList = action.get("fuelEnrichment", []) try: enrich = [float(e) for e in enrichList] except (TypeError, ValueError): raise InputError("fuelEnrichment values must be numeric. Got {enrichList}") if any(e < 0 or e > 1 for e in enrich): raise InputError("fuelEnrichment values must be between 0 and 1. Got {enrich}") ringPosCycle = action.get("ringPosCycle") if locs[0] == "SFP": if ringPosCycle is None: raise InputError("ringPosCycle required when loading from SFP") moves[cycle].append(AssemblyMove("SFP", locs[1], [], None, ringPosCycle)) startIdx = 1 else: if ringPosCycle is not None: raise InputError("ringPosCycle is only valid when loading from SFP") moves[cycle].append(AssemblyMove("LoadQueue", locs[0], enrich, assemType)) startIdx = 0 for i in range(startIdx, len(locs) - 1): moves[cycle].append(AssemblyMove(locs[i], locs[i + 1])) if locs[-1] not in FuelHandler.DISCHARGE_LOCS: moves[cycle].append(AssemblyMove(locs[-1], "Delete")) elif "swap" in action: swap = action["swap"] if not isinstance(swap, list) or len(swap) != 2: raise InputError("swap must be a list of two location labels, got {swap}") if any(not isinstance(item, str) for item in swap): raise InputError("swap entries must be strings, got {swap}") for loc in swap: FuelHandler.validateLoc(loc, cycle) loc1, loc2 = swap swaps[cycle].append((loc1, loc2)) elif "extraRotations" in action: for loc, angle in action.get("extraRotations", {}).items(): FuelHandler.validateLoc(loc, cycle) moves[cycle].append(AssemblyMove(loc, loc, rotation=float(angle))) else: raise InputError(f"Unable to process {action} in {cycle}") return moves, dict(swaps) @staticmethod def trackChain(moveList, startingAt, alreadyDone=None): r""" Builds a chain of locations based on starting location. Notes ----- Takes a moveList and extracts chains. Remembers all it touches. 
If A moved to B, C moved to D, and B moved to C, this returns A, B, C ,D. Used in some monte carlo physics writers and in performShuffle Parameters ---------- moveList : list a list of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects that occurred at a single outage. startingAt : str A location label where the chain would start. This is important because the discharge moves are built when the SFP is found in a move. This method must find all assemblies in the chain leading up to this particular discharge. alreadyDone : list A list of locations that have already been tracked. Returns ------- chain : list The chain as a location list in order enrich : list The axial enrichment distribution of the load assembly. assemType : str The type of the assembly loadName or ringPosCycle : [str, tuple[int, int, int]] The assembly name of the load assembly, or the ringPosHist identifier destination : str Location where the first assembly in the chain is discharged See Also -------- performShuffle processMoveList """ if alreadyDone is None: alreadyDone = [] enrich = None # in case this is a load chain, prep for getting enrich. loadName = None assemType = None # in case this is a load chain, prep for getting an assembly type destination = None for move in moveList: fromLoc = move.fromLoc toLoc = move.toLoc if toLoc in FuelHandler.DISCHARGE_LOCS and "LoadQueue" in fromLoc: # skip dummy moves continue elif (fromLoc, toLoc) in alreadyDone: # skip this pair continue elif startingAt in fromLoc: # looking for chain involving toLoc # back-track the chain of moves chain = [fromLoc] destination = toLoc safeCount = 0 # to break out of crazy loops. 
ringPosCycle = None complete = False while ( chain[-1] not in ({"LoadQueue"} | FuelHandler.DISCHARGE_LOCS) and not complete and safeCount < 100 ): # look for something going to where the previous one is from lookingFor = chain[-1] for innerMove in moveList: cFromLoc = innerMove.fromLoc cToLoc = innerMove.toLoc cEnrichList = innerMove.enrichList cAssemblyType = innerMove.assemType cRingPosCycle = innerMove.ringPosCycle if cToLoc == lookingFor: chain.append(cFromLoc) if cFromLoc in ({"LoadQueue"} | FuelHandler.DISCHARGE_LOCS): # charge-discharge loop complete. enrich = cEnrichList ringPosCycle = cRingPosCycle assemType = cAssemblyType # break after finding the first predecessor to avoid duplicates break if chain[-1] == startingAt: # non-charging loop complete complete = True safeCount += 1 if not safeCount < 100: raise RuntimeError("Chain tracking got too long. Check moves.\n{0}".format(chain)) # delete the last item, it's loadqueue location or the startingFrom # location. chain.pop() # chain tracked. Can jump out of loop early. return chain, enrich, assemType, ringPosCycle, destination # if we get here, the startingAt location was not found. runLog.warning("No chain found starting at {0}".format(startingAt)) return [], enrich, assemType, loadName, destination def processMoveList(self, moveList) -> ProcessMoveListResult: """ Processes a move list and extracts fuel management loops and charges. Parameters ---------- moveList : list A list of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects describing each move. Returns ------- ProcessMoveListResult Structured information describing the move chains, enrichment distributions, and other shuffle data. Attributes include: loadChains : list[list[str]] Moves that include discharges. loopChains : list[list[str]] Moves without discharges. enriches : list[list[float]] Axial enrichment distribution for each load assembly. loadChargeTypes : list[Optional[str]] Assembly types for each load chain. 
loadNames : list[Optional[str]] Assembly names of loads (e.g., from SFP). dischargeDests : list[str] Final destinations for discharged assemblies (e.g., ``SFP`` or ``Delete``). rotations : list[tuple[str, float]] Manual rotations to apply (location, degrees). alreadyDone : list[str] Locations already processed while tracking chains. Notes ----- Used in some Monte Carlo interfaces to convert ARMI moves to their format moves. Also used in repeat shuffling. See Also -------- makeShuffleReport : writes the file that is being processed performShuffle : uses this to repeat shuffles """ alreadyDone = [] loadChains = [] # moves that have discharges loadChargeTypes = [] # the assembly types (str) to be used in a load chain. loopChains = [] # moves that don't have discharges enriches = [] # enrichments of each loadChain ringPosCycles = [] # assembly ring, position, at cycle (to read from SFP) dischargeDests = [] # final destinations for discharged assemblies rotations = [] # first handle all charge/discharge chains by looking for things going to SFP/Delete for move in moveList: fromLoc = move.fromLoc toLoc = move.toLoc rot = move.rotation if fromLoc == toLoc: if rot is not None: rotations.append((fromLoc, rot)) continue if toLoc in self.DISCHARGE_LOCS and "LoadQueue" in fromLoc: # skip dummy moves continue elif toLoc in self.DISCHARGE_LOCS: # discharge. Track chain. chain, enrichList, assemType, ringPosCycle, dest = FuelHandler.trackChain(moveList, startingAt=fromLoc) runLog.extra("Load Chain with load assem {0}: {1}".format(assemType, chain)) loadChains.append(chain) enriches.append(enrichList) loadChargeTypes.append(assemType) ringPosCycles.append(ringPosCycle) dischargeDests.append(dest) # track all the locations we saw already so we # don't use them in the loop moves. alreadyDone.extend(chain) # go through again, looking for stuff that isn't in chains. 
# put them in loop type 3 moves (arbitrary order) for move in moveList: fromLoc = move.fromLoc toLoc = move.toLoc if fromLoc == toLoc: # rotation or no-op continue if toLoc in self.DISCHARGE_LOCS or fromLoc in ({"LoadQueue"} | self.DISCHARGE_LOCS): # skip loads/discharges; they're already done. continue elif fromLoc in alreadyDone: # skip repeats continue else: # normal move chain, _enrichList, _assemType, _loadAssemName, _dest = FuelHandler.trackChain( moveList, startingAt=fromLoc ) loopChains.append(chain) alreadyDone.extend(chain) runLog.extra("Loop Chain: {0}".format(chain)) return ProcessMoveListResult( loadChains=loadChains, loopChains=loopChains, enriches=enriches, loadChargeTypes=loadChargeTypes, ringPosCycles=ringPosCycles, dischargeDests=dischargeDests, rotations=rotations, alreadyDone=alreadyDone, ) def doRepeatShuffle(self, loadChains, loopChains, enriches, loadChargeTypes, ringPosCycles, dischargeDests): r""" Actually does the fuel movements required to repeat a shuffle order. Parameters ---------- loadChains : list list of lists of location labels for each load chain (with charge/discharge) loopChains : list list of lists of location labels for each loop chain (no charge/discharge) enriches : list The block enrichment distribution of each load assembly loadChargeTypes :list The types of assemblies that get charged. ringPosCycles : list The ring, pos, and cycle of assemblies that get brought into the core (useful for pulling out of SFP for round 2, etc.) dischargeDests : list Final destination for each load chain (e.g., ``SFP`` or ``Delete``) See Also -------- performShuffle : coordinates the moves for this cycle processMoveList : builds the input lists Notes ----- This is a helper function for performShuffle """ moved = [] # shuffle all of the load chain assemblies (These include discharges to SFP # and loads from Loadqueue) # build a lookup table of locations throughout the current core and cache it. 
locContents = self.r.core.makeLocationLookup(assemblyLevel=True) # perform load swaps (with charge/discharge) for assemblyChain, enrichList, assemblyType, ringPosCycle, dest in zip( loadChains, enriches, loadChargeTypes, ringPosCycles, dischargeDests ): # convert the labels into actual assemblies to be swapped assemblyList = self.r.core.getLocationContents(assemblyChain, assemblyLevel=True, locContents=locContents) moved.extend(assemblyList) # go through and swap the assemblies knowing that there is a discharge (first one) # and a new assembly brought it (last one) for i in range(0, -(len(assemblyList) - 1), -1): self.swapAssemblies(assemblyList[i], assemblyList[i - 1]) # Now, everything has been set except the first assembly in the list, which must now be # replaced with a fresh assembly... but which one? The assemblyType string # tells us. # Sometimes enrichment is set on-the-fly by branch searches, so we must # not only use the proper assembly type but also adjust the enrichment. if ringPosCycle: ring, pos, cycle = ringPosCycle loadAssembly = self.r.core.getAssemblyWithRingPosHist(ring, pos, cycle) if not loadAssembly: msg = f"The required assembly located at ring {ring} pos {pos} at cycle {cycle} is not found" runLog.error(msg) raise RuntimeError(msg) else: # create a new assembly from the BOL assem templates and adjust the enrichment loadAssembly = self.r.core.createAssemblyOfType(enrichList=enrichList, assemType=assemblyType) # replace the goingOut guy (for continual feed cases) runLog.debug("Calling discharge swap with {} and {}".format(loadAssembly, assemblyList[0])) self.dischargeSwap(loadAssembly, assemblyList[0], toSfp=(dest == "SFP")) moved.append(loadAssembly) # shuffle all of the loop chain assemblies (no charge/discharge) for assemblyChain in loopChains: # convert the labels into actual assemblies to be swapped assemblyList = self.r.core.getLocationContents(assemblyChain, assemblyLevel=True, locContents=locContents) for a in assemblyList: 
moved.append(a) # go through and swap the assemblies knowing that there is a discharge (first one) # and a new assembly brought it (last one) # for i in range(0,-(len(assemblyList)-1),-1): for i in range(0, -(len(assemblyList) - 1), -1): self.swapAssemblies(assemblyList[i], assemblyList[i + 1]) return moved def workerOperate(self, cmd): """Handle a mpi command on the worker nodes.""" pass def prepShuffleMap(self): """Prepare a table of current locations for plotting shuffle maneuvers.""" self.oldLocations = {} for a in self.r.core: self.oldLocations[a.getName()] = a.spatialLocator.getGlobalCoordinates() def makeShuffleArrows(self): """ Build data for plotting all the previous shuffles as arrows. Returns ------- arrows : list Values are (currentCoords, oldCoords) tuples """ arrows = [] runLog.extra("Building list of shuffle arrows.") for a in self.r.core: currentCoords = a.spatialLocator.getGlobalCoordinates() oldCoords = self.oldLocations.get(a.getName(), None) if oldCoords is None: oldCoords = np.array((-50, -50, 0)) elif any(currentCoords != oldCoords): arrows.append((oldCoords, currentCoords)) return arrows ================================================ FILE: armi/physics/fuelCycle/hexAssemblyFuelMgmtUtils.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" This is a selection of fuel management utilities that seem generally useful enough to keep in ARMI, but they still only apply to hex assembly reactors. Notes ----- We are keeping these in ARMI even if they appear unused internally. """ import math import typing import numpy as np from armi import runLog from armi.physics.fuelCycle.utils import maxBurnupBlock, maxBurnupLocator from armi.utils.mathematics import findClosest if typing.TYPE_CHECKING: from armi.reactor.assemblies import HexAssembly def getOptimalAssemblyOrientation(a: "HexAssembly", aPrev: "HexAssembly") -> int: """ Get optimal hex assembly orientation/rotation to minimize peak burnup. Works by placing the highest-burnup pin in the location (of 6 possible locations) with lowest expected pin power. We evaluated "expected pin power" based on the power distribution in ``aPrev``, the previous assembly located where ``a`` is going. The algorithm goes as follows. 1. Get all the pin powers and ``IndexLocation`` s from the block at the previous location/timenode. 2. Obtain the ``IndexLocation`` of the pin with the highest burnup in the current assembly. 3. For each possible rotation, - Find the new location with ``HexGrid.rotateIndex`` - Find the index where that location occurs in previous locations - Find the previous power at that location 4. Return the rotation with the lowest previous power This algorithm assumes a few things. 1. ``len(HexBlock.getPinCoordinates()) == len(HexBlock.p.linPowByPin)`` and, by extension, ``linPowByPin[i]`` is found at ``getPinCoordinates()[i]``. 2. Your assembly has at least 60 degree symmetry of fuel pins and powers. This means if we find a fuel pin and rotate it 60 degrees, there should be another fuel pin at that lattice site. This is mostly a safe assumption since many hexagonal reactors have at least 60 degree symmetry of fuel pin layout. This assumption holds if you have a full hexagonal lattice of fuel pins as well. 3. 
Fuel pins in ``a`` have similar locations in ``aPrev``. This is a safe assumption in that most fuel assemblies have similar layouts so it's plausible that if ``a`` has a fuel pin at ``(1, 0, 0)`` so does ``aPrev``. .. impl:: Provide an algorithm for rotating hexagonal assemblies to equalize burnup :id: I_ARMI_ROTATE_HEX_BURNUP :implements: R_ARMI_ROTATE_HEX_BURNUP This method will return a rotation such that the highest-burnup pin moves to the hex location with the lowest expect pin number. This rotation will be optimal in the sense that it will minimize peak burnup. Parameters ---------- a : Assembly object The assembly that is being rotated. aPrev : Assembly object The assembly that previously occupied this location (before the last shuffle). If the assembly "a" was not shuffled, it's sufficient to pass ``a``. Returns ------- int An integer from 0 to 5 representing the number of pi/3 (60 degree) counterclockwise rotations from where ``a`` is currently oriented to the "optimal" orientation Raises ------ ValueError If there is insufficient information to determine the rotation of ``a``. This could be due to a lack of fuel blocks or parameters like ``linPowByPin``. """ maxBuBlock = maxBurnupBlock(a) if maxBuBlock.spatialGrid is None: msg = f"Block {maxBuBlock} in {a} does not have a spatial grid. Cannot rotate." runLog.error(msg) raise ValueError(msg) maxBuPinLocation = maxBurnupLocator(maxBuBlock) # No need to rotate if max burnup pin is the center if maxBuPinLocation.i == 0 and maxBuPinLocation.j == 0: return 0 if aPrev is not a: blockAtPreviousLocation = aPrev[a.index(maxBuBlock)] else: blockAtPreviousLocation = maxBuBlock previousLocations = blockAtPreviousLocation.getPinLocations() previousPowers = blockAtPreviousLocation.p.linPowByPin if len(previousLocations) != len(previousPowers): msg = ( f"Inconsistent pin powers and number of pins in {blockAtPreviousLocation}. " f"Found {len(previousLocations)} locations but {len(previousPowers)} powers." 
) runLog.error(msg) raise ValueError(msg) ringPowers = {(loc.i, loc.j): p for loc, p in zip(previousLocations, previousPowers)} targetGrid = blockAtPreviousLocation.spatialGrid candidateRotation = 0 candidatePower = ringPowers.get((maxBuPinLocation.i, maxBuPinLocation.j), math.inf) for rot in range(1, 6): candidateLocation = targetGrid.rotateIndex(maxBuPinLocation, rot) newPower = ringPowers.get((candidateLocation.i, candidateLocation.j), math.inf) if newPower < candidatePower: candidateRotation = rot candidatePower = newPower return candidateRotation def buildRingSchedule( maxRingInCore, chargeRing=None, dischargeRing=None, jumpRingFrom=None, jumpRingTo=None, coarseFactor=0.0, ): r""" Build a ring schedule for shuffling. Notes ----- General enough to do convergent, divergent, or any combo, plus jumprings. The center of the core is ring 1, based on the DIF3D numbering scheme. Jump ring behavior can be generalized by first building a base ring list where assemblies get charged to H and discharge from A:: [A, B, C, D, E, F, G, H] If a jump should be placed where it jumps from ring G to C, reversed back to F, and then discharges from A, we simply reverse the sublist [C,D,E,F], leaving us with:: [A, B, F, E, D, C, G, H] A less-complex, more standard convergent-divergent scheme is a subcase of this, where the sublist [A,B,C,D,E] or so is reversed, leaving:: [E, D, C, B, A, F, G, H] So the task of this function is simply to determine what subsection, if any, to reverse of the baselist. Parameters ---------- maxRingInCore : int The number of rings in the hex assembly reactor. chargeRing : int, optional The peripheral ring into which an assembly enters the core. Default is outermost ring. dischargeRing : int, optional The last ring an assembly sits in before discharging. Default is jumpRing-1 jumpRingFrom : int The last ring an assembly sits in before jumping to the center jumpRingTo : int, optional The inner ring into which a jumping assembly jumps. Default is 1. 
coarseFactor : float, optional A number between 0 and 1 where 0 hits all rings and 1 only hits the outer, rJ, center, and rD rings. This allows coarse shuffling, with large jumps. Default: 0 Returns ------- ringSchedule : list A list of rings in order from discharge to charge. ringWidths : list A list of integers corresponding to the ringSchedule determining the widths of each ring area Examples -------- >>> f.buildRingSchedule(17, 1, jumpRingFrom=14) ([13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, 15, 16, 17], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) """ if dischargeRing > maxRingInCore: runLog.warning( f"Discharge ring {dischargeRing} is outside the core (max {maxRingInCore}). Changing it to be the max ring" ) dischargeRing = maxRingInCore if chargeRing > maxRingInCore: runLog.warning( f"Charge ring {chargeRing} is outside the core (max {maxRingInCore}). Changing it to be the max ring." ) chargeRing = maxRingInCore # process arguments if dischargeRing is None: # No discharge ring given, so we default to converging from outside to inside # and therefore discharging from the center dischargeRing = 1 if chargeRing is None: # Charge ring not specified. Since we default to convergent shuffling, we # must insert the fuel at the periphery. chargeRing = maxRingInCore if jumpRingFrom is not None and not (1 < jumpRingFrom < maxRingInCore): raise ValueError(f"JumpRingFrom {jumpRingFrom} is not in the core.") if jumpRingTo is not None and not (1 <= jumpRingTo < maxRingInCore): raise ValueError(f"JumpRingTo {jumpRingTo} is not in the core.") if chargeRing > dischargeRing and jumpRingTo is None: # a convergent shuffle with no jumping. By setting # jumpRingTo to be 1, no jumping will be activated # in the later logic. jumpRingTo = 1 elif jumpRingTo is None: # divergent case. Disable jumpring by putting jumpring at periphery. 
jumpRingTo = maxRingInCore if chargeRing > dischargeRing and jumpRingFrom is not None and jumpRingFrom < jumpRingTo: raise RuntimeError("Cannot have outward jumps in convergent cases.") if chargeRing < dischargeRing and jumpRingFrom is not None and jumpRingFrom > jumpRingTo: raise RuntimeError("Cannot have inward jumps in divergent cases.") # step 1: build the base rings numSteps = int((abs(dischargeRing - chargeRing) + 1) * (1.0 - coarseFactor)) # don't let it be smaller than 2 because linspace(1,5,1)= [1], linspace(1,5,2)= [1,5] numSteps = max(numSteps, 2) baseRings = [int(ring) for ring in np.linspace(dischargeRing, chargeRing, numSteps)] # eliminate duplicates. newBaseRings = [] for br in baseRings: if br not in newBaseRings: newBaseRings.append(br) baseRings = newBaseRings # build widths widths = [] for i, ring in enumerate(baseRings[:-1]): # 0 is the most restrictive, meaning don't even look in other rings. widths.append(abs(baseRings[i + 1] - ring) - 1) widths.append(0) # add the last ring with width 0. # step 2: locate which rings should be reversed to give the jump-ring effect. if jumpRingFrom is not None: _closestRingFrom, jumpRingFromIndex = findClosest(baseRings, jumpRingFrom, indx=True) _closestRingTo, jumpRingToIndex = findClosest(baseRings, jumpRingTo, indx=True) else: jumpRingToIndex = 0 # step 3: build the final ring list, potentially with a reversed section newBaseRings = [] newWidths = [] # add in the non-reversed section before the reversed section if jumpRingFrom is not None: newBaseRings.extend(baseRings[:jumpRingToIndex]) newWidths.extend(widths[:jumpRingToIndex]) # add in reversed section that is jumped newBaseRings.extend(reversed(baseRings[jumpRingToIndex:jumpRingFromIndex])) newWidths.extend(reversed(widths[jumpRingToIndex:jumpRingFromIndex])) # add the rest. newBaseRings.extend(baseRings[jumpRingFromIndex:]) newWidths.extend(widths[jumpRingFromIndex:]) else: # no jump section. Just fill in the rest. 
newBaseRings.extend(baseRings[jumpRingToIndex:]) newWidths.extend(widths[jumpRingToIndex:]) return newBaseRings, newWidths def buildConvergentRingSchedule(chargeRing, dischargeRing=1, coarseFactor=0.0): r""" Builds a ring schedule for convergent shuffling from ``chargeRing`` to ``dischargeRing``. Parameters ---------- chargeRing : int The peripheral ring into which an assembly enters the core. A good default is outermost ring: ``r.core.getNumRings()``. dischargeRing : int, optional The last ring an assembly sits in before discharging. If no discharge, this is the one that gets placed where the charge happens. Default: Innermost ring coarseFactor : float, optional A number between 0 and 1 where 0 hits all rings and 1 only hits the outer, rJ, center, and rD rings. This allows coarse shuffling, with large jumps. Default: 0 Returns ------- convergent : list A list of rings in order from discharge to charge. conWidths : list A list of integers corresponding to the ringSchedule determining the widths of each ring area """ # step 1: build the convergent rings numSteps = int((chargeRing - dischargeRing + 1) * (1.0 - coarseFactor)) # don't let it be smaller than 2 because linspace(1,5,1)= [1], linspace(1,5,2)= [1,5] numSteps = max(numSteps, 2) convergent = [int(ring) for ring in np.linspace(dischargeRing, chargeRing, numSteps)] # step 2. eliminate duplicates convergent = sorted(list(set(convergent))) # step 3. compute widths conWidths = [] for i, ring in enumerate(convergent[:-1]): conWidths.append(convergent[i + 1] - ring) conWidths.append(1) # step 4. assemble and return return convergent, conWidths ================================================ FILE: armi/physics/fuelCycle/settings.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings for generic fuel cycle code."""

import importlib.util

from armi.settings import setting, settingsValidation

# Setting-name constants. These are shared by the Setting definitions below and by
# the fuel cycle code (and its tests) that reads the values off the case settings.
CONF_ASSEM_ROTATION_STATIONARY = "assemblyRotationStationary"
CONF_ASSEMBLY_ROTATION_ALG = "assemblyRotationAlgorithm"
CONF_CIRCULAR_RING_MODE = "circularRingMode"
CONF_FUEL_HANDLER_NAME = "fuelHandlerName"
CONF_SHUFFLE_SEQUENCE_FILE = "shuffleSequenceFile"
CONF_JUMP_RING_NUM = "jumpRingNum"
CONF_LEVELS_PER_CASCADE = "levelsPerCascade"
CONF_PLOT_SHUFFLE_ARROWS = "plotShuffleArrows"
CONF_RUN_LATTICE_BEFORE_SHUFFLING = "runLatticePhysicsBeforeShuffling"
CONF_SHUFFLE_LOGIC = "shuffleLogic"


def getFuelCycleSettings():
    """Define settings for fuel cycle.

    Returns
    -------
    list of setting.Setting
        The Setting objects this plugin contributes to the case settings.
    """
    settings = [
        # Which rotation algorithm (if any) to apply to detail assemblies during shuffling.
        setting.Setting(
            CONF_ASSEMBLY_ROTATION_ALG,
            default="",
            label="Assembly Rotation Algorithm",
            description="The algorithm to use to rotate the detail assemblies while shuffling",
            options=["", "buReducingAssemblyRotation", "simpleAssemblyRotation"],
            enforcedOptions=True,
        ),
        setting.Setting(
            CONF_ASSEM_ROTATION_STATIONARY,
            default=False,
            label="Rotate stationary assems",
            description=(
                "Whether or not to rotate assemblies that are not shuffled.This can only be True if 'rotation' is true."
            ),
        ),
        setting.Setting(
            CONF_CIRCULAR_RING_MODE,
            default=False,
            description="Toggle between circular ring definitions to hexagonal ring definitions",
            label="Use Circular Rings",
        ),
        setting.Setting(
            CONF_RUN_LATTICE_BEFORE_SHUFFLING,
            default=False,
            description=(
                "Forces the Generation of Cross Sections Prior to Shuffling the Fuel Assemblies. "
                "Note: This is recommended when performing equilibrium shuffling branching searches."
            ),
            label="Generate XS Prior to Fuel Shuffling",
        ),
        # Path (file or dotted module) to the user-defined shuffle logic; validated below.
        setting.Setting(
            CONF_SHUFFLE_LOGIC,
            default="",
            label="Shuffle Logic",
            description=(
                "Path to a Python script or dotted module path that handles the fuel shuffling "
                "for this case. This is user-defined per run as a dynamic input."
            ),
        ),
        setting.Setting(
            CONF_SHUFFLE_SEQUENCE_FILE,
            default="",
            label="Shuffle Sequence File",
            description="Path to a YAML file defining a custom shuffle sequence",
        ),
        # Name of the FuelHandler subclass to pull out of the shuffle logic module.
        setting.Setting(
            CONF_FUEL_HANDLER_NAME,
            default="",
            label="Fuel Handler Name",
            description="The name of the FuelHandler class in the shuffle logic module to activate",
        ),
        setting.Setting(
            CONF_PLOT_SHUFFLE_ARROWS,
            default=False,
            description="Make plots with arrows showing each move.",
            label="Plot shuffle arrows",
        ),
        setting.Setting(
            CONF_JUMP_RING_NUM,
            default=8,
            label="Jump Ring Number",
            description="The number of hex rings jumped when distributing the feed assemblies in "
            "the alternating concentric rings or checkerboard shuffle patterns (convergent / "
            "divergent shuffling).",
        ),
        setting.Setting(
            CONF_LEVELS_PER_CASCADE,
            default=14,
            label="Move per cascade",
            description="The number of moves made per cascade when performing convergent or "
            "divergent shuffle patterns.",
        ),
    ]
    return settings


def getFuelCycleSettingValidators(inspector):
    """Build input-validation queries for the fuel cycle settings.

    Parameters
    ----------
    inspector
        The settings inspector whose ``cs`` attribute holds the case settings being
        checked; also supplies ``NO_ACTION`` and path-resolution helpers.

    Returns
    -------
    list of settingsValidation.Query
        Queries that flag inconsistent or unresolvable shuffle-related inputs.
    """
    queries = []

    # shuffleLogic and fuelHandlerName only make sense together: XOR catches the case
    # where exactly one of the pair was provided.
    queries.append(
        settingsValidation.Query(
            lambda: bool(inspector.cs[CONF_SHUFFLE_LOGIC]) ^ bool(inspector.cs[CONF_FUEL_HANDLER_NAME]),
            "A value was provided for `fuelHandlerName` or `shuffleLogic`, but not "
            "the other. Either both `fuelHandlerName` and `shuffleLogic` should be "
            "defined, or neither of them.",
            "",
            inspector.NO_ACTION,
        )
    )

    # Spaces in the shuffle logic path would break downstream path/module handling.
    queries.append(
        settingsValidation.Query(
            lambda: " " in inspector.cs[CONF_SHUFFLE_LOGIC],
            "Spaces are not allowed in shuffleLogic file location. You have specified {0}. "
            "Shuffling will not occur.".format(inspector.cs[CONF_SHUFFLE_LOGIC]),
            "",
            inspector.NO_ACTION,
        )
    )

    # A shuffle sequence file, if given, must exist relative to the case inputs.
    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]
            and not inspector._csRelativePathExists(inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]),
            "The specified shuffle sequence file '{0}' cannot be found.".format(
                inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]
            ),
            "",
            inspector.NO_ACTION,
        )
    )

    def _clearShufflingInput():
        # Corrective action for the query below: blank out both paired settings.
        inspector._assignCS(CONF_SHUFFLE_LOGIC, "")
        inspector._assignCS(CONF_FUEL_HANDLER_NAME, "")

    # shuffleLogic must resolve either as a file on disk or as an importable module
    # (importlib.util.find_spec returns None when the dotted path cannot be found).
    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_SHUFFLE_LOGIC]
            and not inspector._csRelativePathExists(inspector.cs[CONF_SHUFFLE_LOGIC])
            and importlib.util.find_spec(inspector.cs[CONF_SHUFFLE_LOGIC]) is None,
            "The specified shuffle logic module or file '{0}' cannot be found. Shuffling will not occur.".format(
                inspector.cs[CONF_SHUFFLE_LOGIC]
            ),
            "Clear specified file value?",
            _clearShufflingInput,
        )
    )

    return queries


================================================
FILE: armi/physics/fuelCycle/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/fuelCycle/tests/_customFuelHandlerModule.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for :mod:`armi.physics.fuelCycle.fuelHandlerFactory`."""


class MockFileFuelHandler:
    """Fuel handler used when importing from a file path."""

    def __init__(self, operator):
        # Keep the operator so tests can confirm the factory passed it through.
        self.operator = operator


class MockModuleFuelHandler:
    """Fuel handler used when importing from a module path."""

    def __init__(self, operator):
        # Keep the operator so tests can confirm the factory passed it through.
        self.operator = operator


================================================
FILE: armi/physics/fuelCycle/tests/test_assemblyRotationAlgorithms.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for tools used to rotate hex assemblies.
Notes ----- These algorithms are defined in assemblyRotationAlgorithms.py, but they are used in: ``FuelHandler.outage()``. """ import copy import enum import math import typing from unittest import TestCase, mock import numpy as np from armi.physics.fuelCycle import assemblyRotationAlgorithms as rotAlgos from armi.physics.fuelCycle import fuelHandlers from armi.physics.fuelCycle.hexAssemblyFuelMgmtUtils import ( getOptimalAssemblyOrientation, ) from armi.physics.fuelCycle.settings import CONF_ASSEM_ROTATION_STATIONARY from armi.reactor.assemblies import HexAssembly from armi.reactor.blocks import HexBlock from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors class MockFuelHandler(fuelHandlers.FuelHandler): """Implements the entire interface but with empty methods.""" def chooseSwaps(self, *args, **kwargs): pass class _PinLocations(enum.IntEnum): """Zero-indexed locations for specific points of interest. If a data vector has an entry to all ``self.N_PINS=169`` pins in the test model, then ``data[PIN_LOCATIONS.UPPER_RIGHT_VERTEX]`` will access the data for the pin along the upper right 60 symmetry line. Since we're dealing with rotations here, it does not need to literally be the pin at the vertex. Just along the symmetry line to help explain tests. The use case here is setting the pin or burnup array to be a constant value, but using a single max or minimum value to determine rotation. 
""" CENTER = 0 UPPER_RIGHT_VERTEX = 1 UPPER_LEFT_VERTEX = 2 DUE_LEFT_VERTEX = 3 LOWER_LEFT_VERTEX = 4 LOWER_RIGHT_VERTEX = 5 DUE_RIGHT_VERTEX = 6 class ShuffleAndRotateTestHelper(TestCase): """Fixture class to assist in testing rotation of assemblies via the fuel handler.""" N_PINS = 169 def setUp(self): self.o, self.r = test_reactors.loadTestReactor() self.r.core.locateAllAssemblies() @staticmethod def ensureBlockHasSpatialGrid(b: HexBlock): """If ``b`` does not have a spatial grid, auto create one.""" if b.spatialGrid is None: b.getPinPitch = mock.Mock(return_value=1.1) b.autoCreateSpatialGrids() def setAssemblyPinBurnups(self, a: HexAssembly, burnups: np.ndarray): """Prepare the assembly that will be shuffled and rotated.""" peakBu = burnups.max() for b in a.getChildrenWithFlags(Flags.FUEL): self.ensureBlockHasSpatialGrid(b) b.p.percentBuPeak = peakBu for c in b.getChildrenWithFlags(Flags.FUEL): c.p.pinPercentBu = burnups def setAssemblyPinPowers(self, a: HexAssembly, pinPowers: np.ndarray): """Prep the assembly that existed at the site a shuffled assembly will occupy.""" for b in a.getChildrenWithFlags(Flags.FUEL): self.ensureBlockHasSpatialGrid(b) b.p.linPowByPin = pinPowers def powerWithMinValue(self, minIndex: int) -> np.ndarray: """Create a vector of pin powers with a minimum value at a given index.""" data = np.ones(self.N_PINS) data[minIndex] = 0 return data def burnupWithMaxValue(self, maxIndex: int) -> np.ndarray: """Create a vector of pin burnups with a maximum value at a given index.""" data = np.zeros(self.N_PINS) data[maxIndex] = 50 return data def compareMockedToExpectedRotation(self, nRotations: int, mRotate: mock.Mock, msg: typing.Optional[str] = None): """Helper function to check the mocked rotate and compare against expected rotation.""" expectedRadians = nRotations * math.pi / 3 (actualRadians,) = mRotate.call_args.args self.assertAlmostEqual(actualRadians, expectedRadians, msg=msg) class 
TestOptimalAssemblyRotation(ShuffleAndRotateTestHelper): """Test the burnup dependent assembly rotation methods.""" def setUp(self): super().setUp() self.assembly: HexAssembly = self.r.core.getFirstAssembly(Flags.FUEL) def test_flatPowerNoRotation(self): """If all pin powers are identical, no rotation is suggested.""" burnups = self.burnupWithMaxValue(_PinLocations.UPPER_LEFT_VERTEX) powers = np.ones_like(burnups) self.setAssemblyPinBurnups(self.assembly, burnups) self.setAssemblyPinPowers(self.assembly, powers) rot = getOptimalAssemblyOrientation(self.assembly, self.assembly) self.assertEqual(rot, 0) def test_maxBurnupAtCenterNoRotation(self): """If max burnup pin is at the center, no rotation is suggested.""" burnups = self.burnupWithMaxValue(_PinLocations.CENTER) powers = np.zeros_like(burnups) self.setAssemblyPinBurnups(self.assembly, burnups) self.setAssemblyPinPowers(self.assembly, powers) rot = getOptimalAssemblyOrientation(self.assembly, self.assembly) self.assertEqual(rot, 0) def test_oppositeRotation(self): """Test a 180 degree rotation is suggested when the max burnup pin is opposite the lowest power pin. Use the second ring of the hexagon because it's easier to write out pin locations and check work. .. test:: Test the burnup equalizing rotation algorithm. :id: T_ARMI_ROTATE_HEX_BURNUP :tests: R_ARMI_ROTATE_HEX_BURNUP :acceptance_criteria: After rotating a hexagonal assembly, confirm the pin with the highest burnup is in the same sector as pin with the lowest power in the high burnup pin's ring. Notes ----- Use zero-indexed pin location not pin ID to assign burnups and powers. 
Since we have a single component, ``Block.p.linPowByPin[i] <-> Component.p.pinPercentBu[i]`` """ shuffledAssembly = self.assembly previousAssembly = copy.deepcopy(shuffledAssembly) pairs = ( (_PinLocations.DUE_RIGHT_VERTEX, _PinLocations.DUE_LEFT_VERTEX), (_PinLocations.UPPER_LEFT_VERTEX, _PinLocations.LOWER_RIGHT_VERTEX), (_PinLocations.UPPER_RIGHT_VERTEX, _PinLocations.LOWER_LEFT_VERTEX), (_PinLocations.DUE_LEFT_VERTEX, _PinLocations.DUE_RIGHT_VERTEX), (_PinLocations.LOWER_RIGHT_VERTEX, _PinLocations.UPPER_LEFT_VERTEX), (_PinLocations.LOWER_LEFT_VERTEX, _PinLocations.UPPER_RIGHT_VERTEX), ) for startPin, oppositePin in pairs: powers = self.powerWithMinValue(oppositePin) burnups = self.burnupWithMaxValue(startPin) self.setAssemblyPinBurnups(shuffledAssembly, burnups) self.setAssemblyPinPowers(previousAssembly, powers) rot = getOptimalAssemblyOrientation(shuffledAssembly, previousAssembly) # 180 degrees is three 60 degree rotations self.assertEqual(rot, 3, msg=f"{startPin=} :: {oppositePin=}") def test_noBlocksWithBurnup(self): """Require at least one block to have burnup.""" with self.assertRaisesRegex(ValueError, "Error finding max burnup"): getOptimalAssemblyOrientation(self.assembly, self.assembly) def test_mismatchPinPowersAndLocations(self): """Require pin powers and locations to be have the same length.""" powers = np.arange(self.N_PINS + 1) burnups = np.arange(self.N_PINS) self.setAssemblyPinBurnups(self.assembly, burnups) self.setAssemblyPinPowers(self.assembly, powers) with self.assertRaisesRegex(ValueError, "Inconsistent pin powers and number of pins"): getOptimalAssemblyOrientation(self.assembly, self.assembly) class TestFuelHandlerMgmtTools(ShuffleAndRotateTestHelper): def test_buRotationWithFreshFeed(self): """Test that rotation works if a new assembly is swapped with fresh fuel. Fresh feed assemblies will not exist in the reactor, and various checks that try to the "previous" assembly's location can fail. 
""" newSettings = { "fluxRecon": True, "assemblyRotationAlgorithm": "buReducingAssemblyRotation", } self.o.cs = self.o.cs.modified(newSettings=newSettings) fresh = self.r.core.createFreshFeed(self.o.cs) self.assertEqual(fresh.lastLocationLabel, HexAssembly.LOAD_QUEUE) fh = MockFuelHandler(self.o) fh.chooseSwaps = mock.Mock(side_effect=lambda _: fh.moved.append(fresh)) with mock.patch( "armi.physics.fuelCycle.assemblyRotationAlgorithms.getOptimalAssemblyOrientation", ) as p: fh.outage() # The only moved assembly was most recently outside the core so we have no need to rotate # Make sure our fake chooseSwaps added the fresh assembly to the moved assemblies fh.chooseSwaps.assert_called_once() p.assert_not_called() def test_buRotationWithStationaryRotation(self): """Test that the burnup equalizing rotation algorithm works on non-shuffled assemblies.""" newSettings = { CONF_ASSEM_ROTATION_STATIONARY: True, "fluxRecon": True, "assemblyRotationAlgorithm": "buReducingAssemblyRotation", } self.o.cs = self.o.cs.modified(newSettings=newSettings) # Grab two assemblies that were not moved. 
One of which will have the detailed information # needed for rotation detailedAssem, coarseAssem = self.o.r.core.getChildrenWithFlags(Flags.FUEL)[:2] self.setAssemblyPinBurnups(detailedAssem, burnups=np.arange(self.N_PINS)) self.setAssemblyPinPowers(detailedAssem, pinPowers=np.arange(self.N_PINS)) detailedAssem.rotate = mock.Mock() coarseAssem.rotate = mock.Mock() fh = MockFuelHandler(self.o) with mock.patch( "armi.physics.fuelCycle.assemblyRotationAlgorithms.getOptimalAssemblyOrientation", return_value=5, ) as p: fh.outage() p.assert_called_once_with(detailedAssem, detailedAssem) # Assembly with detailed pin powers and pin burnups will be rotated detailedAssem.rotate.assert_called_once() self.compareMockedToExpectedRotation(5, detailedAssem.rotate) # Assembly without pin level data will not be rotated coarseAssem.rotate.assert_not_called() def test_rotateInShuffleQueue(self): """Test for expected behavior when multiple assemblies are shuffled and rotated in one outage. Examine the behavior of three assemblies: ``first -> second -> third`` 1. ``first`` is moved to the location of ``second`` and rotated by comparing ``first`` burnup against ``second`` pin powers. 2. ``second`` is moved to the location of ``third`` and rotated by comparing ``second`` burnup against ``third`` pin powers. where: * ``first`` burnup is maximized in the upper left direction. * ``second`` pin power is minimized along the lower left direction. * ``second`` burnup is maximized in the upper right direction. * ``third`` pin power is minimized in the direct right direction. We should expect: 1. ``first`` is rotated from upper left to lower left => two 60 degree CCW rotations. 2. ``second`` is rotated from upper right to direct right => five 60 degree CCW rotations. 
""" newSettings = { CONF_ASSEM_ROTATION_STATIONARY: False, "fluxRecon": True, "assemblyRotationAlgorithm": "buReducingAssemblyRotation", } self.o.cs = self.o.cs.modified(newSettings=newSettings) first, second, third = self.r.core.getChildrenWithFlags(Flags.FUEL)[:3] firstBurnups = self.burnupWithMaxValue(_PinLocations.UPPER_LEFT_VERTEX) self.setAssemblyPinBurnups(first, firstBurnups) secondPowers = self.powerWithMinValue(_PinLocations.LOWER_LEFT_VERTEX) self.setAssemblyPinPowers(second, pinPowers=secondPowers) secondBurnups = self.burnupWithMaxValue(_PinLocations.UPPER_RIGHT_VERTEX) self.setAssemblyPinBurnups(second, burnups=secondBurnups) thirdPowers = self.powerWithMinValue(_PinLocations.DUE_RIGHT_VERTEX) self.setAssemblyPinPowers(third, thirdPowers) # Set the shuffling sequence # first -> second # second -> third second.lastLocationLabel = first.getLocation() third.lastLocationLabel = second.getLocation() first.rotate = mock.Mock() second.rotate = mock.Mock() third.rotate = mock.Mock() fh = MockFuelHandler(self.o) fh.chooseSwaps = mock.Mock(side_effect=lambda _: fh.moved.extend([second, third])) fh.outage() first.rotate.assert_called_once() self.compareMockedToExpectedRotation(2, first.rotate, "First") second.rotate.assert_called_once() self.compareMockedToExpectedRotation(5, second.rotate, "Second") third.rotate.assert_not_called() class SimpleRotationTests(ShuffleAndRotateTestHelper): """Test the simple rotation where assemblies are rotated a fixed amount.""" def test_simpleAssemblyRotation(self): """Test rotating assemblies 120 degrees with two rotation events.""" fh = fuelHandlers.FuelHandler(self.o) newSettings = {CONF_ASSEM_ROTATION_STATIONARY: True} self.o.cs = self.o.cs.modified(newSettings=newSettings) hist = self.o.getInterface("history") assems = hist.o.r.core.getAssemblies(Flags.FUEL)[:5] # add some detailed assemblies for a in assems: hist.detailAssemblyNames.append(a.getName()) b = self.o.r.core.getFirstBlock(Flags.FUEL) rotNum = b.getRotationNum() 
rotAlgos.simpleAssemblyRotation(fh) rotAlgos.simpleAssemblyRotation(fh) self.assertEqual(b.getRotationNum(), rotNum + 2) ================================================ FILE: armi/physics/fuelCycle/tests/test_fuelHandlerFactory.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for :mod:`armi.physics.fuelCycle.fuelHandlerFactory`.""" import unittest from pathlib import Path from armi.physics.fuelCycle import fuelHandlerFactory from armi.physics.fuelCycle.settings import CONF_FUEL_HANDLER_NAME, CONF_SHUFFLE_LOGIC from armi.physics.fuelCycle.tests import _customFuelHandlerModule class _DummySettings(dict): """Minimal stand-in for :class:`armi.settings.Settings`.""" class _DummyOperator: """Operator stub that only exposes the settings object.""" def __init__(self, settings): self.cs = settings class FuelHandlerFactoryTests(unittest.TestCase): """Exercise the custom module import logic.""" def setUp(self): self.inputDirectory = Path(__file__).resolve().parents[3] self.settings = _DummySettings() self.settings.inputDirectory = str(self.inputDirectory) self.operator = _DummyOperator(self.settings) def test_filePath(self): """Custom handlers can still be loaded from explicit file paths.""" modulePath = Path(__file__).resolve().with_name("_customFuelHandlerModule.py") self.settings.update( { CONF_FUEL_HANDLER_NAME: "MockFileFuelHandler", CONF_SHUFFLE_LOGIC: str(modulePath), } ) handler = 
fuelHandlerFactory.fuelHandlerFactory(self.operator) self.assertEqual(handler.__class__.__name__, "MockFileFuelHandler") def test_modulePath(self): """Module-style paths are imported using :mod:`importlib`.""" moduleName = "armi.physics.fuelCycle.tests._customFuelHandlerModule" self.settings.update( { CONF_FUEL_HANDLER_NAME: "MockModuleFuelHandler", CONF_SHUFFLE_LOGIC: moduleName, } ) handler = fuelHandlerFactory.fuelHandlerFactory(self.operator) self.assertIsInstance(handler, _customFuelHandlerModule.MockModuleFuelHandler) ================================================ FILE: armi/physics/fuelCycle/tests/test_fuelHandlers.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests some capabilities of the fuel handling machine. This test is high enough level that it requires input files to be present. 
The ones to use are called armiRun.yaml which is located in armi.tests """ import collections import copy import os import tempfile import unittest from unittest.mock import PropertyMock, patch import numpy as np from armi.physics.fuelCycle import fuelHandlers, settings from armi.physics.fuelCycle.fuelHandlers import AssemblyMove from armi.physics.fuelCycle.settings import ( CONF_ASSEM_ROTATION_STATIONARY, CONF_ASSEMBLY_ROTATION_ALG, CONF_PLOT_SHUFFLE_ARROWS, CONF_RUN_LATTICE_BEFORE_SHUFFLING, CONF_SHUFFLE_SEQUENCE_FILE, ) from armi.physics.neutronics.crossSectionGroupManager import CrossSectionGroupManager from armi.physics.neutronics.latticePhysics.latticePhysicsInterface import ( LatticePhysicsInterface, ) from armi.reactor import assemblies, blocks, components, grids from armi.reactor.flags import Flags from armi.reactor.parameters import ParamLocation from armi.reactor.tests import test_reactors from armi.reactor.zones import Zone from armi.settings import caseSettings from armi.settings.fwSettings.globalSettings import CONF_TRACK_ASSEMS from armi.testing import TESTING_ROOT from armi.tests import TEST_ROOT, ArmiTestHelper, mockRunLogs from armi.utils import directoryChangers from armi.utils.customExceptions import InputError class TestReadMovesYamlErrors(unittest.TestCase): """Ensure malformed YAML inputs raise informative ``InputError``.""" def _run(self, text): with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tf: tf.write(text) fname = tf.name try: fuelHandlers.FuelHandler.readMovesYaml(fname) finally: os.remove(fname) def test_missingSequence(self): yaml_text = "foo: []\n" with self.assertRaisesRegex(InputError, "sequence"): self._run(yaml_text) def test_duplicateCycle(self): yaml_text = "sequence:\n 1: []\n 1: []\n" with self.assertRaisesRegex(InputError, r"(?i)\bduplicate key\b"): self._run(yaml_text) def test_unknownActionKey(self): yaml_text = "sequence:\n 1:\n - badAction: []\n" with self.assertRaisesRegex(InputError, "Unknown 
action"): self._run(yaml_text) def test_badCascade(self): cases = [ ("sequence:\n 1:\n - cascade: ['only']\n", "cascade"), ("sequence:\n 1:\n - cascade: ['outer fuel', 1]\n", "cascade"), ] for yaml_text, msg in cases: with self.subTest(yaml_text=yaml_text): with self.assertRaisesRegex(InputError, msg): self._run(yaml_text) def test_badSwap(self): yaml_text = "sequence:\n 1:\n - swap: ['009-045']\n" with self.assertRaisesRegex(InputError, "swap"): self._run(yaml_text) def test_badFuelEnrichment(self): cases = [ ( """sequence:\n 1:\n - cascade: ['outer fuel', '009-045']\n fuelEnrichment: ['a']\n""", "fuelEnrichment", ), ( """sequence:\n 1:\n - cascade: ['outer fuel', '009-045']\n fuelEnrichment: [-1]\n""", "fuelEnrichment", ), ( """sequence:\n 1:\n - cascade: ['outer fuel', '009-045']\n fuelEnrichment: [101]\n""", "fuelEnrichment", ), ] for yaml_text, msg in cases: with self.subTest(yaml_text=yaml_text): with self.assertRaisesRegex(InputError, msg): self._run(yaml_text) def test_rotationInvalidLocation(self): yaml_text = "sequence:\n 1:\n - extraRotations: {'badLoc': 30}\n" with self.assertRaisesRegex(InputError, "Invalid location"): self._run(yaml_text) def test_duplicateCascadeLocation(self): yaml_text = ( "sequence:\n 1:\n - cascade: ['outer', '009-045', '008-001']\n" " - cascade: ['outer', '009-045', '007-002']\n" ) with self.assertRaisesRegex(InputError, "009-045"): self._run(yaml_text) def test_invalidCascadeLocation(self): yaml_text = "sequence:\n 1:\n - cascade: ['outer', 'badLoc']\n" with self.assertRaisesRegex(InputError, "Invalid location"): self._run(yaml_text) def test_missingCycle(self): yaml_text = "sequence:\n 1: []\n 3: []\n" with self.assertRaisesRegex(InputError, "Missing cycle 2"): self._run(yaml_text) class TestReadMovesYamlFeatures(unittest.TestCase): """Miscellaneous behavior of :meth:`FuelHandler.readMovesYaml`.""" def _read(self, text): with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tf: tf.write(text) fname = tf.name 
try: moves, _ = fuelHandlers.FuelHandler.readMovesYaml(fname) return moves finally: os.remove(fname) def test_cyclesOutOfOrder(self): yaml_text = "sequence:\n 1: []\n 2: []\n 4: []\n 3: []\n" moves = self._read(yaml_text) self.assertEqual(list(moves), [1, 2, 4, 3]) class FuelHandlerTestHelper(ArmiTestHelper): @classmethod def setUpClass(cls): # prepare the input files. This is important so the unit tests run from wherever # they need to run from. cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT, dumpOnException=False) cls.directoryChanger.open() @classmethod def tearDownClass(cls): cls.directoryChanger.close() def setUp(self): """ Build a dummy reactor without using input files. There are some igniters and feeds but none of these have any number densities. """ self.o, self.r = test_reactors.loadTestReactor( self.directoryChanger.destination, customSettings={"nCycles": 4, "trackAssems": True}, ) allBlocks = self.r.core.getBlocks() fakeBu = 30.0 / len(allBlocks) for bi, b in enumerate(allBlocks): b.p.flux = 5e10 if b.isFuel(): b.p.percentBu = fakeBu * bi self.nfeed = len(self.r.core.getAssemblies(Flags.FEED)) self.nigniter = len(self.r.core.getAssemblies(Flags.IGNITER)) self.nSfp = len(self.r.excore["sfp"]) # generate a reactor with assemblies # generate components with materials nPins = 271 fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 1.0, "id": 0.0, "mult": nPins} fuel = components.Circle("fuel", "UZr", **fuelDims) cladDims = {"Tinput": 273.0, "Thot": 273.0, "od": 1.1, "id": 1.0, "mult": nPins} clad = components.Circle("clad", "HT9", **cladDims) interDims = { "Tinput": 273.0, "Thot": 273.0, "op": 16.8, "ip": 16.0, "mult": 1.0, } interSodium = components.Hexagon("interCoolant", "Sodium", **interDims) # generate a block self.block = blocks.HexBlock("TestHexBlock") self.block.setType("fuel") self.block.setHeight(10.0) self.block.add(fuel) self.block.add(clad) self.block.add(interSodium) # generate an assembly self.assembly = 
assemblies.HexAssembly("TestAssemblyType") self.assembly.spatialGrid = grids.AxialGrid.fromNCells(1) for _ in range(1): self.assembly.add(copy.deepcopy(self.block)) # copy the assembly to make a list of assemblies and have a reference assembly self.aList = [] for _ in range(6): self.aList.append(copy.deepcopy(self.assembly)) self.refAssembly = copy.deepcopy(self.assembly) self.directoryChanger.open() self.r.core.locateAllAssemblies() def tearDown(self): # clean up the test self.block = None self.assembly = None self.aList = None self.refAssembly = None self.r = None self.o = None self.directoryChanger.close() class MockLatticePhysicsInterface(LatticePhysicsInterface): """A mock lattice physics interface that does nothing for interactBOC.""" name = "MockLatticePhysicsInterface" def _getExecutablePath(self): return "/mock/" def interactBOC(self, cycle=None): pass class MockXSGM(CrossSectionGroupManager): """A mock cross section group manager that does nothing for interactBOC.""" def interactBOC(self, cycle=None): pass class TestFuelHandler(FuelHandlerTestHelper): @patch("armi.reactor.assemblies.Assembly.getSymmetryFactor") def test_getParamMax(self, mockGetSymmetry): a = self.assembly mockGetSymmetry.return_value = 1 expectedValue = 0.5 a.p["kInf"] = expectedValue for b in a: b.p["kInf"] = expectedValue with patch( "armi.reactor.parameters.parameterDefinitions.Parameter.location", new_callable=PropertyMock ) as mock_assemblyParameterLocation: mock_assemblyParameterLocation.return_value = ParamLocation.VOLUME_INTEGRATED # symmetry factor == 1 res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", True) self.assertEqual(res, expectedValue) res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", False) self.assertEqual(res, expectedValue) # symmetry factor == 3 mockGetSymmetry.return_value = 3 res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", True) self.assertAlmostEqual(res, expectedValue * 3) res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", False) 
self.assertAlmostEqual(res, expectedValue * 3) # not volume integrated and symmetry factor == 3 mock_assemblyParameterLocation.return_value = ParamLocation.AVERAGE res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", True) self.assertEqual(res, expectedValue) res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", False) self.assertEqual(res, expectedValue) def test_interactBOC(self): # set up mock interface self.o.addInterface(MockLatticePhysicsInterface(self.r, self.o.cs)) self.o.removeInterface(interfaceName="xsGroups") self.o.addInterface(MockXSGM(self.r, self.o.cs)) # adjust case settings self.o.cs[CONF_RUN_LATTICE_BEFORE_SHUFFLING] = True # run fhi.interactBOC fhi = self.o.getInterface("fuelHandler") with mockRunLogs.BufferLog() as mock: fhi.interactBOC() self.assertIn("lattice physics before fuel management due to the", mock._outputStream) def test_findHighBu(self): loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(5, 4) a = self.r.core.childrenByLocator[loc] # set burnup way over 1.0, which is otherwise the highest bu in the core a[0].p.percentBu = 50 fh = fuelHandlers.FuelHandler(self.o) a1 = fh.findAssembly(param="percentBu", compareTo=100, blockLevelMax=True, typeSpec=None) self.assertIs(a, a1) @patch("armi.physics.fuelCycle.fuelHandlers.FuelHandler.chooseSwaps") def test_outage(self, mockChooseSwaps): # mock up a fuel handler fh = fuelHandlers.FuelHandler(self.o) mockChooseSwaps.return_value = list(self.r.core.getAssemblies()) # edge case: cannot perform two outages on the same FuelHandler fh.moved = [self.r.core.getFirstAssembly()] with self.assertRaises(ValueError): fh.outage(factor=1.0) # edge case: fail if the shuffle file is missing fh.moved = [] self.o.cs = self.o.cs.modified(newSettings={"explicitRepeatShuffles": "fakePath"}) with self.assertRaises(RuntimeError): fh.outage(factor=1.0) # a successful run fh.moved = [] self.o.cs = self.o.cs.modified( newSettings={ "explicitRepeatShuffles": "", "fluxRecon": True, CONF_ASSEMBLY_ROTATION_ALG: 
"simpleAssemblyRotation", } ) fh.outage(factor=1.0) self.assertEqual(len(fh.moved), 0) def test_outageEdgeCase(self): """Check that an error is raised if the list of moved assemblies is invalid.""" class MockFH(fuelHandlers.FuelHandler): def chooseSwaps(self, factor=1.0): self.moved = [None] # mock up a fuel handler fh = MockFH(self.o) # test edge case with self.assertRaises(AttributeError): fh.outage(factor=1.0) def test_isAssemblyInAZone(self): # build a fuel handler fh = fuelHandlers.FuelHandler(self.o) # test the default value if there are no zones a = self.r.core.getFirstAssembly() self.assertTrue(fh.isAssemblyInAZone(None, a)) # If our assembly isn't in one of the supplied zones z = Zone("test_isAssemblyInAZone") self.assertFalse(fh.isAssemblyInAZone([z], a)) # If our assembly IS in one of the supplied zones z.addLoc(a.getLocation()) self.assertTrue(fh.isAssemblyInAZone([z], a)) def test_width(self): """Tests the width capability of findAssembly.""" fh = fuelHandlers.FuelHandler(self.o) assemsByRing = collections.defaultdict(list) for a in self.r.core: assemsByRing[a.spatialLocator.getRingPos()[0]].append(a) # instantiate reactor power. more power in more outer rings for ring, power in zip(range(1, 8), range(10, 80, 10)): aList = assemsByRing[ring] for a in aList: sf = a.getSymmetryFactor() # center assembly is only 1/3rd in the core for b in a: b.p.power = power / sf paramName = "power" # 1 ring outer and inner from ring 3 a = fh.findAssembly( targetRing=3, width=(1, 0), param=paramName, blockLevelMax=True, compareTo=100, ) ring = a.spatialLocator.getRingPos()[0] self.assertEqual( ring, 4, "The highest power ring returned is {0}. It should be {1}".format(ring, 4), ) a = fh.findAssembly(targetRing=3, width=(1, 0), param=paramName, blockLevelMax=True, compareTo=0) ring = a.spatialLocator.getRingPos()[0] self.assertEqual( ring, 2, "The lowest power ring returned is {0}. 
It should be {1}".format(ring, 2), ) # 2 rings outer from ring 3 a = fh.findAssembly( targetRing=3, width=(2, 1), param=paramName, blockLevelMax=True, compareTo=100, ) ring = a.spatialLocator.getRingPos()[0] self.assertEqual( ring, 5, "The highest power ring returned is {0}. It should be {1}".format(ring, 5), ) a = fh.findAssembly(targetRing=3, width=(2, 1), param=paramName, blockLevelMax=True, compareTo=0) ring = a.spatialLocator.getRingPos()[0] self.assertEqual( ring, 3, "The lowest power ring returned is {0}. It should be {1}".format(ring, 3), ) # 2 rings inner from ring 3 a = fh.findAssembly( targetRing=3, width=(2, -1), param=paramName, blockLevelMax=True, compareTo=100, ) ring = a.spatialLocator.getRingPos()[0] self.assertEqual( ring, 3, "The highest power ring returned is {0}. It should be {1}".format(ring, 3), ) a = fh.findAssembly( targetRing=3, width=(2, -1), param=paramName, blockLevelMax=True, compareTo=0, ) ring = a.spatialLocator.getRingPos()[0] self.assertEqual( ring, 1, "The lowest power ring returned is {0}. It should be {1}".format(ring, 1), ) def test_findMany(self): """Tests the ``findMany`` and type aspects of the fuel handler.""" fh = fuelHandlers.FuelHandler(self.o) igniters = fh.findAssembly(typeSpec=Flags.IGNITER | Flags.FUEL, findMany=True) feeds = fh.findAssembly(typeSpec=Flags.FEED | Flags.FUEL, findMany=True) fewFeeds = fh.findAssembly(typeSpec=Flags.FEED | Flags.FUEL, findMany=True, maxNumAssems=4) self.assertEqual( len(igniters), self.nigniter, "Found {0} igniters. Should have found {1}".format(len(igniters), self.nigniter), ) self.assertEqual( len(feeds), self.nfeed, "Found {0} feeds. 
Should have found {1}".format(len(igniters), self.nfeed), ) self.assertEqual( len(fewFeeds), 4, "Reduced findMany returned {0} assemblies instead of {1}".format(len(fewFeeds), 4), ) def test_findInSFP(self): """Tests ability to pull from the spent fuel pool.""" fh = fuelHandlers.FuelHandler(self.o) spent = fh.findAssembly( findMany=True, findFromSfp=True, param="percentBu", compareTo=100, blockLevelMax=True, ) self.assertEqual( len(spent), self.nSfp, "Found {0} assems in SFP. Should have found {1}".format(len(spent), self.nSfp), ) burnups = [a.getMaxParam("percentBu") for a in spent] bu = spent[0].getMaxParam("percentBu") self.assertEqual( bu, max(burnups), "First assembly does not have the highest burnup ({0}). It has ({1})".format(max(burnups), bu), ) def test_findByCoords(self): fh = fuelHandlers.FuelHandler(self.o) assem = fh.findAssembly(coords=(0, 0)) self.o.r.core.sortAssemsByRing() self.assertIs(assem, self.o.r.core[0]) def test_findWithMinMax(self): """Test the complex min/max comparators.""" fh = fuelHandlers.FuelHandler(self.o) assem = fh.findAssembly( param="percentBu", compareTo=100, blockLevelMax=True, minParam="percentBu", minVal=("percentBu", 0.1), maxParam="percentBu", maxVal=20.0, ) # the burnup should be the maximum bu within # up to a burnup of 20%, which by the simple # dummy data layout should be the 2/3rd block in the blocklist lastB = None for b in self.r.core.iterBlocks(Flags.FUEL): if b.p.percentBu > 20: break lastB = b expected = lastB.parent self.assertIs(assem, expected) # test the impossible: an block with burnup less than 110% of its own burnup assem = fh.findAssembly( param="percentBu", compareTo=100, blockLevelMax=True, minParam="percentBu", minVal=("percentBu", 1.1), ) self.assertIsNone(assem) def runShuffling(self, fh): """Shuffle fuel and write out a SHUFFLES.txt file.""" fh.attachReactor(self.o, self.r) # so we don't overwrite the version-controlled armiRun-SHUFFLES.txt self.o.cs.caseTitle = "armiRun2" fh.interactBOL() # 
        # expected assembly position history based on shuffling specification of this test.
        # do not blindly rebase these reference values. test failures using this dict
        # imply that the assembly shuffling definition has changed.
        expPosHist = {}
        # cycle 1 shuffle, (2, 1) moved to SFP
        expPosHist["A0005"] = [(2, 1), ("SFP", "SFP"), ("SFP", "SFP"), ("SFP", "SFP")]
        # cycle 1 shuffle, (3, 3) moved to (2, 1) in cascade
        # cycle 3 shuffle, (2, 1) moved to (5, 4)
        expPosHist["A0018"] = [(3, 3), (2, 1), (2, 1), (5, 4)]
        # cycle 1 shuffle, (4, 2) moved to (3, 3) in cascade
        expPosHist["A0019"] = [(4, 2), (3, 3), (3, 3), (3, 3)]
        # cycle 1 shuffle, (5, 1) moved to (4, 2) in cascade
        expPosHist["A0020"] = [(5, 1), (4, 2), (4, 2), (4, 2)]
        # cycle 1 shuffle, (6, 7) moved to (5, 1) in cascade
        expPosHist["A0044"] = [(6, 7), (5, 1), (5, 1), (5, 1)]
        # cycle 1 shuffle, fresh to (6, 7)
        # cycle 3 shuffle, (6, 7) moved to (5, 2) in cascade
        expPosHist["A0077"] = [("NotCreatedYet", "NotCreatedYet"), (6, 7), (6, 7), (5, 2)]
        # cycle 2 shuffle, (2, 2) moved to (5, 3)
        expPosHist["A0009"] = [(2, 2), (2, 2), (5, 3), (5, 3)]
        # cycle 2 shuffle, (3, 2) moved to (2, 2) in cascade
        expPosHist["A0014"] = [(3, 2), (3, 2), (2, 2), (2, 2)]
        # cycle 2 shuffle, (4, 1) moved to (3, 2) in cascade
        expPosHist["A0015"] = [(4, 1), (4, 1), (3, 2), (3, 2)]
        # cycle 2 shuffle, (5, 4) moved to (4, 1) in cascade
        expPosHist["A0034"] = [(5, 4), (5, 4), (4, 1), (4, 1)]
        # cycle 2 shuffle, (6, 4) moved to (5, 4) in cascade then discharged to SFP
        expPosHist["A0040"] = [(6, 4), (6, 4), (5, 4), ("SFP", "SFP")]
        # cycle 2 shuffle, fresh to (6, 4)
        expPosHist["A0078"] = [("NotCreatedYet", "NotCreatedYet"), ("NotCreatedYet", "NotCreatedYet"), (6, 4), (6, 4)]
        # cycle 1 shuffle, (5, 3) moved to SFP
        expPosHist["A0029"] = [(5, 3), (5, 3), ("SFP", "SFP"), ("SFP", "SFP")]
        # cycle 3 shuffle, (3, 1) moved to (2, 1) in cascade
        expPosHist["A0010"] = [(3, 1), (3, 1), (3, 1), (2, 1)]
        # cycle 3 shuffle, (4, 3) moved to (3, 1) in cascade
        expPosHist["A0024"] = [(4, 3), (4, 3), (4, 3), (3, 1)]
        # cycle 3 shuffle, (5, 2) moved to (4, 3) in cascade
        expPosHist["A0025"] = [(5, 2), (5, 2), (5, 2), (4, 3)]
        # cycle 3 shuffle, fresh to (6, 7)
        expPosHist["A0079"] = [
            ("NotCreatedYet", "NotCreatedYet"),
            ("NotCreatedYet", "NotCreatedYet"),
            ("NotCreatedYet", "NotCreatedYet"),
            (6, 7),
        ]

        # run four cycles of fuel management, checking SFP labels and block masses each cycle
        for cycle in range(4):
            self.r.p.cycle = cycle
            fh.cycle = cycle
            fh.manageFuel(cycle)
            for a in self.r.excore["sfp"]:
                self.assertEqual(a.getLocation(), "SFP")
            for b in self.r.core.iterBlocks(Flags.FUEL):
                self.assertGreater(b.p.kgHM, 0.0, "b.p.kgHM not populated!")
                self.assertGreater(b.p.kgFis, 0.0, "b.p.kgFis not populated!")

        # check assemblies in core
        for a in self.r.core:
            self._checkAssemblyPositionHistory(a, expPosHist)

        # check assemblies in SFP
        for a in list(self.r.excore["sfp"]):
            self._checkAssemblyPositionHistory(a, expPosHist)

        # check getter methods based on assembly location history
        for aName, posList in expPosHist.items():
            for i, rp in enumerate(posList):
                if rp[0] is not None and rp[0] not in assemblies.Assembly.NOT_IN_CORE:
                    r, p = rp
                    self.assertEqual(self.r.core.getAssemblyWithRingPosHist(r, p, i).getName(), aName)

        fh.interactEOL()

    def _checkAssemblyPositionHistory(self, a, answerKey):
        """Compare one assembly's ring/position history against the reference dict."""
        if a.getName() not in answerKey:
            # check that location history is the same position
            self.assertEqual(len(set(a.p.ringPosHist)), 1)
        else:
            self.assertListEqual(a.p.ringPosHist, answerKey[a.getName()])

    def test_repeatShuffles(self):
        """Loads the ARMI test reactor with a custom shuffle logic file and shuffles assemblies twice.

        .. test:: Execute user-defined shuffle operations based on a reactor model.
            :id: T_ARMI_SHUFFLE
            :tests: R_ARMI_SHUFFLE

        Notes
        -----
        The custom shuffle logic is executed by
        :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel` in
        :py:meth:`armi.physics.fuelCycle.tests.test_fuelHandlers.TestFuelHandler.runShuffling`.
        There are two primary assertions: spent fuel pool assemblies are in the correct location and
        the assemblies were shuffled into their correct locations. This process is repeated twice to
        ensure repeatability.
        """
        # check labels before shuffling:
        for a in self.r.excore["sfp"]:
            self.assertEqual(a.getLocation(), "SFP")

        # do some shuffles
        fh = self.o.getInterface("fuelHandler")
        self.runShuffling(fh)  # changes caseTitle

        # Make sure the generated shuffles file matches the tracked one. This will need to be updated if/when more
        # assemblies are added to the test reactor but must be done carefully. Do not blindly rebaseline this file.
        self.compareFilesLineByLine(
            os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.txt"), "armiRun2-SHUFFLES.txt"
        )

        # store locations of each assembly
        firstPassResults = {}
        for a in self.r.core:
            firstPassResults[a.getLocation()] = a.getName()
            self.assertNotIn(a.getLocation(), a.NOT_IN_CORE)

        # reset core to BOL state
        # reset assembly counter to get the same assem nums.
        self.setUp()

        newSettings = {CONF_PLOT_SHUFFLE_ARROWS: True}
        # now repeat shuffles
        newSettings["explicitRepeatShuffles"] = os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.txt")
        self.o.cs = self.o.cs.modified(newSettings=newSettings)

        fh = self.o.getInterface("fuelHandler")
        self.runShuffling(fh)

        # make sure the shuffle was repeated perfectly
        for a in self.r.core:
            self.assertEqual(a.getName(), firstPassResults[a.getLocation()])
        for a in self.r.excore["sfp"]:
            self.assertEqual(a.getLocation(), "SFP")

        # Do some cleanup, since the fuelHandler Interface has code that gets around the TempDirectoryChanger
        os.remove("armiRun2-SHUFFLES.txt")
        os.remove("armiRun2.shuffles_0.png")
        os.remove("armiRun2.shuffles_1.png")
        os.remove("armiRun2.shuffles_2.png")
        os.remove("armiRun2.shuffles_3.png")

    def test_readMoves(self):
        """
        Depends on the ``shuffleLogic`` created by ``repeatShuffles``.

        See Also
        --------
        runShuffling : creates the shuffling file to be read in.
""" numblocks = len(self.r.core.getFirstAssembly()) fh = fuelHandlers.FuelHandler(self.o) moves = fh.readMoves(os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.txt")) self.assertEqual(len(moves), 4) firstMove = moves[1][0] self.assertEqual(firstMove.fromLoc, "002-001") self.assertEqual(firstMove.toLoc, "SFP") self.assertEqual(len(firstMove.enrichList), numblocks) self.assertEqual(firstMove.assemType, "igniter fuel") self.assertIsNone(firstMove.ringPosCycle) # check the move to the SFP sfpMove = moves[2][-1] self.assertEqual(sfpMove.fromLoc, "005-003") self.assertEqual(sfpMove.toLoc, "SFP") self.assertIsNone(sfpMove.ringPosCycle) # make sure we fail hard if the file doesn't exist with self.assertRaises(RuntimeError): fh.readMoves("totall_fictional_file.txt") def test_readMovesYaml(self): fh = fuelHandlers.FuelHandler(self.o) moves, swaps = fh.readMovesYaml(os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.yaml")) self.maxDiff = None expected = { 1: [ AssemblyMove("LoadQueue", "009-045", [0.0, 0.12, 0.14, 0.15, 0.0], "igniter fuel"), AssemblyMove("009-045", "008-004"), AssemblyMove("008-004", "007-001"), AssemblyMove("007-001", "006-005"), AssemblyMove("006-005", "Delete"), AssemblyMove("009-045", "009-045", rotation=60.0), AssemblyMove("LoadQueue", "004-004", [0.0, 0.12, 0.14, 0.15, 0.0], "middle fuel"), AssemblyMove("004-004", "005-005"), AssemblyMove("005-005", "006-006"), AssemblyMove("006-006", "Delete"), ], 2: [ AssemblyMove("LoadQueue", "009-045", [0.0, 0.12, 0.14, 0.15, 0.0], "igniter fuel"), AssemblyMove("009-045", "008-004"), AssemblyMove("008-004", "007-001"), AssemblyMove("007-001", "006-005"), AssemblyMove("006-005", "Delete"), AssemblyMove("LoadQueue", "004-004", [0.0, 0.12, 0.14, 0.15, 0.0], "middle fuel"), AssemblyMove("004-004", "005-005"), AssemblyMove("005-005", "006-006"), AssemblyMove("006-006", "Delete"), AssemblyMove("009-045", "009-045", rotation=60.0), AssemblyMove("SFP", "005-003", ringPosCycle=[6, 5, 0]), 
AssemblyMove("005-003", "SFP"), ], 3: [ AssemblyMove("LoadQueue", "009-045", [0.0, 0.12, 0.14, 0.15, 0.0], "igniter fuel"), AssemblyMove("009-045", "008-004"), AssemblyMove("008-004", "007-001"), AssemblyMove("007-001", "006-005"), AssemblyMove("006-005", "Delete"), AssemblyMove("SFP", "002-002", ringPosCycle=[5, 3, 1]), AssemblyMove("002-002", "SFP"), ], } self.assertEqual(moves, expected) self.assertEqual(swaps, {3: [("009-045", "008-004"), ("007-001", "006-005")]}) def test_performShuffleYamlIntegration(self): fh = fuelHandlers.FuelHandler(self.o) yaml_text = """ sequence: 1: - swap: ["009-045", "008-004"] - cascade: ["igniter fuel", "009-045", "008-004", "007-001", "006-005"] fuelEnrichment: [0, 0.12, 0.14, 0.15, 0] - extraRotations: {"009-045": 60} """ with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tf: tf.write(yaml_text) fname = tf.name try: locs = ["009-045", "008-004", "007-001", "006-005"] before = {loc: self.r.core.getAssemblyWithStringLocation(loc).getName() for loc in locs} self.r.p.cycle = 1 self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname, CONF_TRACK_ASSEMS: False}) self.r.core._trackAssems = False fh.outage() fresh = self.r.core.getAssemblyWithStringLocation("008-004") self.assertEqual(fresh.getType(), "igniter fuel") self.assertNotIn(fresh.getName(), before.values()) rotated = self.r.core.getAssemblyWithStringLocation("009-045") self.assertEqual(rotated.getName(), before["009-045"]) self.assertAlmostEqual(rotated.p.orientation[2], 60.0) self.assertEqual( self.r.core.getAssemblyWithStringLocation("007-001").getName(), before["008-004"], ) self.assertEqual( self.r.core.getAssemblyWithStringLocation("006-005").getName(), before["007-001"], ) self.assertIsNone(self.r.excore["sfp"].getAssembly(before["006-005"])) finally: os.remove(fname) def test_yamlSfpOverridesTrackAssems(self): fh = fuelHandlers.FuelHandler(self.o) yaml_text = """ sequence: 1: - cascade: ["igniter fuel", "009-045", "SFP"] 
      fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]
"""
        with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tf:
            tf.write(yaml_text)
            fname = tf.name
        try:
            before = self.r.core.getAssemblyWithStringLocation("009-045").getName()
            self.r.p.cycle = 1
            self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname, CONF_TRACK_ASSEMS: False})
            self.r.core._trackAssems = False
            fh.outage()
            # tracking remains off, but the SFP-bound assembly is still findable in the SFP
            self.assertFalse(self.r.core._trackAssems)
            self.assertIsNotNone(self.r.excore["sfp"].getAssembly(before))
        finally:
            os.remove(fname)

    def test_readMovesYaml_loadFromSfp(self):
        """Reading a cascade that pulls an assembly back out of the SFP via its ringPosCycle history."""
        assem = self.r.excore["sfp"].getChildren()[0]
        # fake the assembly location history
        assem.p.ringPosHist = [(2, 3), (4, 5), (5, 7)]
        yaml_text = """
sequence:
  1:
    - cascade: ["SFP", "005-003", "SFP"]
      ringPosCycle: [5, 7, 2]
"""
        with directoryChangers.TemporaryDirectoryChanger():
            fname = "moves.yaml"
            with open(fname, "w", encoding="utf-8") as stream:
                stream.write(yaml_text)
            moves, _ = fuelHandlers.FuelHandler.readMovesYaml(fname)
        expected = {
            1: [
                AssemblyMove("SFP", "005-003", [], None, [5, 7, 2]),
                AssemblyMove("005-003", "SFP"),
            ]
        }
        self.assertEqual(moves, expected)

    def test_performShuffleYaml_loadFromSfp(self):
        """Perform an outage that loads a specific assembly from the SFP back into the core."""
        fh = fuelHandlers.FuelHandler(self.o)
        sfpAssem = self.r.excore["sfp"].getChildren()[0]
        # fake the assembly location history
        ringPosHistInts = [(2, 3), (4, 5), (5, 7)]
        sfpAssem.p.ringPosHist = [(str(x).encode(), str(y).encode()) for x, y in ringPosHistInts]
        yaml_text = """
sequence:
  1:
    - cascade: ["SFP", "009-045", "SFP"]
      ringPosCycle: [5, 7, 2]
"""
        with directoryChangers.TemporaryDirectoryChanger():
            fname = "moves.yaml"
            with open(fname, "w", encoding="utf-8") as stream:
                stream.write(yaml_text)
            before = self.r.core.getAssemblyWithStringLocation("009-045").getName()
            self.r.p.cycle = 1
            self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname})
            fh.outage()
            assem = self.r.core.getAssemblyWithStringLocation("009-045")
            self.assertEqual(assem.getName(), sfpAssem.getName())
cycle0Loc = ("2".encode(), "3".encode()) self.assertEqual(assem.p.ringPosHist[0], cycle0Loc) self.assertEqual(assem.p.ringPosHist[1], (9, 45)) self.assertEqual(len(assem.p.ringPosHist), 2) # truncated by logic in fuelHandlers newSfpAssem = self.r.excore["sfp"].getAssembly(before) self.assertIsNotNone(newSfpAssem) self.assertEqual(newSfpAssem.p.ringPosHist[0], (9, 45)) def test_performShuffleYaml_loadFromSfp2(self): fh = fuelHandlers.FuelHandler(self.o) fname = os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.yaml") self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname}) # fake the assembly location history with directoryChangers.TemporaryDirectoryChanger(): # _moves, _ = fh.readMovesYaml() before1 = self.r.core.getAssemblyWithStringLocation("005-003") before2 = self.r.core.getAssemblyWithStringLocation("006-005") for cycle in range(4): self.r.p.cycle = cycle fh.outage() # check that the following ringPosHist exist in the SFP inSfp = [ [6, 6, 0], [6, 6, 1], [6, 5, 1], [6, 5, 2], [2, 2, 2], ] for a in self.r.excore["sfp"].getChildren(): print(a, a.p.ringPosHist) for ring, pos, cycle in inSfp: found = False for a in self.r.excore["sfp"].getChildren(): if a.p.ringPosHist[cycle] == (ring, pos): found = True break self.assertTrue(found, f"ringPosHist == ({ring}, {pos}, {cycle}) not found in SFP!") # check that SFP is in the ringPosHist of (2, 2) and (5, 3) # check that the assembly that ended up in 002-002 is the same that started in 005-003 # check that the assembly that ended up in 005-003 is the same that started in 006-005 for loc, refA in [ ("002-002", before1), ("005-003", before2), ]: a = self.r.core.getAssemblyWithStringLocation(loc) self.assertIn(("SFP", "SFP"), a.p.ringPosHist) self.assertEqual( refA.getName(), a.getName(), "Expected {a} to be the same assembly as {refA} based on shuffling!" 
                )

    def test_processMoveList(self):
        """Process the text shuffle file's cycle-2 moves into load chains."""
        fh = fuelHandlers.FuelHandler(self.o)
        moves = fh.readMoves(os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.txt"))
        result = fh.processMoveList(moves[2])
        self.assertIn(None, result.ringPosCycles)
        # pool/queue markers must not leak into the load chains themselves
        self.assertTrue(all("SFP" not in chain for chain in result.loadChains))
        self.assertTrue(all("LoadQueue" not in chain for chain in result.loadChains))
        self.assertFalse(result.loopChains)
        self.assertFalse(result.rotations)

    def test_processMoveList_yaml(self):
        """Process the YAML shuffle file's cycle-1 moves, which include enrichments and rotations."""
        fh = fuelHandlers.FuelHandler(self.o)
        moves, _ = fh.readMovesYaml(os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.yaml"))
        result = fh.processMoveList(moves[1])
        self.assertEqual(len(result.loadChains), 2)
        self.assertTrue(any(result.enriches))
        self.assertTrue(result.rotations)

    def test_getFactorList(self):
        fh = fuelHandlers.FuelHandler(self.o)
        factors, _ = fh.getFactorList(0)
        self.assertIn("eqShuffles", factors)

    def test_linPowByPin(self):
        """Setting linPowByPin from a list or an array must coerce to np.ndarray."""
        _fh = fuelHandlers.FuelHandler(self.o)
        _hist = self.o.getInterface("history")
        newSettings = {CONF_ASSEM_ROTATION_STATIONARY: True}
        self.o.cs = self.o.cs.modified(newSettings=newSettings)
        assem = self.o.r.core.getFirstAssembly(Flags.FUEL)
        b = next(assem.iterBlocks(Flags.FUEL))

        b.p.linPowByPin = [1, 2, 3]
        self.assertEqual(type(b.p.linPowByPin), np.ndarray)

        b.p.linPowByPin = np.array([1, 2, 3])
        self.assertEqual(type(b.p.linPowByPin), np.ndarray)

    def test_linPowByPinNeutron(self):
        """Setting linPowByPinNeutron from a list or an array must coerce to np.ndarray."""
        _fh = fuelHandlers.FuelHandler(self.o)
        _hist = self.o.getInterface("history")
        newSettings = {CONF_ASSEM_ROTATION_STATIONARY: True}
        self.o.cs = self.o.cs.modified(newSettings=newSettings)
        assem = self.o.r.core.getFirstAssembly(Flags.FUEL)
        b = next(assem.iterBlocks(Flags.FUEL))

        b.p.linPowByPinNeutron = [1, 2, 3]
        self.assertEqual(type(b.p.linPowByPinNeutron), np.ndarray)

        b.p.linPowByPinNeutron = np.array([1, 2, 3])
        self.assertEqual(type(b.p.linPowByPinNeutron), np.ndarray)

    def test_linPowByPinGamma(self):
        """Setting linPowByPinGamma from a list or an array must coerce to np.ndarray."""
        _fh = fuelHandlers.FuelHandler(self.o)
        _hist = self.o.getInterface("history")
        newSettings = {CONF_ASSEM_ROTATION_STATIONARY: True}
        self.o.cs = self.o.cs.modified(newSettings=newSettings)
        assem = self.o.r.core.getFirstAssembly(Flags.FUEL)
        b = next(assem.iterBlocks(Flags.FUEL))

        b.p.linPowByPinGamma = [1, 2, 3]
        self.assertEqual(type(b.p.linPowByPinGamma), np.ndarray)

        b.p.linPowByPinGamma = np.array([1, 2, 3])
        self.assertEqual(type(b.p.linPowByPinGamma), np.ndarray)

    def test_transferStationaryBlocks(self):
        """Test the _transferStationaryBlocks method.

        .. test:: User-specified blocks can remain in place during shuffling
            :id: T_ARMI_SHUFFLE_STATIONARY0
            :tests: R_ARMI_SHUFFLE_STATIONARY
        """
        # grab stationary block flags
        sBFList = self.r.core.stationaryBlockFlagsList

        # grab the assemblies
        assems = self.r.core.getAssemblies(Flags.FUEL)

        # grab two arbitrary assemblies
        a1 = assems[1]
        a2 = assems[2]

        # grab the stationary blocks pre swap
        a1PreSwapStationaryBlocks = [
            [block.getName(), block.spatialLocator.k]
            for block in a1
            if any(block.hasFlags(sbf) for sbf in sBFList)
        ]
        a2PreSwapStationaryBlocks = [
            [block.getName(), block.spatialLocator.k]
            for block in a2
            if any(block.hasFlags(sbf) for sbf in sBFList)
        ]

        # swap the stationary blocks
        fh = fuelHandlers.FuelHandler(self.o)
        fh._transferStationaryBlocks(a1, a2)

        # grab the stationary blocks post swap
        a1PostSwapStationaryBlocks = [
            [block.getName(), block.spatialLocator.k]
            for block in a1
            if any(block.hasFlags(sbf) for sbf in sBFList)
        ]
        a2PostSwapStationaryBlocks = [
            [block.getName(), block.spatialLocator.k]
            for block in a2
            if any(block.hasFlags(sbf) for sbf in sBFList)
        ]

        # validate the stationary blocks have swapped locations and are aligned
        self.assertEqual(a1PostSwapStationaryBlocks, a2PreSwapStationaryBlocks)
        self.assertEqual(a2PostSwapStationaryBlocks, a1PreSwapStationaryBlocks)

    def test_transStatBlocksBadNumbers(self):
        """
        Test the _transferStationaryBlocks method for the case where the input assemblies have
        different numbers of stationary blocks.
""" # grab stationary block flags sBFList = self.r.core.stationaryBlockFlagsList # grab the assemblies assems = self.r.core.getAssemblies(Flags.FUEL) # grab two arbitrary assemblies a1 = assems[1] a2 = assems[2] # change a block in assembly 1 to be flagged as a stationary block for block in a1: if not any(block.hasFlags(sbf) for sbf in sBFList): a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, sBFList[0]) self.assertTrue(any(block.hasFlags(sbf) for sbf in sBFList)) break # try to swap stationary blocks between assembly 1 and 2 fh = fuelHandlers.FuelHandler(self.o) with self.assertRaises(ValueError): fh._transferStationaryBlocks(a1, a2) def test_transStatBlockUnaligned(self): """ Test the _transferStationaryBlocks method for the case where the input assemblies have unaligned locations of stationary blocks. """ # grab stationary block flags sBFList = self.r.core.stationaryBlockFlagsList # grab the assemblies assems = self.r.core.getAssemblies(Flags.FUEL) # grab two arbitrary assemblies a1 = assems[1] a2 = assems[2] # move location of a stationary flag in assembly 1 for block in a1: if any(block.hasFlags(sbf) for sbf in sBFList): # change flag of first identified stationary block to fuel a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, Flags.FUEL) self.assertTrue(a1[block.spatialLocator.k].hasFlags(Flags.FUEL)) # change next or previous block flag to stationary flag try: a1[block.spatialLocator.k + 1].setType(a1[block.spatialLocator.k + 1].p.type, sBFList[0]) self.assertTrue(any(a1[block.spatialLocator.k + 1].hasFlags(sbf) for sbf in sBFList)) except Exception: a1[block.spatialLocator.k - 1].setType(a1[block.spatialLocator.k - 1].p.type, sBFList[0]) self.assertTrue(any(a1[block.spatialLocator.k - 1].hasFlags(sbf) for sbf in sBFList)) break # try to swap stationary blocks between assembly 1 and 2 fh = fuelHandlers.FuelHandler(self.o) with self.assertRaises(ValueError): fh._transferStationaryBlocks(a1, a2) def 
test_transStatBlockBadHeights(self): """ Test the _transferStationaryBlocks method for the case where the total height of the stationary blocks is unequal between input assemblies. """ # grab stationary block flags sBFList = self.r.core.stationaryBlockFlagsList # grab the assemblies assems = self.r.core.getAssemblies(Flags.FUEL) # grab two arbitrary assemblies a1 = assems[1] a2 = assems[2] # change height of a stationary block in assembly 1 for block in a1: if any(block.hasFlags(sbf) for sbf in sBFList): # change height of first identified stationary block nomHeight = block.getHeight() a1[block.spatialLocator.k].setHeight(nomHeight - 1e-5) # try to swap stationary blocks between assembly 1 and 2 fh = fuelHandlers.FuelHandler(self.o) with mockRunLogs.BufferLog() as mock: fh._transferStationaryBlocks(a1, a2) self.assertIn("top elevation of stationary", mock.getStdout()) def test_dischargeSwap(self): """Remove an assembly from the core and replace it with one from the SFP. .. test:: User-specified blocks can remain in place during shuffling :id: T_ARMI_SHUFFLE_STATIONARY1 :tests: R_ARMI_SHUFFLE_STATIONARY """ # grab stationary block flags sBFList = self.r.core.stationaryBlockFlagsList # grab an arbitrary fuel assembly from the core and from the SFP a1 = self.r.core.getFirstAssembly(Flags.FUEL) a2 = self.r.excore["sfp"].getChildrenWithFlags(Flags.FUEL)[0] # grab the stationary blocks pre swap a1PreSwapStationaryBlocks = [ [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList) ] a2PreSwapStationaryBlocks = [ [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList) ] # test discharging assembly 1 and replacing with assembly 2 fh = fuelHandlers.FuelHandler(self.o) fh.dischargeSwap(a2, a1) self.assertTrue(a1.getLocation() in a1.NOT_IN_CORE) self.assertTrue(a2.getLocation() not in a2.NOT_IN_CORE) # grab the stationary blocks post swap a1PostSwapStationaryBlocks = [ 
            [block.getName(), block.spatialLocator.k]
            for block in a1
            if any(block.hasFlags(sbf) for sbf in sBFList)
        ]
        a2PostSwapStationaryBlocks = [
            [block.getName(), block.spatialLocator.k]
            for block in a2
            if any(block.hasFlags(sbf) for sbf in sBFList)
        ]

        # validate the stationary blocks have swapped locations correctly and are aligned
        self.assertEqual(a1PostSwapStationaryBlocks, a2PreSwapStationaryBlocks)
        self.assertEqual(a2PostSwapStationaryBlocks, a1PreSwapStationaryBlocks)

    def test_dischargeSwapStationaryBlocks(self):
        """
        Test the _transferStationaryBlocks method for the case where the input assemblies have
        different numbers as well as unaligned locations of stationary blocks.
        """
        # grab stationary block flags
        sBFList = self.r.core.stationaryBlockFlagsList

        # grab an arbitrary fuel assembly from the core and from the SFP
        a1 = self.r.core.getFirstAssembly(Flags.FUEL)
        a2 = self.r.excore["sfp"].getChildren(Flags.FUEL)[0]

        # change a block in assembly 1 to be flagged as a stationary block
        for block in a1:
            if not any(block.hasFlags(sbf) for sbf in sBFList):
                a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, sBFList[0])
                self.assertTrue(any(block.hasFlags(sbf) for sbf in sBFList))
                break

        # try to discharge assembly 1 and replace with assembly 2
        fh = fuelHandlers.FuelHandler(self.o)
        with self.assertRaises(ValueError):
            fh.dischargeSwap(a2, a1)

        # re-initialize assemblies
        self.setUp()
        a1 = self.r.core.getFirstAssembly(Flags.FUEL)
        a2 = self.r.excore["sfp"].getChildren(Flags.FUEL)[0]

        # move location of a stationary flag in assembly 1
        for block in a1:
            if any(block.hasFlags(sbf) for sbf in sBFList):
                # change flag of first identified stationary block to fuel
                a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, Flags.FUEL)
                self.assertTrue(a1[block.spatialLocator.k].hasFlags(Flags.FUEL))
                # change next or previous block flag to stationary flag
                try:
                    a1[block.spatialLocator.k + 1].setType(a1[block.spatialLocator.k + 1].p.type, sBFList[0])
                    self.assertTrue(any(a1[block.spatialLocator.k + 1].hasFlags(sbf) for sbf in sBFList))
                except Exception:
                    a1[block.spatialLocator.k - 1].setType(a1[block.spatialLocator.k - 1].p.type, sBFList[0])
                    self.assertTrue(any(a1[block.spatialLocator.k - 1].hasFlags(sbf) for sbf in sBFList))
                break

        # try to discharge assembly 1 and replace with assembly 2
        with self.assertRaises(ValueError):
            fh.dischargeSwap(a2, a1)

    def test_getAssembliesInRings(self):
        """Exercise _getAssembliesInRings for in-core rings and the SFP pseudo-ring."""
        fh = fuelHandlers.FuelHandler(self.o)
        aList0 = fh._getAssembliesInRings([0], Flags.FUEL, False, None, False)
        self.assertEqual(len(aList0), 1)

        aList1 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, False, None, False)
        self.assertEqual(len(aList1), 3)

        aList2 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, True, None, False)
        self.assertEqual(len(aList2), 3)

        aList3 = fh._getAssembliesInRings([0, 1, 2, "SFP"], Flags.FUEL, True, None, False)
        self.assertEqual(len(aList3), 4)

        aList4 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, False, None, True)
        self.assertEqual(len(aList4), 3)


class TestFuelPlugin(unittest.TestCase):
    """Tests that make sure the plugin is being discovered well."""

    def test_settingsAreDiscovered(self):
        cs = caseSettings.Settings()
        nm = settings.CONF_JUMP_RING_NUM
        self.assertEqual(cs[nm], 8)



================================================
FILE: armi/physics/fuelCycle/tests/test_hexAssemblyFuelMgmtUtils.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests some fuel handling tools, specific to hex-assembly reactors."""

from armi.physics.fuelCycle import hexAssemblyFuelMgmtUtils as hexUtils
from armi.tests import ArmiTestHelper
from armi.utils import directoryChangers


class TestHexAssemMgmtTools(ArmiTestHelper):
    def setUp(self):
        self.td = directoryChangers.TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_buildConvergentRingSchedule(self):
        """Convergent schedules collapse to [inner, outer] with matching widths."""
        schedule, widths = hexUtils.buildConvergentRingSchedule(1, 17, 0)
        self.assertEqual(schedule, [1, 17])
        self.assertEqual(widths, [16, 1])

        schedule, widths = hexUtils.buildConvergentRingSchedule(3, 17, 1)
        self.assertEqual(schedule, [3, 17])
        self.assertEqual(widths, [14, 1])

        schedule, widths = hexUtils.buildConvergentRingSchedule(12, 16, 0.5)
        self.assertEqual(schedule, [12, 16])
        self.assertEqual(widths, [4, 1])

    def test_buildRingSchedule(self):
        # simple divergent
        schedule, widths = hexUtils.buildRingSchedule(9, 1, 9)
        self.assertEqual(schedule, [9, 8, 7, 6, 5, 4, 3, 2, 1])
        zeroWidths = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.assertEqual(widths, zeroWidths)

        # simple with no jumps
        schedule, widths = hexUtils.buildRingSchedule(9, 9, 1, jumpRingTo=1)
        self.assertEqual(schedule, [1, 2, 3, 4, 5, 6, 7, 8, 9])
        self.assertEqual(widths, zeroWidths)

        # simple with 1 jump
        schedule, widths = hexUtils.buildRingSchedule(9, 9, 1, jumpRingFrom=6)
        self.assertEqual(schedule, [5, 4, 3, 2, 1, 6, 7, 8, 9])
        self.assertEqual(widths, zeroWidths)

        # 1 jump plus auto-correction to core size
        schedule, widths = hexUtils.buildRingSchedule(9, 1, 17, jumpRingFrom=5)
        self.assertEqual(schedule, [6, 7, 8, 9, 5, 4, 3, 2, 1])
        self.assertEqual(widths, zeroWidths)

        # crash on invalid jumpring
        with self.assertRaises(ValueError):
            schedule, widths = hexUtils.buildRingSchedule(9, 1, 17, jumpRingFrom=0)

        # test 4: Mid way jumping
        schedule, widths = hexUtils.buildRingSchedule(9, 1, 9, jumpRingTo=6, jumpRingFrom=3)
        self.assertEqual(schedule, [9, 8, 7, 4, 5, 6, 3, 2, 1])
        self.assertEqual(widths, zeroWidths)



================================================
FILE: armi/physics/fuelCycle/tests/test_utils.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from unittest import TestCase

import numpy as np

from armi.physics.fuelCycle import utils
from armi.reactor.blocks import Block
from armi.reactor.components import Circle
from armi.reactor.flags import Flags
from armi.reactor.grids import IndexLocation, MultiIndexLocation


class FuelCycleUtilsTests(TestCase):
    """Tests for geometry indifferent fuel cycle routines."""

    N_PINS = 169

    def setUp(self):
        self.block = Block("test block")
        self.fuel = Circle(
            "test pin",
            material="UO2",
            Tinput=20,
            Thot=20,
            mult=self.N_PINS,
            id=0.0,
            od=1.0,
        )
        clad = Circle(
            "clad",
            material="HT9",
            Tinput=20,
            Thot=300,
            id=1.0,
            od=1.1,
        )
        self.block.add(self.fuel)
        self.block.add(clad)
        # Force no fuel flags
        self.fuel.p.flags = Flags.PIN

    def test_maxBurnupLocationFromComponents(self):
        """Test that the ``Component.p.pinPercentBu`` parameter can reveal max burnup location."""
        self.fuel.spatialLocator = MultiIndexLocation(None)
        locations = []
        for i in range(self.N_PINS):
            loc = IndexLocation(i, 0, 0, None)
            self.fuel.spatialLocator.append(loc)
            locations.append(loc)
        self.fuel.p.pinPercentBu = np.ones(self.N_PINS, dtype=float)
Pick an arbitrary index for the pin with the most burnup maxBuIndex = self.N_PINS // 3 self.fuel.p.pinPercentBu[maxBuIndex] *= 2 expectedLoc = locations[maxBuIndex] actual = utils.maxBurnupLocator(self.block) self.assertEqual(actual, expectedLoc) def test_singleLocatorWithBurnup(self): """Test that a single component with burnup can be used to find the highest burnup.""" freeComp = Circle("free fuel", material="UO2", Tinput=200, Thot=200, id=0, od=1, mult=1) freeComp.spatialLocator = IndexLocation(2, 4, 0, None) freeComp.p.pinPercentBu = [ 0.01, ] loc = utils.maxBurnupLocator([freeComp]) self.assertIs(loc, freeComp.spatialLocator) def test_maxBurnupLocatorWithNoBurnup(self): """Ensure we catch an error if no burnup is found across components.""" with self.assertRaisesRegex(ValueError, "No burnups found"): utils.maxBurnupLocator([]) def test_maxBurnupLocatorMismatchedData(self): """Ensure pin burnup and locations must agree.""" freeComp = Circle("free fuel", material="UO2", Tinput=200, Thot=200, id=0, od=1, mult=1) freeComp.spatialLocator = IndexLocation(2, 4, 0, None) freeComp.p.pinPercentBu = [ 0.01, 0.02, ] with self.assertRaisesRegex(ValueError, "Pin burnup.*pin locations.*differ"): utils.maxBurnupLocator([freeComp]) def test_assemblyHasPinPower(self): """Test the ability to check if an assembly has fuel pin powers.""" fakeAssem = [self.block] # No fuel blocks, no pin power on blocks => no pin powers self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem)) # Yes fuel blocks, no pin power on blocks => no pin powers self.block.p.flags |= Flags.FUEL self.assertFalse(utils.assemblyHasFuelPinPowers(fakeAssem)) # Yes fuel blocks, yes pin power on blocks => yes pin powers self.block.p.linPowByPin = np.arange(self.N_PINS, dtype=float) self.assertTrue(utils.assemblyHasFuelPinPowers(fakeAssem)) # Yes fuel blocks, yes pin power assigned but all zeros => no pin powers self.block.p.linPowByPin = np.zeros(self.N_PINS, dtype=float) 
self.assertFalse(utils.assemblyHasFuelPinPowers(fakeAssem)) def test_assemblyHasPinBurnups(self): """Test the ability to check if an assembly has fuel pin burnup.""" fakeAssem = [self.block] # No fuel components => no assembly burnups self.assertFalse(self.block.getChildrenWithFlags(Flags.FUEL)) self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem)) # No fuel with burnup => no assembly burnups self.block.p.flags |= Flags.FUEL self.fuel.p.flags |= Flags.FUEL self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem)) # Fuel pin has burnup => yes assembly burnup self.fuel.p.pinPercentBu = np.arange(self.N_PINS, dtype=float) self.assertTrue(utils.assemblyHasFuelPinBurnup(fakeAssem)) # Fuel pin has empty burnup => no assembly burnup self.fuel.p.pinPercentBu = np.zeros(self.N_PINS) self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem)) # Yes burnup but no fuel flags => no assembly burnup self.fuel.p.flags ^= Flags.FUEL self.assertFalse(self.fuel.hasFlags(Flags.FUEL)) self.fuel.p.pinPercentBu = np.arange(self.N_PINS, dtype=float) self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem)) def test_maxBurnupBlock(self): """Test the ability to find maximum burnup block in an assembly.""" reflector = Block("reflector") assem = [reflector, self.block] self.block.p.percentBuPeak = 40 expected = utils.maxBurnupBlock(assem) self.assertIs(expected, self.block) # add a new block with more burnup higher up the stack hotter = copy.deepcopy(self.block) hotter.p.percentBuPeak *= 2 expected = utils.maxBurnupBlock([reflector, self.block, hotter, self.block, reflector]) self.assertIs(expected, hotter) def test_maxBurnupBlockNoBlocks(self): """Ensure a more helpful error is provided for empty sequence.""" with self.assertRaisesRegex(ValueError, "Error finding max burnup"): utils.maxBurnupBlock([]) def test_maxBurnupBlockNoBurnup(self): """Ensure that we will not return a block with zero burnup.""" self.block.p.percentBuPeak = 0.0 with self.assertRaisesRegex(ValueError, "Error 
finding max burnup"): utils.maxBurnupBlock([self.block]) ================================================ FILE: armi/physics/fuelCycle/utils.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Geometric agnostic routines that are useful for fuel cycle analysis on pin-type reactors.""" import operator import typing import numpy as np from armi import runLog from armi.reactor.flags import Flags from armi.reactor.grids import IndexLocation, MultiIndexLocation if typing.TYPE_CHECKING: from armi.reactor.blocks import Block from armi.reactor.components import Component def assemblyHasFuelPinPowers(a: typing.Iterable["Block"]) -> bool: """Determine if an assembly has pin powers. These are necessary for determining rotation and may or may not be present on all assemblies. Parameters ---------- a : Assembly Assembly in question Returns ------- bool If at least one fuel block in the assembly has pin powers. """ # Avoid using Assembly.getChildrenWithFlags(Flags.FUEL) # because that creates an entire list where we may just need the first # fuel block fuelBlocks = filter(lambda b: b.hasFlags(Flags.FUEL), a) return any(b.hasFlags(Flags.FUEL) and np.any(b.p.linPowByPin) for b in fuelBlocks) def assemblyHasFuelPinBurnup(a: typing.Iterable["Block"]) -> bool: """Determine if an assembly has pin burnups. These are necessary for determining rotation and may or may not be present on all assemblies. 
Parameters ---------- a : Assembly Assembly in question Returns ------- bool If a block with pin burnup was found. Notes ----- Checks if any `Component.p.pinPercentBu`` is set and contains non-zero data on a fuel component in the block. """ # Avoid using Assembly.getChildrenWithFlags(Flags.FUEL) # because that creates an entire list where we may just need the first # fuel block. Same for avoiding Block.getChildrenWithFlags. hasFuelFlags = lambda o: o.hasFlags(Flags.FUEL) for b in filter(hasFuelFlags, a): for c in filter(hasFuelFlags, b): if np.any(c.p.pinPercentBu): return True return False def maxBurnupLocator( children: typing.Iterable["Component"], ) -> IndexLocation: """Find the location of the pin with highest burnup by looking at components. Parameters ---------- children : iterable[Component] Iterator over children with a spatial locator and ``pinPercentBu`` parameter Returns ------- IndexLocation Location of the pin with the highest burnup. Raises ------ ValueError If no children have burnup, or the burnup and locators differ. 
""" maxBu = 0 maxLocation = None withBurnupAndLocs = filter( lambda c: c.spatialLocator is not None and c.p.pinPercentBu is not None, children, ) for child in withBurnupAndLocs: pinBu = child.p.pinPercentBu if isinstance(child.spatialLocator, MultiIndexLocation): locations = child.spatialLocator else: locations = [child.spatialLocator] if len(locations) != pinBu.size: raise ValueError( f"Pin burnup (n={len(locations)}) and pin locations (n={pinBu.size}) " f"on {child} differ: {locations=} :: {pinBu=}" ) myMaxIX = pinBu.argmax() myMaxBu = pinBu[myMaxIX] if myMaxBu > maxBu: maxBu = myMaxBu maxLocation = locations[myMaxIX] if maxLocation is not None: return maxLocation raise ValueError("No burnups found!") def maxBurnupBlock(a: typing.Iterable["Block"]) -> "Block": """Find the block that contains the pin with the highest burnup.""" buGetter = operator.attrgetter("p.percentBuPeak") # Discard any blocks with zero burnup blocksWithBurnup = filter(buGetter, a) try: return max(blocksWithBurnup, key=buGetter) except Exception as ee: msg = f"Error finding max burnup block from {a}" runLog.error(msg) raise ValueError(msg) from ee ================================================ FILE: armi/physics/fuelPerformance/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Generic fuel performance plugin package. 
Fuel performance deals with addressing fuel system limits and predicting behaviors that are coupled to other physics within the reactor. Often fuel performance models address chemical, thermal and mechanical behaviors of the fuel system. The following general phenomena fall into the fuel performance category of physics for solid fuel (e.g., SFR, LWR, TRISO): * chemical degradation on the inside of fuel cladding such as fuel-clad chemical interaction (FCCI) * corrosion or erosion processes on the outside of the fuel cladding * the fuel-clad mechanical interaction (FCMI) resulting in cladding stress and strain * pressurization of the fuel pin due to released fission gases * high temperatures of the fuel which affect material properties and feedback during accident scenarios Fuel performance is typically coupled with thermal analysis because the thermal conditions of the fuel affects the performance and properties of the fuel change with temperature and burnup. In many cases, fuel performance is coupled with neutronic analysis as well, because the fission gases are strong neutron absorbers. In some reactors, significant composition changes during irradiation can influence neutronics as well (e.g. sodium thermal bond being squeezed out of pins). Finally, fuel temperatures impact the Doppler reactivity coefficient. """ from armi.physics.fuelPerformance.plugin import FuelPerformancePlugin # noqa: F401 ================================================ FILE: armi/physics/fuelPerformance/executers.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Baseline fuel performance related executers and options. These can be subclassed in fuel performance plugins to perform fuel performance physics calculations. Fuel performance is described in the :py:mod:`Fuel Performance subpackage <armi.physics.fuelPerformance>` """ from armi.physics import executers from armi.physics.fuelPerformance.settings import ( CONF_AXIAL_EXPANSION, CONF_BOND_REMOVAL, CONF_CLADDING_STRAIN, CONF_CLADDING_WASTAGE, CONF_FGR_REMOVAL, CONF_FGYF, CONF_FUEL_PERFORMANCE_ENGINE, ) class FuelPerformanceOptions(executers.ExecutionOptions): """Options relevant to all fuel performance engines.""" def __init__(self, label=None): executers.ExecutionOptions.__init__(self, label) self.fuelPerformanceEngine = None self.axialExpansion = None self.bondRemoval = None self.fissionGasRemoval = None self.claddingWastage = None self.claddingStrain = None def fromUserSettings(self, cs): """Copy relevant settings values from cs into this object.""" self.fuelPerformanceEngine = cs[CONF_FUEL_PERFORMANCE_ENGINE] self.axialExpansion = cs[CONF_AXIAL_EXPANSION] self.bondRemoval = cs[CONF_BOND_REMOVAL] self.fissionGasRemoval = cs[CONF_FGR_REMOVAL] self.claddingWastage = cs[CONF_CLADDING_WASTAGE] self.claddingStrain = cs[CONF_CLADDING_STRAIN] self.fissionGasYieldFraction = cs[CONF_FGYF] def fromReactor(self, reactor): """Load options from reactor.""" class FuelPerformanceExecuter(executers.DefaultExecuter): """ Prep, execute, and process a fuel performance solve. This uses the ``DefaultExecuter`` with the hope that most subclasses can use that run loop. 
As more fuel performance plugins are built we can reconsider this hierarchy. """ def __init__(self, options, reactor): executers.DefaultExecuter.__init__(self, options, reactor) ================================================ FILE: armi/physics/fuelPerformance/parameters.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Parameter definitions for fuel performance plugins.""" from armi.reactor import parameters from armi.reactor.blocks import Block from armi.reactor.parameters import ParamLocation from armi.utils import units def getFuelPerformanceParameterDefinitions(): """Return ParameterDefinitionCollections for each appropriate ArmiObject.""" return {Block: _getFuelPerformanceBlockParams()} def _getFuelPerformanceBlockParams(): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE) as pb: pb.defParam( "fuelCladLocked", units=units.UNITLESS, default=False, description="Boolean to indicate if the fuel is locked with the clad." 
" This is used to determine the expansion constraints for the fuel during" " thermal and/or burn-up expansion of the fuel and cladding materials.", ) def gasReleaseFraction(self, value): if value < 0.0 or value > 1.0: raise ValueError(f"Cannot set a gas release fraction of {value} outside of the bounds of [0.0, 1.0]") self._p_gasReleaseFraction = value pb.defParam( "gasReleaseFraction", setter=gasReleaseFraction, units=units.UNITLESS, description="Fraction of generated fission gas that no longer exists in the block.", categories=["eq cumulative shift"], ) def bondRemoved(self, value): if value < 0.0 or value > 1.0: raise ValueError(f"Cannot set a bond removed of {value} outside of the bounds of [0.0, 1.0]") self._p_bondRemoved = value pb.defParam( "bondRemoved", setter=bondRemoved, units=units.UNITLESS, description="Fraction of thermal bond between fuel and clad that has been pushed out.", categories=["eq cumulative shift"], ) pb.defParam( "cladWastage", units=units.MICRONS, description="Total cladding wastage from inner and outer surfaces.", location=ParamLocation.AVERAGE, categories=["eq cumulative shift"], ) pb.defParam( "totalCladStrain", units=units.PERCENT, description="Total diametral clad strain.", categories=["eq cumulative shift"], ) pb.defParam( "axialGrowthPct", units=units.PERCENT, description="Axial growth percentage", categories=["eq cumulative shift"], ) pb.defParam( "fpPeakFuelTemp", units=units.DEGC, description="Fuel performance calculated peak fuel temperature.", location=ParamLocation.AVERAGE, ) pb.defParam( "fpAveFuelTemp", units=units.DEGC, description="Fuel performance calculated average fuel temperature.", location=ParamLocation.AVERAGE, ) pb.defParam( "gasPorosity", units=units.UNITLESS, description="Fraction of fuel volume that is occupied by gas pores", default=0.0, categories=["eq cumulative shift"], ) pb.defParam( "liquidPorosity", units=units.UNITLESS, description="Fraction of fuel volume that is occupied by liquid filled pores", 
default=0.0, ) return pDefs ================================================ FILE: armi/physics/fuelPerformance/plugin.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generic Fuel Performance Plugin.""" from armi import interfaces, plugins from armi.physics.fuelPerformance import settings ORDER = interfaces.STACK_ORDER.CROSS_SECTIONS class FuelPerformancePlugin(plugins.ArmiPlugin): """Plugin for fuel performance.""" @staticmethod @plugins.HOOKIMPL def exposeInterfaces(cs): """Expose the fuel performance interfaces.""" return [] @staticmethod @plugins.HOOKIMPL def defineSettings(): """Define settings for fuel performance.""" return settings.defineSettings() @staticmethod @plugins.HOOKIMPL def defineSettingsValidators(inspector): """Define settings inspections for fuel performance.""" return settings.defineValidators(inspector) @staticmethod @plugins.HOOKIMPL def defineParameters(): """Define parameters for the plugin.""" from armi.physics.fuelPerformance import parameters return parameters.getFuelPerformanceParameterDefinitions() ================================================ FILE: armi/physics/fuelPerformance/settings.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Settings related to fuel performance.""" from armi.settings import setting from armi.settings.settingsValidation import Query CONF_AXIAL_EXPANSION = "axialExpansion" CONF_BOND_REMOVAL = "bondRemoval" CONF_CLADDING_STRAIN = "claddingStrain" CONF_CLADDING_WASTAGE = "claddingWastage" CONF_FGR_REMOVAL = "fgRemoval" CONF_FGYF = "fissionGasYieldFraction" CONF_FUEL_PERFORMANCE_ENGINE = "fuelPerformanceEngine" def defineSettings(): """Define generic fuel performance settings.""" settings = [ setting.Setting( CONF_FUEL_PERFORMANCE_ENGINE, default="", label="Fuel Performance Engine", description=( "Fuel performance engine that determines fission gas removal, bond removal," " axial growth, wastage, and cladding strain." ), options=[""], ), setting.Setting( CONF_FGYF, default=0.25, label="Fission Gas Yield Fraction", description=( "The fraction of gaseous atoms produced per fission event, assuming a fission product yield of 2.0" ), ), setting.Setting( CONF_AXIAL_EXPANSION, default=False, label="Fuel Axial Expansion", description="Perform axial fuel expansion. This will adjust fuel block lengths.", ), setting.Setting( CONF_BOND_REMOVAL, default=False, label="Thermal Bond Removal", description="Toggles fuel performance bond removal. This will remove thermal bond from the fuel.", ), setting.Setting( CONF_FGR_REMOVAL, default=False, label="Fission Gas Removal", description="Toggles fuel performance fission gas removal. 
This will remove fission gas from the fuel.", ), setting.Setting( CONF_CLADDING_WASTAGE, default=False, label="Cladding Wastage", description="Evaluate cladding wastage. ", ), setting.Setting( CONF_CLADDING_STRAIN, default=False, label="Cladding Strain", description="Evaluate cladding strain. ", ), ] return settings def defineValidators(inspector): return [ Query( lambda: ( inspector.cs[CONF_AXIAL_EXPANSION] or inspector.cs[CONF_BOND_REMOVAL] or inspector.cs[CONF_FGR_REMOVAL] or inspector.cs[CONF_CLADDING_WASTAGE] or inspector.cs[CONF_CLADDING_STRAIN] ) and inspector.cs[CONF_FUEL_PERFORMANCE_ENGINE] == "", "A fuel performance behavior has been selected but no fuel performance engine is selected.", "", inspector.NO_ACTION, ), ] ================================================ FILE: armi/physics/fuelPerformance/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/physics/fuelPerformance/tests/test_executers.py ================================================ # Copyright 2021 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for generic fuel performance executers.""" import unittest from armi.physics.fuelPerformance.executers import ( CONF_BOND_REMOVAL, FuelPerformanceOptions, ) from armi.settings.caseSettings import Settings class TestFuelPerformanceOptions(unittest.TestCase): def test_fuelPerformanceOptions(self): fpo = FuelPerformanceOptions("test_fuelPerformanceOptions") self.assertEqual(fpo.label, "test_fuelPerformanceOptions") cs = Settings() fpo.fromUserSettings(cs) self.assertEqual(fpo.bondRemoval, cs[CONF_BOND_REMOVAL]) ================================================ FILE: armi/physics/fuelPerformance/tests/test_fuelPerformancePlugin.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for generic fuel performance plugin.""" from armi.physics.fuelPerformance.plugin import FuelPerformancePlugin from armi.tests.test_plugins import TestPlugin class TestFuelPerformancePlugin(TestPlugin): plugin = FuelPerformancePlugin ================================================ FILE: armi/physics/fuelPerformance/tests/test_fuelPerformanceSymmetry.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audit symmetry-aware parameters in fuel performance. See Also -------- armi.testing.symmetryTesting """ from armi.physics.fuelPerformance.parameters import getFuelPerformanceParameterDefinitions from armi.reactor.blocks import Block from armi.testing import symmetryTesting class TestFPParamSymmetry(symmetryTesting.BasicArmiSymmetryTestHelper): def setUp(self): pluginParameters = getFuelPerformanceParameterDefinitions() self.blockParamsToTest = pluginParameters[Block] self.parameterOverrides = { "gasReleaseFraction": 0.5, "bondRemoved": 0.5, } super().setUp() ================================================ FILE: armi/physics/fuelPerformance/tests/test_fuelPerformanceUtils.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for fuel performance utilities.""" import unittest from armi.physics.fuelPerformance import utils from armi.reactor.flags import Flags from armi.reactor.tests import test_blocks class TestFuelPerformanceUtils(unittest.TestCase): def test_applyFuelDisplacement(self): displacement = 0.01 block = test_blocks.loadTestBlock() fuel = block.getComponent(Flags.FUEL) originalHotODInCm = fuel.getDimension("od") utils.applyFuelDisplacement(block, displacement) finalHotODInCm = fuel.getDimension("od") self.assertAlmostEqual(finalHotODInCm, originalHotODInCm + 2 * displacement) def test_gasConductivityCorrection_morph0(self): temp = 500 # C porosity = 0.4 # No correction chi = utils.gasConductivityCorrection(temp, porosity, 0) ref = 1.0 self.assertAlmostEqual(chi, ref, 5) def test_gasConductivityCorrection_morph1(self): temp = 500 # C porosity = 0.4 # Irregular Porosity, Bauer equation chi = utils.gasConductivityCorrection(temp, porosity, 1) ref = (1.0 - porosity) ** (1.5 * 1.00) self.assertAlmostEqual(chi, ref, 5) def test_gasConductivityCorrection_morph2(self): temp = 500 # C porosity = 0.4 # Irregular Porosity, Bauer equation chi = utils.gasConductivityCorrection(temp, porosity, 2) ref = (1.0 - porosity) ** (1.5 * 1.72) self.assertAlmostEqual(chi, ref, 5) def test_gasConductivityCorrection_morph3(self): temp = 500 # C porosity = 0.4 # Mixed Morphology, low temp chi = utils.gasConductivityCorrection(temp, porosity, 3) ref = (1.0 - porosity) ** (1.5 * 1.72) self.assertAlmostEqual(chi, ref, 5) # Mixed Morphology, high temp temp = 700 chi = 
def applyFuelDisplacement(block, displacementInCm):
    r"""
    Expands the fuel radius in a pin by a number of cm.

    Assumes there's thermal bond in it to displace. This adjusts the dimension
    of the fuel while conserving its mass. The bond mass is not conserved; it
    is assumed to be pushed up into the plenum but the modeling of this is not
    done yet by this method.

    .. warning:: A 0.5% buffer is included to avoid overlaps. This should
        be analyzed in detail as a methodology before using in any
        particular analysis.

    .. math::  n V = n\prime V\prime

               n\prime = \frac{V}{V\prime} n
    """
    clad = block.getComponent(Flags.CLAD)
    fuel = block.getComponent(Flags.FUEL)
    originalHotODInCm = fuel.getDimension("od")
    cladID = clad.getDimension("id")
    # do not swell past cladding ID! (actually leave 0.5% buffer for thermal expansion)
    # od is a diameter, so it grows by twice the radial displacement.
    newHotODInCm = min(cladID * 0.995, originalHotODInCm + displacementInCm * 2)
    fuel.setDimension("od", newHotODInCm, retainLink=True, cold=False)
    # reduce number density of fuel to conserve number of atoms (and mass):
    # volume scales with od^2 at fixed height, so n' = n * (od/od')^2.
    fuel.changeNDensByFactor(originalHotODInCm**2 / newHotODInCm**2)
Pages 253-258 """ if morphology == 0: chi = 1.0 elif morphology == 1: epsilon = 1.0 chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon) elif morphology == 2: epsilon = 1.72 chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon) elif morphology == 3: epsilon = 1.0 if tempInC < 660: epsilon = 1.72 else: epsilon = 1.00 chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon) elif morphology == 4: chi = (1.0 - porosity) / (1.0 + 1.5 * porosity) return chi ================================================ FILE: armi/physics/neutronics/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The neutronics physics package in the ARMI framework. Neutronics encompasses the modeling of nuclear chain reactions and their associated transmutation and decay. 
""" # ruff: noqa: F401 from enum import IntEnum from armi.physics.neutronics.const import ( ALL, FLUXFILES, GAMMA, INPUTOUTPUT, NEUTRON, NEUTRONGAMMA, RESTARTFILES, ) from armi.physics.neutronics.plugin import NeutronicsPlugin # ARC and CCCC cross section file format names COMPXS = "COMPXS" PMATRX = "PMATRX" GAMISO = "GAMISO" PMATRX_EXT = "pmatrx" GAMISO_EXT = "gamiso" ISOTXS = "ISOTXS" DIF3D = "DIF3D" # Constants for neutronics calculation types ADJOINT_CALC = "adjoint" REAL_CALC = "real" ADJREAL_CALC = "both" # Constants for boundary conditions # All external boundary conditions are set to zero outward current INFINITE = "Infinite" # "Planar" external boundaries conditions are set to zero outward current REFLECTIVE = "Reflective" # Generalized boundary conditions D * PHI PRIME + A * PHI = 0 where A is user-specified constant, # D is the diffusion coefficient, PHI PRIME and PHI are the outward current and flux at the # external boundaries. GENERAL_BC = "Generalized" # The following boundary conditions are three approximations of the vacuum boundary condition # in diffusion theory. # 'Extrapolated': sets A to 0.4692 (in generalized BC) to have the flux vanishing at # 0.7104*transport mean free path through linear extrapolation. Derived for plane # geometries - should be valid for complex geometries unless radius of curvature is # comparable to the mean free path. # 'ZeroSurfaceFlux': flux vanishes at the external boundary. # 'ZeroInwardCurrent': set A to 0.5 (in generalized BC) to have Jminus = 0 at the external boundaries. EXTRAPOLATED = "Extrapolated" ZEROFLUX = "ZeroSurfaceFlux" ZERO_INWARD_CURRENT = "ZeroInwardCurrent" # Common settings checks def gammaTransportIsRequested(cs): """ Check if gamma transport was requested by the user. Arguments --------- cs : ARMI settings object Object containing the default and user-specified ARMI settings controlling the simulation Returns ------- flag : bool Returns true if gamma transport is requested. 
""" from armi.physics.neutronics.settings import CONF_GLOBAL_FLUX_ACTIVE return GAMMA in cs[CONF_GLOBAL_FLUX_ACTIVE] def gammaXsAreRequested(cs): """ Check if gamma cross-sections generation was requested by the user. Arguments --------- cs : ARMI settings object Object containing the default and user-specified ARMI settings controlling the simulation. Returns ------- flag : bool Returns true if gamma cross section generation is requested. """ from armi.physics.neutronics.settings import CONF_GEN_XS return GAMMA in cs[CONF_GEN_XS] def adjointCalculationRequested(cs): """Return true if an adjoint calculation is requested based on the ``CONF_NEUTRONICS_TYPE`` setting.""" from armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE return cs[CONF_NEUTRONICS_TYPE] in [ADJOINT_CALC, ADJREAL_CALC] def realCalculationRequested(cs): """Return true if a real calculation is requested based on the ``CONF_NEUTRONICS_TYPE`` type setting.""" from armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE return cs[CONF_NEUTRONICS_TYPE] in ["real", "both"] class LatticePhysicsFrequency(IntEnum): """ Enumeration for lattice physics update frequency options. NEVER = never automatically trigger lattice physics (a custom script could still trigger it) BOL = Beginning-of-life (c0n0) BOC = Beginning-of-cycle (c*n0) everyNode = Every interaction node (c*n*) firstCoupledIteration = every node + the first coupled iteration at each node all = every node + every coupled iteration Notes ----- firstCoupledIteration only updates the cross sections during the first coupled iteration, but not on any subsequent iterations. This may be an appropriate approximation in some cases to save compute time, but each individual user should give careful consideration to whether this is the behavior they want for a particular application. 
The main purpose of this setting is to capture a large change in temperature distribution when running a snapshot at a different power/flow condition than the original state being loaded from the database. """ never = 0 BOL = 1 BOC = 2 everyNode = 3 firstCoupledIteration = 4 all = 5 ================================================ FILE: armi/physics/neutronics/const.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Constants and Enums. In an independent file to minimize circular imports. """ CONF_CROSS_SECTION = "crossSectionControl" # # FAST_FLUX_THRESHOLD_EV is the energy threshold above which neutrons are considered "fast" [eV] # FAST_FLUX_THRESHOLD_EV = 100000.0 # eV # CROSS SECTION LIBRARY GENERATION CONSTANTS MAXIMUM_XS_LIBRARY_ENERGY = 1.4190675e7 # eV ULTRA_FINE_GROUP_LETHARGY_WIDTH = 1.0 / 120.0 # LOWEST_ENERGY_EV cannot be zero due to integrating lethargy, and lethargy is undefined at 0.0 # The lowest lower boundary of many group structures such as any WIMS, SCALE or CASMO # is 1e-5 eV, therefore it is chosen here. This number must be lower than all of the # defined group structures. The chosen 1e-5 eV is rather arbitrary but expected to be low # enough to support other group structures. For fast reactors, there will be # no sensitivity at all to this value since there is no flux in this region. 
LOWEST_ENERGY_EV = 1.0e-5 # Highest energy will typically depend on what physics code is being run, but this is # a decent round number to use. HIGH_ENERGY_EV = 1.5e07 # Particle types constants GAMMA = "Gamma" NEUTRON = "Neutron" NEUTRONGAMMA = "Neutron and Gamma" # Constants for neutronics setting controlling saving of files after neutronics calculation # See setting 'neutronicsOutputsToSave' ALL = "All" RESTARTFILES = "Restart files" INPUTOUTPUT = "Input/Output" FLUXFILES = "Flux files" ================================================ FILE: armi/physics/neutronics/crossSectionGroupManager.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Cross section group manager handles burnup-dependent properties of microscopic cross sections. Blocks are specified to be in a certain *cross section type* and *burnup group*. Together, these form the *cross section group*. By advancing blocks by their burnup into different groups, we capture some of the physical effects related to depletion. XS types are typically single capital letters like A BU groups are also capital letters. A XS group of AB is in XS type ``A`` and burnup group ``B``. This module groups the blocks according to their XS groups and can determine which block is to be deemed **representative** of an entire set of blocks in a particular xs group. 
Then the representative block is sent to a lattice physics kernel for actual physics calculations. Generally, the cross section manager is a attribute of the lattice physics code interface Examples -------- csm = CrossSectionGroupManager() csm._setBuGroupBounds(cs['buGroups']) csm._setTempGroupBounds(cs['tempGroups']) # or empty list csm._addXsGroupsFromBlocks(blockList) csm.createRepresentativeBlocks() representativeBlockList = csm.representativeBlocks.values() blockThatRepresentsBA = csm.representativeBlocks['BA'] The class diagram is provided in `xsgm-class-diagram`_ .. _xsgm-class-diagram: .. pyreverse:: armi.physics.neutronics.crossSectionGroupManager :align: center :alt: XSGM class diagram :width: 90% Class inheritance diagram for :py:mod:`crossSectionGroupManager`. """ import collections import copy import os import string import sys import numpy as np from armi import context, interfaces, runLog from armi.physics.neutronics import LatticePhysicsFrequency from armi.physics.neutronics.const import CONF_CROSS_SECTION from armi.reactor import flags from armi.reactor.components import basicShapes from armi.reactor.flags import Flags from armi.utils import safeCopy from armi.utils.units import C_TO_K, TRACE_NUMBER_DENSITY ORDER = interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.CROSS_SECTIONS def describeInterfaces(cs): """Function for exposing interface(s) to other code.""" from armi.physics.neutronics.settings import CONF_NEUTRONICS_KERNEL if "MCNP" not in cs[CONF_NEUTRONICS_KERNEL]: # MCNP does not use CSGM return (CrossSectionGroupManager, {}) return None _ALLOWABLE_XS_TYPE_LIST = list(string.ascii_uppercase + string.ascii_lowercase) def getXSTypeNumberFromLabel(xsTypeLabel: str) -> int: """ Convert a XSID label (e.g. 'AA') to an integer. Useful for visualizing XS type in XTVIEW. 2-digit labels are supported when there is only one burnup group. 
""" return int("".join(["{:02d}".format(ord(si)) for si in xsTypeLabel])) def getXSTypeLabelFromNumber(xsTypeNumber: int) -> str: """ Convert a XSID label (e.g. 65) to an XS label (e.g. 'A'). Useful for visualizing XS type in XTVIEW. 2-digit labels are supported when there is only one burnup group. """ try: if xsTypeNumber > ord("Z"): # two digit. Parse return chr(int(str(xsTypeNumber)[:2])) + chr(int(str(xsTypeNumber)[2:])) elif xsTypeNumber < ord("A"): raise ValueError( f"Cannot convert invalid xsTypeNumber `{xsTypeNumber}` to char. " "The number must be >= 65 (corresponding to 'A')." ) else: return chr(xsTypeNumber) except ValueError: runLog.error("Error converting {} to label.".format(xsTypeNumber)) raise def _checkConsistentNuclides(thisComp, repComp): """ Check that thisComp has the same set of nuclides as the analogous component in the representative block. This check is somewhat permissive in that it allows for the two components to differ in nuclides where one of them is at a zero number density. Warning ------- This only checks ``consistentNucs`` for ones that are important in SFRs. 
""" consistentNucs = {"PU239", "U238", "U235", "U234", "FE56", "NA23", "O16"} # ignore anything with zero number density theseNucs = set(nuc for nuc, ndens in thisComp.getNumberDensities().items() if ndens > 0.0) thoseNucs = set(nuc for nuc, ndens in repComp.getNumberDensities().items() if ndens > 0.0) # in the nuclide list of the component, but at a number density of 0.0 # treat this more permissively -- i.e., it could be considered as either having or not having it theseNucsAtZero = set(nuc for nuc, ndens in thisComp.getNumberDensities().items() if ndens == 0.0) thoseNucsAtZero = set(nuc for nuc, ndens in repComp.getNumberDensities().items() if ndens == 0.0) # check for any differences between which `consistentNucs` the components have diffNucsNonZero = theseNucs.symmetric_difference(thoseNucs).intersection(consistentNucs) diffNucsAtZero = theseNucsAtZero.symmetric_difference(thoseNucsAtZero).intersection(consistentNucs) diffNucs = diffNucsNonZero - diffNucsAtZero if diffNucs: raise ValueError( f"Component {thisComp} in block {repComp} and component {thisComp} in block {thisComp.parent} are in the " f"same location, but nuclides differ by {diffNucs}. \n{theseNucs} \n{thoseNucs}" ) class BlockCollection(list): """ Controls which blocks are representative of a particular cross section type/BU group. This is a list with special methods. 
""" def __init__(self, allNuclidesInProblem, validBlockTypes=None, averageByComponent=False): list.__init__(self) self.allNuclidesInProblem = allNuclidesInProblem self.weightingParam = None self.averageByComponent = averageByComponent # allowed to be independent of fuel component temperatures b/c Doppler self.avgNucTemperatures = {} self._validRepresentativeBlockTypes = None if validBlockTypes: self._validRepresentativeBlockTypes = [] for t in validBlockTypes: self._validRepresentativeBlockTypes.append(Flags.fromString(t)) def __repr__(self): return "<{} with {} blocks>".format(self.__class__.__name__, len(self)) def _getNewBlock(self): """ Create a new block instance. Notes ----- Should only be used by average because of name (which may not matter) """ newBlock = copy.deepcopy(self.getCandidateBlocks()[0]) newBlock.name = "AVG_" + newBlock.getMicroSuffix() return newBlock def createRepresentativeBlock(self): """Generate a block that best represents all blocks in group.""" self._checkValidWeightingFactors() representativeBlock = self._makeRepresentativeBlock() return representativeBlock def _makeRepresentativeBlock(self): raise NotImplementedError def _checkValidWeightingFactors(self): """ Verify the validity of the weighting parameter. .. warning:: Don't mix unweighted blocks (flux=0) w/ weighted ones """ if self.weightingParam is None: weights = [0.0] * len(self.getCandidateBlocks()) else: weights = [block.p[self.weightingParam] for block in self.getCandidateBlocks()] anyNonZeros = any(weights) if anyNonZeros and not all(weights): # we have at least one non-zero entry and at least one zero. This is bad. 
# find the non-zero ones for debugging zeros = [block for block in self if not block.p[self.weightingParam]] runLog.error("Blocks with zero `{0}` include: {1}".format(self.weightingParam, zeros)) raise ValueError( "{0} has a mixture of zero and non-zero weighting factors (`{1}`)\nSee stdout for details".format( self, self.weightingParam ) ) def calcAvgNuclideTemperatures(self): r""" Calculate the average nuclide temperatures in this collection based on the blocks in the collection. If a nuclide is in multiple components, that's taken into consideration. .. math:: T = \frac{\sum{n_i v_i T_i}}{\sum{n_i v_i}} where :math:`n_i` is a number density, :math:`v_i` is a volume, and :math:`T_i` is a temperature """ self.avgNucTemperatures = {} nvt, nv = self._getNucTempHelper() for i, nuclide in enumerate(self.allNuclidesInProblem): nvtCurrent = nvt[i] nvCurrent = nv[i] avgTemp = 0.0 if nvCurrent == 0.0 else nvtCurrent / nvCurrent self.avgNucTemperatures[nuclide] = avgTemp def _getNucTempHelper(self): """ Get temperature averaging numerator and denominator for block collection. This is abstract; you must override it. """ raise NotImplementedError def getWeight(self, block): """Get value of weighting function for this block.""" vol = block.getVolume() or 1.0 if not self.weightingParam: weight = 1.0 else: # don't return 0 weight = block.p[self.weightingParam] or 1.0 return weight * vol def getCandidateBlocks(self): """ Get blocks in this collection that are the valid representative type. Often, peripheral non-fissile blocks (reflectors, control, shields) need cross sections but cannot produce them alone. You can approximate their cross sections by placing them in certain cross section groups. However, we do not want these blocks to be included in the spectrum calculations that produce cross sections. Therefore the subset of valid representative blocks are used to compute compositions, temperatures, etc. .. 
tip:: The proper way to treat non-fuel blocks is to apply a leakage spectrum from fuel onto them. """ return [b for b in self if b.hasFlags(self._validRepresentativeBlockTypes)] def _calcWeightedBurnup(self): """ For a blockCollection that represents fuel, calculate the weighted average burnup. Notes ----- - Only used for logging purposes - Burnup needs to be weighted by heavy metal mass instead of volume """ weightedBurnup = 0.0 totalWeight = 0.0 for b in self: # self.getWeight(b) incorporates the volume as does mass, so divide by volume not to double-count weighting = b.p.massHmBOL * self.getWeight(b) / b.getVolume() totalWeight += weighting weightedBurnup += weighting * b.p.percentBu return 0.0 if totalWeight == 0.0 else weightedBurnup / totalWeight class MedianBlockCollection(BlockCollection): """Returns the median burnup block. This is a simple and often accurate approximation.""" def _makeRepresentativeBlock(self): """Get the median burnup block.""" medianBlock = self._getMedianBlock() # copy so we can adjust LFPs w/o changing the global ones newBlock = copy.deepcopy(medianBlock) lfpCollection = medianBlock.getLumpedFissionProductCollection() if lfpCollection: lfpCollection = lfpCollection.duplicate() lfpCollection.setGasRemovedFrac(newBlock.p.gasReleaseFraction) newBlock.setLumpedFissionProducts(lfpCollection) else: runLog.warning("Representative block {0} has no LFPs".format(medianBlock)) self.calcAvgNuclideTemperatures() return newBlock def _getNucTempHelper(self): """ Return the Median block nuclide temperature terms. In this case, there's only one block to average, so return its averaging terms. See Also -------- calcAvgNuclideTemperatures """ medianBlock = self._getMedianBlock() return getBlockNuclideTemperatureAvgTerms(medianBlock, self.allNuclidesInProblem) def _getMedianBlock(self): """ Return the median burnup Block. 
Build list of items for each block when sorted gives desired order Last item in each tuple is always the block itself (for easy retrieval). For instance, if you want the median burnup, this list would contain tuples of (burnup, blockName, block). Blockname is included so the order is consistent between runs when burnups are equal (e.g. 0). """ info = [] for b in self.getCandidateBlocks(): info.append((b.p.percentBu * self.getWeight(b), b.getName(), b)) info.sort() medianBlockData = info[len(info) // 2] return medianBlockData[-1] class AverageBlockCollection(BlockCollection): """ Block collection that builds a new block based on others in collection. Averages number densities, fission product yields, and fission gas removal fractions. .. impl:: Create representative blocks using volume-weighted averaging. :id: I_ARMI_XSGM_CREATE_REPR_BLOCKS0 :implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS This class constructs new blocks from an existing block list based on a volume-weighted average. Inheriting functionality from the abstract :py:class:`Reactor <armi.physics.neutronics.crossSectionGroupManager.BlockCollection>` object, this class will construct representative blocks using averaged parameters of all blocks in the given collection. Number density averages can be computed at a component level or at a block level by default. Average nuclide temperatures and burnup are also included when constructing a representative block. 
""" def _makeRepresentativeBlock(self): """Generate a block that best represents all blocks in group.""" newBlock = self._getNewBlock() lfpCollection = self._getLFP() newBlock.setLumpedFissionProducts(lfpCollection) # check if components are similar if self._performAverageByComponent(): # set number densities and temperatures on a component basis for compIndex, c in enumerate(sorted(newBlock.getComponents())): c.setNumberDensities(self._getAverageComponentNumberDensities(compIndex)) c.temperatureInC = self._getAverageComponentTemperature(compIndex) else: newBlock.setNumberDensities(self._getAverageNumberDensities()) newBlock.p.percentBu = self._calcWeightedBurnup() newBlock.clearCache() self.calcAvgNuclideTemperatures() return newBlock def _getAverageNumberDensities(self): """ Get weighted average number densities of the collection. Returns ------- numberDensities : dict nucName, ndens data (atoms/bn-cm) """ nuclides = self.allNuclidesInProblem blocks = self.getCandidateBlocks() weights = np.array([self.getWeight(b) for b in blocks]) weights /= weights.sum() # normalize by total weight ndens = weights.dot([b.getNuclideNumberDensities(nuclides) for b in blocks]) return dict(zip(nuclides, ndens)) def _getLFP(self): """Find lumped fission product collection.""" b = self.getCandidateBlocks()[0] return b.getLumpedFissionProductCollection() def _getNucTempHelper(self): """All candidate blocks are used in the average.""" nvt = np.zeros(len(self.allNuclidesInProblem)) nv = np.zeros(len(self.allNuclidesInProblem)) for block in self.getCandidateBlocks(): wt = self.getWeight(block) nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(block, self.allNuclidesInProblem) nvt += nvtBlock * wt nv += nvBlock * wt return nvt, nv def _getAverageComponentNumberDensities(self, compIndex): """ Get weighted average number densities of a component in the collection. 
Returns ------- numberDensities : dict nucName, ndens data (atoms/bn-cm) """ blocks = self.getCandidateBlocks() weights = np.array([self.getWeight(b) for b in blocks]) weights /= weights.sum() # normalize by total weight components = [sorted(b.getComponents())[compIndex] for b in blocks] nuclides = self._getAllNucs(components) ndens = weights.dot([c.getNuclideNumberDensities(nuclides) for c in components]) return dict(zip(nuclides, ndens)) def _getAverageComponentTemperature(self, compIndex): """ Get weighted average component temperature for the collection. Notes ----- Weighting is both by the block weight within the collection and the relative mass of the Component. The block weight is already scaled by the block volume, so we need to pull that out of the block weighting because it would effectively be double-counted in the component mass. b.getHeight() is proportional to block volume, so it is used here as a computationally cheaper proxy for scaling by block volume. Returns ------- numberDensities : dict nucName, ndens data (atoms/bn-cm) """ blocks = self.getCandidateBlocks() weights = np.array([self.getWeight(b) / b.getHeight() for b in blocks]) weights /= weights.sum() # normalize by total weight components = [sorted(b.getComponents())[compIndex] for b in blocks] weightedAvgComponentMass = sum(w * c.getMass() for w, c in zip(weights, components)) if weightedAvgComponentMass == 0.0: # if there is no component mass (e.g., gap), do a regular average return np.mean(np.array([c.temperatureInC for c in components])) else: return ( weights.dot(np.array([c.temperatureInC * c.getMass() for c in components])) / weightedAvgComponentMass ) def _performAverageByComponent(self): """ Check if block collection averaging can/should be performed by component. If the components of blocks in the collection are similar and the user has requested Component-level averaging, return True. Otherwise, return False. 
""" if not self.averageByComponent: return False else: return self._checkBlockSimilarity() def _checkBlockSimilarity(self): """ Check if blocks in the collection have similar components. If the components of blocks in the collection are similar and the user has requested Component-level averaging, return True. Otherwise, return False. """ cFlags = dict() for b in self.getCandidateBlocks(): cFlags[b] = [c.p.flags for c in sorted(b.getComponents())] refB = b refFlags = cFlags[refB] for b, compFlags in cFlags.items(): for c, refC in zip(compFlags, refFlags): if c != refC: runLog.warning( "Non-matching block in AverageBlockCollection!\n" f"{refC} component flags in {refB} does not match {c} in {b}.\n" f"Number densities will be smeared in representative block." ) return False else: return True @staticmethod def _getAllNucs(components): """Iterate through components and get all unique nuclides.""" nucs = set() for c in components: nucs = nucs.union(c.getNuclides()) return sorted(list(nucs)) def getBlockNuclideTemperature(block, nuclide): """Return the average temperature for 1 nuclide.""" tempIntegratedVolume, volume = getBlockNuclideTemperatureAvgTerms(block, [nuclide]) return tempIntegratedVolume / volume if volume > 0 else 0.0 def getBlockNuclideTemperatureAvgTerms(block, allNucNames): """ Compute terms (numerator, denominator) of average for this block. This volume-weights the densities by component volume fraction. It's important to count zero-density nuclides (i.e. ones like AM242 that are expected to build up) as trace values at the proper component temperatures. 
""" def getNumberDensitiesWithTrace(component, allNucNames): """Needed to make sure temperature of 0-density nuclides in fuel get fuel temperature.""" if component.p.nuclides is None: return [0.0 for _nuc in allNucNames] allByteNucs = [nucName.encode() for nucName in allNucNames] ndens = [] nucCopy = np.array(component.p.nuclides) nDensCopy = np.array(component.p.numberDensities) reverseIndex = {nuc: i for i, nuc in enumerate(nucCopy)} for nuc in allByteNucs: i = reverseIndex.get(nuc, -1) if i >= 0: ndens.append(max(nDensCopy[i], TRACE_NUMBER_DENSITY)) else: ndens.append(0.0) return ndens vol = block.getVolume() components, volFracs = zip(*block.getVolumeFractions()) # D = CxN matrix of number densities ndens = np.array([getNumberDensitiesWithTrace(c, allNucNames) for c in components]) # C-length temperature array temperatures = np.array([c.temperatureInC for c in components]) # multiply each component's values by volume frac, now NxC nvBlock = ndens.T * np.array(volFracs) * vol nvt = sum((nvBlock * temperatures).T) # N-length array summing over components. nv = sum(nvBlock.T) # N-length array return nvt, nv class CylindricalComponentsAverageBlockCollection(AverageBlockCollection): """ Creates a representative block for the purpose of cross section generation with a one- dimensional cylindrical model. .. impl:: Create representative blocks using custom cylindrical averaging. :id: I_ARMI_XSGM_CREATE_REPR_BLOCKS1 :implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS This class constructs representative blocks based on a volume-weighted average using cylindrical blocks from an existing block list. Inheriting functionality from the abstract :py:class:`Reactor <armi.physics.neutronics.crossSectionGroupManager.BlockCollection>` object, this class will construct representative blocks using averaged parameters of all blocks in the given collection. Number density averages are computed at a component level. 
        Nuclide temperatures from a median block-average temperature are used and the average
        burnup is evaluated across all blocks in the block list.

    Notes
    -----
    When generating the representative block within this collection, the geometry is checked
    against all other blocks to ensure that the number of components are consistent. This
    implementation is intended to be opinionated, so if a user attempts to put blocks that have
    geometric differences then this will fail.

    This selects a representative block based on the collection of candidates based on the
    median Block-average temperatures as an assumption.
    """

    def _getNewBlock(self):
        newBlock = copy.deepcopy(self._selectCandidateBlock())
        newBlock.name = "1D_CYL_AVG_" + newBlock.getMicroSuffix()
        return newBlock

    def _selectCandidateBlock(self):
        """Selects the candidate block with the median block-average temperature."""
        info = []
        for b in self.getCandidateBlocks():
            info.append((b.getAverageTempInC(), b.getName(), b))
        info.sort()
        # middle entry of the sorted (temperature, name, block) tuples
        medianBlockData = info[len(info) // 2]
        return medianBlockData[-1]

    def _makeRepresentativeBlock(self):
        """Build a representative fuel block based on component number densities."""
        repBlock = self._getNewBlock()
        bWeights = [self.getWeight(b) for b in self.getCandidateBlocks()]
        repBlock.p.percentBu = self._calcWeightedBurnup()
        componentsInOrder = self._orderComponentsInGroup(repBlock)

        for i, (c, allSimilarComponents) in enumerate(zip(sorted(repBlock), componentsInOrder)):
            allNucsNames, densities = self._getAverageComponentNucs(allSimilarComponents, bWeights)
            for nuc, aDensity in zip(allNucsNames, densities):
                c.setNumberDensity(nuc, aDensity)
            c.temperatureInC = self._getAverageComponentTemperature(i)
        repBlock.clearCache()
        self.calcAvgNuclideTemperatures()
        return repBlock

    @staticmethod
    def _checkComponentConsistency(b, repBlock):
        """
        Verify that all components being homogenized have same multiplicity and nuclides.

        Raises
        ------
        ValueError
            When the components in a candidate block do not align with the components in the
            representative Block. This check includes component area, component multiplicity,
            and nuclide composition.
        """
        if len(b) != len(repBlock):
            raise ValueError(f"Blocks {b} and {repBlock} have differing number of components and cannot be homogenized")

        # NOTE: We are using Fe-56 as a proxy for structure and Na-23 as proxy for coolant, which
        # is undesirably SFR-centric. This should be generalized in the future, if possible.
        for c, repC in zip(sorted(b), sorted(repBlock)):
            _checkConsistentNuclides(c, repC)

            if c.p.mult != repC.p.mult:
                raise ValueError(
                    f"Component {repC} in block {repBlock} and component {c} in block {b} must have the same "
                    f"multiplicity, but they have {repC.p.mult} and {c.p.mult}, respectively."
                )

    def _getAverageComponentNucs(self, components, bWeights):
        """Compute average nuclide densities by block weights and component area fractions."""
        allNucNames = self._getAllNucs(components)
        densities = np.zeros(len(allNucNames))
        totalWeight = 0.0
        for c, bWeight in zip(components, bWeights):
            # per-component weight is the block weight scaled by the component's area
            weight = bWeight * c.getArea()
            totalWeight += weight
            densities += weight * np.array(c.getNuclideNumberDensities(allNucNames))

        if totalWeight > 0.0:
            weightedDensities = densities / totalWeight
        else:
            weightedDensities = np.zeros_like(densities)
        return allNucNames, weightedDensities

    def _orderComponentsInGroup(self, repBlock):
        """Order the components based on dimension and material type within the representative Block.
""" for b in self.getCandidateBlocks(): self._checkComponentConsistency(b, repBlock) componentLists = [list(sorted(b)) for b in self.getCandidateBlocks()] return [list(comps) for comps in zip(*componentLists)] def _getNucTempHelper(self): """All candidate blocks are used in the average.""" nvt = np.zeros(len(self.allNuclidesInProblem)) nv = np.zeros(len(self.allNuclidesInProblem)) for block in self.getCandidateBlocks(): wt = self.getWeight(block) nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(block, self.allNuclidesInProblem) nvt += nvtBlock * wt nv += nvBlock * wt return nvt, nv class CylindricalComponentsDuctHetAverageBlockCollection(CylindricalComponentsAverageBlockCollection): """ Creates a representative block for the purpose of cross section generation with a one- dimensional cylindrical model where all material inside the duct is homogenized. .. impl:: Create partially heterogeneous representative blocks. :id: I_ARMI_XSGM_CREATE_REPR_BLOCKS2 :implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS This class constructs representative blocks based on a volume-weighted average using cylindrical blocks from an existing block list. Inheriting functionality from the abstract :py:class:`Reactor <armi.physics.neutronics.crossSectionGroupManager.BlockCollection>` object, this class will construct representative blocks using averaged parameters of all blocks in the given collection. Number density averages are computed at a component level. Nuclide temperatures from a median block-average temperature are used and the average burnup is evaluated across all blocks in the block list. The average nuclide temperatures are calculated only for the homogenized region inside of the duct. For the non-homogenized regions, the MC2 writer uses the component temperatures. Notes ----- The representative block for this collection is the same as the parent. 
The only difference between the two collection types is that this collection calculates average nuclide temperatures based only on the components that are inside of the duct. """ def _getNewBlock(self): newBlock = copy.deepcopy(self._selectCandidateBlock()) newBlock.name = "1D_CYL_DUCT_HET_AVG_" + newBlock.getMicroSuffix() return newBlock def _makeRepresentativeBlock(self): """Build a representative fuel block based on component number densities.""" self.calcAvgNuclideTemperatures() return CylindricalComponentsAverageBlockCollection._makeRepresentativeBlock(self) def _getNucTempHelper(self): """All candidate blocks are used in the average.""" from armi.reactor.converters.blockConverters import stripComponents nvt = np.zeros(len(self.allNuclidesInProblem)) nv = np.zeros(len(self.allNuclidesInProblem)) for block in self.getCandidateBlocks(): wt = self.getWeight(block) # remove the duct and intercoolant from the block before # calculating average nuclide temps newBlock, _mixtureFlags = stripComponents(block, Flags.DUCT) nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(newBlock, self.allNuclidesInProblem) nvt += nvtBlock * wt nv += nvBlock * wt return nvt, nv class SlabComponentsAverageBlockCollection(BlockCollection): """ Creates a representative 1D slab block. Notes ----- - Ignores lumped fission products since there is no foreseeable need for burn calculations in 1D slab geometry since it is used for low power neutronic validation. - Checks for consistent component dimensions for all blocks in a group and then creates a new Block. - Iterates through components of all blocks and calculates component average number densities. This calculation takes the first component of each block, averages the number densities, and applies this to the number density to the representative block. 
""" def _getNewBlock(self): newBlock = copy.deepcopy(self.getCandidateBlocks()[0]) newBlock.name = "1D_SLAB_AVG_" + newBlock.getMicroSuffix() return newBlock def _makeRepresentativeBlock(self): """Build a representative fuel block based on component number densities.""" repBlock = self._getNewBlock() bWeights = [self.getWeight(b) for b in self.getCandidateBlocks()] repBlock.p.percentBu = self._calcWeightedBurnup() componentsInOrder = self._orderComponentsInGroup(repBlock) for c, allSimilarComponents in zip(repBlock, componentsInOrder): allNucsNames, densities = self._getAverageComponentNucs(allSimilarComponents, bWeights) for nuc, aDensity in zip(allNucsNames, densities): c.setNumberDensity(nuc, aDensity) newBlock = self._removeLatticeComponents(repBlock) return newBlock def _getNucTempHelper(self): raise NotImplementedError @staticmethod def _getAllNucs(components): """Iterate through components and get all unique nuclides.""" nucs = set() for c in components: nucs = nucs.union(c.getNuclides()) return sorted(list(nucs)) @staticmethod def _checkComponentConsistency(b, repBlock, components=None): """ Verify that all components being homogenized are rectangular and have consistent dimensions. Raises ------ ValueError When the components in a candidate block do not align with the components in the representative block. This check includes component area, component multiplicity, and nuclide composition. TypeError When the shape of the component is not a rectangle. """ comps = b if components is None else components for c, repC in zip(comps, repBlock): if not isinstance(c, basicShapes.Rectangle): raise TypeError( "The shape of component {} in block {} is invalid and must be a rectangle.".format(c, b) ) compString = "Component {} in block {} and component {} in block {}".format(repC, repBlock, c, b) if c.getArea() != repC.getArea(): raise ValueError( "{} are in the same location, but have differing thicknesses. Check that the " "thicknesses are defined correctly. 
Note: This could also be due to " "thermal expansion".format(compString) ) _checkConsistentNuclides(c, repC) if c.p.mult != repC.p.mult: raise ValueError("{} must have the same multiplicity to homogenize".format(compString)) @staticmethod def _reverseComponentOrder(block): """Move the lattice component to the end of the components list.""" latticeComponents = [c for c in block if c.isLatticeComponent()] components = [c for c in reversed(block) if not c.isLatticeComponent()] if len(latticeComponents) > 1: raise ValueError( "Block {} contains multiple `lattice` components: {}. Remove the additional " "lattice components in the reactor blueprints.".format(block, latticeComponents) ) components.append(latticeComponents[0]) return components @staticmethod def _removeLatticeComponents(repBlock): """ Remove the lattice component from the representative block. Notes ----- - This component does not serve any purpose for XS generation as it contains void material with zero area. - Removing this component does not modify the blocks within the reactor. 
""" for c in repBlock.iterComponents(): if c.isLatticeComponent(): repBlock.remove(c) return repBlock def _getAverageComponentNucs(self, components, bWeights): """Compute average nuclide densities by block weights and component area fractions.""" allNucNames = self._getAllNucs(components) densities = np.zeros(len(allNucNames)) totalWeight = 0.0 for c, bWeight in zip(components, bWeights): weight = bWeight * c.getArea() totalWeight += weight densities += weight * np.array(c.getNuclideNumberDensities(allNucNames)) if totalWeight > 0.0: weightedDensities = densities / totalWeight else: weightedDensities = np.zeros_like(densities) return allNucNames, weightedDensities def _orderComponentsInGroup(self, repBlock): """Order the components based on dimension and material type within the representative block.""" orderedComponents = [[] for _ in repBlock] for b in self.getCandidateBlocks(): if len(b) != len(repBlock): raise ValueError( "Blocks {} and {} have differing number of components and cannot be homogenized".format(b, repBlock) ) try: self._checkComponentConsistency(b, repBlock) componentsToAdd = [c for c in b] except ValueError: runLog.extra( "Checking if components in block {} are in the reverse order of the components in the " "representative block {}".format(b, repBlock) ) reversedComponentOrder = self._reverseComponentOrder(b) self._checkComponentConsistency(b, repBlock, components=reversedComponentOrder) componentsToAdd = [c for c in reversedComponentOrder] for i, c in enumerate(componentsToAdd): orderedComponents[i].append(c) # group similar components return orderedComponents class FluxWeightedAverageBlockCollection(AverageBlockCollection): """Flux-weighted AverageBlockCollection.""" def __init__(self, *args, **kwargs): AverageBlockCollection.__init__(self, *args, **kwargs) self.weightingParam = "flux" class CrossSectionGroupManager(interfaces.Interface): """ Looks at the reactor and updates burnup group information based on current burnup. 
Contains a :py:class:`BlockCollection` for each cross section group. Notes ----- The representative blocks created in the CrossSectionGroupManager are ordered alphabetically by key. """ name = "xsGroups" _REPR_GROUP = "represented" _NON_REPR_GROUP = "non-represented" _PREGEN_GROUP = "pre-generated" def __init__(self, r, cs): interfaces.Interface.__init__(self, r, cs) self._buGroupBounds = [] self._tempGroupBounds = [] self.representativeBlocks = collections.OrderedDict() self.avgNucTemperatures = {} # this turns off updates for when core changes are made, but dont want to re-evaluate XS # for example if lattice physics was only once per cycle we might not want to re-evaluate groups self._envGroupUpdatesEnabled = True self._setBuGroupBounds(self.cs["buGroups"]) self._setTempGroupBounds(self.cs["tempGroups"]) self._unrepresentedXSIDs = [] def interactBOL(self): """Called at the Beginning-of-Life of a run, before any cycles start. .. impl:: The lattice physics interface and cross-section group manager are connected at BOL. :id: I_ARMI_XSGM_FREQ0 :implements: R_ARMI_XSGM_FREQ This method sets the cross-section block averaging method and and logic for whether all blocks in a cross section group should be used when generating a representative block. Furthermore, if the control logic for lattice physics frequency updates is set at beginning-of-life (`BOL`) through the :py:class:`LatticePhysicsInterface <armi.physics.neutronics.latticePhysics>`, the cross-section group manager will construct representative blocks for each cross-section IDs at the beginning of the reactor state. 
""" # now that all cs settings are loaded, apply defaults to compound XS settings from armi.physics.neutronics.settings import ( CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION, CONF_LATTICE_PHYSICS_FREQUENCY, CONF_XS_BLOCK_REPRESENTATION, ) self.cs[CONF_CROSS_SECTION].setDefaults( self.cs[CONF_XS_BLOCK_REPRESENTATION], self.cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self._latticePhysicsFrequency = LatticePhysicsFrequency[self.cs[CONF_LATTICE_PHYSICS_FREQUENCY]] if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOL: self.createRepresentativeBlocks() def interactBOC(self, cycle=None): """ Update representative blocks and block burnup groups. .. impl:: The lattice physics interface and cross-section group manager are connected at BOC. :id: I_ARMI_XSGM_FREQ1 :implements: R_ARMI_XSGM_FREQ This method updates representative blocks and block burnups at the beginning-of-cycle for each cross-section ID if the control logic for lattice physics frequency updates is set at beginning-of-cycle (`BOC`) through the :py:class:`LatticePhysicsInterface <armi.physics.neutronics.latticePhysics>`. At the beginning-of-cycle, the cross-section group manager will construct representative blocks for each cross-section IDs for the current reactor state. Notes ----- The block list each each block collection cannot be emptied since it is used to derive nuclide temperatures. """ if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOC: self.createRepresentativeBlocks() def interactEOC(self, cycle=None): """EOC interaction. Clear out big dictionary of all blocks to avoid memory issues and out-of-date representers. """ self.clearRepresentativeBlocks() def interactEveryNode(self, cycle=None, tn=None): """Interaction at every time node. .. impl:: The lattice physics interface and cross-section group manager are connected at every time node. 
:id: I_ARMI_XSGM_FREQ2 :implements: R_ARMI_XSGM_FREQ This method updates representative blocks and block burnups at every node for each cross-section ID if the control logic for lattices physics frequency updates is set for every node (`everyNode`) through the :py:class:`LatticePhysicsInterface <armi.physics.neutronics.latticePhysics>`. At every node, the cross-section group manager will construct representative blocks for each cross-section ID in the current reactor state. """ if self._latticePhysicsFrequency >= LatticePhysicsFrequency.everyNode: self.createRepresentativeBlocks() def interactCoupled(self, iteration): """Update cross-section groups on each physics coupling iteration to get latest temperatures. .. impl:: The lattice physics interface and cross-section group manager are connected during coupling. :id: I_ARMI_XSGM_FREQ3 :implements: R_ARMI_XSGM_FREQ This method updates representative blocks and block burnups at every node and the first coupled iteration for each cross-section ID if the control logic for lattices physics frequency updates is set for the first coupled iteration (``firstCoupledIteration``) through the :py:class:`LatticePhysicsInterface <armi.physics.neutronics.latticePhysics>`. The cross-section group manager will construct representative blocks for each cross-section ID at the first iteration of every time node. Notes ----- Updating the cross-section on only the first (i.e., iteration == 0) timenode can be a reasonable approximation to get new cross sections with some temperature updates but not have to run lattice physics on each coupled iteration. If the user desires to have the cross sections updated with every coupling iteration, the ``latticePhysicsFrequency: all`` option. 
See Also -------- :py:meth:`~armi.physics.neutronics.latticePhysics.latticePhysics.LatticePhysicsInterface.interactCoupled` """ if ( iteration == 0 and self._latticePhysicsFrequency == LatticePhysicsFrequency.firstCoupledIteration ) or self._latticePhysicsFrequency == LatticePhysicsFrequency.all: self.createRepresentativeBlocks() def clearRepresentativeBlocks(self): """Clear the representative blocks.""" runLog.extra("Clearing representative blocks") self.representativeBlocks = collections.OrderedDict() self.avgNucTemperatures = {} def _setBuGroupBounds(self, buGroupBounds): """ Set the burnup group structure. Parameters ---------- buGroupBounds : list List of upper burnup values in percent. Raises ------ ValueError If the provided burnup groups are invalid """ lastBu = 0.0 # validate structure for upperBu in buGroupBounds: if upperBu <= 0 or upperBu > 100: raise ValueError("Burnup group upper bound {0} is invalid".format(upperBu)) if upperBu < lastBu: raise ValueError("Burnup groups must be ascending") lastBu = upperBu self._buGroupBounds = buGroupBounds + [float("inf")] def _setTempGroupBounds(self, tempGroupBounds): """Set the temperature group structure.""" lastTemp = -C_TO_K # validate structure for upperTemp in tempGroupBounds: if upperTemp < -C_TO_K: raise ValueError("Temperature boundary is below absolute zero {0}.format(upperTemp)") if upperTemp < lastTemp: raise ValueError("Temp groups must be ascending") lastTemp = upperTemp self._tempGroupBounds = tempGroupBounds + [float("inf")] def _updateEnvironmentGroups(self, blockList): """ Update the burnup group of each block based on its burnup and temperature . If only one burnup group exists, then this is skipped so as to accommodate the possibility of 2-character xsGroup values (useful for detailed V&V models w/o depletion). 
See Also -------- armi.reactor.blocks.Block.getMicroSuffix """ if not self._envGroupUpdatesEnabled: runLog.debug("Skipping burnup group update of {0} blocks because it is disabled".format(len(blockList))) return numBuGroups = len(self._buGroupBounds) if numBuGroups == 1 and len(self._tempGroupBounds) == 1: # dont set block.p.envGroupNum since all 1 group and we want to support 2 char xsGroup return runLog.debug("Updating env groups of {0} blocks".format(len(blockList))) for block in blockList: bu = block.p.percentBu for buIndex, upperBu in enumerate(self._buGroupBounds): if bu <= upperBu: buGroupVal = buIndex tempGroupVal = 0 isotope = self._initializeXsID(block.getMicroSuffix()).xsTempIsotope if isotope and len(self._tempGroupBounds) > 1: # if statement saves this somewhat expensive calc if we are not doing temp groups tempC = getBlockNuclideTemperature(block, isotope) for tempIndex, upperTemp in enumerate(self._tempGroupBounds): if tempC <= upperTemp: tempGroupVal = tempIndex break # this ordering groups like-temperatures together in group number block.p.envGroupNum = tempGroupVal * numBuGroups + buGroupVal break def _addXsGroupsFromBlocks(self, blockCollectionsByXsGroup, blockList): """ Build all the cross section groups based on their XS type and Env group. Also ensures that their Env group is up to date with their environment. 
""" self._updateEnvironmentGroups(blockList) for b in blockList: xsID = b.getMicroSuffix() xsSettings = self._initializeXsID(xsID) if self.cs["tempGroups"] and xsSettings.blockRepresentation == MEDIAN_BLOCK_COLLECTION: runLog.warning( "Median block currently only consider median burnup block, and " "not median temperature block in group" ) blockCollectionType = blockCollectionFactory(xsSettings, self.r.blueprints.allNuclidesInProblem) group = blockCollectionsByXsGroup.get(xsID, blockCollectionType) group.append(b) blockCollectionsByXsGroup[xsID] = group return blockCollectionsByXsGroup def _initializeXsID(self, xsID): """Initialize a new xs id.""" if xsID not in self.cs[CONF_CROSS_SECTION]: runLog.debug("Initializing XS ID {}".format(xsID), single=True) return self.cs[CONF_CROSS_SECTION][xsID] def xsTypeIsPregenerated(self, xsID): """Return True if the cross sections for the given ``xsID`` is pre-generated.""" return self.cs[CONF_CROSS_SECTION][xsID].xsIsPregenerated def fluxSolutionIsPregenerated(self, xsID): """Return True if an external flux solution file for the given ``xsID`` is pre-generated.""" return self.cs[CONF_CROSS_SECTION][xsID].fluxIsPregenerated def _copyPregeneratedXSFile(self, xsID): # stop a race condition to copy files between all processors if context.MPI_RANK != 0: return for xsFileLocation, xsFileName in self._getPregeneratedXsFileLocationData(xsID): dest = os.path.join(os.getcwd(), xsFileName) runLog.extra( "Copying pre-generated XS file {} from {} for XS ID {}".format( xsFileName, os.path.dirname(xsFileLocation), xsID ) ) # Prevent copy error if the path and destination are the same. 
if xsFileLocation != dest: safeCopy(xsFileLocation, dest) def _copyPregeneratedFluxSolutionFile(self, xsID): # stop a race condition to copy files between all processors if context.MPI_RANK != 0: return fluxFileLocation, fluxFileName = self._getPregeneratedFluxFileLocationData(xsID) dest = os.path.join(os.getcwd(), fluxFileName) runLog.extra( "Copying pre-generated flux solution file {} from {} for XS ID {}".format( fluxFileName, os.path.dirname(fluxFileLocation), xsID ) ) # Prevent copy error if the path and destination are the same. if fluxFileLocation != dest: safeCopy(fluxFileLocation, dest) def _getPregeneratedXsFileLocationData(self, xsID): """ Gather the pre-generated cross section file data and check that the files exist. Notes ----- Multiple files can exist on the `file location` setting for a single XS ID. This checks that all files exist and returns a list of tuples (file path, fileName). """ fileData = [] filePaths = self.cs[CONF_CROSS_SECTION][xsID].xsFileLocation for filePath in filePaths: filePath = os.path.abspath(filePath) if not os.path.exists(filePath) or os.path.isdir(filePath): raise ValueError( "External cross section path for XS ID {} is not a valid file location {}".format(xsID, filePath) ) fileName = os.path.basename(filePath) fileData.append((filePath, fileName)) return fileData def _getPregeneratedFluxFileLocationData(self, xsID): """Gather the pre-generated flux solution file data and check that the files exist.""" filePath = self.cs[CONF_CROSS_SECTION][xsID].fluxFileLocation filePath = os.path.abspath(filePath) if not os.path.exists(filePath) or os.path.isdir(filePath): raise ValueError( "External cross section path for XS ID {} is not a valid file location {}".format(xsID, filePath) ) fileName = os.path.basename(filePath) return (filePath, fileName) def createRepresentativeBlocks(self): """Get a representative block from each cross-section ID managed here. .. 
impl:: Create collections of blocks based on cross-section type and burn-up group. :id: I_ARMI_XSGM_CREATE_XS_GROUPS :implements: R_ARMI_XSGM_CREATE_XS_GROUPS This method constructs the representative blocks and block burnups for each cross-section ID in the reactor model. Starting with the making of cross-section groups, it will find candidate blocks and create representative blocks from that selection. """ representativeBlocks = {} self.avgNucTemperatures = {} runLog.extra("Generating representative blocks for XS") blockCollectionsByXsGroup = self.makeCrossSectionGroups() for xsID, collection in blockCollectionsByXsGroup.items(): numCandidateBlocks = len(collection.getCandidateBlocks()) if self.xsTypeIsPregenerated(xsID): self._copyPregeneratedXSFile(xsID) continue if numCandidateBlocks > 0: runLog.debug("Creating representative block for {}".format(xsID)) if self.fluxSolutionIsPregenerated(xsID): self._copyPregeneratedFluxSolutionFile(xsID) reprBlock = collection.createRepresentativeBlock() representativeBlocks[xsID] = reprBlock self.avgNucTemperatures[xsID] = collection.avgNucTemperatures self.representativeBlocks = collections.OrderedDict(sorted(representativeBlocks.items())) self._checkForUnrepresentedXSIDs(blockCollectionsByXsGroup) self._modifyUnrepresentedXSIDs(blockCollectionsByXsGroup) self._summarizeGroups(blockCollectionsByXsGroup) def createRepresentativeBlocksUsingExistingBlocks(self, blockList, originalRepresentativeBlocks): """ Create a new set of representative blocks using provided blocks. This uses an input list of blocks and creates new representative blocks for these blocks based on the compositions and temperatures of their original representative blocks. Notes ----- This is required for computing Doppler, Voided-Doppler, Temperature, and Voided-Temperature reactivity coefficients, where the composition of the representative block must remain the same, but only the temperatures within the representative blocks are to be modified. 
Parameters ---------- blockList : list A list of blocks defined within the core originalRepresentativeBlocks : dict A dict of unperturbed representative blocks that the new representative blocks are formed from keys: XS group ID (e.g., "AA") values: representative block for the XS group Returns ------- blockCollectionByXsGroup : dict Mapping between XS IDs and the new block collections modifiedReprBlocks : dict Mapping between XS IDs and the new representative blocks origXSIDsFromNew : dict Mapping of original XS IDs to new XS IDs. New XS IDs are created to represent a modified state (e.g., a Doppler temperature perturbation). Raises ------ ValueError If passed list arguments are empty """ if not blockList: raise ValueError("A block list was not supplied to create new representative blocks") if not originalRepresentativeBlocks: raise ValueError( "New representative blocks cannot be created because a list of unperturbed " "representative blocks was not provided" ) newBlockCollectionsByXsGroup = collections.OrderedDict() blockCollectionByXsGroup = self.makeCrossSectionGroups() modifiedReprBlocks, origXSIDsFromNew = self._getModifiedReprBlocks(blockList, originalRepresentativeBlocks) if not modifiedReprBlocks: return None for newXSID in modifiedReprBlocks: oldXSID = origXSIDsFromNew[newXSID] oldBlockCollection = blockCollectionByXsGroup[oldXSID] # create a new block collection that inherits all of the properties # and settings from oldBlockCollection. 
validBlockTypes = oldBlockCollection._validRepresentativeBlockTypes if validBlockTypes is not None and len(validBlockTypes) > 0: validBlockTypes = [ flags._toString(Flags, flag) for flag in oldBlockCollection._validRepresentativeBlockTypes ] newBlockCollection = oldBlockCollection.__class__( oldBlockCollection.allNuclidesInProblem, validBlockTypes=validBlockTypes, averageByComponent=oldBlockCollection.averageByComponent, ) newBlockCollectionsByXsGroup[newXSID] = newBlockCollection # clean up any unrepresented XS IDs self._checkForUnrepresentedXSIDs(blockCollectionByXsGroup) self._modifyUnrepresentedXSIDs(blockCollectionByXsGroup) return newBlockCollectionsByXsGroup, modifiedReprBlocks, origXSIDsFromNew def _getModifiedReprBlocks(self, blockList, originalRepresentativeBlocks): """ Create a new representative block for each unique XS ID on blocks to be modified. Returns ------- modifiedReprBlocks : dict Mapping between the new XS IDs and the new representative blocks origXSIDsFromNew : dict Mapping between the new representative block XS IDs and the original representative block XS IDs """ modifiedBlockXSTypes = collections.OrderedDict() modifiedReprBlocks = collections.OrderedDict() origXSIDsFromNew = collections.OrderedDict() for b in blockList: origXSID = b.getMicroSuffix() # Filter out the pre-generated XS IDs if origXSID not in originalRepresentativeBlocks: if self.xsTypeIsPregenerated(origXSID): runLog.warning( "A modified representative block for XS ID `{}` cannot be created because it is " "mapped to a pre-generated cross section set. 
Please ensure that this " "approximation is valid for the analysis.".format(origXSID), single=True, ) else: origXSType = origXSID[0] if origXSType not in modifiedBlockXSTypes.keys(): nextXSType = self.getNextAvailableXsTypes(excludedXSTypes=modifiedBlockXSTypes.values())[0] modifiedBlockXSTypes[origXSType] = nextXSType newXSID = modifiedBlockXSTypes[origXSType] + origXSID[1] # New XS Type + Old Burnup Group origXSIDsFromNew[newXSID] = origXSID # Create new representative blocks based on the original XS IDs for newXSID, origXSID in origXSIDsFromNew.items(): runLog.extra( "Creating representative block `{}` with composition from representative block `{}`".format( newXSID, origXSID ) ) newXSType = newXSID[0] newReprBlock = copy.deepcopy(originalRepresentativeBlocks[origXSID]) newReprBlock.p.xsType = newXSType newReprBlock.name = "AVG_{}".format(newXSID) modifiedReprBlocks[newXSID] = newReprBlock # Update the XS types of the blocks that will be modified for b in blockList: if b.getMicroSuffix() == origXSID: b.p.xsType = newXSType # copy XS settings to new XS ID self.cs[CONF_CROSS_SECTION][newXSID] = copy.deepcopy(self.cs[CONF_CROSS_SECTION][origXSID]) self.cs[CONF_CROSS_SECTION][newXSID].xsID = newXSID return modifiedReprBlocks, origXSIDsFromNew def _checkForUnrepresentedXSIDs(self, blockCollectionsByXsGroup): """ Check for unrepresented XS IDs after self._updateEnvironmentGroups() has been called. Parameters ---------- blockCollectionsByXsGroup: dict[str, BlockCollection] Dict of BlockCollection keyed by the XS group they belong to. Notes ----- This should be run after :meth:`CrossSectionGroupManager._updateEnvironmentGroups`, which resets ``b.p.envGroup`` and can result in unrepresented cross section IDs. 
This is usually invoked as a result of a call to :meth:`CrossSectionGroupManager.makeCrossSectionGroups` """ self._unrepresentedXSIDs = [] for xsID, collection in blockCollectionsByXsGroup.items(): if self.xsTypeIsPregenerated(xsID) or len(collection.getCandidateBlocks()) > 0: continue else: runLog.debug( "No candidate blocks in group for {} (with a valid representative block flag). " "Will apply different environment group".format(xsID) ) self._unrepresentedXSIDs.append(xsID) def getNextAvailableXsTypes(self, howMany=1, excludedXSTypes=None): """Return the next however many available xs types. Parameters ---------- howMany : int, optional The number of requested xs types excludedXSTypes : list, optional A list of cross section types to exclude from using Raises ------ ValueError If there are no available XS types to be allocated """ allocatedXSTypes = set() for b in self.r.core.getBlocks(includeAll=True): allocatedXSTypes.add(b.p.xsType) if excludedXSTypes is not None: for xsType in excludedXSTypes: allocatedXSTypes.add(xsType) availableXsTypes = sorted(list(set(_ALLOWABLE_XS_TYPE_LIST).difference(allocatedXSTypes))) if len(availableXsTypes) < howMany: raise ValueError( "There are not enough available xs types. {} have been allocated, {} are available, and " "{} have been requested.".format(len(allocatedXSTypes), len(availableXsTypes), howMany) ) # check for lower-case on case-insensitive file system if sys.platform.startswith("win"): allXSTypes = allocatedXSTypes.union(set(availableXsTypes[:howMany])) allCaps = {c.capitalize() for c in allXSTypes} if len(allCaps) != len(allXSTypes): runLog.warning( "Mixing upper and lower-case XS group types on a Windows system, which is not " "case-sensitive. There is a chance that ARMI could overwrite previously " "generated XS files, which could cause mysterious and/or unpredictable errors." 
) return availableXsTypes[:howMany] def _getMissingBlueprintBlocks(self, blockCollectionsByXsGroup): """ Gets all blocks with suffixes not yet represented. (for blocks in assemblies in the blueprints but not in the core). Notes ----- Certain cases (ZPPR validation cases) need to run cross sections for assemblies not in the core to get by region cross sections and flux factors. """ missingBlueprintBlocks = [] blockList = [] for a in self.r.blueprints.assemblies.values(): blockList.extend(b for b in a) self._updateEnvironmentGroups(blockList) for b in blockList: if b.getMicroSuffix() not in blockCollectionsByXsGroup: b2 = copy.deepcopy(b) missingBlueprintBlocks.append(b2) return missingBlueprintBlocks def makeCrossSectionGroups(self): """Make cross section groups for all blocks in reactor and unrepresented blocks from blueprints.""" bCollectXSGroup = {} # clear old groups (in case some are no longer existent) bCollectXSGroup = self._addXsGroupsFromBlocks(bCollectXSGroup, self.r.core.getBlocks()) # add blocks that are defined in blueprints, but not in core bCollectXSGroup = self._addXsGroupsFromBlocks(bCollectXSGroup, self._getMissingBlueprintBlocks(bCollectXSGroup)) blockCollectionsByXsGroup = collections.OrderedDict(sorted(bCollectXSGroup.items())) return blockCollectionsByXsGroup def _getAlternateEnvGroup(self, missingXsType): """Get a substitute block to use since there are no blocks with flags for xs gen.""" for otherXsID in self.representativeBlocks: repType, repEnvGroup = otherXsID if repType == missingXsType: return repEnvGroup def _modifyUnrepresentedXSIDs(self, blockCollectionsByXsGroup): """ Adjust the xsID of blocks in the groups that are not represented. Try to just adjust the burnup group up to something that is represented (can happen to structure in AA when only AB, AC, AD still remain, but if some fresh AA happened to be added it might be needed). 
""" # No blocks in in this ID had a valid representative block flag (such as `fuel` for default), # so nothing valid to run lattice physics on... for xsID in self._unrepresentedXSIDs: missingXsType, _missingEnvGroup = xsID nonRepBlocks = blockCollectionsByXsGroup.get(xsID) if nonRepBlocks: newEnvGroup = self._getAlternateEnvGroup(missingXsType) if newEnvGroup: # there were no blocks flagged to xs gen even though there were some not suitable for # generation in the group so can't make XS and use different. runLog.warning( "Changing XSID of {0} blocks from {1} to {2}".format( len(nonRepBlocks), xsID, missingXsType[0] + newEnvGroup ) ) for b in nonRepBlocks: b.p.envGroup = newEnvGroup else: runLog.warning( "No representative blocks with XS type {0} exist in the core. " "There were also no similar blocks to use. " "These XS cannot be generated and must exist in the working " "directory or the run will fail.".format(xsID) ) def _summarizeGroups(self, blockCollectionsByXsGroup): """Summarize current contents of the XS groups.""" from armi.physics.neutronics.settings import CONF_XS_BLOCK_REPRESENTATION runLog.extra("Cross section group manager summary") runLog.extra("Averaging performed by `{0}`".format(self.cs[CONF_XS_BLOCK_REPRESENTATION])) for xsID, blocks in blockCollectionsByXsGroup.items(): if blocks: xsIDGroup = self._getXsIDGroup(xsID) if xsIDGroup == self._REPR_GROUP: reprBlock = self.representativeBlocks.get(xsID) xsSettings = self._initializeXsID(reprBlock.getMicroSuffix()) temp = self.avgNucTemperatures[xsID].get(xsSettings.xsTempIsotope, "N/A") runLog.extra( ( "XS ID {} contains {:4d} blocks, with avg burnup {} " "and avg fuel temp {}, represented by: {:65s}" ).format( xsID, len(blocks), reprBlock.p.percentBu, temp, reprBlock, ) ) elif xsIDGroup == self._NON_REPR_GROUP: runLog.extra( "XS ID {} contains {:4d} blocks, but no representative block.".format(xsID, len(blocks)) ) elif xsIDGroup == self._PREGEN_GROUP: xsFileNames = [y for _x, y in 
                        self._getPregeneratedXsFileLocationData(xsID)]
                    runLog.extra(
                        "XS ID {} contains {:4d} blocks, represented by: {}".format(xsID, len(blocks), xsFileNames)
                    )
                else:
                    raise ValueError("No valid group for XS ID {}".format(xsID))

    def _getXsIDGroup(self, xsID):
        # Classify an XS ID into one of the three summary groups; pre-generated
        # takes precedence, then represented, then non-represented.
        if self.xsTypeIsPregenerated(xsID):
            return self._PREGEN_GROUP
        elif xsID in self.representativeBlocks.keys():
            return self._REPR_GROUP
        elif xsID in self._unrepresentedXSIDs:
            return self._NON_REPR_GROUP
        # None signals an ID that fits no group; the caller treats this as an error
        return None

    def disableEnvGroupUpdates(self):
        """
        Turn off updating Env groups based on environment.

        Useful during reactivity coefficient calculations to be consistent with ref. run.

        Returns
        -------
        bool
            Whether updates were enabled before this call, so callers can restore
            the prior state afterwards.

        See Also
        --------
        enableEnvGroupUpdates
        """
        runLog.extra("Environment xs group updating disabled")
        wasEnabled = self._envGroupUpdatesEnabled
        self._envGroupUpdatesEnabled = False
        return wasEnabled

    def enableEnvGroupUpdates(self):
        """
        Turn on updating Env groups based on environment.

        See Also
        --------
        disableEnvGroupUpdates
        """
        runLog.extra("Environment xs group updating enabled")
        self._envGroupUpdatesEnabled = True

    def getNucTemperature(self, xsID, nucName):
        """
        Return the temperature (in C) of the nuclide in the group with specified xsID.

        Notes
        -----
        Returns None if the xsID or nucName are not in the average nuclide temperature
        dictionary `self.avgNucTemperatures`
        """
        if xsID not in self.avgNucTemperatures:
            return None
        return self.avgNucTemperatures[xsID].get(nucName, None)

    def updateNuclideTemperatures(self, blockCollectionByXsGroup=None):
        """
        Recompute nuclide temperatures for the block collections within the core.

        Parameters
        ----------
        blockCollectionByXsGroup : dict, optional
            Mapping between the XS IDs in the core and the block collections. Note that providing
            this as an argument will only update the average temperatures of these XS IDs/block
            collections and will result in other XS ID average temperatures not included to be
            discarded.

        Notes
        -----
        This method does not update any properties of the representative blocks.
Temperatures are obtained from the BlockCollection class rather than the representative block. """ self.avgNucTemperatures = {} blockCollectionsByXsGroup = blockCollectionByXsGroup or self.makeCrossSectionGroups() runLog.info( "Updating representative block average nuclide temperatures for the following XS IDs: {}".format( blockCollectionsByXsGroup.keys() ) ) for xsID, collection in blockCollectionsByXsGroup.items(): collection.calcAvgNuclideTemperatures() self.avgNucTemperatures[xsID] = collection.avgNucTemperatures runLog.extra("XS ID: {}, Collection: {}".format(xsID, collection)) # String constants MEDIAN_BLOCK_COLLECTION = "Median" AVERAGE_BLOCK_COLLECTION = "Average" FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION = "FluxWeightedAverage" SLAB_COMPONENTS_BLOCK_COLLECTION = "ComponentAverage1DSlab" CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION = "ComponentAverage1DCylinder" CYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION = "ComponentAverage1DCylinderDuctHeterogeneous" # Mapping between block collection string constants and their # respective block collection classes. 
BLOCK_COLLECTIONS = { MEDIAN_BLOCK_COLLECTION: MedianBlockCollection, AVERAGE_BLOCK_COLLECTION: AverageBlockCollection, FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION: FluxWeightedAverageBlockCollection, SLAB_COMPONENTS_BLOCK_COLLECTION: SlabComponentsAverageBlockCollection, CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION: CylindricalComponentsAverageBlockCollection, CYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION: CylindricalComponentsDuctHetAverageBlockCollection, } def blockCollectionFactory(xsSettings, allNuclidesInProblem): """Build a block collection based on user settings and input.""" blockRepresentation = xsSettings.blockRepresentation if (blockRepresentation == CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION) and xsSettings.ductHeterogeneous: blockRepresentation = CYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION validBlockTypes = xsSettings.validBlockTypes averageByComponent = xsSettings.averageByComponent return BLOCK_COLLECTIONS[blockRepresentation]( allNuclidesInProblem, validBlockTypes=validBlockTypes, averageByComponent=averageByComponent, ) ================================================ FILE: armi/physics/neutronics/crossSectionSettings.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The data structures and schema of the cross section modeling options. These are advanced/compound settings that are carried along in the normal cs object but aren't simple key/value pairs. 
The cs object could either hold the base data (dicts) and create instances of these data structure objects as needed, or the settings system could actually hold instances of these data structures. It is most convenient to let the cs object hold actual instances of these data. See detailed docs in `:doc: Lattice Physics <reference/physics/neutronics/latticePhysics/latticePhysics>`. """ from enum import Enum from typing import Dict, Union import voluptuous as vol from armi import context, runLog from armi.physics.neutronics import crossSectionGroupManager from armi.physics.neutronics.crossSectionGroupManager import BLOCK_COLLECTIONS from armi.settings import Setting CONF_BLOCK_REPRESENTATION = "blockRepresentation" CONF_MEMORY_REQUIREMENT = "requiredRAM" CONF_BLOCKTYPES = "validBlockTypes" CONF_BUCKLING = "criticalBuckling" CONF_DRIVER = "driverID" CONF_EXTERNAL_DRIVER = "externalDriver" CONF_EXTERNAL_RINGS = "numExternalRings" CONF_XS_FILE_LOCATION = "xsFileLocation" CONF_EXTERNAL_FLUX_FILE_LOCATION = "fluxFileLocation" CONF_GEOM = "geometry" CONF_HOMOGBLOCK = "useHomogenizedBlockComposition" CONF_INTERNAL_RINGS = "numInternalRings" CONF_MERGE_INTO_CLAD = "mergeIntoClad" CONF_MERGE_INTO_FUEL = "mergeIntoFuel" CONF_MESH_PER_CM = "meshSubdivisionsPerCm" CONF_REACTION_DRIVER = "nuclideReactionDriver" CONF_XSID = "xsID" CONF_XS_EXECUTE_EXCLUSIVE = "xsExecuteExclusive" CONF_XS_PRIORITY = "xsPriority" CONF_COMPONENT_AVERAGING = "averageByComponent" CONF_XS_MAX_ATOM_NUMBER = "xsMaxAtomNumber" CONF_MIN_DRIVER_DENSITY = "minDriverDensity" CONF_DUCT_HETEROGENEOUS = "ductHeterogeneous" CONF_TRACE_ISOTOPE_THRESHOLD = "traceIsotopeThreshold" CONF_XS_TEMP_ISOTOPE = "xsTempIsotope" class XSGeometryTypes(Enum): """ Data structure for storing the available geometry options within the framework. 
""" ZERO_DIMENSIONAL = 1 ONE_DIMENSIONAL_SLAB = 2 ONE_DIMENSIONAL_CYLINDER = 4 TWO_DIMENSIONAL_HEX = 8 @classmethod def _mapping(cls): mapping = { cls.ZERO_DIMENSIONAL: "0D", cls.ONE_DIMENSIONAL_SLAB: "1D slab", cls.ONE_DIMENSIONAL_CYLINDER: "1D cylinder", cls.TWO_DIMENSIONAL_HEX: "2D hex", } return mapping @classmethod def getStr(cls, typeSpec: Enum): """ Return a string representation of the given ``typeSpec``. Examples -------- XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL) == "0D" XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX) == "2D hex" """ geometryTypes = list(cls) if typeSpec not in geometryTypes: raise TypeError(f"{typeSpec} not in {geometryTypes}") return cls._mapping()[cls[typeSpec.name]] XS_GEOM_TYPES = { XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL), XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB), XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER), XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX), } # This dictionary defines the valid set of inputs based on # the geometry type within the ``XSModelingOptions`` _VALID_INPUTS_BY_GEOMETRY_TYPE = { XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL): { CONF_XSID, CONF_GEOM, CONF_BUCKLING, CONF_DRIVER, CONF_BLOCKTYPES, CONF_BLOCK_REPRESENTATION, CONF_EXTERNAL_FLUX_FILE_LOCATION, CONF_COMPONENT_AVERAGING, CONF_XS_EXECUTE_EXCLUSIVE, CONF_XS_PRIORITY, CONF_XS_MAX_ATOM_NUMBER, CONF_XS_TEMP_ISOTOPE, }, XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB): { CONF_XSID, CONF_GEOM, CONF_MESH_PER_CM, CONF_BLOCKTYPES, CONF_BLOCK_REPRESENTATION, CONF_EXTERNAL_FLUX_FILE_LOCATION, CONF_COMPONENT_AVERAGING, CONF_XS_EXECUTE_EXCLUSIVE, CONF_XS_PRIORITY, CONF_XS_MAX_ATOM_NUMBER, CONF_MIN_DRIVER_DENSITY, CONF_XS_TEMP_ISOTOPE, }, XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER): { CONF_XSID, CONF_GEOM, CONF_MERGE_INTO_CLAD, CONF_MERGE_INTO_FUEL, CONF_DRIVER, CONF_HOMOGBLOCK, CONF_INTERNAL_RINGS, CONF_EXTERNAL_RINGS, 
CONF_MESH_PER_CM, CONF_BLOCKTYPES, CONF_BLOCK_REPRESENTATION, CONF_EXTERNAL_FLUX_FILE_LOCATION, CONF_COMPONENT_AVERAGING, CONF_XS_EXECUTE_EXCLUSIVE, CONF_XS_PRIORITY, CONF_XS_MAX_ATOM_NUMBER, CONF_MIN_DRIVER_DENSITY, CONF_DUCT_HETEROGENEOUS, CONF_TRACE_ISOTOPE_THRESHOLD, CONF_XS_TEMP_ISOTOPE, }, XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX): { CONF_XSID, CONF_GEOM, CONF_BUCKLING, CONF_EXTERNAL_DRIVER, CONF_DRIVER, CONF_REACTION_DRIVER, CONF_EXTERNAL_RINGS, CONF_BLOCK_REPRESENTATION, CONF_EXTERNAL_FLUX_FILE_LOCATION, CONF_COMPONENT_AVERAGING, CONF_XS_EXECUTE_EXCLUSIVE, CONF_XS_PRIORITY, CONF_XS_MAX_ATOM_NUMBER, CONF_MIN_DRIVER_DENSITY, CONF_XS_TEMP_ISOTOPE, }, } _SINGLE_XS_SCHEMA = vol.Schema( { vol.Optional(CONF_GEOM): vol.All(str, vol.In(XS_GEOM_TYPES)), vol.Optional(CONF_BLOCK_REPRESENTATION): vol.All( str, vol.In( set(BLOCK_COLLECTIONS.keys()), ), ), vol.Optional(CONF_DRIVER): str, vol.Optional(CONF_BUCKLING): bool, vol.Optional(CONF_REACTION_DRIVER): str, vol.Optional(CONF_BLOCKTYPES): [str], vol.Optional(CONF_HOMOGBLOCK): bool, vol.Optional(CONF_EXTERNAL_DRIVER): bool, vol.Optional(CONF_INTERNAL_RINGS): vol.Coerce(int), vol.Optional(CONF_EXTERNAL_RINGS): vol.Coerce(int), vol.Optional(CONF_MERGE_INTO_CLAD): [str], vol.Optional(CONF_MERGE_INTO_FUEL): [str], vol.Optional(CONF_XS_FILE_LOCATION): [str], vol.Optional(CONF_EXTERNAL_FLUX_FILE_LOCATION): str, vol.Optional(CONF_MESH_PER_CM): vol.Coerce(float), vol.Optional(CONF_XS_EXECUTE_EXCLUSIVE): bool, vol.Optional(CONF_XS_PRIORITY): vol.Coerce(float), vol.Optional(CONF_XS_MAX_ATOM_NUMBER): vol.Coerce(int), vol.Optional(CONF_MIN_DRIVER_DENSITY): vol.Coerce(float), vol.Optional(CONF_COMPONENT_AVERAGING): bool, vol.Optional(CONF_DUCT_HETEROGENEOUS): bool, vol.Optional(CONF_TRACE_ISOTOPE_THRESHOLD): vol.Coerce(float), vol.Optional(CONF_XS_TEMP_ISOTOPE): str, vol.Optional(CONF_MEMORY_REQUIREMENT): vol.Coerce(float), } ) _XS_SCHEMA = vol.Schema({vol.All(str, vol.Length(min=1, max=2)): _SINGLE_XS_SCHEMA}) 
class XSSettings(dict): """ Container for holding multiple cross section settings based on their XSID. This is intended to be stored as part of a case settings and to be used for cross section modeling within a run. Notes ----- This is a specialized dictionary that functions in a similar manner as a defaultdict where if a key (i.e., XSID) is missing then a default will be set. If a missing key is being added before the ``setDefaults`` method is called then this will produce an error. This cannot just be a defaultdict because the creation of new cross section settings are dependent on user settings. """ def __init__(self, *args, **kwargs): dict.__init__(self, *args, **kwargs) self._blockRepresentation = None self._validBlockTypes = None def __repr__(self): return f"<{self.__class__.__name__} with XS IDs {self.keys()}>" def __getitem__(self, xsID): """ Return the stored settings of the same xs type and the lowest burnup group if they exist. Notes ----- 1. If ``AA`` and ``AB`` exist, but ``AC`` is created, then the intended behavior is that ``AC`` settings will be set to the settings in ``AA``. 2. If only ``YZ`` exists and ``YA`` is created, then the intended behavior is that ``YA`` settings will NOT be set to the settings in ``YZ`` 3. Requirements for using the existing cross section settings: a. The existing XS ID must match the current XS ID. b. The current xs burnup group must be larger than the lowest burnup group for the existing XS ID c. If 3a. and 3b. 
are not met, then the default cross section settings will be set for the current XS ID """ if xsID in self: return dict.__getitem__(self, xsID) # exact key not present so give lowest env group key, eg AA or BA as the source for # settings since users do not typically provide all combinations of second chars explicitly xsType = xsID[0] envGroup = xsID[1] existingXsOpts = [xsOpt for xsOpt in self.values() if xsOpt.xsType == xsType and xsOpt.envGroup < envGroup] if not any(existingXsOpts): return self._getDefault(xsID) else: return sorted(existingXsOpts, key=lambda xsOpt: xsOpt.envGroup)[0] def setDefaults(self, blockRepresentation, validBlockTypes): """ Set defaults for current and future xsIDs based user settings. This must be delayed after read-time since the settings affecting this may not be loaded yet and could still be at their own defaults when this input is being processed. Thus, defaults are set at a later time. Parameters ---------- blockRepresentation : str Valid options are provided in ``CrossSectionGroupManager.BLOCK_COLLECTIONS`` validBlockTypes : list of str or bool This configures which blocks (by their type) the cross section group manager will merge together to create a representative block. If set to ``None`` or ``True`` then all block types in the XS ID will be considered. If set to ``False`` then a default of ["fuel"] will be used. If set to a list of strings then the specific list will be used. A typical input may be ["fuel"] to just consider the fuel blocks. See Also -------- armi.physics.neutronics.crossSectionGroupManager.CrossSectionGroupManager.interactBOL : calls this """ self._blockRepresentation = blockRepresentation self._validBlockTypes = validBlockTypes for _xsId, xsOpt in self.items(): xsOpt.setDefaults(blockRepresentation, validBlockTypes) xsOpt.validate() def _getDefault(self, xsID): """ Process the optional ``crossSectionControl`` setting. This input allows users to override global defaults for specific cross section IDs (xsID). 
To simplify downstream handling of the various XS controls, we build a full data structure here that should fully define the settings for each individual cross section ID. """ # Only check since the state of the underlying cross section dictionary does not # get broadcasted to worker nodes. This check is only relevant for the first time # this is called and when called by the head node. if context.MPI_RANK == 0: if self._blockRepresentation is None: raise ValueError( f"The defaults of {self} have not been set. Call ``setDefaults`` first " "before attempting to add a new XS ID." ) xsOpt = XSModelingOptions(xsID, geometry=XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL)) xsOpt.setDefaults(self._blockRepresentation, self._validBlockTypes) xsOpt.validate() return xsOpt class XSModelingOptions: """ Cross section modeling options for a particular XS ID. Attributes ---------- xsID : str Cross section ID that is two characters maximum (i.e., AA). geometry: str The geometry modeling approximation for regions of the core with this assigned xsID. This is required if the ``xsFileLocation`` attribute is not provided. This cannot be set if the ``xsFileLocation`` is provided. xsFileLocation: list of str or None This should be a list of paths where the cross sections for this xsID can be copied from. This is required if the ``geometry`` attribute is not provided. This cannot be set if the ``geometry`` is provided. fluxFileLocation: str or None This should be a path where a pre-calculated flux solution for this xsID can be copied from. The ``geometry`` attribute must be provided with this input. validBlockTypes: str or None This is a configuration option for how the cross section group manager determines which blocks/regions to manage as part of the same collection for the current xsID. If this is set to ``None`` then all blocks/regions with the current xsID will be considered. 
blockRepresentation : str This is a configuration option for how the cross section group manager will select how to create a representative block based on the collection within the same xsID. See: ``crossSectionGroupManager.BLOCK_COLLECTIONS``. driverID : str This is a lattice physics configuration option used to determine which representative block can be used as a "fixed source" driver for another composition. This is particularly useful for non-fuel or highly subcritical regions. criticalBuckling : bool This is a lattice physics configuration option used to enable or disable the critical buckling search option. nuclideReactionDriver : str This is a lattice physics configuration option that is similar to the ``driverID``, but rather than applying the source from a specific representative block, the neutron source is taken from a single nuclides fission spectrum (i.e., U235). This is particularly useful for configuring SERPENT 2 lattice physics calculations. externalDriver : bool This is a lattice physics configuration option that can be used to determine if the fixed source problem is internally driven or externally driven by the ``driverID`` region. Externally driven means that the region will be placed on the outside of the current xsID block/region. If this is False then the driver region will be "inside" (i.e., an inner ring in a cylindrical model). useHomogenizedBlockComposition : bool This is a lattice physics configuration option that is useful for modeling spatially dependent problems (i.e., 1D/2D). If this is True then the representative block for the current xsID will be be a homogenized region. If this is False then the block will be represented in the geometry type selected. This is mainly used for 1D cylindrical problems. numInternalRings : int This is a lattice physics configuration option that is used to specify the number of grid-based rings for the representative block. 
numExternalRings : int This is a lattice physics configuration option that is used to specify the number of grid-based rings for the driver block. mergeIntoClad : list of str This is a lattice physics configuration option that is a list of component names to merge into a "clad" component. This is highly-design specific and is sometimes used to merge a "gap" or low-density region into a "clad" region to avoid numerical issues. mergeIntoFuel : list of str This is a lattice physics configuration option that is a list of component names to merge into a "fuel" component. This is highly-design specific and is sometimes used to merge a "gap" or low-density region into a "fuel" region to avoid numerical issues. meshSubdivisionsPerCm : float This is a lattice physics configuration option that can be used to control subregion meshing of the representative block in 1D problems. xsExecuteExclusive : bool The mpi task that results from this xsID will reserve a full processor and no others will allocate to it. This is useful for time balancing when you have one task that takes much longer than the others. xsPriority: int The priority of the mpi tasks that results from this xsID. Lower priority will execute first. starting longer jobs first is generally more efficient. xsMaxAtomNumber : int The maximum atom number to model for infinite dilute isotopes in lattice physics. This is used to avoid modeling isotopes with a large atomic number (e.g., fission products) as a depletion product of an isotope with a much smaller atomic number. averageByComponent: bool Controls whether the representative block averaging is performed on a component-by-component basis or on the block as a whole. If True, the resulting representative block will have component compositions that largely reflect those of the underlying blocks in the collection. 
If False, the number densities of some nuclides in the individual components may not be reflective of those of the underlying components due to the block number density "dehomogenization". minDriverDensity: float The minimum number density for nuclides included in driver material for a 1D lattice physics model. ductHeterogeneous : bool This is a lattice physics configuration option used to enable a partially heterogeneous approximation for a 1D cylindrical model. Everything inside of the duct will be treated as homogeneous. traceIsotopeThreshold : float This is a lattice physics configuration option used to enable a separate 0D fuel cross section calculation for trace fission products when using a 1D cross section model. This can significantly reduce the memory and run time required for the 1D model. The setting takes a float value that represents the number density cutoff for isotopes to be considered "trace". If no value is provided, the default is 0.0. xsTempIsotope: str The isotope whose temperature is interrogated when placing a block in a temperature cross section group. See `tempGroups`. "U238" is default since it tends to be dominant doppler isotope in most reactors. requiredRAM: float The amount of available memory needed to run this cross section model. Notes ----- Not all default attributes may be useful for your specific application and you may require other types of configuration options. These are provided as examples since the base ``latticePhysicsInterface`` does not implement models that use these. For additional options, consider subclassing the base ``Setting`` object and using this model as a template. 
""" def __init__( self, xsID, geometry=None, xsFileLocation=None, fluxFileLocation=None, validBlockTypes=None, blockRepresentation=None, driverID=None, criticalBuckling=None, nuclideReactionDriver=None, externalDriver=None, useHomogenizedBlockComposition=None, numInternalRings=None, numExternalRings=None, mergeIntoClad=None, mergeIntoFuel=None, meshSubdivisionsPerCm=None, xsExecuteExclusive=None, xsPriority=None, xsMaxAtomNumber=None, averageByComponent=False, minDriverDensity=0.0, ductHeterogeneous=False, traceIsotopeThreshold=0.0, xsTempIsotope="U238", requiredRAM=0.0, ): self.xsID = xsID self.geometry = geometry self.xsFileLocation = xsFileLocation self.validBlockTypes = validBlockTypes self.blockRepresentation = blockRepresentation # These are application specific, feel free use them # in your own lattice physics plugin(s). self.fluxFileLocation = fluxFileLocation self.driverID = driverID self.criticalBuckling = criticalBuckling self.nuclideReactionDriver = nuclideReactionDriver self.externalDriver = externalDriver self.useHomogenizedBlockComposition = useHomogenizedBlockComposition self.numInternalRings = numInternalRings self.numExternalRings = numExternalRings self.mergeIntoClad = mergeIntoClad self.mergeIntoFuel = mergeIntoFuel self.meshSubdivisionsPerCm = meshSubdivisionsPerCm self.xsMaxAtomNumber = xsMaxAtomNumber self.minDriverDensity = minDriverDensity self.averageByComponent = averageByComponent self.ductHeterogeneous = ductHeterogeneous self.traceIsotopeThreshold = traceIsotopeThreshold # these are related to execution self.xsExecuteExclusive = xsExecuteExclusive self.xsPriority = xsPriority self.xsTempIsotope = xsTempIsotope self.requiredRAM = requiredRAM def __repr__(self): if self.xsIsPregenerated: suffix = f"Pregenerated: {self.xsIsPregenerated}" else: suffix = f"Geometry Model: {self.geometry}" if self.fluxIsPregenerated: suffix = f"{suffix}, External Flux Solution: {self.fluxFileLocation}" return f"<{self.__class__.__name__}, XSID: {self.xsID}, 
{suffix}>" def __iter__(self): return iter(self.__dict__.items()) @property def xsType(self): """Return the single-char cross section type indicator.""" return self.xsID[0] @property def envGroup(self): """Return the single-char burnup group indicator.""" return self.xsID[1] @property def xsIsPregenerated(self): """True if this points to a pre-generated XS file.""" return self.xsFileLocation is not None @property def fluxIsPregenerated(self): """True if this points to a pre-generated flux solution file.""" return self.fluxFileLocation is not None def serialize(self): """Return as a dictionary without ``CONF_XSID`` and with ``None`` values excluded.""" doNotSerialize = [CONF_XSID] return {key: val for key, val in self if key not in doNotSerialize and val is not None} def validate(self): """ Performs validation checks on the inputs and provides warnings for option inconsistencies. Raises ------ ValueError When the mutually exclusive ``xsFileLocation`` and ``geometry`` attributes are provided or when neither are provided. """ # Check for valid inputs when the file location is supplied. if self.xsFileLocation: if self.geometry is not None: runLog.warning( f"Either file location or geometry inputs in {self} should be given, but not both. " "The file location setting will take precedence over the geometry inputs. " "Remove one or the other in the `crossSectionSettings` input to fix this warning." ) if self.xsFileLocation is None or self.fluxFileLocation is not None: if self.geometry is None: raise ValueError(f"{self} is missing a geometry input or a file location.") invalids = [] if self.xsFileLocation is not None: for var, val in self: # Skip these attributes since they are valid options # when the ``xsFileLocation`` attribute`` is set. 
if var in [CONF_XSID, CONF_XS_FILE_LOCATION, CONF_BLOCK_REPRESENTATION]: continue if val is not None: invalids.append((var, val)) if invalids: runLog.debug(f"The following inputs in {self} are not valid when the file location is set:") for var, val in invalids: runLog.debug(f"\tAttribute: {var}, Value: {val}") # Check for valid inputs when the geometry is supplied. invalids = [] if self.geometry is not None: validOptions = _VALID_INPUTS_BY_GEOMETRY_TYPE[self.geometry] for var, val in self: if var not in validOptions and val is not None: invalids.append((var, val)) if invalids: runLog.debug(f"The following inputs in {self} are not valid when `{self.geometry}` geometry type is set:") for var, val in invalids: runLog.debug(f"\tAttribute: {var}, Value: {val}") runLog.debug(f"The valid options for the `{self.geometry}` geometry are: {validOptions}") def setDefaults(self, blockRepresentation, validBlockTypes): """ This sets the defaults based on some recommended values based on the geometry type. Parameters ---------- blockRepresentation : str Valid options are provided in ``CrossSectionGroupManager.BLOCK_COLLECTIONS`` validBlockTypes : list of str or bool This configures which blocks (by their type) the cross section group manager will merge together to create a representative block. If set to ``None`` or ``True`` then all block types in the XS ID will be considered. If set to ``False`` then a default of ["fuel"] will be used. If set to a list of strings then the specific list will be used. A typical input may be ["fuel"] to just consider the fuel blocks. Notes ----- These defaults are application-specific and design specific. They are included to provide an example and are tuned to fit the internal needs of TerraPower. Consider a separate implementation/subclass if you would like different behavior. 
""" if type(validBlockTypes) is bool: validBlockTypes = None if validBlockTypes else ["fuel"] else: validBlockTypes = validBlockTypes defaults = {} if self.xsIsPregenerated: allowableBlockCollections = [ crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION, crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION, crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION, ] defaults = { CONF_XS_FILE_LOCATION: self.xsFileLocation, CONF_BLOCK_REPRESENTATION: blockRepresentation, } elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL): allowableBlockCollections = [ crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION, crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION, crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION, ] bucklingSearch = not self.fluxIsPregenerated defaults = { CONF_GEOM: self.geometry, CONF_BUCKLING: bucklingSearch, CONF_DRIVER: "", CONF_BLOCK_REPRESENTATION: blockRepresentation, CONF_BLOCKTYPES: validBlockTypes, CONF_EXTERNAL_FLUX_FILE_LOCATION: self.fluxFileLocation, } elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB): allowableBlockCollections = [ crossSectionGroupManager.SLAB_COMPONENTS_BLOCK_COLLECTION, ] defaults = { CONF_GEOM: self.geometry, CONF_MESH_PER_CM: 1.0, CONF_BLOCK_REPRESENTATION: crossSectionGroupManager.SLAB_COMPONENTS_BLOCK_COLLECTION, CONF_BLOCKTYPES: validBlockTypes, } elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER): allowableBlockCollections = [crossSectionGroupManager.CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION] defaults = { CONF_GEOM: self.geometry, CONF_DRIVER: "", CONF_MERGE_INTO_CLAD: ["gap"], CONF_MERGE_INTO_FUEL: [], CONF_MESH_PER_CM: 1.0, CONF_INTERNAL_RINGS: 0, CONF_EXTERNAL_RINGS: 1, CONF_HOMOGBLOCK: False, CONF_BLOCK_REPRESENTATION: crossSectionGroupManager.CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION, CONF_BLOCKTYPES: validBlockTypes, CONF_DUCT_HETEROGENEOUS: False, CONF_TRACE_ISOTOPE_THRESHOLD: 0.0, } elif self.geometry == 
XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX): allowableBlockCollections = [ crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION, crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION, crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION, ] defaults = { CONF_GEOM: self.geometry, CONF_BUCKLING: False, CONF_EXTERNAL_DRIVER: True, CONF_DRIVER: "", CONF_REACTION_DRIVER: None, CONF_EXTERNAL_RINGS: 1, CONF_BLOCK_REPRESENTATION: blockRepresentation, } defaults[CONF_XS_EXECUTE_EXCLUSIVE] = False defaults[CONF_XS_PRIORITY] = 5 defaults[CONF_COMPONENT_AVERAGING] = False defaults[CONF_MEMORY_REQUIREMENT] = 0.0 for attrName, defaultValue in defaults.items(): currentValue = getattr(self, attrName) if currentValue is None: setattr(self, attrName, defaultValue) else: if attrName == CONF_BLOCK_REPRESENTATION: if currentValue not in allowableBlockCollections: raise ValueError( f"Invalid block collection type `{currentValue}` assigned " f"for {self.xsID}. Expected one of the " f"following: {allowableBlockCollections}" ) self.validate() def serializeXSSettings(xsSettingsDict: Union[XSSettings, Dict]) -> Dict[str, Dict]: """ Return a serialized form of the ``XSSettings`` as a dictionary. Notes ----- Attributes that are not set (i.e., set to None) will be skipped. """ if not isinstance(xsSettingsDict, dict): raise TypeError(f"Expected a dictionary for {xsSettingsDict}") output = {} for xsID, xsOpts in xsSettingsDict.items(): # Setting the value to an empty dictionary # if it is set to a None or an empty # dictionary. 
if not xsOpts: continue if isinstance(xsOpts, XSModelingOptions): xsIDVals = xsOpts.serialize() elif isinstance(xsOpts, dict): xsIDVals = { config: confVal for config, confVal in xsOpts.items() if config != CONF_XSID and confVal is not None } else: raise TypeError( f"{xsOpts} was expected to be a ``dict`` or " f"``XSModelingOptions`` options type but is type {type(xsOpts)}" ) output[str(xsID)] = xsIDVals return output class XSSettingDef(Setting): """ Custom setting object to manage the cross section dictionary-like inputs. Notes ----- This uses the ``xsSettingsValidator`` schema to validate the inputs and will automatically coerce the value into a ``XSSettings`` dictionary. """ def __init__(self, name): description = "Data structure defining how cross sections are created" label = "Cross section control" default = XSSettings() options = None schema = xsSettingsValidator enforcedOptions = False subLabels = None isEnvironment = False oldNames = None Setting.__init__( self, name, default, description, label, options, schema, enforcedOptions, subLabels, isEnvironment, oldNames, ) def dump(self): """Return a serialized version of the ``XSSetting`` object.""" return serializeXSSettings(self._value) def xsSettingsValidator(xsSettingsDict: Dict[str, Dict]) -> XSSettings: """ Returns a ``XSSettings`` object if validation is successful. Notes ----- This provides two levels of checks. The first check is that the attributes provided as user input contains the correct key/values and the values are of the correct type. The second check uses the ``XSModelingOptions.validate`` method to check for input inconsistencies and provides warnings if there are any issues. 
""" xsSettingsDict = serializeXSSettings(xsSettingsDict) xsSettingsDict = _XS_SCHEMA(xsSettingsDict) vals = XSSettings() for xsID, inputParams in xsSettingsDict.items(): if not inputParams: continue xsOpt = XSModelingOptions(xsID, **inputParams) xsOpt.validate() vals[xsID] = xsOpt return vals ================================================ FILE: armi/physics/neutronics/diffIsotxs.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This script is used to compare ISOTXS files.""" from armi import runLog from armi.cli.entryPoint import EntryPoint class CompareIsotxsLibraries(EntryPoint): """Compare two ISOTXS files.""" name = "diff-isotxs" def addOptions(self): self.parser.add_argument( "reference", help="Reference ISOTXS for comparison. 
Percent differences are given in relation to this file.", ) self.parser.add_argument( "comparisonFiles", nargs="+", help="ISOTXS files to compare to the reference", ) self.parser.add_argument( "--nuclidesNames", "-n", nargs="+", help="For the interaction types identified only compare these nuclides.", ) self.parser.add_argument( "--interactions", "-i", nargs="+", help="Compare the cross sections for these interactins and specified nuclides.", ) self.parser.add_argument( "--fluxFile", "-f", help="Mcc3 file containing flux_bg (broad group flux) for single-group comparison.", ) def invoke(self): from armi.nuclearDataIO import isotxs, xsLibraries runLog.setVerbosity(0) refIsotxs = isotxs.readBinary(self.args.reference) for fname in self.args.comparisonFiles: cmpIsotxs = isotxs.readBinary(fname) xsLibraries.compare(refIsotxs, cmpIsotxs) ================================================ FILE: armi/physics/neutronics/energyGroups.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Energy group structures for multigroup neutronics calculations."""

import copy
import itertools
import math

import numpy as np

from armi import runLog
from armi.physics.neutronics.const import (
    FAST_FLUX_THRESHOLD_EV,
    HIGH_ENERGY_EV,
    MAXIMUM_XS_LIBRARY_ENERGY,
    ULTRA_FINE_GROUP_LETHARGY_WIDTH,
)
from armi.utils.mathematics import findNearestValue


def getFastFluxGroupCutoff(eGrpStruc):
    """
    Given a constant "fast" energy threshold, return which ARMI energy group index contains this threshold.

    .. impl:: Return the energy group index which contains a given energy threshold.
        :id: I_ARMI_EG_FE
        :implements: R_ARMI_EG_FE

        This function returns the energy group within a given group structure that contains the fast
        flux threshold energy. The threshold energy is imported from the
        :py:mod:`constants <armi.physics.neutronics.const>` in the neutronics module, where it is
        defined as 100 keV. This is a standard definition for fast flux. This function also
        calculates and returns the fraction of the threshold energy group that is above the 100 keV
        threshold.
    """
    # eGrpStruc holds descending group upper bounds in eV (see GROUP_STRUCTURE below);
    # find the first group whose upper bound falls below the fast-flux threshold.
    gThres = -1
    for g, eV in enumerate(eGrpStruc):
        if eV < FAST_FLUX_THRESHOLD_EV:
            gThres = g
            break
    # Width of the group that straddles the threshold.
    # NOTE(review): if no bound is below the threshold (or the very first one is),
    # gThres is -1 (or 0) and the negative indexing below silently wraps to the
    # bottom of the structure — presumably all callers pass structures that span
    # 100 keV; verify before relying on this for unusual group structures.
    dE = eGrpStruc[gThres - 1] - eGrpStruc[gThres]  # eV
    # Fraction of the straddling group's width that lies above the threshold.
    fastFluxFracInG = (eGrpStruc[gThres - 1] - FAST_FLUX_THRESHOLD_EV) / dE

    return gThres - 1, fastFluxFracInG


def _flatten(*numbers):
    # Flatten a mix of bare ints and iterables of ints into one flat list,
    # preserving order. Used to expand per-group lethargy specifications such
    # as ``(42, itertools.repeat(60, 28), 90)`` below.
    result = []
    for item in numbers:
        if isinstance(item, int):
            result.append(item)
        else:
            result.extend(item)
    return result


def _create_anl_energies_with_group_lethargies(*group_lethargies):
    # Build descending energy bounds starting from the library maximum energy,
    # stepping each group down by its lethargy width expressed in units of the
    # ultra-fine group lethargy width (E_next = E * exp(-du)).
    anl_energy_max = MAXIMUM_XS_LIBRARY_ENERGY
    en = anl_energy_max
    energies = []
    for ee in _flatten(*group_lethargies):
        energies.append(en)
        en *= math.e ** (-ee * ULTRA_FINE_GROUP_LETHARGY_WIDTH)
    return energies


def getGroupStructure(name):
    """
    Return descending neutron energy group upper bounds in eV for a given structure name.

    .. impl:: Provide the neutron energy group bounds for a given group structure.
:id: I_ARMI_EG_NE :implements: R_ARMI_EG_NE There are several built-in group structures that are defined in this module, which are stored in a dictionary. This function takes a group structure name as an input parameter, which it uses as a key for the group structure dictionary. If the group structure name is valid, it returns a copy of the energy group structure resulting from the dictionary lookup. Otherwise, it throws an error. Notes ----- Copy of the group structure is return so that modifications of the energy bounds does not propagate back to the `GROUP_STRUCTURE` dictionary. """ try: return copy.copy(GROUP_STRUCTURE[name]) except KeyError as ke: runLog.error( 'Could not find groupStructure with the name "{}".\nChoose one of: {}'.format( name, ", ".join(GROUP_STRUCTURE.keys()) ) ) raise ke def getGroupStructureType(neutronEnergyBoundsInEv): """Return neutron energy group structure name for a given set of neutron energy group bounds in eV.""" neutronEnergyBoundsInEv = np.array(neutronEnergyBoundsInEv) for groupStructureType in GROUP_STRUCTURE: refNeutronEnergyBoundsInEv = np.array(getGroupStructure(groupStructureType)) if len(refNeutronEnergyBoundsInEv) != len(neutronEnergyBoundsInEv): continue if np.allclose(refNeutronEnergyBoundsInEv, neutronEnergyBoundsInEv, 1e-5): return groupStructureType raise ValueError( "Neutron energy group structure type does not exist for the given neutron energy bounds: {}".format( neutronEnergyBoundsInEv ) ) GROUP_STRUCTURE = {} """ Energy groups for use in multigroup neutronics. Values are the upper bound of each energy in eV from highest energy to lowest (because neutrons typically downscatter...) :meta hide-value: """ GROUP_STRUCTURE["2"] = [HIGH_ENERGY_EV, 6.25e-01] # for calculating fast flux GROUP_STRUCTURE["FastFlux"] = [HIGH_ENERGY_EV, FAST_FLUX_THRESHOLD_EV] # Nuclear Reactor Engineering: Reactor Systems Engineering, Vol. 
1 GROUP_STRUCTURE["4gGlasstoneSesonske"] = [HIGH_ENERGY_EV, 5.00e04, 5.00e02, 6.25e-01] # http://serpent.vtt.fi/mediawiki/index.php/CASMO_4-group_structure GROUP_STRUCTURE["CASMO4"] = [HIGH_ENERGY_EV, 8.21e05, 5.53e03, 6.25e-01] GROUP_STRUCTURE["CASMO12"] = [ HIGH_ENERGY_EV, 2.23e06, 8.21e05, 5.53e03, 4.81e01, 4.00e00, 6.25e-01, 3.50e-01, 2.80e-01, 1.40e-01, 5.80e-02, 3.00e-02, ] # For typically for use with MCNP will need conversion to MeV, and ordering from low to high. # reference: https://www.sciencedirect.com/science/article/pii/S0149197022003778 # reference: https://mcnp.lanl.gov/pdf_files/TechReport_2017_LANL_LA-UR-17-29981_WernerArmstrongEtAl.pdf GROUP_STRUCTURE["CINDER63"] = [ 2.5000e7, 2.0000e7, 1.6905e7, 1.4918e7, 1.0000e7, 6.0650e6, 4.9658e6, 3.6788e6, 2.8651e6, 2.2313e6, 1.7377e6, 1.3534e6, 1.1080e6, 8.2085e5, 6.3928e5, 4.9790e5, 3.8870e5, 3.0200e5, 1.8320e5, 1.1110e5, 6.7380e4, 4.0870e4, 2.5540e4, 1.9890e4, 1.5030e4, 9.1190e3, 5.5310e3, 3.3550e3, 2.8400e3, 2.4040e3, 2.0350e3, 1.2340e3, 7.4850e2, 4.5400e2, 2.7540e2, 1.6700e2, 1.0130e2, 6.1440e1, 3.7270e1, 2.2600e1, 1.3710e1, 8.3150, 5.0430, 3.0590, 1.8550, 1.1250, 6.8300e-1, 4.1400e-1, 2.5100e-1, 1.5200e-1, 1.0000e-1, 8.0000e-2, 6.7000e-2, 5.8000e-2, 5.0000e-2, 4.2000e-2, 3.5000e-2, 3.0000e-2, 2.5000e-2, 2.0000e-2, 1.5000e-2, 1.0000e-2, 5.0000e-3, ] # Group structures below here are derived from Appendix E in # https://www.osti.gov/biblio/1483949-mc2-multigroup-cross-section-generation-code-fast-reactor-analysis-nuclear GROUP_STRUCTURE["ANL9"] = _create_anl_energies_with_group_lethargies(222, 120, itertools.repeat(180, 5), 540, 300) GROUP_STRUCTURE["ANL33"] = _create_anl_energies_with_group_lethargies(42, itertools.repeat(60, 28), 90, 240, 29, 1) GROUP_STRUCTURE["ANL70"] = _create_anl_energies_with_group_lethargies(42, itertools.repeat(30, 67), 29, 1) # fmt: off GROUP_STRUCTURE["ANL116"] = _create_anl_energies_with_group_lethargies( 15*[6] + [3] + 2*[6] + [3] + [12] + 3*[6] + 3*[12] + 2*[6] + 2*[12] + 
[4] + [6] + [2] + [12] + 2*[6] + [12] + 2*[6] +2*[12] + [6] + [12] + 2*[6] + 6*[12] + [6] + 4*[12] + 4*[6] + 5*[12] + [6] + 3*[12] + [6] + 2*[30] + 2*[15] + [30] + 4*[15] + [18] + [12] + 5*[30] + [24] + [12] + [24] + [19] + [11] + [18] + [24] + 3*[18] + 2*[12] + 14*[60] + 2*[30] + [29] + [1] ) GROUP_STRUCTURE["ANL230"] = _create_anl_energies_with_group_lethargies( [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 3, 3, 3, 3, 3, 6, 6, 6, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 6, 6, 6, 6, 2, 2, 1, 1, 2, 2, 2, 6, 6, 3, 3, 3, 3, 6, 6, 3, 3, 3, 3, 6, 6, 6, 6, 3, 3, 6, 6, 6, 3, 2, 1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 15, 15, 15, 15, 9, 6, 6, 9, 15, 15, 15, 3, 3, 9, 15, 9, 6, 3, 3, 9, 3, 12, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 12, 12, 6, 6, 12, 12, 12, 7, 5, 6, 6, 12, 12, 12, 12, 6, 6, 12, 12, 6, 6, 6, 6, 6, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 6, 24, 10, 20, 29, 1, ] ) # Reactor agnostic. Similar to ANL1041 but with 6 UFGs grouped together. 
# More likely to not error out on memory than 703 GROUP_STRUCTURE["348"] = _create_anl_energies_with_group_lethargies(itertools.repeat(6, 346), 5, 1) # Note that at one point the MC2 manual was inconsistent with the code itself GROUP_STRUCTURE["ANL703"] = _create_anl_energies_with_group_lethargies( [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, ] ) # fmt: on GROUP_STRUCTURE["ANL1041"] = _create_anl_energies_with_group_lethargies(itertools.repeat(2, 1041)) GROUP_STRUCTURE["ANL2082"] = _create_anl_energies_with_group_lethargies(itertools.repeat(1, 2082)) def _create_multigroup_structures_on_finegroup_energies(multigroup_energy_bounds, finegroup_energy_bounds): """Set energy group bounds to the nearest ultra-fine group boundaries.""" modifiedEnergyBounds = set() modifiedEnergyBounds.add(max(finegroup_energy_bounds)) for energyBound in multigroup_energy_bounds[1:]: modifiedEnergyBounds.add(findNearestValue(finegroup_energy_bounds, energyBound)) return sorted(modifiedEnergyBounds, reverse=True) def _create_anl_energies_with_group_energies(group_energy_bounds): """Set energy group bounds to the nearest ultra-fine group boundaries.""" ufgEnergies = _create_anl_energies_with_group_lethargies(itertools.repeat(1, 2082)) return _create_multigroup_structures_on_finegroup_energies(group_energy_bounds, ufgEnergies) """ Taken from Section A3.1 SHEM-361 in Ngeleka, Tholakele Prisca. "Examination and improvement of the SHEM energy group structure for HTR and deep burn HTR design and analysis." (2012). 
""" GROUP_STRUCTURE["SHEM361"] = [ 19640300, 14918200, 13840300, 11618300, 9999990, 9048360, 8187300, 7408170, 6703190, 6065300, 4965850, 4065690, 3328710, 2725310, 2231300, 1901390, 1636540, 1405770, 1336940, 1286960, 1162050, 1051150, 951119, 860006, 706511, 578443, 494002, 456021, 412501, 383884, 320646, 267826, 230014, 195008, 164999, 140000, 122773, 115624, 94664.5, 82297.4, 67379.4, 55165.6, 49915.9, 40867.7, 36978.6, 33459.6, 29281, 27394.4, 26100.1, 24999.1, 22699.4, 18584.7, 16200.5, 14899.7, 13603.7, 11137.7, 9118.81, 7465.85, 6112.52, 5004.51, 4097.35, 3481.07, 2996.18, 2700.24, 2397.29, 2084.1, 1811.83, 1586.2, 1343.58, 1134.67, 1064.32, 982.494, 909.681, 832.218, 748.517, 677.287, 646.837, 612.834, 600.099, 592.941, 577.146, 539.204, 501.746, 453.999, 419.094, 390.76, 371.703, 353.575, 335.323, 319.928, 295.922, 288.327, 284.888, 276.468, 268.297, 256.748, 241.796, 235.59, 224.325, 212.108, 200.958, 195.996, 193.078, 190.204, 188.877, 187.559, 186.251, 184.952, 183.295, 175.229, 167.519, 163.056, 154.176, 146.657, 139.504, 132.701, 126.229, 120.554, 117.577, 116.524, 115.48, 112.854, 110.288, 105.646, 103.038, 102.115, 101.605, 101.098, 100.594, 97.3287, 93.3256, 88.7741, 83.9393, 79.3679, 76.3322, 73.5595, 71.8869, 69.0682, 66.8261, 66.4929, 66.1612, 65.8312, 65.5029, 65.046, 64.5923, 63.6306, 62.3083, 59.925, 57.0595, 54.06, 52.9895, 51.7847, 49.2591, 47.5173, 46.2053, 45.2904, 44.1721, 43.1246, 42.1441, 41.227, 39.7295, 38.7874, 37.7919, 37.3038, 36.8588, 36.4191, 36.0568, 35.698, 34.5392, 33.0855, 31.693, 27.8852, 24.6578, 22.5356, 22.3788, 22.1557, 22.0011, 21.7018, 21.4859, 21.336, 21.2296, 21.1448, 21.0604, 20.9763, 20.7676, 20.6847, 20.6021, 20.5199, 20.4175, 20.2751, 20.0734, 19.5974, 19.3927, 19.1997, 19.0848, 17.9591, 17.759, 17.5648, 17.4457, 16.8305, 16.5501, 16.0498, 15.7792, 14.8662, 14.7301, 14.5952, 14.4702, 14.2505, 14.0496, 13.546, 13.3297, 12.6, 12.4721, 12.3086, 12.1302, 11.9795, 11.8153, 11.7094, 11.5894, 11.2694, 11.0529, 
10.8038, 10.5793, 9.50002, 9.14031, 8.97995, 8.80038, 8.67369, 8.52407, 8.30032, 8.13027, 7.97008, 7.83965, 7.73994, 7.60035, 7.38015, 7.13987, 6.99429, 6.91778, 6.87021, 6.83526, 6.8107, 6.79165, 6.77605, 6.75981, 6.74225, 6.71668, 6.63126, 6.60611, 6.58829, 6.57184, 6.55609, 6.53907, 6.51492, 6.48178, 6.43206, 6.35978, 6.28016, 6.16011, 6.05991, 5.96014, 5.80021, 5.72015, 5.61979, 5.53004, 5.48817, 5.41025, 5.38003, 5.32011, 5.21008, 5.10997, 4.93323, 4.76785, 4.4198, 4.30981, 4.21983, 4, 3.88217, 3.71209, 3.54307, 3.14211, 2.88405, 2.77512, 2.74092, 2.7199, 2.70012, 2.64004, 2.62005, 2.59009, 2.55, 2.46994, 2.33006, 2.27299, 2.21709, 2.15695, 2.0701, 1.98992, 1.90008, 1.77997, 1.66895, 1.58803, 1.51998, 1.44397, 1.41001, 1.38098, 1.33095, 1.29304, 1.25094, 1.21397, 1.16999, 1.14797, 1.12997, 1.11605, 1.10395, 1.09198, 1.07799, 1.03499, 1.02101, 1.00904, 0.996501, 0.981959, 0.96396, 0.944022, 0.919978, 0.880024, 0.800371, 0.719999, 0.624999, 0.594993, 0.55499, 0.520011, 0.475017, 0.431579, 0.390001, 0.352994, 0.325008, 0.305012, 0.279989, 0.254997, 0.231192, 0.20961, 0.190005, 0.161895, 0.137999, 0.119995, 0.104298, 0.0897968, 0.0764969, 0.0651999, 0.0554982, 0.0473019, 0.0402999, 0.0343998, 0.0292989, 0.0249394, 0.0200104, 0.01483, 0.0104505, 0.00714526, 0.00455602, 0.0024999, ] # Energy bounds of ARMI33 and ARMI45 are modified to the nearest ultra-fine group boundaries GROUP_STRUCTURE["ARMI33"] = _create_anl_energies_with_group_energies( [ 1.4190e07, 1.0000e07, 6.0650e06, 3.6780e06, 2.2313e06, 1.3530e06, 8.2080e05, 4.9787e05, 3.0190e05, 1.8310e05, 1.1109e05, 6.7370e04, 4.0860e04, 2.4788e04, 1.5030e04, 9.1180e03, 5.5308e03, 3.3540e03, 2.0340e03, 1.2341e03, 7.4850e02, 4.5390e02, 3.0432e02, 1.4860e02, 9.1660e01, 6.7904e01, 4.0160e01, 2.2600e01, 1.3709e01, 8.3150e00, 4.0000e00, 5.4000e-01, 4.1400e-01, ] ) # Energy bounds of SHEM33_361 is ANL33 modified to the nearest SHEM361 fine group boundaries GROUP_STRUCTURE["SHEM33_361"] = 
_create_multigroup_structures_on_finegroup_energies( GROUP_STRUCTURE["ANL33"], GROUP_STRUCTURE["SHEM361"] ) GROUP_STRUCTURE["ARMI45"] = _create_anl_energies_with_group_energies( [ 1.419e07, 1.000e07, 6.065e06, 4.966e06, 3.679e06, 2.865e06, 2.231e06, 1.738e06, 1.353e06, 1.108e06, 8.209e05, 6.393e05, 4.979e05, 3.887e05, 3.020e05, 1.832e05, 1.111e05, 6.738e04, 4.087e04, 2.554e04, 1.989e04, 1.503e04, 9.119e03, 5.531e03, 3.355e03, 2.840e03, 2.404e03, 2.035e03, 1.234e03, 7.485e02, 4.540e02, 2.754e02, 1.670e02, 1.013e02, 6.144e01, 3.727e01, 2.260e01, 1.371e01, 8.315e00, 5.043e00, 3.059e00, 1.855e00, 1.125e00, 6.830e-01, 4.140e-01, ] ) """ Taken from Table 5.1 of "GAMSOR: Gamma Source Preparation and DIF3D Flux Solution", ANL/NE-16/50 Rev 2.0, M.A. Smith, C.H. Lee, R.N. Hill, Aug 30 2022. """ GROUP_STRUCTURE["ANL21G"] = [ 2.0e7, 1.0e7, 8.0e6, 7.0e6, 6.0e6, 5.0e6, 4.0e6, 3.0e6, 2.5e6, 2.0e6, 1.5e6, 1.0e6, 7.0e5, 4.5e5, 3.0e5, 1.5e5, 1.0e5, 7.5e4, 4.5e4, 3.0e4, 2.0e4, ] """ Taken from Table 5.2 of "GAMSOR: Gamma Source Preparation and DIF3D Flux Solution", ANL/NE-16/50 Rev 2.0, M.A. Smith, C.H. Lee, R.N. Hill, Aug 30 2022. 
""" GROUP_STRUCTURE["ANL94G"] = [ 2.000e07, 1.400e07, 1.200e07, 1.100e07, 1.060e07, 1.000e07, 9.500e06, 9.000e06, 8.500e06, 8.000e06, 7.750e06, 7.500e06, 7.250e06, 7.000e06, 6.750e06, 6.500e06, 6.250e06, 6.000e06, 5.750e06, 5.500e06, 5.400e06, 5.200e06, 5.000e06, 4.700e06, 4.500e06, 4.400e06, 4.200e06, 4.000e06, 3.900e06, 3.800e06, 3.650e06, 3.500e06, 3.333e06, 3.166e06, 3.000e06, 2.833e06, 2.666e06, 2.500e06, 2.333e06, 2.166e06, 2.000e06, 1.875e06, 1.750e06, 1.660e06, 1.600e06, 1.500e06, 1.420e06, 1.330e06, 1.250e06, 1.200e06, 1.125e06, 1.000e06, 9.000e05, 8.650e05, 8.250e05, 8.000e05, 7.500e05, 7.000e05, 6.750e05, 6.500e05, 6.250e05, 6.000e05, 5.750e05, 5.500e05, 5.250e05, 5.000e05, 4.500e05, 4.250e05, 4.000e05, 3.750e05, 3.500e05, 3.250e05, 3.000e05, 2.600e05, 2.200e05, 1.900e05, 1.600e05, 1.500e05, 1.400e05, 1.200e05, 1.000e05, 9.000e04, 8.000e04, 7.500e04, 6.500e04, 6.000e04, 5.500e04, 4.500e04, 4.000e04, 3.500e04, 3.000e04, 2.000e04, 1.500e04, 1.000e04, ] ================================================ FILE: armi/physics/neutronics/fissionProductModel/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""The Fission product model subpackage.""" import os from armi.context import RES REFERENCE_LUMPED_FISSION_PRODUCT_FILE = os.path.join(RES, "referenceFissionProducts.dat") ================================================ FILE: armi/physics/neutronics/fissionProductModel/fissionProductModel.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains the implementation of the ``FissionProductModel`` interface. This ``FissionProductModel`` class implements the management of fission products within the reactor core and can be extended to support more general applications. Currently, the fission product model supports explicit modeling of fission products in each of the blocks/components, independent management of lumped fission products for each blocks/components within the core, or global management of lumped fission products where the fission products between all blocks/components are shared and are modified together. Within the framework, there is a coupling between the management of the fission products through this model to neutronics evaluations of flux and depletion calculations. When using a Monte Carlo solver, such as MCNP (i.e., there is an interface that is attached to the operator that has a name of "mcnp"), the fission products will always be treated independently and fission products (either explicit or lumped) will be added to all blocks/components in the core. 
The reason for this is that Monte Carlo solvers, like MCNP, may implement their own coupling between flux and depletion evaluations and having the initialization of these fission products in each block/component independently will allow that solver to manage the inventory over time. When determining which fission product model to use (either explicit or lumped) it is important to consider which cross section data is available to the flux and/or depletion solvers, and what level of fidelity is required for the analysis. This is where decisions as a developer/user need to be made, and the implementation of this specific model may not be, in general, accurate for any reactor system. It is dependent on which plugins are implemented and the requirements of the individual flux/depletion solver. Lumped fission products are generally useful for fast reactor applications, especially in fuel cycle calculations or scoping evaluations where the tracking of the detailed nuclide inventory would not have substantial impacts on core reactivity predictions. This is typically done by collapsing all fission products into lumped nuclides, like ``LFP35``, ``LFP38``, ``LFP39``, ``LFP40``, and ``LFP41``. This is the implementation in the framework, which is discussed a bit more in the ``fpModel`` setting. These lumped fission products are separated into different bins that represent the fission product yields from U-235, U-238, Pu-239, Pu-240, and Pu-241/Am-241, respectively. The exact binning of which fission events from which target nuclides is specified by the ``burn-chain.yaml`` file, which can be modified by a user/developer. When selecting this modeling option, the blocks/components will have these ``LFP`` nuclides in the number density dictionaries. The key thing here is that these lumped nuclides do not exist in nature and therefore do not have nuclear data directly available in cross section evaluations, like ENDF/B. 
If the user wishes to consider these nuclides in the flux/depletion evaluations, then
cross sections for these ``LFP`` nuclides will need to be prepared. Generally speaking,
the ``crossSectionGroupManager`` and the ``latticePhysicsInterface`` could be used to
implement this for cross section generation codes, like NJOY, CASMO, MC2-3, Serpent, etc.

.. warning::

    The lumped fission product model and the ``burn-chain.yaml`` data may not be directly
    applicable to light water reactor systems, especially if there are strong reactivity
    impacts with fission products like ``Xe`` and ``Sm`` that need to be tracked
    independently. A user/developer may update the ``referenceFissionProducts.dat`` data
    file to exclude these important nuclides from the lumped fission product models if
    need be, but this would also require updating the ``burn-chain.yaml`` file as well as
    updating the ``nuclideFlags`` specification within the reactor blueprints input.

A further simplified option for lumped fission product treatment that is available is to
treat all fission products explicitly as ``Mo-99``. This is not guaranteed to be an
accurate treatment of the fission products from a reactivity/depletion perspective, but
it is available for quick scoping evaluations and model building.

Finally, the explicit fission product modeling aims to include as many nuclides on the
blocks/components as the user wishes to consider, but the nuclides that are modeled must
be compatible with the plugins that are implemented for the application. When using this
option, the user should look to set the ``fpModelLibrary`` setting.

    - If this setting is not set, then it is expected that the user will need to manually
      add all nuclides to the ``nuclideFlags`` section of the reactor core blueprints.
- If the ``fpModelLibrary`` is selected then this will automatically add to the ``nuclideFlags`` input using :py:func:`isotopicOptions.autoUpdateNuclideFlags` and this class will initialize all added nuclides to have zero number densities. .. warning:: The explicit fission product model is being implemented with the vision of using generating multi-group cross sections for nuclides that are added with the ``fpModelLibrary`` setting with follow-on depletion calculations that will be managed by a detailed depletion solver, like ORIGEN. There are many caveats to how this model is initialized and may not be an out-of-the-box general solution. """ from armi import interfaces, runLog from armi.physics.neutronics.fissionProductModel import lumpedFissionProduct from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import ( CONF_FP_MODEL, CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT, ) from armi.reactor.flags import Flags NUM_FISSION_PRODUCTS_PER_LFP = 2.0 ORDER = interfaces.STACK_ORDER.AFTER + interfaces.STACK_ORDER.PREPROCESSING def describeInterfaces(_cs): """Function for exposing interface(s) to other code.""" return (FissionProductModel, {}) class FissionProductModel(interfaces.Interface): """Coordinates the fission product model on the reactor.""" name = "fissionProducts" def __init__(self, r, cs): interfaces.Interface.__init__(self, r, cs) self._globalLFPs = lumpedFissionProduct.lumpedFissionProductFactory(self.cs) @property def _explicitFissionProducts(self): return self.cs[CONF_FP_MODEL] == "explicitFissionProducts" @property def _useGlobalLFPs(self): return not (self.cs[CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT] or self._explicitFissionProducts) @property def _fissionProductBlockType(self): """ Set the block type that the fission products will be applied to. Notes ----- Some Monte Carlo codes require all nuclides to be consistent in all materials when assemblies are shuffled. 
This requires that fission products be consistent across all blocks, even if fission products are not generated when the block is depleted. """ return None if self.getInterface("mcnp") is not None else Flags.FUEL def interactBOL(self): interfaces.Interface.interactBOL(self) if self._explicitFissionProducts: self.setAllComponentFissionProducts() else: self.setAllBlockLFPs() def setAllComponentFissionProducts(self): """ Initialize all nuclides for each ``DEPLETABLE`` component in the core. Notes ----- This should be called when explicit fission product modeling is enabled to ensure that all isotopes are initialized on the depletable components within the reactor data model so that there is some density as a starting point. When explicit fission products are enabled and the user has not already included all fission products in the blueprints (in ``nuclideFlags``), the ``fpModelLibrary`` setting is used to autofill all the nuclides in a given library into the ``blueprints.allNuclidesInProblem`` list. All nuclides that were not manually initialized by the user are added to the ``DEPLETABLE`` components throughout every block in the core. The ``DEPLETABLE`` flag is based on the user adding this explicitly in the blueprints, or is based on the user setting a nuclide to ``burn: true`` in the blueprint ``nuclideFlags``. 
See Also -------- armi.reactor.blueprints.isotopicOptions.autoUpdateNuclideFlags armi.reactor.blueprints.isotopicOptions.getAllNuclideBasesByLibrary """ for b in self.r.core.getBlocks(includeAll=True): b.setLumpedFissionProducts(None) for c in b.getComponents(Flags.DEPLETABLE): # Add all isotopes in problem at 0.0 density updatedNDens = c.getNumberDensities() # self.r.blueprints.allNuclidesInProblem contains ~everything in ENDF if _explicitFissionProducts for nuc in self.r.blueprints.allNuclidesInProblem: if nuc in updatedNDens: continue updatedNDens[nuc] = 0.0 c.updateNumberDensities(updatedNDens) def setAllBlockLFPs(self): """ Sets all the block lumped fission products attributes. See Also -------- armi.reactor.components.Component.setLumpedFissionProducts """ for b in self.r.core.getBlocks(self._fissionProductBlockType, includeAll=True): if self._useGlobalLFPs: b.setLumpedFissionProducts(self.getGlobalLumpedFissionProducts()) else: independentLFPs = self.getGlobalLumpedFissionProducts().duplicate() b.setLumpedFissionProducts(independentLFPs) def getGlobalLumpedFissionProducts(self): r""" Lookup the detailed fission product object associated with a xsType and burnup group. See Also -------- armi.physics.neutronics.isotopicDepletion.depletion.DepletionInterface.buildFissionProducts armi.reactor.blocks.Block.getLumpedFissionProductCollection : same thing, but block-level compatible. Use this """ return self._globalLFPs def setGlobalLumpedFissionProducts(self, lfps): r""" Lookup the detailed fission product object associated with a xsType and burnup group. See Also -------- armi.reactor.blocks.Block.getLumpedFissionProductCollection : same thing, but block-level compatible. 
Use this """ self._globalLFPs = lfps def interactBOC(self, cycle=None): if self._explicitFissionProducts: self.setAllComponentFissionProducts() else: self.setAllBlockLFPs() def interactDistributeState(self): if self._explicitFissionProducts: self.setAllComponentFissionProducts() else: self.setAllBlockLFPs() def getAllFissionProductNames(self): """ Find all fission product names from the lumped fission product collection. Notes ----- This considers all LFP collections, whether they are global, block-level, or a mix of these. """ runLog.debug("Gathering all possible fission products that are modeled.") fissionProductNames = [] lfpCollections = [] # get all possible lfp collections (global + block-level) for b in self.r.core.getBlocks(Flags.FUEL, includeAll=True): lfpCollection = b.getLumpedFissionProductCollection() if lfpCollection and lfpCollection not in lfpCollections: lfpCollections.append(lfpCollection) # get all possible FP names in each LFP collection for lfpCollection in lfpCollections: for fpName in lfpCollection.getAllFissionProductNames(): if fpName not in fissionProductNames: fissionProductNames.append(fpName) return fissionProductNames def removeFissionGasesFromBlocks(self): """ Return False to indicate that no fission products are being removed. Notes ----- This should be implemented on an application-specific model. """ runLog.warning(f"Fission gas removal is not implemented in {self}") return False ================================================ FILE: armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Settings related to the fission product model.""" from armi.physics.neutronics import fissionProductModel from armi.settings import setting CONF_FP_MODEL = "fpModel" CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT = "makeAllBlockLFPsIndependent" CONF_LFP_COMPOSITION_FILE_PATH = "lfpCompositionFilePath" CONF_FISSION_PRODUCT_LIBRARY_NAME = "fpModelLibrary" def defineSettings(): """Define settings for the plugin.""" settings = [ setting.Setting( CONF_FP_MODEL, default="infinitelyDilute", label="Fission Product Model", description=( "This setting is used to determine how fission products are treated in an " "analysis. By choosing `noFissionProducts`, no fission products will be added. By " "selecting, `infinitelyDilute`, lumped fission products will be initialized to a " "very small number on the blocks/components that require them. By choosing `MO99`, " "the fission products will be represented only by Mo-99. This is a simplistic " "assumption that is commonly used by fast reactor analyses in scoping calculations " "and is not necessarily a great assumption for depletion evaluations. Finally, by " "choosing `explicitFissionProducts` the fission products will be added explicitly " "to the blocks/components that are depletable. This is useful for detailed tracking " "of fission products." 
), options=[ "noFissionProducts", "infinitelyDilute", "MO99", "explicitFissionProducts", ], ), setting.Setting( CONF_FISSION_PRODUCT_LIBRARY_NAME, default="", label="Fission Product Library", description=( f"This setting should be used when `{CONF_FP_MODEL}` is set to " "`explicitFissionProducts`. It is used in conjunction with any nuclideFlags " "defined in the blueprints to configure all the nuclides that are modeled within " "the core. Selecting any library option will add all nuclides from the selected " "library to the model so that analysts do not need to change their inputs when " "modifying the fission product treatment for calculations." ), options=[ "", "MC2-3", ], ), setting.Setting( CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT, default=False, label="Use Independent LFPs", description=( "Flag to make all blocks have independent lumped fission products. Note that this " "is forced to be True when the `explicitFissionProducts` modeling option is " "selected or an interface named `mcnp` is on registered on the operator stack." ), ), setting.Setting( CONF_LFP_COMPOSITION_FILE_PATH, default=fissionProductModel.REFERENCE_LUMPED_FISSION_PRODUCT_FILE, label="LFP Definition File", description=( "Path to the file that contains lumped fission product composition definitions " "(e.g. equilibrium yields). This is unused when the `explicitFissionProducts` or " "`MO99` modeling options are selected." ), ), ] return settings def getFissionProductModelSettingValidators(inspector): """The standard helper method, to provide validators to the fission product model.""" # Import the Query class here to avoid circular imports. from armi.settings.settingsValidation import Query queries = [] queries.append( Query( lambda: inspector.cs[CONF_FP_MODEL] != "explicitFissionProducts" and not bool(inspector.cs["initializeBurnChain"]), ( "The burn chain is not being initialized and the fission product model is not set " "to `explicitFissionProducts`. This will likely fail." 
), f"Would you like to set the `{CONF_FP_MODEL}` to `explicitFissionProducts`?", lambda: inspector._assignCS(CONF_FP_MODEL, "explicitFissionProducts"), ) ) queries.append( Query( lambda: inspector.cs[CONF_FP_MODEL] != "explicitFissionProducts" and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] != "", ( "The explicit fission product model is disabled and the fission product model " "library is set. This will have no impact on the results, but it is best to " f"disable the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option." ), "Would you like to do this?", lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, ""), ) ) queries.append( Query( lambda: inspector.cs[CONF_FP_MODEL] == "explicitFissionProducts" and bool(inspector.cs["initializeBurnChain"]), ( "The explicit fission product model is enabled, but initializing the burn chain is " "also enabled. This will likely fail." ), "Would you like to disable the burn chain initialization?", lambda: inspector._assignCS("initializeBurnChain", False), ) ) queries.append( Query( lambda: inspector.cs[CONF_FP_MODEL] == "explicitFissionProducts" and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] == "", ( "The explicit fission product model is enabled and the fission product model " "library is disabled. May result in no fission product nuclides being added to the " "case, unless these have manually added in `nuclideFlags`." ), ( f"Would you like to set the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option to be " "equal to the default implementation of MC2-3?." ), lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, "MC2-3"), ) ) return queries ================================================ FILE: armi/physics/neutronics/fissionProductModel/lumpedFissionProduct.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The lumped fission product (LFP) module deals with representing LFPs and loading them from files. These are generally managed by the :py:mod:`~armi.physics.neutronics.fissionProductModel.fissionProductModel.FissionProductModel` """ import os from armi import runLog from armi.nucDirectory import elements, nuclideBases from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import ( CONF_FP_MODEL, CONF_LFP_COMPOSITION_FILE_PATH, ) class LumpedFissionProduct: r""" Lumped fission product. The yields are in number fraction and they sum to 2.0 in general so a fission of an actinide results in one LFP, which represents 2 real FPs. This object is a data structure and works a lot like a dictionary in terms of accessing and modifying the data. The yields are indexed by nuclideBase -- in self.yld the yield fraction is indexed by nuclideBases of the individual fission product isotopes Examples -------- >>> fpd = FissionProductDefinitionFile(stream) >>> lfp = fpd.createSingleLFPFromFile("LFP39") >>> lfp[<nuclidebase for EU151>] 2.9773e-05 See Also -------- armi.reactor.blocks.Block.getLumpedFissionProductCollection : how you should access these. """ def __init__(self, name=None): """ Make an LFP. Parameters ---------- name : str, optional A name for the LFP. Will be overwritten if you load from file. Provide only if you are spinning your own custom LFPs. 
""" self.name = name self.yld = {} def duplicate(self): """Make a copy of this w/o using deepcopy.""" new = self.__class__(self.name) for key, val in self.yld.items(): new.yld[key] = val return new def __getitem__(self, fissionProduct): """ Return the yield of a particular fission product. This allows the LFP to be accessed via indexing, like this: ``lfp[fp]`` Returns ------- yld : yield of the fission product. """ return self.yld.get(fissionProduct, 0.0) def __setitem__(self, key, val): from armi.physics.neutronics.fissionProductModel.fissionProductModel import ( NUM_FISSION_PRODUCTS_PER_LFP, ) if val < 0.0: raise ValueError(f"Cannot set the yield of {key} in {self} to be less than zero as this is non-physical.") if val > NUM_FISSION_PRODUCTS_PER_LFP: raise ValueError( f"Cannot set the yield of {key} in {self} to be greater than {NUM_FISSION_PRODUCTS_PER_LFP}." ) self.yld[key] = val def __contains__(self, item): return item in self.yld def __repr__(self): return f"<Lumped Fission Product {self.name}>" def keys(self): return self.yld.keys() def values(self): return self.yld.values() def items(self): for nuc in self.keys(): yield nuc, self[nuc] def getGaseousYieldFraction(self): """Return the yield fraction of the gaseous nuclides.""" yld = 0.0 for nuc in self.keys(): if not isGas(nuc): continue yld += self[nuc] return yld def getTotalYield(self): """ Get the fractional yield of all nuclides in this lumped fission product. Accounts for any fission gas that may be removed. Returns ------- total yield of all fps """ return sum([self[nuc] for nuc in self.yld]) def getMassFracs(self): """ Return a dictionary of mass fractions indexed by nuclide. Returns ------- massFracs : dict mass fractions (floats) of LFP masses """ massFracs = {} for nuc in self.keys(): massFracs[nuc] = self.getMassFrac(nuclideBase=nuc) return massFracs def getMassFrac(self, nucName=None, nuclideBase=None): """ Return the mass fraction of the given nuclide. 
Returns ------- nuclide mass fraction (float) """ massFracDenom = self.getMassFracDenom() if not nuclideBase: nuclideBase = nuclideBases.byName[nucName] return self.__getitem__(nuclideBase) * (nuclideBase.weight / massFracDenom) def getMassFracDenom(self): """ See Also -------- armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct.getMassFrac """ massFracDenom = 0.0 for nuc in self.keys(): massFracDenom += self[nuc] * nuc.weight return massFracDenom class LumpedFissionProductCollection(dict): """ A set of lumped fission products. Typically there would be one of these on a block or on a global level. """ def __init__(self): self.collapsible = False def duplicate(self): new = self.__class__() for lfpName, lfp in self.items(): new[lfpName] = lfp.duplicate() return new def getLumpedFissionProductNames(self): return self.keys() def getAllFissionProductNames(self): """Gets names of all fission products in this collection.""" fpNames = set() for lfp in self.values(): for fp in lfp.keys(): fpNames.add(fp.name) return sorted(fpNames) def getAllFissionProductNuclideBases(self): """Gets names of all fission products in this collection.""" nucs = set() for _lfpName, lfp in self.items(): for fp in lfp.keys(): nucs.add(fp) return sorted(nucs) def getNumberDensities(self, objectWithParentDensities=None, densFunc=None): """ Gets all FP number densities in collection. Parameters ---------- objectWithParentDensities : ArmiObject object (probably block) that can be called with getNumberDensity('LFP35'), etc. to get densities of LFPs. densFunc : function, optional Optional method to extract LFP densities Returns ------- fpDensities : dict keys are fp names, vals are fission product number density in atoms/bn-cm. 
""" if not densFunc: densFunc = lambda lfpName: objectWithParentDensities.getNumberDensity(lfpName) fpDensities = {} for lfpName, lfp in self.items(): lfpDens = densFunc(lfpName) for fp, fpFrac in lfp.items(): fpDensities[fp.name] = fpDensities.get(fp.name, 0.0) + fpFrac * lfpDens return fpDensities def getMassFrac(self, oldMassFrac=None): """Returns the mass fraction vector of the collection of lumped fission products.""" if not oldMassFrac: raise ValueError("You must define a massFrac vector") massFrac = {} for lfpName, lfp in self.items(): lfpMFrac = oldMassFrac[lfpName] for nuc, mFrac in lfp.getMassFracs().items(): try: massFrac[nuc] += lfpMFrac * mFrac except KeyError: massFrac[nuc] = lfpMFrac * mFrac return massFrac class FissionProductDefinitionFile: """ Reads a file that has definitions of one or more LFPs in it to produce LFPs. The format for this file is as follows:: LFP35 GE73 5.9000E-06 LFP35 GE74 1.4000E-05 LFP35 GE76 1.6000E-04 LFP35 AS75 8.9000E-05 and so on Examples -------- >>> fpd = FissionProductDefinitionFile(stream) >>> lfps = fpd.createLFPsFromFile() The path to this file is specified by the `lfpCompositionFilePath` user setting. """ def __init__(self, stream): self.stream = stream def createLFPsFromFile(self): """ Read the file and create LFPs from the contents. Returns ------- lfps : list List of LumpedFissionProducts contained in the file """ lfps = LumpedFissionProductCollection() for lfpLines in self._splitIntoIndividualLFPLines(): lfp = self._readOneLFP(lfpLines) lfps[lfp.name] = lfp return lfps def createSingleLFPFromFile(self, name): """Read one LFP from the file.""" lfpLines = self._splitIntoIndividualLFPLines(name) lfp = self._readOneLFP(lfpLines[0]) # only one LFP expected. Use it. return lfp def _splitIntoIndividualLFPLines(self, lfpName=None): """ The lfp file can contain one or more LFPs. This splits them. Ignores DUMPs. Parameters ---------- lfpName : str, optional Restrict to just these names if desired. 
Returns ------- allLFPLines : list of list each entry is a list of lines that define one LFP """ lines = self.stream.readlines() allLFPLines = [] thisLFPLines = [] lastName = None for line in lines: name = line.split()[0] if "DUMP" in name or (lfpName and lfpName not in name): continue if lastName and name != lastName: allLFPLines.append(thisLFPLines) thisLFPLines = [] thisLFPLines.append(line) lastName = name if thisLFPLines: allLFPLines.append(thisLFPLines) return allLFPLines def _readOneLFP(self, linesOfOneLFP): lfp = LumpedFissionProduct() totalYield = 0.0 for line in linesOfOneLFP: data = line.split() parent = data[0] nucLibId = data[1] nuc = nuclideBases.byName[nucLibId] yld = float(data[2]) lfp.yld[nuc] = yld totalYield += yld lfp.name = parent # e.g. LFP38 runLog.debug("Loaded {0} {1} nuclides for a total yield of {2}".format(len(lfp.yld), lfp.name, totalYield)) return lfp def lumpedFissionProductFactory(cs): """Build lumped fission products.""" if cs[CONF_FP_MODEL] == "explicitFissionProducts": return None if cs[CONF_FP_MODEL] == "MO99": return _buildMo99LumpedFissionProduct() lfpPath = cs[CONF_LFP_COMPOSITION_FILE_PATH] if not lfpPath or not os.path.exists(lfpPath): raise ValueError( f"The fission product reference file does not exist or is not a valid path. Path provided: {lfpPath}" ) runLog.extra(f"Loading global lumped fission products (LFPs) from {lfpPath}") with open(lfpPath) as lfpStream: lfpFile = FissionProductDefinitionFile(lfpStream) lfps = lfpFile.createLFPsFromFile() return lfps def _buildMo99LumpedFissionProduct(): """ Build a dummy MO-99 LFP collection. This is a very bad FP approximation from a physics standpoint but can be very useful for rapid-running test cases. 
""" mo99 = nuclideBases.byName["MO99"] mo99LFPs = LumpedFissionProductCollection() for lfp in nuclideBases.where(lambda nb: isinstance(nb, nuclideBases.LumpNuclideBase)): # Not all lump nuclides bases defined are fission products, so ensure that only fission # products are considered. if not ("FP" in lfp.name or "REGN" in lfp.name): continue mo99FP = LumpedFissionProduct(lfp.name) mo99FP[mo99] = 2.0 mo99LFPs[lfp.name] = mo99FP return mo99LFPs def isGas(nuc): """True if nuclide is considered a gas.""" # ruff: noqa: SIM110 for element in elements.getElementsByChemicalPhase(elements.ChemicalPhase.GAS): if element == nuc.element: return True return False ================================================ FILE: armi/physics/neutronics/fissionProductModel/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/physics/neutronics/fissionProductModel/tests/test_fissionProductModel.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test the fission product module to ensure all FP are available.""" import unittest from armi.physics.neutronics.fissionProductModel import fissionProductModel from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import ( CONF_FISSION_PRODUCT_LIBRARY_NAME, CONF_FP_MODEL, ) from armi.physics.neutronics.fissionProductModel.tests import test_lumpedFissionProduct from armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface import ( isDepletable, ) from armi.reactor.flags import Flags from armi.reactor.tests.test_reactors import ( buildOperatorOfEmptyHexBlocks, loadTestReactor, ) class TestFPMLumpedFP(unittest.TestCase): """ Tests the fission product model interface behavior when lumped fission products are enabled. Notes ----- This loads the global fission products from a file stream. """ def setUp(self): o = buildOperatorOfEmptyHexBlocks() o.removeAllInterfaces() self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs) o.addInterface(self.fpModel) # Load the fission products from a file stream. dummyLFPs = test_lumpedFissionProduct.getDummyLFPFile() self.fpModel.setGlobalLumpedFissionProducts(dummyLFPs.createLFPsFromFile()) # Set up the global LFPs and check that they are setup. 
self.fpModel.interactBOL() self.assertTrue(self.fpModel._useGlobalLFPs) def test_loadGlobalLFPsFromFile(self): """Tests that loading lumped fission products from a file.""" self.assertEqual(len(self.fpModel._globalLFPs), 3) lfps = self.fpModel.getGlobalLumpedFissionProducts() self.assertIn("LFP39", lfps) def test_getAllFissionProductNames(self): """Tests retrieval of the fission product names within all the lumped fission products of the core.""" fissionProductNames = self.fpModel.getAllFissionProductNames() self.assertGreater(len(fissionProductNames), 5) self.assertIn("XE135", fissionProductNames) def test_fpApplication(self): o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") fpModel = fissionProductModel.FissionProductModel(o.r, o.cs) # Set up the global LFPs and check that they are setup. self.assertTrue(fpModel._useGlobalLFPs) fpModel.interactBOL() for b in r.core.iterBlocks(): if b.isFuel(): self.assertTrue(b._lumpedFissionProducts is not None) else: self.assertTrue(b._lumpedFissionProducts is None) # now check if all depletable blocks do not have all nuclides if not detailedAxialExpansion fpModel.allBlocksNeedAllNucs = False fpModel.interactBOL() allNucsInProblem = set(r.blueprints.allNuclidesInProblem) for b in r.core.iterBlocks(): if isDepletable(b): if len(allNucsInProblem - set(b.getNuclides())) > 0: break else: self.assertTrue(False, "All blocks have all nuclides!") class TestFPMExplicitMC2Lib(unittest.TestCase): """ Tests the fission product model interface behavior when explicit fission products are enabled. These tests can use a smaller test reactor, and so will be faster. 
""" def setUp(self): o, r = loadTestReactor( customSettings={ CONF_FP_MODEL: "explicitFissionProducts", CONF_FISSION_PRODUCT_LIBRARY_NAME: "MC2-3", }, inputFileName="smallestTestReactor/armiRunSmallest.yaml", ) self.r = r self.nuclideBases = self.r.nuclideBases self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs) # Set up the global LFPs and check that they are setup. self.assertFalse(self.fpModel._useGlobalLFPs) def test_nuclideFlags(self): """Test that the nuclide flags contain the set of MC2-3 modeled nuclides.""" # Run the ``interactBOL`` here to trigger setting up the fission # products in the reactor data model. self.fpModel.interactBOL() for nb in self.nuclideBases.byMcc3Id.values(): self.assertIn(nb.name, self.r.blueprints.nuclideFlags.keys()) def test_nuclidesInModelFuel(self): """Test that the fuel blocks contain all the MC2-3 modeled nuclides.""" # Run the ``interactBOL`` here to trigger setting up the fission # products in the reactor data model. self.fpModel.interactBOL() b = self.r.core.getFirstBlock(Flags.FUEL) nuclideList = b.getNuclides() for nb in self.nuclideBases.byMcc3Id.values(): self.assertIn(nb.name, nuclideList) class TestFPMExplicitMC2LibSlow(unittest.TestCase): """ Tests the fission product model interface behavior when explicit fission products are enabled. These tests require a large test reactor, and will lead to slower tests. """ def setUp(self): o, r = loadTestReactor( customSettings={ CONF_FP_MODEL: "explicitFissionProducts", CONF_FISSION_PRODUCT_LIBRARY_NAME: "MC2-3", } ) self.r = r self.nuclideBases = self.r.nuclideBases self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs) # Set up the global LFPs and check that they are setup. self.assertFalse(self.fpModel._useGlobalLFPs) def test_nuclidesInModelAllDepletableBlocks(self): """Test that the depletable blocks contain all the MC2-3 modeled nuclides.""" # Check that there are some fuel and control blocks in the core model. 
fuelBlocks = self.r.core.getBlocks(Flags.FUEL) controlBlocks = self.r.core.getBlocks(Flags.CONTROL) self.assertGreater(len(fuelBlocks), 0) self.assertGreater(len(controlBlocks), 0) # prove that the control blocks are not depletable for b in controlBlocks: self.assertFalse(isDepletable(b)) # as a corrolary of the above, prove that no components in the control blocks are depletable for b in controlBlocks: for c in b.getComponents(): self.assertFalse(isDepletable(c)) # Force the the first component in the control blocks # to be labeled as depletable for checking that explicit # fission products can be assigned. for b in controlBlocks: c = b.getComponents()[0] c.p.flags |= Flags.DEPLETABLE # now each control block should be depletable for b in controlBlocks: self.assertTrue(isDepletable(b)) # as a corrolary of the above, prove that only the first component in each control block is depletable for b in controlBlocks: comps = list(b.getComponents()) for i, c in enumerate(comps): if i == 0: self.assertTrue(isDepletable(c)) else: self.assertFalse(isDepletable(c)) # Run the ``interactBOL`` here to trigger setting up the fission # products in the reactor data model. self.fpModel.interactBOL() # Check that the depletable blocks have all explicit # fission products in them. for b in self.r.core.iterBlocks(): nuclideList = b.getNuclides() if isDepletable(b): for nb in self.nuclideBases.byMcc3Id.values(): self.assertIn(nb.name, nuclideList) else: self.assertLess(len(b.getNuclides()), len(self.nuclideBases.byMcc3Id)) ================================================ FILE: armi/physics/neutronics/fissionProductModel/tests/test_lumpedFissionProduct.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for lumpedFissionProduce module.""" import io import math import os import unittest from armi.context import RES from armi.nucDirectory.nuclideBases import NuclideBases from armi.physics.neutronics.fissionProductModel import ( REFERENCE_LUMPED_FISSION_PRODUCT_FILE, lumpedFissionProduct, ) from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import ( CONF_FP_MODEL, CONF_LFP_COMPOSITION_FILE_PATH, ) from armi.reactor.flags import Flags from armi.reactor.tests.test_reactors import buildOperatorOfEmptyHexBlocks from armi.settings import Settings LFP_TEXT = """LFP35 GE73 5.9000E-06 LFP35 GE74 1.4000E-05 LFP35 GE76 1.6000E-04 LFP35 AS75 8.9000E-05 LFP35 KR85 8.9000E-05 LFP35 MO99 8.9000E-05 LFP35 SM150 8.9000E-05 LFP35 XE135 8.9000E-05 LFP39 XE135 8.9000E-05 LFP38 XE135 8.9000E-05 """ def getDummyLFPFile(): return lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT)) class TestFissionProductDefinitionFile(unittest.TestCase): """Test of the fission product model.""" def setUp(self): self.fpd = getDummyLFPFile() self.nuclideBases = NuclideBases() def test_createLFPs(self): """Test of the fission product model creation.""" lfps = self.fpd.createLFPsFromFile() xe135 = self.nuclideBases.fromName("XE135") self.assertEqual(len(lfps), 3) self.assertIn("LFP35", lfps) for lfp in lfps.values(): self.assertIn(xe135, lfp) def test_createReferenceLFPs(self): """Test of the reference fission product model creation.""" with open(REFERENCE_LUMPED_FISSION_PRODUCT_FILE, "r") as LFP_FILE: LFP_TEXT = LFP_FILE.read() fpd = 
lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT)) fpd.fName = REFERENCE_LUMPED_FISSION_PRODUCT_FILE lfps = fpd.createLFPsFromFile() self.assertEqual(len(lfps), 5) LFP_IDS = [ "LFP35", "LFP38", "LFP39", "LFP40", "LFP41", ] for lfp_id in LFP_IDS: self.assertIn(lfp_id, lfps) mo99 = self.nuclideBases.fromName("MO99") ref_mo99_yields = [0.00091, 0.00112, 0.00099, 0.00108, 0.00101] for ref_fp_yield, lfp_id in zip(ref_mo99_yields, LFP_IDS): lfp = lfps[lfp_id] self.assertIn(mo99, lfp) error = math.fabs(ref_fp_yield - lfp[mo99]) / ref_fp_yield self.assertLess(error, 1e-6) class TestLFP(unittest.TestCase): """Test of the lumped fission product yields.""" def setUp(self): self.fpd = lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT)) self.nuclideBases = NuclideBases() def test_getYield(self): """Test of the yield of a fission product.""" xe135 = self.nuclideBases.fromName("XE135") lfp = self.fpd.createSingleLFPFromFile("LFP39") lfp[xe135] = 1.2 val3 = lfp[xe135] self.assertEqual(val3, 1.2) self.assertEqual(lfp[5], 0.0) def test_gaseousYieldFraction(self): lfp = self.fpd.createSingleLFPFromFile("LFP39") # This is equal to the Xe yield set in the dummy ``LFP_TEXT`` # data for these tests. 
self.assertEqual(lfp.getGaseousYieldFraction(), 8.9000e-05) def test_isGas(self): """Tests that a nuclide is a gas or not at STP based on its chemical phase.""" nb = self.nuclideBases.byName["H1"] self.assertTrue(lumpedFissionProduct.isGas(nb)) nb = self.nuclideBases.byName["H2"] self.assertTrue(lumpedFissionProduct.isGas(nb)) nb = self.nuclideBases.byName["H3"] self.assertTrue(lumpedFissionProduct.isGas(nb)) nb = self.nuclideBases.byName["U235"] self.assertFalse(lumpedFissionProduct.isGas(nb)) nb = self.nuclideBases.byName["O16"] self.assertTrue(lumpedFissionProduct.isGas(nb)) nb = self.nuclideBases.byName["XE135"] self.assertTrue(lumpedFissionProduct.isGas(nb)) class TestLFPCollection(unittest.TestCase): """Test of the fission product collection.""" def setUp(self): fpd = lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT)) self.lfps = fpd.createLFPsFromFile() self.nuclideBases = NuclideBases() def test_getAllFissionProductNames(self): """Test to ensure the fission product names are present.""" names = self.lfps.getAllFissionProductNames() self.assertIn("XE135", names) self.assertIn("KR85", names) def test_getAllFissionProductNuclideBases(self): """Test to ensure the fission product nuclide bases are present.""" clideBases = self.lfps.getAllFissionProductNuclideBases() xe135 = self.nuclideBases.fromName("XE135") kr85 = self.nuclideBases.fromName("KR85") self.assertIn(xe135, clideBases) self.assertIn(kr85, clideBases) def test_duplicate(self): """Test to ensure that when we duplicate, we don't adjust the original file.""" newLfps = self.lfps.duplicate() ba = self.nuclideBases.fromName("XE135") lfp1 = self.lfps["LFP39"] lfp2 = newLfps["LFP39"] v1 = lfp1[ba] lfp1[ba] += 1.3 # make sure copy doesn't change w/ first. 
v2 = lfp2[ba] self.assertEqual(v1, v2) def test_getNumberDensities(self): o = buildOperatorOfEmptyHexBlocks() b = next(o.r.core.iterBlocks(Flags.FUEL)) fpDensities = self.lfps.getNumberDensities(objectWithParentDensities=b) for fp in ["GE73", "GE74", "GE76", "AS75", "KR85", "MO99", "SM150", "XE135"]: self.assertEqual(fpDensities[fp], 0.0) # basic test reactor has no fission products in it def test_getMassFrac(self): with self.assertRaises(ValueError): self.lfps.getMassFrac(oldMassFrac=None) oldMassFrac = { "LFP35": 0.5, "LFP38": 0.2, "LFP39": 0.3, } newMassFracs = self.lfps.getMassFrac(oldMassFrac) refMassFrac = { "GE73": 0.0034703064077030933, "GE74": 0.00834728937688672, "GE76": 0.09797894499881823, "AS75": 0.053783069618403435, "KR85": 0.0609551394006646, "MO99": 0.07100169460812283, "SM150": 0.1076193196365748, "XE135": 0.5968442359528263, } for fp, newMassFrac in newMassFracs.items(): self.assertAlmostEqual(newMassFrac, refMassFrac[fp.name]) class TestLFPFromRefFile(unittest.TestCase): """Tests loading from the `referenceFissionProducts.dat` file.""" def test_fissionProductYields(self): """Test that the fission product yields for the lumped fission products sums to 2.0.""" cs = Settings() cs[CONF_FP_MODEL] = "infinitelyDilute" cs[CONF_LFP_COMPOSITION_FILE_PATH] = os.path.join(RES, "referenceFissionProducts.dat") self.lfps = lumpedFissionProduct.lumpedFissionProductFactory(cs) for lfp in self.lfps.values(): self.assertAlmostEqual(lfp.getTotalYield(), 2.0, places=3) class TestLFPExplicit(unittest.TestCase): """Tests loading fission products with explicit modeling.""" def test_explicitFissionProducts(self): """Tests that there are no lumped fission products added when the `explicitFissionProducts` model is enabled.""" cs = Settings() cs[CONF_FP_MODEL] = "explicitFissionProducts" self.lfps = lumpedFissionProduct.lumpedFissionProductFactory(cs) self.assertIsNone(self.lfps) class TestMo99LFP(unittest.TestCase): """Test of the fission product model from Mo99.""" def 
setUp(self): self.lfps = lumpedFissionProduct._buildMo99LumpedFissionProduct() def test_getAllFissionProductNames(self): """Test to ensure that Mo99 is present, but other FP are not.""" names = self.lfps.getAllFissionProductNames() self.assertIn("MO99", names) self.assertNotIn("KR85", names) self.assertAlmostEqual(self.lfps["LFP35"].getTotalYield(), 2.0) ================================================ FILE: armi/physics/neutronics/globalFlux/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Global flux solvers.""" RX_ABS_MICRO_LABELS = ["nGamma", "fission", "nalph", "np", "nd", "nt"] RX_PARAM_NAMES = ["rateCap", "rateFis", "rateProdN2n", "rateProdFis", "rateAbs"] ================================================ FILE: armi/physics/neutronics/globalFlux/globalFluxInterface.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""The Global flux interface provide a base class for all neutronics tools that compute the neutron and/or photon flux. """ import math from typing import Dict, Optional import numpy as np from armi import interfaces, runLog from armi.physics import constants, executers, neutronics from armi.physics.neutronics.globalFlux import RX_ABS_MICRO_LABELS, RX_PARAM_NAMES from armi.reactor import geometry, reactors from armi.reactor.blocks import Block from armi.reactor.converters import geometryConverters, uniformMesh from armi.reactor.flags import Flags from armi.settings.caseSettings import Settings from armi.utils import getBurnSteps, getMaxBurnSteps, units ORDER = interfaces.STACK_ORDER.FLUX class GlobalFluxInterface(interfaces.Interface): """ A general abstract interface for global flux-calculating modules. Should be subclassed by more specific implementations. """ name = "GlobalFlux" # make sure to set this in subclasses purpose = "globalFlux" _ENERGY_BALANCE_REL_TOL = 1e-5 def __init__(self, r, cs): interfaces.Interface.__init__(self, r, cs) if self.cs["nCycles"] > 1000: self.cycleFmt = "04d" # produce ig0001.inp else: self.cycleFmt = "03d" # produce ig001.inp if getMaxBurnSteps(self.cs) > 10: self.nodeFmt = "03d" # produce ig001_001.inp else: self.nodeFmt = "1d" # produce ig001_1.inp. self._bocKeff = None # for tracking rxSwing self._setTightCouplingDefaults() def _setTightCouplingDefaults(self): """Enable tight coupling defaults for the interface. - allows users to set tightCoupling: true in settings without having to specify the specific tightCouplingSettings for this interface. 
- this is splt off from self.__init__ for testing """ if self.coupler is None and self.cs["tightCoupling"]: self.coupler = interfaces.TightCoupler("keff", 1.0e-4, self.cs["tightCouplingMaxNumIters"]) @staticmethod def getHistoryParams(): """Return parameters that will be added to assembly versus time history printouts.""" return ["detailedDpa", "detailedDpaPeak", "detailedDpaPeakRate"] def interactBOC(self, cycle=None): interfaces.Interface.interactBOC(self, cycle) self.r.core.p.rxSwing = 0.0 # zero out rxSwing until last time node. self.r.core.p.maxDetailedDpaThisCycle = 0.0 # zero out cumulative params self.r.core.p.dpaFullWidthHalfMax = 0.0 self.r.core.p.elevationOfACLP3Cycles = 0.0 self.r.core.p.elevationOfACLP7Cycles = 0.0 for b in self.r.core.iterBlocks(): b.p.detailedDpaThisCycle = 0.0 b.p.newDPA = 0.0 def interactEveryNode(self, cycle, node): """ Calculate flux, power, and keff for this cycle and node. Flux, power, and keff are generally calculated at every timestep to ensure flux is up to date with the reactor state. """ interfaces.Interface.interactEveryNode(self, cycle, node) self._setRxSwingRelatedParams() def interactCoupled(self, iteration): """Runs during a tightly-coupled physics iteration to updated the flux and power.""" interfaces.Interface.interactCoupled(self, iteration) self._setRxSwingRelatedParams() def _setRxSwingRelatedParams(self): """Set Params Related to Rx Swing.""" if self.r.p.timeNode == 0: # track boc uncontrolled keff for rxSwing param. 
self._bocKeff = self.r.core.p.keffUnc or self.r.core.p.keff # A 1 burnstep cycle would have 2 nodes, and the last node would be node index 1 (first is zero) lastNodeInCycle = getBurnSteps(self.cs)[self.r.p.cycle] if self.r.p.timeNode == lastNodeInCycle and self._bocKeff is not None: eocKeff = self.r.core.p.keffUnc or self.r.core.p.keff swing = (eocKeff - self._bocKeff) / (eocKeff * self._bocKeff) self.r.core.p.rxSwing = swing * units.ABS_REACTIVITY_TO_PCM runLog.info( f"BOC Uncontrolled keff: {self._bocKeff}, " f"EOC Uncontrolled keff: {self.r.core.p.keffUnc}, " f"Cycle Reactivity Swing: {self.r.core.p.rxSwing} pcm" ) def checkEnergyBalance(self): """Check that there is energy balance between the power generated and the specified power. .. impl:: Validate the energy generation matches user specifications. :id: I_ARMI_FLUX_CHECK_POWER :implements: R_ARMI_FLUX_CHECK_POWER This method checks that the global power computed from flux evaluation matches the global power specified from the user within a tolerance; if it does not, a ``ValueError`` is raised. The global power from the flux solve is computed by summing the block-wise power in the core. This value is then compared to the user-specified power and raises an error if relative difference is above :math:`10^{-5}`. """ powerGenerated = ( self.r.core.calcTotalParam("power", calcBasedOnFullObj=False, generationNum=2) / units.WATTS_PER_MW ) self.r.core.setPowerIfNecessary() specifiedPower = self.r.core.p.power / units.WATTS_PER_MW / self.r.core.powerMultiplier if not math.isclose(powerGenerated, specifiedPower, rel_tol=self._ENERGY_BALANCE_REL_TOL): raise ValueError( "The power generated in {} is {} MW, but the user specified power is {} MW.\n" "This indicates a software bug. Please report to the developers.".format( self.r.core, powerGenerated, specifiedPower ) ) def getIOFileNames(self, cycle, node, coupledIter=None, additionalLabel=""): """ Return the input and output file names for this run. 
Parameters ---------- cycle : int The cycle number node : int The burn node number (e.g. 0 for BOC, 1 for MOC, etc.) coupledIter : int, optional Coupled iteration number (for tightly-coupled cases) additionalLabel : str, optional An optional tag to the file names to differentiate them from another case. Returns ------- inName : str Input file name outName : str Output file name stdName : str Standard output file name """ timeId = "{0:" + self.cycleFmt + "}_{1:" + self.nodeFmt + "}" # build names with proper number of zeros if coupledIter is not None: timeId += "_{0:03d}".format(coupledIter) inName = self.cs.caseTitle + timeId.format(cycle, node) + "{}.{}.inp".format(additionalLabel, self.name) outName = self.cs.caseTitle + timeId.format(cycle, node) + "{}.{}.out".format(additionalLabel, self.name) stdName = outName.strip(".out") + ".stdout" return inName, outName, stdName def calculateKeff(self, label="keff"): """ Runs neutronics tool and returns keff without applying it to the reactor. Used for things like direct-eigenvalue reactivity coefficients and CR worth iterations. For anything more complicated than getting keff, clients should call ``getExecuter`` to build their case. """ raise NotImplementedError() class GlobalFluxInterfaceUsingExecuters(GlobalFluxInterface): """ A global flux interface that makes use of the ARMI Executer system to run. Using Executers is optional but seems to allow easy interoperability between the myriad global flux solvers in the world. If a new global flux solver does not fit easily into the Executer pattern, then it will be best to just start from the base GlobalFluxInterface rather than trying to adjust the Executer pattern to fit. Notes ----- This points library users to the Executer object, which is intended to provide commonly-used structure useful for many global flux plugins. """ def interactEveryNode(self, cycle, node): """ Calculate flux, power, and keff for this cycle and node. 
        Flux, power, and keff are generally calculated at every timestep to ensure flux
        is up to date with the reactor state.
        """
        executer = self.getExecuter(label=self.getLabel(self.cs.caseTitle, cycle, node))
        executer.run()
        GlobalFluxInterface.interactEveryNode(self, cycle, node)

    def interactCoupled(self, iteration):
        """Runs during a tightly-coupled physics iteration to update the flux and power."""
        executer = self.getExecuter(
            label=self.getLabel(self.cs.caseTitle, self.r.p.cycle, self.r.p.timeNode, iteration)
        )
        executer.run()
        GlobalFluxInterface.interactCoupled(self, iteration)

    def getTightCouplingValue(self):
        """Return the parameter value used for tight coupling convergence checks."""
        if self.coupler.parameter == "keff":
            return self.r.core.p.keff
        if self.coupler.parameter == "power":
            # assembly-normalized block power fractions
            # NOTE(review): assumes every assembly has nonzero total power — confirm
            scaledCorePowerDistribution = []
            for a in self.r.core:
                scaledPower = []
                assemPower = sum(b.p.power for b in a)
                for b in a:
                    scaledPower.append(b.p.power / assemPower)
                scaledCorePowerDistribution.append(scaledPower)
            return scaledCorePowerDistribution
        return None

    @staticmethod
    def getOptionsCls():
        """
        Get a blank options object.

        Subclass this to allow generic updating of options.
        """
        return GlobalFluxOptions

    @staticmethod
    def getExecuterCls():
        """Return the Executer class to use; subclasses may override."""
        return GlobalFluxExecuter

    def getExecuterOptions(self, label=None):
        """
        Get an executer options object populated from current user settings and reactor.

        If you want to set settings more deliberately (e.g. to specify a cross section
        library rather than use an auto-derived name), use ``getOptionsCls`` and build
        your own.
        """
        opts = self.getOptionsCls()(label)
        opts.fromUserSettings(self.cs)
        opts.fromReactor(self.r)
        return opts

    def getExecuter(self, options=None, label=None):
        """
        Get executer object for performing custom client calcs.

        This allows plugins to update options in a somewhat generic way. For example,
        reactivity coefficients plugin may want to request adjoint flux.
        """
        if options and label:
            raise ValueError(
                f"Cannot supply a label (`{label}`) and options at the same time. Apply label to options object first."
            )
        opts = options or self.getExecuterOptions(label)
        executer = self.getExecuterCls()(options=opts, reactor=self.r)
        return executer

    def calculateKeff(self, label="keff"):
        """
        Run global flux with current user options and just return keff without applying it.

        Used for things like direct-eigenvalue reactivity coefficients and CR worth iterations.
        """
        executer = self.getExecuter(label=label)
        executer.options.applyResultsToReactor = False
        executer.options.calcReactionRatesOnMeshConversion = False
        output = executer.run()
        return output.getKeff()

    @staticmethod
    def getLabel(caseTitle, cycle, node, iteration=None):
        """
        Make a label (input/output file name) for the executer based on cycle, node, iteration.

        Parameters
        ----------
        caseTitle : str, required
            The caseTitle for the ARMI run
        cycle : int, required
            The cycle number
        node : int, required
            The time node index
        iteration : int, optional
            The coupled iteration index
        """
        if iteration is not None:
            return f"{caseTitle}-flux-c{cycle}n{node}i{iteration}"
        else:
            return f"{caseTitle}-flux-c{cycle}n{node}"


class GlobalFluxOptions(executers.ExecutionOptions):
    """Data structure representing common options in Global Flux Solvers.

    .. impl:: Options for neutronics solvers.
        :id: I_ARMI_FLUX_OPTIONS
        :implements: R_ARMI_FLUX_OPTIONS

        This class functions as a data structure for setting and retrieving execution
        options for performing flux evaluations, these options involve:

        * What sort of problem is to be solved, i.e. real/adjoint,
          eigenvalue/fixed-source, neutron/gamma, boundary conditions
        * Convergence criteria for iterative algorithms
        * Geometry type and mesh conversion details
        * Specific parameters to be calculated after flux has been evaluated

        These options can be retrieved by directly accessing class members. The options
        are set by specifying a :py:class:`Settings <armi.settings.caseSettings.Settings>`
        object and optionally specifying a
        :py:class:`Reactor <armi.reactor.reactors.Reactor>` object.
    Attributes
    ----------
    adjoint : bool
        True if the ``CONF_NEUTRONICS_TYPE`` setting is set to ``adjoint`` or ``real``.
    calcReactionRatesOnMeshConversion : bool
        This option is used to recalculate reaction rates after a mesh conversion and
        remapping of neutron flux. This can be disabled in certain global flux
        implementations if reaction rates are not required, but by default it is enabled.
    eigenvalueProblem : bool
        Whether this is an eigenvalue problem or a fixed source problem
    includeFixedSource : bool
        This can happen in eig if Fredholm Alternative satisfied.
    photons : bool
        Run the photon/gamma uniform mesh converter?
    real : bool
        True if ``CONF_NEUTRONICS_TYPE`` setting is set to ``real``.
    aclpDoseLimit : float
        Dose limit in dpa used to position the above-core load pad (if one exists)
    boundaries : str
        External Neutronic Boundary Conditions. Reflective does not include axial.
    cs : Settings
        Settings for this run
    detailedAxialExpansion : bool
        Turn on detailed axial expansion? from settings
    dpaPerFluence : float
        A quick and dirty conversion that is used to get dpaPeak
    energyDepoCalcMethodStep : str
        For gamma transport/normalization
    epsEigenvalue : float
        Convergence criteria for calculating the eigenvalue in the global flux solver
    epsFissionSourceAvg : float
        Convergence criteria for average fission source, from settings
    epsFissionSourcePoint : float
        Convergence criteria for point fission source, from settings
    geomType : geometry.GeomType
        Reactor Core geometry type (HEX, RZ, RZT, etc)
    hasNonUniformAssems: bool
        Has any non-uniform assembly flags, from settings
    isRestart : bool
        Restart global flux case using outputs from last time as a guess
    kernelName : str
        The neutronics / depletion solver for global flux solve.
    loadPadElevation : float
        The elevation of the bottom of the above-core load pad (ACLP) from the bottom of
        the upper grid plate (in cm).
    loadPadLength : float
        The length of the load pad. Used to compute average and peak dose.
    maxOuters : int
        XY and Axial partial current sweep max outer iterations.
    savePhysicsFilesList : bool
        Is this timestamp in the list of savePhysicsFiles in the settings?
    symmetry : str
        Reactor symmetry: full core, third-core, etc
    xsKernel : str
        Lattice Physics Kernel, from settings
    """

    def __init__(self, label: Optional[str] = None):
        executers.ExecutionOptions.__init__(self, label)
        # have defaults
        self.adjoint: bool = False
        self.calcReactionRatesOnMeshConversion: bool = True
        self.eigenvalueProblem: bool = True
        self.includeFixedSource: bool = False
        self.photons: bool = False
        self.real: bool = True

        # no defaults; populated by fromUserSettings/fromReactor or by clients
        self.aclpDoseLimit: Optional[float] = None
        self.boundaries: Optional[str] = None
        self.cs: Optional[Settings] = None
        self.detailedAxialExpansion: Optional[bool] = None
        self.dpaPerFluence: Optional[float] = None
        self.energyDepoCalcMethodStep: Optional[str] = None
        self.epsEigenvalue: Optional[float] = None
        self.epsFissionSourceAvg: Optional[float] = None
        self.epsFissionSourcePoint: Optional[float] = None
        self.geomType: Optional[geometry.GeomType] = None
        self.hasNonUniformAssems: Optional[bool] = None
        self.isRestart: Optional[bool] = None
        self.kernelName: Optional[str] = None
        self.loadPadElevation: Optional[float] = None
        self.loadPadLength: Optional[float] = None
        self.maxOuters: Optional[int] = None
        self.savePhysicsFilesList: Optional[bool] = None
        self.symmetry: Optional[str] = None
        self.xsKernel: Optional[str] = None

    def fromUserSettings(self, cs: Settings):
        """
        Map user input settings from cs to a set of specific global flux options.

        This is not required; these options can alternatively be set programmatically.
""" from armi.physics.neutronics.settings import ( CONF_ACLP_DOSE_LIMIT, CONF_BOUNDARIES, CONF_DPA_PER_FLUENCE, CONF_EIGEN_PROB, CONF_LOAD_PAD_ELEVATION, CONF_LOAD_PAD_LENGTH, CONF_NEUTRONICS_KERNEL, CONF_RESTART_NEUTRONICS, CONF_XS_KERNEL, ) from armi.settings.fwSettings.globalSettings import ( CONF_DETAILED_AXIAL_EXPANSION, CONF_NON_UNIFORM_ASSEM_FLAGS, CONF_PHYSICS_FILES, ) self.kernelName = cs[CONF_NEUTRONICS_KERNEL] self.setRunDirFromCaseTitle(cs.caseTitle) self.isRestart = cs[CONF_RESTART_NEUTRONICS] self.adjoint = neutronics.adjointCalculationRequested(cs) self.real = neutronics.realCalculationRequested(cs) self.detailedAxialExpansion = cs[CONF_DETAILED_AXIAL_EXPANSION] self.hasNonUniformAssems = any([Flags.fromStringIgnoreErrors(f) for f in cs[CONF_NON_UNIFORM_ASSEM_FLAGS]]) self.eigenvalueProblem = cs[CONF_EIGEN_PROB] # dose/dpa specific (should be separate subclass?) self.dpaPerFluence = cs[CONF_DPA_PER_FLUENCE] self.aclpDoseLimit = cs[CONF_ACLP_DOSE_LIMIT] self.loadPadElevation = cs[CONF_LOAD_PAD_ELEVATION] self.loadPadLength = cs[CONF_LOAD_PAD_LENGTH] self.boundaries = cs[CONF_BOUNDARIES] self.xsKernel = cs[CONF_XS_KERNEL] self.cs = cs self.savePhysicsFilesList = cs[CONF_PHYSICS_FILES] def fromReactor(self, reactor: reactors.Reactor): self.geomType = reactor.core.geomType self.symmetry = reactor.core.symmetry cycleNodeStamp = f"{reactor.p.cycle:03d}{reactor.p.timeNode:03d}" if self.savePhysicsFilesList: self.savePhysicsFiles = cycleNodeStamp in self.savePhysicsFilesList else: self.savePhysicsFiles = False class GlobalFluxExecuter(executers.DefaultExecuter): """ A short-lived object that coordinates the prep, execution, and processing of a flux solve. 
    There are many forms of global flux solves:

    * Eigenvalue/Fixed source
    * Adjoint/real
    * Diffusion/PN/SN/MC
    * Finite difference/nodal

    There are also many reasons someone might need a flux solve:

    * Update multigroup flux and power on reactor and compute keff
    * Just compute keff in a temporary perturbed state
    * Just compute flux and adjoint flux on a state to

    There may also be some required transformations when a flux solve is done:

    * Add/remove edge assemblies
    * Apply a uniform axial mesh

    There are also I/O performance complexities, including running on fast local paths
    and copying certain user-defined files back to the working directory on error or
    completion. Given all these options and possible needs for information from global
    flux, this class provides a unified interface to everything.

    .. impl:: Ensure the mesh in the reactor model is appropriate for neutronics solver execution.
        :id: I_ARMI_FLUX_GEOM_TRANSFORM
        :implements: R_ARMI_FLUX_GEOM_TRANSFORM

        The primary purpose of this class is perform geometric and mesh transformations
        on the reactor model to ensure a flux evaluation can properly perform. This
        includes:

        * Applying a uniform axial mesh for the 3D flux solve
        * Expanding symmetrical geometries to full-core if necessary
        * Adding/removing edge assemblies if necessary
        * Undoing any transformations that might affect downstream calculations
    """

    def __init__(self, options: GlobalFluxOptions, reactor):
        executers.DefaultExecuter.__init__(self, options, reactor)
        self.options: GlobalFluxOptions
        # geometry converters applied before the solve, keyed by kind ("axial", "edgeAssems")
        self.geomConverters: Dict[str, geometryConverters.GeometryConverter] = {}

    def _performGeometryTransformations(self, makePlots=False):
        """
        Apply geometry conversions to make reactor work in neutronics.

        There are two conditions where things must happen:

        1. If you are doing finite-difference, you need to add the edge assemblies (fast).
           For this, we just modify the reactor in place

        2. If you are doing detailed axial expansion, you need to average out the axial mesh (slow!)
           For this we need to create a whole copy of the reactor and use that.

        In both cases, we need to undo the modifications between reading the output and
        applying the result to the data model.

        See Also
        --------
        _undoGeometryTransformations
        """
        if any(self.geomConverters):
            raise RuntimeError(
                "The reactor has been transformed, but not restored to the original.\n"
                + "Geometry converter is set to {} \n.".format(self.geomConverters)
                + "This is a programming error and requires further investigation."
            )
        neutronicsReactor = self.r
        converter = self.geomConverters.get("axial")
        if not converter:
            if self.options.detailedAxialExpansion or self.options.hasNonUniformAssems:
                converter = uniformMesh.converterFactory(self.options)
                converter.convert(self.r)
                neutronicsReactor = converter.convReactor

                if makePlots:
                    converter.plotConvertedReactor()

                self.geomConverters["axial"] = converter

        if self.edgeAssembliesAreNeeded():
            converter = self.geomConverters.get("edgeAssems", geometryConverters.EdgeAssemblyChanger())
            converter.addEdgeAssemblies(neutronicsReactor.core)
            self.geomConverters["edgeAssems"] = converter

        self.r = neutronicsReactor

    def _undoGeometryTransformations(self):
        """
        Restore original data model state and/or apply results to it.

        Notes
        -----
        These transformations occur in the opposite order than that which they were
        applied in. Otherwise, the uniform mesh guy would try to add info to assem's on
        the source reactor that don't exist.

        See Also
        --------
        _performGeometryTransformations
        """
        geomConverter = self.geomConverters.get("edgeAssems")
        if geomConverter:
            geomConverter.scaleParamsRelatedToSymmetry(
                self.r.core, paramsToScaleSubset=self.options.paramsToScaleSubset
            )

            # Resets the reactor core model to the correct symmetry and removes
            # stored attributes on the converter to ensure that there is no
            # state data that is long-lived on the object in case the garbage
            # collector does not remove it. Additionally, this will reset the
            # global assembly counter.
            geomConverter.removeEdgeAssemblies(self.r.core)

        meshConverter = self.geomConverters.get("axial")
        if meshConverter:
            if self.options.applyResultsToReactor or self.options.hasNonUniformAssems:
                meshConverter.applyStateToOriginal()
            self.r = meshConverter._sourceReactor

            # Resets the stored attributes on the converter to ensure that there is no
            # state data that is long-lived on the object in case the garbage collector
            # does not remove it. Additionally, this will reset the global assembly counter.
            meshConverter.reset()

        # clear the converters in case this function gets called twice
        self.geomConverters = {}

    def edgeAssembliesAreNeeded(self) -> bool:
        """
        True if edge assemblies are needed in this calculation.

        We only need them in finite difference cases that are not full core.
        """
        return (
            "FD" in self.options.kernelName
            and self.options.symmetry.domain == geometry.DomainType.THIRD_CORE
            and self.options.symmetry.boundary == geometry.BoundaryType.PERIODIC
            and self.options.geomType == geometry.GeomType.HEX
        )


class GlobalFluxResultMapper(interfaces.OutputReader):
    """
    A short-lived class that maps neutronics output data to a reactor model.

    Neutronics results can come from a file or a pipe or in memory. This is always
    subclassed for specific neutronics runs but contains some generic methods that are
    universally useful for any global flux calculation. These are mostly along the lines
    of information that can be derived from other information, like dpa rate coming from
    dpa deltas and cycle length.
    """

    def getKeff(self):
        raise NotImplementedError()

    def clearFlux(self):
        """Delete flux on all blocks. Needed to prevent stale flux when partially reloading."""
        for b in self.r.core.iterBlocks():
            b.p.mgFlux = []
            b.p.adjMgFlux = []
            b.p.mgFluxGamma = []
            b.p.extSrc = []

    def _renormalizeNeutronFluxByBlock(self, renormalizationCorePower):
        """
        Normalize the neutron flux within each block to meet the renormalization power.
        Parameters
        ----------
        renormalizationCorePower: float
            Specified power to renormalize the neutron flux for using the isotopic energy
            generation rates on the cross section libraries (in Watts)

        See Also
        --------
        getTotalEnergyGenerationConstants
        """
        # update the block power param here as well so
        # the ratio/multiplications below are consistent
        currentCorePower = 0.0
        for b in self.r.core.iterBlocks():
            # The multi-group flux is volume integrated, so J/cm * n-cm/s gives units of Watts
            b.p.power = np.dot(b.getTotalEnergyGenerationConstants(), b.getIntegratedMgFlux())
            b.p.flux = sum(b.getMgFlux())
            currentCorePower += b.p.power

        # NOTE(review): assumes currentCorePower is nonzero (i.e. flux has been computed) — confirm
        powerRatio = renormalizationCorePower / currentCorePower
        runLog.info(
            "Renormalizing the neutron flux in {:<s} by a factor of {:<8.5e}, "
            "which is derived from the current core power of {:<8.5e} W and "
            "desired power of {:<8.5e} W".format(self.r.core, powerRatio, currentCorePower, renormalizationCorePower)
        )
        for b in self.r.core.iterBlocks():
            b.p.mgFlux *= powerRatio
            b.p.flux *= powerRatio
            b.p.fluxPeak *= powerRatio
            b.p.power *= powerRatio
            b.p.pdens = b.p.power / b.getVolume()

    def _updateDerivedParams(self):
        """Computes some params that are derived directly from flux and power parameters."""
        for maxParamKey in ["percentBu", "pdens"]:
            maxVal = self.r.core.getMaxBlockParam(maxParamKey, Flags.FUEL)
            if maxVal != 0.0:
                self.r.core.p["max" + maxParamKey] = maxVal

        maxFlux = self.r.core.getMaxBlockParam("flux")
        self.r.core.p.maxFlux = maxFlux

        # convert W/cm^2 to MW/m^2 for areal power density
        conversion = units.CM2_PER_M2 / units.WATTS_PER_MW
        for a in self.r.core:
            area = a.getArea()
            for b in a:
                b.p.arealPd = b.p.power / area * conversion
            a.p.arealPd = a.calcTotalParam("arealPd")
        self.r.core.p.maxPD = self.r.core.getMaxParam("arealPd")
        self._updateAssemblyLevelParams()

    def getDpaXs(self, b: Block):
        """Determine which cross sections should be used to compute dpa for a block.
        Parameters
        ----------
        b: Block
            The block we want the cross sections for

        Returns
        -------
        list : cross section values
        """
        # imported locally to avoid a circular import at module load time
        from armi.physics.neutronics.settings import (
            CONF_DPA_XS_SET,
            CONF_GRID_PLATE_DPA_XS_SET,
        )

        # grid plate blocks may use a dedicated dpa cross section set when configured
        if self.cs[CONF_GRID_PLATE_DPA_XS_SET] and b.hasFlags(Flags.GRID_PLATE):
            dpaXsSetName = self.cs[CONF_GRID_PLATE_DPA_XS_SET]
        else:
            dpaXsSetName = self.cs[CONF_DPA_XS_SET]
        try:
            return constants.DPA_CROSS_SECTIONS[dpaXsSetName]
        except KeyError:
            raise KeyError("DPA cross section set {} does not exist".format(dpaXsSetName))

    def getBurnupPeakingFactor(self, b: Block):
        """
        Get the radial peaking factor to be applied to burnup and DPA for a Block.

        This may be informed by previous runs which used detailed pin reconstruction and
        rotation. In that case, it should be set on the cs setting
        ``burnupPeakingFactor``.

        Otherwise, it just takes the current flux peaking, which is typically
        conservatively high.

        Parameters
        ----------
        b: Block
            The block we want the peaking factor for

        Returns
        -------
        burnupPeakingFactor : float
            The peak/avg factor for burnup and DPA.
        """
        burnupPeakingFactor = self.cs["burnupPeakingFactor"]
        if not burnupPeakingFactor and b.p.fluxPeak:
            burnupPeakingFactor = b.p.fluxPeak / b.p.flux
        elif not burnupPeakingFactor:
            # no peak available. Finite difference model?
            # Use 0.0 for peaking so that there isn't misuse of peaking values that don't
            # actually have peaking applied.
            # Use self.cs["burnupPeakingFactor"] or b.p.fluxPeak for different behavior
            burnupPeakingFactor = 0.0
        return burnupPeakingFactor

    def updateDpaRate(self, blockList=None):
        """
        Update state parameters that can be known right after the flux is computed.
        See Also
        --------
        updateFluenceAndDpa : uses values computed here to update cumulative dpa
        """
        if blockList is None:
            blockList = self.r.core.iterBlocks()

        # hasDPA stays False only when blockList is empty
        hasDPA = False
        for b in blockList:
            xs = self.getDpaXs(b)
            hasDPA = True
            flux = b.getMgFlux()  # n/cm^2/s
            dpaPerSecond = computeDpaRate(flux, xs)
            b.p.detailedDpaPeakRate = dpaPerSecond * self.getBurnupPeakingFactor(b)
            b.p.detailedDpaRate = dpaPerSecond

        if not hasDPA:
            return
        peakRate = self.r.core.getMaxBlockParam("detailedDpaPeakRate", typeSpec=Flags.GRID_PLATE, absolute=False)
        self.r.core.p.peakGridDpaAt60Years = peakRate * 60.0 * units.SECONDS_PER_YEAR

        # also update maxes at this point (since this runs at every timenode, not just those w/ depletion steps)
        self.updateMaxDpaParams()

    def updateMaxDpaParams(self):
        """
        Update params that track the peak dpa.

        Only consider fuel because CRs, etc. aren't always reset.
        """
        maxDpa = self.r.core.getMaxBlockParam("detailedDpaPeak", Flags.FUEL)
        self.r.core.p.maxdetailedDpaPeak = maxDpa
        self.r.core.p.maxDPA = maxDpa

        # add grid plate max
        maxGridDose = self.r.core.getMaxBlockParam("detailedDpaPeak", Flags.GRID_PLATE)
        self.r.core.p.maxGridDpa = maxGridDose

    def _updateAssemblyLevelParams(self):
        # Roll block-level reaction-rate/burnup params up to assembly level.
        for a in self.r.core:
            totalAbs = 0.0  # for calculating assembly average k-inf
            totalSrc = 0.0
            for b in a:
                totalAbs += b.p.rateAbs
                totalSrc += b.p.rateProdNet

            a.p.maxPercentBu = a.getMaxParam("percentBu")
            a.p.maxDpaPeak = a.getMaxParam("detailedDpaPeak")
            a.p.timeToLimit = a.getMinParam("timeToLimit", Flags.FUEL)
            a.p.buLimit = a.getMaxParam("buLimit")

            if totalAbs > 0:
                a.p.kInf = totalSrc / totalAbs  # assembly average k-inf.


def computeDpaRate(mgFlux, dpaXs):
    r"""
    Compute the DPA rate incurred by exposure of a certain flux spectrum.

    .. impl:: Compute DPA rates.
        :id: I_ARMI_FLUX_DPA
        :implements: R_ARMI_FLUX_DPA

        This method calculates DPA rates using the inputted multigroup flux and DPA cross
        sections. Displacements calculated by displacement cross-section:

    ..
math:: :nowrap: \begin{aligned} \text{Displacement rate} &= \phi N_{\text{HT9}} \sigma \\ &= (\#/\text{cm}^2/s) \cdot (1/cm^3) \cdot (\text{barn})\\ &= (\#/\text{cm}^5/s) \cdot \text{(barn)} * 10^{-24} \text{cm}^2/\text{barn} \\ &= \#/\text{cm}^3/s \end{aligned} :: DPA rate = displacement density rate / (number of atoms/cc) = dr [#/cm^3/s] / (nHT9) [1/cm^3] = flux * barn * 1e-24 .. math:: \frac{\text{dpa}}{s} = \frac{\phi N \sigma}{N} = \phi * \sigma the number density of the structural material cancels out. It's in the macroscopic cross-section and in the original number of atoms. Parameters ---------- mgFlux : list multigroup neutron flux in #/cm^2/s dpaXs : list DPA cross section in barns to convolute with flux to determine DPA rate Returns ------- dpaPerSecond : float The dpa/s in this material due to this flux Raises ------ RuntimeError Negative dpa rate. """ displacements = 0.0 if len(mgFlux) != len(dpaXs): runLog.warning( "Multigroup flux of length {} is incompatible with dpa cross section of length {};" "dpa rate will be set do 0.0".format(len(mgFlux), len(dpaXs)), single=True, ) return displacements for flux, barns in zip(mgFlux, dpaXs): displacements += flux * barns dpaPerSecond = displacements * units.CM2_PER_BARN if dpaPerSecond < 0: runLog.warning( "Negative DPA rate calculated at {}".format(dpaPerSecond), single=True, label="negativeDpaPerSecond", ) # ensure physical meaning of dpaPerSecond, it is likely just slightly negative if dpaPerSecond < -1.0e-10: raise RuntimeError("Calculated DPA rate is substantially negative at {}".format(dpaPerSecond)) dpaPerSecond = 0.0 return dpaPerSecond def calcReactionRates(obj, keff, lib): r""" Compute 1-group reaction rates for this object (usually a block). .. 
    impl:: Return the reaction rates for a given ArmiObject
        :id: I_ARMI_FLUX_RX_RATES
        :implements: R_ARMI_FLUX_RX_RATES

        This method computes 1-group reaction rates for the inputted
        :py:class:`ArmiObject <armi.reactor.composites.ArmiObject>` These reaction rates
        include:

        * fission
        * nufission
        * n2n
        * absorption

        Scatter could be added as well. This function is quite slow so it is skipped for
        now as it is uncommonly needed.

    Reaction rates are:

    .. math::

        \Sigma \phi = \sum_{\text{nuclides}} \sum_{\text{energy}} \Sigma \phi

    The units of :math:`N \sigma \phi` are::

        [#/bn-cm] * [bn] * [#/cm^2/s] = [#/cm^3/s]

    The group-averaged microscopic cross section is:

    .. math::

        \sigma_g = \frac{\int_{E_g}^{E_{g+1}} \phi(E) \sigma(E) dE}{\int_{E_g}^{E_{g+1}} \phi(E) dE}

    Parameters
    ----------
    obj : Block
        The object to compute reaction rates on. Notionally this could be upgraded to be
        any kind of ArmiObject but with params defined as they are it currently is only
        implemented for a block.

    keff : float
        The keff of the core. This is required to get the neutron production rate correct
        via the neutron balance statement (since nuSigF has a 1/keff term).

    lib : XSLibrary
        Microscopic cross sections to use in computing the reaction rates.
    """
    rate = {}
    for simple in RX_PARAM_NAMES:
        rate[simple] = 0.0

    numberDensities = obj.getNumberDensities()

    for nucName, numberDensity in numberDensities.items():
        if numberDensity == 0.0:
            continue
        nucrate = {}
        for simple in RX_PARAM_NAMES:
            nucrate[simple] = 0.0

        nucMc = lib.getNuclide(nucName, obj.getMicroSuffix())
        micros = nucMc.micros

        # absorption is fission + capture (no n2n here)
        mgFlux = obj.getMgFlux()
        for name in RX_ABS_MICRO_LABELS:
            for g, (groupFlux, xs) in enumerate(zip(mgFlux, micros[name])):
                dphi = numberDensity * groupFlux
                nucrate["rateAbs"] += dphi * xs

                if name != "fission":
                    nucrate["rateCap"] += dphi * xs
                else:
                    nucrate["rateFis"] += dphi * xs
                    # scale nu by keff.
                    nucrate["rateProdFis"] += dphi * xs * micros.neutronsPerFission[g] / keff

        for groupFlux, n2nXs in zip(mgFlux, micros.n2n):
            # this n2n xs is reaction based. Multiply by 2.
            dphi = numberDensity * groupFlux
            nucrate["rateProdN2n"] += 2.0 * dphi * n2nXs

        for simple in RX_PARAM_NAMES:
            if nucrate[simple]:
                rate[simple] += nucrate[simple]

    for paramName, val in rate.items():
        obj.p[paramName] = val  # put in #/cm^3/s

    vFuel = obj.getComponentAreaFrac(Flags.FUEL) if rate["rateFis"] > 0.0 else 1.0
    obj.p.fisDens = rate["rateFis"] / vFuel
    obj.p.fisDensHom = rate["rateFis"]



================================================
FILE: armi/physics/neutronics/globalFlux/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.



================================================
FILE: armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. """Tests for generic global flux interface.""" import unittest from unittest.mock import patch import numpy as np from armi import settings from armi.nuclearDataIO.cccc import isotxs from armi.physics.neutronics.globalFlux import globalFluxInterface from armi.physics.neutronics.settings import ( CONF_GRID_PLATE_DPA_XS_SET, CONF_XS_KERNEL, ) from armi.reactor import geometry from armi.reactor.blocks import HexBlock from armi.reactor.flags import Flags from armi.reactor.tests import test_blocks, test_reactors from armi.tests import ISOAA_PATH class MockReactorParams: def __init__(self): self.cycle = 1 self.timeNode = 2 class MockCoreParams: pass class MockCore: def __init__(self): # just pick a random geomType self.geomType = geometry.GeomType.CARTESIAN self.symmetry = "full" self.p = MockCoreParams() class MockReactor: def __init__(self): self.core = MockCore() self.o = None self.p = MockReactorParams() class MockGlobalFluxInterface(globalFluxInterface.GlobalFluxInterface): """ Add fake keff calc to a the general gf interface. This simulates a 1000 pcm keff increase over 1 step. 
""" def interactBOC(self, cycle=None): globalFluxInterface.GlobalFluxInterface.interactBOC(self, cycle=cycle) self.r.core.p.keff = 1.00 def interactEveryNode(self, cycle, node): globalFluxInterface.GlobalFluxInterface.interactEveryNode(self, cycle, node) self.r.core.p.keff = 1.01 class MockGlobalFluxWithExecuters(globalFluxInterface.GlobalFluxInterfaceUsingExecuters): def getExecuterCls(self): return MockGlobalFluxExecuter class MockGlobalFluxWithExecutersNonUniform(MockGlobalFluxWithExecuters): def getExecuterOptions(self, label=None): """Return modified executerOptions.""" opts = globalFluxInterface.GlobalFluxInterfaceUsingExecuters.getExecuterOptions(self, label=label) opts.hasNonUniformAssems = True # to increase test coverage return opts class MockGlobalFluxExecuter(globalFluxInterface.GlobalFluxExecuter): """Tests for code that uses Executers, which rely on OutputReaders to update state.""" def _readOutput(self): class MockOutputReader: def apply(self, r): r.core.p.keff += 0.01 def getKeff(self): return 1.05 return MockOutputReader() class TestGlobalFluxOptions(unittest.TestCase): """Tests for GlobalFluxOptions.""" def test_readFromSettings(self): """Test reading global flux options from case settings. .. test:: Tests GlobalFluxOptions. :id: T_ARMI_FLUX_OPTIONS_CS :tests: R_ARMI_FLUX_OPTIONS """ cs = settings.Settings() opts = globalFluxInterface.GlobalFluxOptions("neutronics-run") opts.fromUserSettings(cs) self.assertFalse(opts.adjoint) def test_readFromReactors(self): """Test reading global flux options from reactor objects. .. test:: Tests GlobalFluxOptions. 
            :id: T_ARMI_FLUX_OPTIONS_R
            :tests: R_ARMI_FLUX_OPTIONS
        """
        reactor = MockReactor()
        opts = globalFluxInterface.GlobalFluxOptions("neutronics-run")
        opts.fromReactor(reactor)
        self.assertEqual(opts.geomType, geometry.GeomType.CARTESIAN)
        self.assertFalse(opts.savePhysicsFiles)

    def test_savePhysicsFiles(self):
        reactor = MockReactor()
        opts = globalFluxInterface.GlobalFluxOptions("neutronics-run")

        # savePhysicsFilesList matches MockReactor parameters
        opts.savePhysicsFilesList = ["001002"]
        opts.fromReactor(reactor)
        self.assertTrue(opts.savePhysicsFiles)

        # savePhysicsFilesList does not match MockReactor parameters
        opts.savePhysicsFilesList = ["001000"]
        opts.fromReactor(reactor)
        self.assertFalse(opts.savePhysicsFiles)


class TestGFI(unittest.TestCase):
    def test_computeDpaRate(self):
        """
        Compute DPA and DPA rates from multi-group neutron flux and cross sections.

        .. test:: Compute DPA rates.
            :id: T_ARMI_FLUX_DPA
            :tests: R_ARMI_FLUX_DPA
        """
        xs = [1, 2, 3]
        flx = [0.5, 0.75, 2]
        res = globalFluxInterface.computeDpaRate(flx, xs)
        # dot(flux, xs) scaled by the barn-to-cm^2 conversion (1e-24)
        self.assertEqual(res, 10**-24 * (0.5 + 1.5 + 6))

    def test_interaction(self):
        """
        Ensure the basic interaction hooks work.

        Check that a 1000 pcm rx swing is observed due to the mock.
""" cs = settings.Settings() cs["burnSteps"] = 2 _o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") gfi = MockGlobalFluxInterface(r, cs) bocKeff = 1.1 r.core.p.keffUnc = 1.1 gfi.interactBOC() r.p.cycle, r.p.timeNode = 0, 0 gfi.interactEveryNode(0, 0) self.assertAlmostEqual(gfi._bocKeff, r.core.p.keffUnc) r.core.p.keffUnc = 1.05 r.p.cycle, r.p.timeNode = 0, 1 gfi.interactEveryNode(0, 1) # doesn't change since its not the first node self.assertAlmostEqual(gfi._bocKeff, bocKeff) r.core.p.keffUnc = 1.01 r.p.cycle, r.p.timeNode = 0, 2 gfi.interactEveryNode(0, 2) self.assertAlmostEqual(gfi._bocKeff, bocKeff) self.assertAlmostEqual(r.core.p.rxSwing, -1e5 * (1.1 - 1.01) / (1.1 * 1.01)) gfi.interactBOC(0) # now its zeroed at BOC self.assertAlmostEqual(r.core.p.rxSwing, 0) def test_getIOFileNames(self): cs = settings.Settings() gfi = MockGlobalFluxInterface(MockReactor(), cs) inf, _outf, _stdname = gfi.getIOFileNames(1, 2, 1) self.assertEqual(inf, "armi001_2_001.GlobalFlux.inp") def test_getHistoryParams(self): params = globalFluxInterface.GlobalFluxInterface.getHistoryParams() self.assertEqual(len(params), 3) self.assertIn("detailedDpa", params) def test_checkEnergyBalance(self): """Test energy balance check. .. test:: Block-wise power is consistent with reactor data model power. 
:id: T_ARMI_FLUX_CHECK_POWER :tests: R_ARMI_FLUX_CHECK_POWER """ cs = settings.Settings() _o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") gfi = MockGlobalFluxInterface(r, cs) self.assertEqual(gfi.checkEnergyBalance(), None) # Test when nameplate power doesn't equal sum of block power r.core.p.power = 1e-10 with self.assertRaises(ValueError): gfi.checkEnergyBalance() class TestGFIWithExecuters(unittest.TestCase): """Tests for the default global flux execution.""" @classmethod def setUpClass(cls): cls.cs = settings.Settings() cls.r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")[1] def setUp(self): self.r.core.p.keff = 1.0 self.gfi = MockGlobalFluxWithExecuters(self.r, self.cs) @patch("armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxExecuter._execute") @patch("armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxExecuter._performGeometryTransformations") def test_executerInteraction(self, mockGeometryTransform, mockExecute): """Run the global flux interface and executer though one time now. .. test:: Run the global flux interface to check that the mesh converter is called before the neutronics solver. 
:id: T_ARMI_FLUX_GEOM_TRANSFORM_ORDER :tests: R_ARMI_FLUX_GEOM_TRANSFORM """ call_order = [] mockGeometryTransform.side_effect = lambda *a, **kw: call_order.append(mockGeometryTransform) mockExecute.side_effect = lambda *a, **kw: call_order.append(mockExecute) gfi = self.gfi gfi.interactBOC() gfi.interactEveryNode(0, 0) self.assertEqual([mockGeometryTransform, mockExecute], call_order) def test_calculateKeff(self): self.assertEqual(self.gfi.calculateKeff(), 1.05) # set in mock def test_getExecuterCls(self): class0 = globalFluxInterface.GlobalFluxInterfaceUsingExecuters.getExecuterCls() self.assertEqual(class0, globalFluxInterface.GlobalFluxExecuter) def test_setTightCouplingDefaults(self): """Assert that tight coupling defaults are only set if cs["tightCoupling"]=True.""" self.assertIsNone(self.gfi.coupler) self._setTightCouplingTrue() self.assertEqual(self.gfi.coupler.parameter, "keff") self._setTightCouplingFalse() def test_getTightCouplingValue(self): """Test getTightCouplingValue returns the correct value for keff and type for power.""" self._setTightCouplingTrue() self.assertEqual(self.gfi.getTightCouplingValue(), 1.0) # set in setUp self.gfi.coupler.parameter = "power" for a in self.r.core: for b in a: b.p.power = 10.0 self.assertEqual( self.gfi.getTightCouplingValue(), self._getCouplingPowerDistributions(self.r.core), ) self._setTightCouplingFalse() @staticmethod def _getCouplingPowerDistributions(core): scaledPowers = [] for a in core: assemblyPower = sum(b.p.power for b in a) scaledPowers.append([b.p.power / assemblyPower for b in a]) return scaledPowers def _setTightCouplingTrue(self): self.cs["tightCoupling"] = True self.gfi._setTightCouplingDefaults() def _setTightCouplingFalse(self): self.cs["tightCoupling"] = False class TestGFIWithExecutersNonUniform(unittest.TestCase): """Tests for global flux execution with non-uniform assemblies.""" @classmethod def setUpClass(cls): cs = settings.Settings() _o, cls.r = 
test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") cls.r.core.p.keff = 1.0 cls.gfi = MockGlobalFluxWithExecutersNonUniform(cls.r, cs) @patch("armi.reactor.converters.uniformMesh.converterFactory") def test_executerInteractionNonUniformAssems(self, mockConverterFactory): """Run the global flux interface with non-uniform assemblies. This will serve as a broad end-to-end test of the interface, and also stress test the mesh issues with non-uniform assemblies. .. test:: Run the global flux interface to show the geometry converter is called when the nonuniform mesh option is used. :id: T_ARMI_FLUX_GEOM_TRANSFORM_CONV :tests: R_ARMI_FLUX_GEOM_TRANSFORM """ gfi = self.gfi gfi.interactBOC() gfi.interactEveryNode(0, 0) self.assertTrue(gfi.getExecuterOptions().hasNonUniformAssems) mockConverterFactory.assert_called() def test_calculateKeff(self): self.assertEqual(self.gfi.calculateKeff(), 1.05) # set in mock def test_getExecuterCls(self): class0 = globalFluxInterface.GlobalFluxInterfaceUsingExecuters.getExecuterCls() self.assertEqual(class0, globalFluxInterface.GlobalFluxExecuter) class TestGlobalFluxResultMapper(unittest.TestCase): """ Test that global flux result mappings run. Notes ----- This does not test that the flux mapping is correct. That has to be done at another level. 
""" def test_mapper(self): # Switch to MC2v2 setting to make sure the isotopic/elemental expansions are compatible with # actually doing some math using the ISOAA test microscopic library o, r = test_reactors.loadTestReactor( customSettings={CONF_XS_KERNEL: "MC2v2"}, inputFileName="smallestTestReactor/armiRunSmallest.yaml", ) applyDummyFlux(r) r.core.lib = isotxs.readBinary(ISOAA_PATH) mapper = globalFluxInterface.GlobalFluxResultMapper(cs=o.cs) mapper.r = r mapper._renormalizeNeutronFluxByBlock(100) self.assertAlmostEqual(r.core.calcTotalParam("power", generationNum=2), 100) mapper._updateDerivedParams() self.assertGreater(r.core.p.maxPD, 0.0) self.assertGreater(r.core.p.maxFlux, 0.0) mapper.updateDpaRate() block = r.core.getFirstBlock() self.assertGreater(block.p.detailedDpaRate, 0) self.assertEqual(block.p.detailedDpa, 0) mapper.clearFlux() self.assertEqual(len(block.p.mgFlux), 0) def test_getDpaXs(self): cs = settings.Settings() mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs) # test fuel block b = HexBlock("fuel", height=10.0) vals = mapper.getDpaXs(b) self.assertEqual(len(vals), 33) self.assertAlmostEqual(vals[0], 2345.69, 1) # build a grid plate block b = HexBlock("grid_plate", height=10.0) b.p.flags = Flags.GRID_PLATE self.assertTrue(b.hasFlags(Flags.GRID_PLATE)) # test grid plate block mapper.cs[CONF_GRID_PLATE_DPA_XS_SET] = "dpa_EBRII_PE16" vals = mapper.getDpaXs(b) self.assertEqual(len(vals), 33) self.assertAlmostEqual(vals[0], 2478.95, 1) # test null case mapper.cs[CONF_GRID_PLATE_DPA_XS_SET] = "fake" with self.assertRaises(KeyError): mapper.getDpaXs(b) def test_getBurnupPeakingFactor(self): cs = settings.Settings() mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs) # test fuel block mapper.cs["burnupPeakingFactor"] = 0.0 b = HexBlock("fuel", height=10.0) b.p.flux = 100.0 b.p.fluxPeak = 250.0 factor = mapper.getBurnupPeakingFactor(b) self.assertEqual(factor, 2.5) def test_getBurnupPeakingFactorZero(self): cs = settings.Settings() 
mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs) # test fuel block without any peaking factor set b = HexBlock("fuel", height=10.0) factor = mapper.getBurnupPeakingFactor(b) self.assertEqual(factor, 0.0) class TestGlobalFluxUtils(unittest.TestCase): def test_calcReactionRates(self): """ Test that the reaction rate code executes and sets a param > 0.0. .. test:: Return the reaction rates for a given ArmiObject. :id: T_ARMI_FLUX_RX_RATES :tests: R_ARMI_FLUX_RX_RATES """ b = test_blocks.loadTestBlock() test_blocks.applyDummyData(b) self.assertAlmostEqual(b.p.rateAbs, 0.0) globalFluxInterface.calcReactionRates(b, 1.01, b.core.lib) self.assertGreater(b.p.rateAbs, 0.0) vfrac = b.getComponentAreaFrac(Flags.FUEL) self.assertEqual(b.p.fisDens, b.p.rateFis / vfrac) self.assertEqual(b.p.fisDensHom, b.p.rateFis) def applyDummyFlux(r, ng=33): """Set arbitrary flux distribution on a Reactor.""" for b in r.core.iterBlocks(): b.p.power = 1.0 b.p.mgFlux = np.arange(ng, dtype=np.float64) ================================================ FILE: armi/physics/neutronics/isotopicDepletion/__init__.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""This package houses helper tools that allow ARMI to communicate with external isotopic depletion programs.""" ================================================ FILE: armi/physics/neutronics/isotopicDepletion/crossSectionTable.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module containing the CrossSectionTable class. The CrossSectionTable is useful for performing isotopic depletion analysis by storing one-group cross sections of interest to such an analysis. This used to live alongside the isotopicDepletionInterface, but that proved to be an unpleasant coupling between the ARMI composite model and the physics code contained therein. Separating it out at least means that the composite model doesn't need to import the isotopicDepletionInterface to function. """ import collections from typing import List import numpy as np from armi.nucDirectory import nucDir class CrossSectionTable(collections.OrderedDict): """ This is a set of one group cross sections for use with isotopicDepletion analysis. It can also double as a reaction rate table. XStable is indexed by nucNames (nG), (nF), (n2n), (nA), (nP) and (n3n) are expected the cross sections are returned in barns. 
""" rateTypes = ("nG", "nF", "n2n", "nA", "nP", "n3n") def __init__(self, *args, **kwargs): collections.OrderedDict.__init__(self, *args, **kwargs) self._name = None def setName(self, name): self._name = name def getName(self): return self._name def add(self, nucName, nG=0.0, nF=0.0, n2n=0.0, nA=0.0, nP=0.0, n3n=0.0): """ Add one group cross sections to the table. Parameters ---------- nucName : str nuclide name -- e.g. 'U235' nG : float (n,gamma) cross section in barns nF : float (n,fission) cross section in barns n2n : float (n,2n) cross section in barns nA : float (n,alpha) cross section in barns nP : float (n,proton) cross section in barns n3n : float (n,3n) cross section in barns """ xsData = {rateType: xs for rateType, xs in zip(self.rateTypes, [nG, nF, n2n, nA, nP, n3n])} nb = nucDir.nuclideBases.byName[nucName] mcnpNucName = int(nb.getMcnpId()) self[mcnpNucName] = xsData def addMultiGroupXS(self, nucName, microMultiGroupXS, mgFlux, totalFlux=None): """ Perform group collapse to one group cross sections and add to table. Parameters ---------- nucName: str nuclide name -- e.g. 'U235' microMultiGroupXS: XSCollection micro cross sections, typically a XSCollection from an ISOTXS mgFlux: list like The flux in each energy group totalFlux: float The total flux. Optional argument for increased speed if already available. 
""" totalFlux = totalFlux if totalFlux is not None else sum(mgFlux) xsTypes = ("nG", "nF", "n2n", "nA", "nP") mgCrossSections = ( microMultiGroupXS.nGamma, microMultiGroupXS.fission, microMultiGroupXS.n2n, microMultiGroupXS.nalph, microMultiGroupXS.np, ) oneGroupXS = np.asarray(mgCrossSections).dot(mgFlux) / totalFlux oneGroupXSbyName = {xsType: xs for xsType, xs in zip(xsTypes, oneGroupXS)} oneGroupXSbyName["n3n"] = 0.0 self.add(nucName, **oneGroupXSbyName) def hasValues(self): """Determines if there are non-zero values in this cross section table.""" return any(any(nuclideCrossSectionSet.values()) for nuclideCrossSectionSet in self.values()) def getXsecTable( self, headerFormat="$ xsecs for {}", tableFormat="\n{{mcnpId}} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}", ): """ Make a cross section table for external depletion physics code input decks. .. impl:: Generate a formatted cross section table. :id: I_ARMI_DEPL_TABLES1 :implements: R_ARMI_DEPL_TABLES Loops over the reaction rates stored as ``self`` to produce a string with the cross sections for each nuclide in the block. Cross sections may be populated by ``makeReactionRateTable``. The string will have a header with the table's name formatted according to ``headerFormat`` followed by rows for each unique nuclide/reaction combination, where each line is formatted according to ``tableFormat``. Parameters ---------- headerFormat: string (optional) This is the format in which the elements of the header with be returned -- i.e. if you use a .format() call with the case name you'll return a formatted list of strings. tableFormat: string (optional) This is the format in which the elements of the table with be returned -- i.e. if you use a .format() call with mcnpId, nG, nF, n2n, n3n, nA, and nP you'll get the format you want. 
If you use a .format() call with the case name you'll return a formatted list of string elements Results ------- output: list a list of string elements that together make a xsec card """ output = [headerFormat.format(self.getName())] for mcnpNucName in sorted(self.keys()): rxRates = self[mcnpNucName] dataToWrite = {rateType: rxRates[rateType] for rateType in self.rateTypes} if any(dataToWrite[rateType] for rateType in self.rateTypes): dataToWrite["mcnpId"] = mcnpNucName output.append(tableFormat.format(**dataToWrite)) return output def makeReactionRateTable(obj, nuclides: List = None): """ Generate a reaction rate table for given nuclides. Often useful in support of depletion. .. impl:: Generate a reaction rate table with entries for (nG), (nF), (n2n), (nA), and (nP) reactions. :id: I_ARMI_DEPL_TABLES0 :implements: R_ARMI_DEPL_TABLES For a given composite object ``obj`` and a list of nuclides ``nuclides`` in that object, call ``obj.getReactionRates()`` for each nuclide with a ``nDensity`` parameter of 1.0. If ``nuclides`` is not specified, use a list of all nuclides in ``obj``. This will reach upwards through the parents of ``obj`` to the associated :py:class:`~armi.reactor.reactors.Core` object and pull the ISOTXS library that is stored there. If ``obj`` does not belong to a ``Core``, a warning is printed. For each child of ``obj``, use the ISOTXS library and the cross-section ID for the associated block to produce a reaction rate dictionary in units of inverse seconds for the nuclide specified in the original call to ``obj.getReactionRates()``. Because ``nDensity`` was originally specified as 1.0, this dictionary actually represents the reaction rates per unit volume. If the nuclide is not in the ISOTXS library a warning is printed. Combine the reaction rates for all nuclides into a combined dictionary by summing together reaction rates of the same type on the same isotope from each of the children of ``obj``. 
If ``obj`` has a non-zero multi-group flux, sum the group-wise flux into the total flux and normalize the reaction rates by the total flux, producing a one-group macroscopic cross section for each reaction type on each nuclide. Store these values in a ``CrossSectionTable``. Parameters ---------- nuclides : list, optional list of nuclide names for which to generate the cross-section table. If absent, use all nuclides obtained by self.getNuclides(). Notes ----- This also used to do some caching on the block level but that has been removed and the calls to this may therefore need to be re-optimized. See Also -------- armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface.CrossSectionTable armi.reactor.composites.Composite.getReactionRates """ if nuclides is None: nuclides = obj.getNuclides() rxRates = {nucName: {rxName: 0 for rxName in CrossSectionTable.rateTypes} for nucName in nuclides} for armiObject in obj: for nucName in nuclides: rxnRates = armiObject.getReactionRates(nucName, nDensity=1.0) for rxName, rxRate in rxnRates.items(): rxRates[nucName][rxName] += rxRate crossSectionTable = CrossSectionTable() crossSectionTable.setName(obj.getName()) totalFlux = sum(obj.getIntegratedMgFlux()) if totalFlux: for nucName, nucRxRates in rxRates.items(): xSecs = {rxName: rxRate / totalFlux for rxName, rxRate in nucRxRates.items()} crossSectionTable.add(nucName, **xSecs) return crossSectionTable ================================================ FILE: armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstract class for interfaces between ARMI and programs that simulate transmutation and decay."""

import collections

from armi import interfaces
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO import xsLibraries
from armi.physics.neutronics.isotopicDepletion.crossSectionTable import (
    CrossSectionTable,
)
from armi.reactor import composites
from armi.reactor.flags import Flags


def isDepletable(obj: composites.ArmiObject):
    """
    Return True if obj or any child is flagged as DEPLETABLE.

    The DEPLETABLE flag is automatically set to True if any composition contains nuclides that are
    in the active nuclides list, unless flags are specifically set and DEPLETABLE is left out.

    This is often interpreted by depletion plugins as indicating which parts of the problem to
    apply depletion to.

    Analysts may want to turn on and off depletion in certain problems. For example, sometimes they
    want the control rods to deplete to figure out how often to replace them.

    Warning
    -------
    The ``DEPLETABLE`` flag is automatically added to compositions that have active nuclides. If
    you explicitly define any flags at all, you must also manually include ``DEPLETABLE`` or else
    the objects will silently not deplete.

    Notes
    -----
    The auto-flagging of ``DEPLETABLE`` happens in the construction of blueprints rather than in a
    plugin hook because the reactor is not available at the time the plugin hook runs.

    See Also
    --------
    armi.reactor.blueprints.componentBlueprint.insertDepletableNuclideKeys
    """
    return obj.hasFlags(Flags.DEPLETABLE) or obj.containsAtLeastOneChildWithFlags(Flags.DEPLETABLE)


class AbstractIsotopicDepleter:
    """
    Interact with a depletion code.

    This interface and subClasses deplete under a flux defined outside this interface.

    The depletion in this analysis only depends on the flux, material vectors, nuclear data and
    continuous source and loss objects.

    The depleters derived from this abstract class use all the fission products ARMI can handle --
    i.e. do not form lumped fission products.

    The class attribute _depleteByName contains the ARMI objects to deplete keyed by name.

    .. impl:: ARMI provides a base class to deplete isotopes.
        :id: I_ARMI_DEPL_ABC
        :implements: R_ARMI_DEPL_ABC

        This class provides some basic infrastructure typically needed in depletion calculations
        within the ARMI framework. It stores a reactor, operator, and case settings object, and
        also defines methods to store and retrieve the objects which should be depleted based on
        their names.
    """

    name = None
    purpose = "depletion"

    def __init__(self, r=None, cs=None, o=None):
        self.r = r
        self.cs = cs
        self.o = o

        # ARMI objects to deplete keyed by name; order is important for consistency in
        # iterating through objects
        self._depleteByName = collections.OrderedDict()

        self.efpdToBurn = None
        self.allNuclidesInProblem = r.blueprints.allNuclidesInProblem if r else []

    def addToDeplete(self, armiObj):
        """Add the object to the group of objects to be depleted."""
        self._depleteByName[armiObj.getName()] = armiObj

    def setToDeplete(self, armiObjects):
        """Change the group of objects to deplete to the specified group."""
        listOfTuples = [(obj.getName(), obj) for obj in armiObjects]
        self._depleteByName = collections.OrderedDict(listOfTuples)

    def getToDeplete(self):
        """Return objects to be depleted."""
        return list(self._depleteByName.values())

    def run(self):
        """
        Submit depletion case with external solver to the cluster.

        In addition to running the physics kernel, this method calls the waitForJob method to wait
        for it job to finish.

        comm = MPI.COMM_SELF.Spawn(sys.executable,args=['cpi.py'],maxprocs=5)
        """
        raise NotImplementedError


def makeXsecTable(
    compositeName,
    xsType,
    mgFlux,
    isotxs,
    headerFormat="$ xsecs for {}",
    tableFormat="\n{mcnpId} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}",
):
    """
    Make a cross section table for depletion physics input decks.

    Parameters
    ----------
    compositeName : str
        name of the armi object -- batch or block -- the table is being made for; used as the
        table's name/header
    xsType : str
        cross section type of the object (e.g. its micro suffix); only ISOTXS entries with a
        matching suffix are collapsed into the table
    mgFlux : list like
        multigroup flux used to collapse the micros to one group
    isotxs : isotxs object
        the library supplying the multigroup micro cross sections

    headerFormat : string (optional)
        this is the format in which the elements of the header will be returned -- i.e. if you use
        a .format() call with the case name you'll return a formatted list of string elements

    tableFormat : string (optional)
        This is the format in which the elements of the table will be returned -- i.e. if you use a
        .format() call with mcnpId, nG, nF, n2n, n3n, nA, and nP you'll get the format you want.

    Returns
    -------
    output : list
        a list of string elements that together make a xsec card

    See Also
    --------
    crossSectionTable.makeReactionRateTable
        Makes a table for arbitrary ArmiObjects
    """
    xsTable = CrossSectionTable()
    # nothing to tabulate without a cross section type or a positive flux
    if not xsType or not sum(mgFlux) > 0:
        return []
    xsTable.setName(compositeName)
    totalFlux = sum(mgFlux)
    for nucLabel, nuc in isotxs.items():
        # only include nuclides belonging to this object's cross section type
        if xsType != xsLibraries.getSuffixFromNuclideLabel(nucLabel):
            continue
        nucName = nuc.name
        nb = nuclideBases.byName[nucName]
        # lumped and dummy nuclides carry no meaningful depletion data
        if isinstance(nb, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase)):
            continue
        microMultiGroupXS = isotxs[nucLabel].micros
        if not isinstance(nb, nuclideBases.NaturalNuclideBase):
            xsTable.addMultiGroupXS(nucName, microMultiGroupXS, mgFlux, totalFlux)
    return xsTable.getXsecTable(headerFormat=headerFormat, tableFormat=tableFormat)


class AbstractIsotopicDepletionReader(interfaces.OutputReader):
    """Read number density output produced by the isotopic depletion."""

    def read(self):
        """Read an isotopic depletion Output File and apply results to armi objects in the
        ``ToDepletion`` attribute.
        """
        raise NotImplementedError


class Csrc:
    """
    Writes a continuous source term card in a depletion interface.

    Notes
    -----
    The chemical vector is a dictionary of chemicals and their removal rate constant. This works
    like a decay constant.

    The isotopic vector is used to make a source material in continuous source definitions.

    This is also the base class for continuous loss cards.
    """

    def __init__(self):
        self._chemicalVector = {}
        self._isotopicVector = {}
        self.defaultVector = {"0": 0}

    def setChemicalVector(self, chemicalVector):
        self._chemicalVector = chemicalVector

    def getChemicalVector(self):
        return self._chemicalVector

    def write(self):
        """Return a list of lines to write for a csrc card."""
        raise NotImplementedError



================================================
FILE: armi/physics/neutronics/latticePhysics/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialization of the interfaces for running lattice physics calculations."""

# ruff: noqa: F401
import os

from armi import interfaces, settings
from armi.physics import neutronics
from armi.utils import pathTools

ORDER = interfaces.STACK_ORDER.CROSS_SECTIONS



================================================
FILE: armi/physics/neutronics/latticePhysics/latticePhysicsInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lattice Physics Interface.

Parent classes for codes responsible for generating broad-group cross sections.
"""

import os

from armi import interfaces, nuclearDataIO, runLog
from armi.physics import neutronics
from armi.physics.neutronics import LatticePhysicsFrequency
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.settings import (
    CONF_CLEAR_XS,
    CONF_GEN_XS,
    CONF_LATTICE_PHYSICS_FREQUENCY,
    CONF_TOLERATE_BURNUP_CHANGE,
    CONF_XS_KERNEL,
)
from armi.utils import safeCopy

LATTICE_PHYSICS = "latticePhysics"


def setBlockNeutronVelocities(r, neutronVelocities):
    """
    Set the ``mgNeutronVelocity`` parameter for each block using the ``neutronVelocities`` dictionary data.

    Parameters
    ----------
    r : Reactor
        A Reactor object, that we want to modify.
    neutronVelocities : dict
        Dictionary that is keyed with the ``representativeBlock`` XS IDs with values of multigroup
        neutron velocity data computed by MC2.

    Raises
    ------
    ValueError
        Multi-group neutron velocities was not computed during the cross section calculation.
    """
    for b in r.core.iterBlocks():
        xsID = b.getMicroSuffix()
        if xsID not in neutronVelocities:
            raise ValueError(
                f"Cannot assign multi-group neutron velocity to {b} because it does not exist in the neutron "
                f"velocities dictionary with keys: {neutronVelocities.keys()}. The XS library does not contain data "
                f"for the {xsID} xsid."
            )
        b.p.mgNeutronVelocity = neutronVelocities[b.getMicroSuffix()]


class LatticePhysicsInterface(interfaces.Interface):
    """Class for interacting with lattice physics codes."""

    purpose = LATTICE_PHYSICS

    def __init__(self, r, cs):
        interfaces.Interface.__init__(self, r, cs)

        # Set to True by default, but should be disabled when perturbed cross sections are
        # generated.
        self._updateBlockNeutronVelocities = True
        # burnup at which cross sections are currently valid, per XS id
        self._burnupTolerance = self.cs[CONF_TOLERATE_BURNUP_CHANGE]
        self._oldXsIdsAndBurnup = {}
        self.executablePath = self._getExecutablePath()
        self.executableRoot = os.path.dirname(self.executablePath)
        # gamma cross sections are needed whenever gamma transport or gamma XS are requested
        self.includeGammaXS = neutronics.gammaTransportIsRequested(cs) or neutronics.gammaXsAreRequested(cs)
        self._latticePhysicsFrequency = LatticePhysicsFrequency[self.cs[CONF_LATTICE_PHYSICS_FREQUENCY]]

    def _getExecutablePath(self):
        # subclasses must supply the path to the lattice physics executable
        raise NotImplementedError

    def interactBOL(self, cycle=0):
        """
        Run the lattice physics code if ``genXS`` is set and update burnup groups.

        Generate new cross sections based off the case settings and the current state of the
        reactor if the lattice physics frequency is BOL.
        """
        if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOL:
            self.updateXSLibrary(cycle)

    def interactBOC(self, cycle=0):
        """
        Run the lattice physics code if ``genXS`` is set and update burnup groups.

        Generate new cross sections based off the case settings and the current state of the
        reactor if the lattice physics frequency is BOC.

        Notes
        -----
        :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.interactBOC`
        also calls this if the ``runLatticePhysicsBeforeShuffling`` setting is True. This happens
        because branch searches may need XS.
        """
        if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOC:
            self.updateXSLibrary(cycle)

    def updateXSLibrary(self, cycle, node=None):
        """
        Update the current XS library, either by creating or reloading one.

        Parameters
        ----------
        cycle : int
            The cycle that is being processed. Used to name the library.
        node : int, optional
            The node that is being processed. Used to name the library.

        See Also
        --------
        computeCrossSections : run lattice physics on the current reactor state no matter whether
            it is needed or not.
        """
        runLog.important(f"Preparing XS for cycle {cycle}")
        representativeBlocks, xsIds = self._getBlocksAndXsIds()
        if self._newLibraryShouldBeCreated(cycle, representativeBlocks, xsIds):
            if self.cs[CONF_CLEAR_XS]:
                self.clearXS()
            self.computeCrossSections(blockList=representativeBlocks, xsLibrarySuffix=self._getSuffix(cycle))
            self._renameExistingLibrariesForStatepoint(cycle, node)
        else:
            self.readExistingXSLibraries(cycle, node)

        self._checkInputs()

    def _renameExistingLibrariesForStatepoint(self, cycle, node):
        """Copy the existing neutron and/or gamma libraries into cycle-dependent files."""
        safeCopy(neutronics.ISOTXS, nuclearDataIO.getExpectedISOTXSFileName(cycle, node))
        if self.includeGammaXS:
            safeCopy(
                neutronics.GAMISO,
                nuclearDataIO.getExpectedGAMISOFileName(cycle=cycle, node=node, suffix=self._getSuffix(cycle)),
            )
            safeCopy(
                neutronics.PMATRX,
                nuclearDataIO.getExpectedPMATRXFileName(cycle=cycle, node=node, suffix=self._getSuffix(cycle)),
            )

    def _checkInputs(self):
        # hook for subclasses to validate lattice physics inputs; no-op by default
        pass

    def readExistingXSLibraries(self, cycle, node):
        raise NotImplementedError

    def makeCycleXSFilesAsBaseFiles(self, cycle, node):
        raise NotImplementedError

    @staticmethod
    def _copyLibraryFilesForCycle(cycle, libFiles):
        """Activate the correct cycle-specific library files, copying them over the base names."""
        runLog.extra(f"Current library files: {libFiles}")
        for baseName, cycleName in libFiles.items():
            if not os.path.exists(cycleName):
                if not os.path.exists(baseName):
                    raise ValueError(
                        f"Neither {cycleName} nor {baseName} libraries exist. Either the current cycle library for "
                        f"cycle {cycle} should exist or a base library is required to continue."
                    )
                runLog.info(
                    f"Existing library {cycleName} for cycle {cycle} does not exist. The active library is {baseName}"
                )
            else:
                runLog.info(f"Using {baseName} as an active library")
                if cycleName != baseName:
                    safeCopy(cycleName, baseName)

    def _readGammaBinaries(self, lib, gamisoFileName, pmatrxFileName):
        raise NotImplementedError(f"Gamma cross sections not implemented in {self.cs[CONF_XS_KERNEL]}")

    def _writeGammaBinaries(self, lib, gamisoFileName, pmatrxFileName):
        raise NotImplementedError(f"Gamma cross sections not implemented in {self.cs[CONF_XS_KERNEL]}")

    def _getSuffix(self, cycle):
        # subclasses may return a cycle-dependent library suffix; none by default
        return ""

    def interactEveryNode(self, cycle=None, node=None):
        """
        Run the lattice physics code if ``genXS`` is set and update burnup groups.

        Generate new cross sections based off the case settings and the current state of the
        reactor if the lattice physics frequency is at least everyNode.

        If this is not a coupled calculation, or if cross sections are only being generated at
        everyNode, then we want to regenerate all cross sections here. If it _is_ a coupled
        calculation, and we are generating cross sections at coupled iterations, then keep the
        existing XS lib for now, adding any XS groups as necessary to ensure that all XS groups are
        covered.
        """
        if self._latticePhysicsFrequency >= LatticePhysicsFrequency.everyNode:
            if not self.o.couplingIsActive() or self._latticePhysicsFrequency == LatticePhysicsFrequency.everyNode:
                self.r.core.lib = None
            self.updateXSLibrary(self.r.p.cycle, self.r.p.timeNode)

    def interactCoupled(self, iteration):
        """
        Runs on coupled iterations to generate cross sections that are updated with the temperature state.

        Notes
        -----
        This accounts for changes in cross section data due to temperature changes, which are
        important for cross section resonance effects and accurately characterizing Doppler
        constant and coefficient evaluations. For Standard and Equilibrium run types, this coupling
        iteration is limited to when the time node is equal to zero. The validity of this
        assumption lies in the expectation that these runs have consistent power, flow, and
        temperature conditions at all time nodes. For Snapshot run types, this assumption, in
        general, is invalidated as the requested reactor state may sufficiently differ from what
        exists on the database and where tight coupling is needed to capture temperature effects.

        .. warning::

            For Standard and Equilibrium run types, if the reactor power, flow, and/or temperature
            state is expected to vary over the lifetime of the simulation, as could be the case
            with :ref:`detailed cycle histories <cycle-history>`, a custom subclass should be
            considered.

        Parameters
        ----------
        iteration : int
            This is unused since cross sections are generated on a per-cycle basis.
        """
        # always run for snapshots to account for temp effect of different flow or power statepoint
        targetFrequency = (
            LatticePhysicsFrequency.firstCoupledIteration if iteration == 0 else LatticePhysicsFrequency.all
        )
        if self._latticePhysicsFrequency >= targetFrequency:
            self.r.core.lib = None
            self.updateXSLibrary(self.r.p.cycle, self.r.p.timeNode)

    def clearXS(self):
        raise NotImplementedError

    def interactEOC(self, cycle=None):
        """
        Interact at the end of a cycle.

        Force updating cross sections at the start of the next cycle.
        """
        self.r.core.lib = None

    def computeCrossSections(self, baseList=None, forceSerial=False, xsLibrarySuffix="", blockList=None):
        """
        Prepare a batch of inputs, execute them, and store results on reactor library.

        Parameters
        ----------
        baseList : list
            a user-specified set of bases that will be run instead of calculating all of them
        forceSerial : bool, optional
            Will run on 1 processor in sequence instead of on many in parallel
            Useful for optimization/batch runs where every processor is on a different branch
        xsLibrarySuffix : str, optional
            A book-keeping suffix used in Doppler calculations
        blockList : list, optional
            List of blocks for which to generate cross sections.
            If None, representative blocks will be determined.
        """
        self.r.core.lib = self._generateXsLibrary(baseList, forceSerial, xsLibrarySuffix, blockList)

    def _generateXsLibrary(
        self,
        baseList,
        forceSerial,
        xsLibrarySuffix,
        blockList,
        writers=None,
        purgeFP=True,
    ):
        raise NotImplementedError

    def _executeLatticePhysicsCalculation(self, returnedFromWriters, forceSerial):
        raise NotImplementedError

    def generateLatticePhysicsInputs(self, baseList, xsLibrarySuffix, blockList, xsWriters=None):
        """
        Write input files for the generation of cross section libraries.

        Parameters
        ----------
        baseList : list
            A list of cross-section id strings (e.g. AA, BC) that will be generated. Default: all
            in reactor
        xsLibrarySuffix : str
            A suffix added to the end of the XS file names such as 'voided' for voided XS.
            Default: Empty
        blockList : list
            The blocks to write inputs for.
        xsWriters : list, optional
            The specified writers to write the input files

        Returns
        -------
        returnedFromWriters : list
            A list of what this specific writer instance returns for each representative block. It
            is the responsibility of the subclassed interface to implement. In many cases, it is
            the executing agent.
        """
        returnedFromWriters = []
        baseList = set(baseList or [])
        representativeBlocks = blockList or self.getRepresentativeBlocks()
        for repBlock in representativeBlocks:
            xsId = repBlock.getMicroSuffix()
            if not baseList or xsId in baseList:
                # write the step number to the info log
                runLog.info(
                    "Creating input writer(s) for {0} with {1:65s} BU (%FIMA): {2:10.2f}".format(
                        xsId, repBlock, repBlock.p.percentBu
                    )
                )
                writers = self.getWriters(repBlock, xsLibrarySuffix, xsWriters)
                for writer in writers:
                    fromWriter = writer.write()
                    returnedFromWriters.append(fromWriter)

        return returnedFromWriters

    def getWriters(self, representativeBlock, xsLibrarySuffix, writers=None):
        """
        Return valid lattice physics writer subclass(es).
Parameters ---------- representativeBlock : Block A representative block object that can be created from a block collection. xsLibrarySuffix : str A suffix added to the end of the XS file names such as 'voided' for voided XS. Default: Empty writers : list of lattice physics writer objects, optional If the writers are known, they can be provided and constructed. Returns ------- writers : list A list of writers for the provided representative block. """ xsID = representativeBlock.getMicroSuffix() if writers: # Construct the writers that are provided writers = [ w( representativeBlock, r=self.r, externalCodeInterface=self, xsLibrarySuffix=xsLibrarySuffix, ) for w in writers ] else: geom = self.cs[CONF_CROSS_SECTION][xsID].geometry writers = self._getGeomDependentWriters(representativeBlock, xsID, geom, xsLibrarySuffix) return writers def _getGeomDependentWriters(self, representativeBlock, xsID, geom, xsLibrarySuffix): raise NotImplementedError def getReader(self): raise NotImplementedError def _newLibraryShouldBeCreated(self, cycle, representativeBlockList, xsIDs): """ Determines whether the cross section generator should be executed at this cycle. Criteria include: #. CONF_GEN_XS setting is turned on #. We are beyond any requested skipCycles (restart cycles) #. The blocks have changed burnup beyond the burnup threshold #. Lattice physics kernel (e.g. MC2) hasn't already been executed for this cycle (possible if it runs during fuel handling) """ executeXSGen = bool(self.cs[CONF_GEN_XS] and cycle >= self.cs["skipCycles"]) idsChangedBurnup = self._checkBurnupThresholds(representativeBlockList) if executeXSGen and not idsChangedBurnup: executeXSGen = False if self.r.core.hasLib(): # justification=r.core.lib property can raise exception or load pre-generated ISOTXS, but the interface # should have responsibility of loading XS's have already generated for this cycle (maybe during fuel # management). Should we update due to changes that occurred during fuel management? 
missing = set(xsIDs) - set(self.r.core.lib.xsIDs) if missing and not executeXSGen: runLog.info( f"Although a XS library {self.r.core.lib} exists on {self.r.core}, there are missing XS IDs " f"{missing} required. The XS generation on cycle {cycle} is not enabled, but will be run to " "generate these missing cross sections." ) executeXSGen = True elif missing: runLog.info( f"Although a XS library {self.r.core.lib} exists on {self.r.core}, there are missing XS IDs " f"{missing} required. These will be generated on cycle {cycle}." ) executeXSGen = True else: runLog.info( f"A XS library {self.r.core.lib} exists on {self.r.core} and contains the required XS data for XS " f"IDs {self.r.core.lib.xsIDs}. The generation of XS will be skipped." ) executeXSGen = False if executeXSGen: runLog.info(f"Cross sections will be generated on cycle {cycle} for the following XS IDs: {xsIDs}") else: runLog.info( f"Cross sections will not be generated on cycle {cycle}. The setting `{CONF_GEN_XS}` is " f"{self.cs[CONF_GEN_XS]} and `skipCycles` is {self.cs['skipCycles']}" ) return executeXSGen def _checkBurnupThresholds(self, blockList): """ Check to see if burnup has changed meaningfully. If there are, then the xs sets should be regenerated. Otherwise then go ahead and skip xs generation. This is motivated by the idea that during very long explicit equilibrium runs, it might save time to turn off xs generation at a certain point. Parameters ---------- blockList: iterable List of all blocks to examine Returns ------- idsChangedBurnup: bool flag regarding whether or not burnup changed substantially """ idsChangedBurnup = True if self._burnupTolerance > 0: idsChangedBurnup = False for b in blockList: xsID = b.getMicroSuffix() if xsID not in self._oldXsIdsAndBurnup: # Looks like a new ID was found that was not in the old ID's have to regenerate the cross-sections # this time around self._oldXsIdsAndBurnup[xsID] = b.p.percentBu idsChangedBurnup = True else: # The id was found. 
Now it is time to compare the burnups to determine if there has been enough # meaningful change between the runs buOld = self._oldXsIdsAndBurnup[xsID] buNow = b.p.percentBu if abs(buOld - buNow) > self._burnupTolerance: idsChangedBurnup = True # update the oldXs burnup to be the about to be newly generated xsBurnup self._oldXsIdsAndBurnup[xsID] = buNow runLog.important( f"Burnup has changed in xsID {xsID} from {buOld} to {buNow}. Recalculating Cross-sections" ) return idsChangedBurnup def _getProcessesPerNode(self): raise NotImplementedError def getRepresentativeBlocks(self): """Return a list of all blocks in the problem.""" xsGroupManager = self.getInterface("xsGroups") return xsGroupManager.representativeBlocks.values() # OrderedDict def _getBlocksAndXsIds(self): """Return blocks and their xsIds.""" blocks = self.getRepresentativeBlocks() return blocks, [b.getMicroSuffix() for b in blocks] def updatePhysicsCouplingControl(self): """ Disable XS update in equilibrium cases after a while. Notes ----- This is only relevant for equilibrium cases. We have to turn off XS updates after several cyclics or else the number densities will never converge. """ if self.r.core.p.cyclics >= self.cs["numCyclicsBeforeStoppingXS"]: self.enabled(False) runLog.important(f"Disabling {self} because numCyclics={self.r.core.p.cyclics}") ================================================ FILE: armi/physics/neutronics/latticePhysics/latticePhysicsWriter.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lattice Physics Writer.

Parent class for lattice physics writers.

Seeks to provide access to common methods used by general lattice physics codes.
"""

import collections
import math

import numpy as np
import ordered_set

from armi import interfaces, runLog
from armi.nucDirectory import nuclideBases
from armi.physics import neutronics
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
    CONF_FP_MODEL,
)
from armi.physics.neutronics.settings import (
    CONF_GEN_XS,
    CONF_MINIMUM_FISSILE_FRACTION,
    CONF_MINIMUM_NUCLIDE_DENSITY,
)
from armi.reactor import components
from armi.reactor.flags import Flags
from armi.settings.fwSettings.globalSettings import CONF_DETAILED_AXIAL_EXPANSION
from armi.utils.customExceptions import warn_when_root

# number of decimal places to round temperatures to in _groupNuclidesByTemperature
_NUM_DIGITS_ROUND_TEMPERATURE = 3

# index of the temperature in the nuclide dictionary: {nuc: (density, temp, category)}
_NUCLIDE_VALUES_TEMPERATURE_INDEX = 1


@warn_when_root
def nuclideNameFoundMultipleTimes(nuclideName):
    """Warning message (emitted once on the root process) for a duplicated nuclide entry."""
    return "Nuclide `{}' was found multiple times.".format(nuclideName)


class LatticePhysicsWriter(interfaces.InputWriter):
    """
    Parent class for creating the inputs for lattice physics codes.

    Contains methods for extracting all nuclides for a given problem.
    """

    _SPACE = " "
    _SEPARATOR = " | "

    # Nuclide categories (padded so category strings line up in generated output)
    UNUSED_CATEGORY = "Unused" + 3 * _SPACE
    FUEL_CATEGORY = "Fuel" + 5 * _SPACE
    STRUCTURE_CATEGORY = "Structure"
    COOLANT_CATEGORY = "Coolant" + 2 * _SPACE
    FISSION_PRODUCT_CATEGORY = "Fission Product"

    # Nuclide attributes
    DEPLETABLE = "Depletable" + 4 * _SPACE
    UNDEPLETABLE = "Non-Depletable"
    REPRESENTED = "Represented" + 2 * _SPACE
    INF_DILUTE = "Inf Dilute"

    def __init__(
        self,
        representativeBlock,
        r=None,
        externalCodeInterface=None,
        xsLibrarySuffix="",
        generateExclusiveGammaXS=False,
    ):
        """
        Build a writer for one representative block / XS ID.

        Parameters
        ----------
        representativeBlock : Block
            The block whose composition will be written to the lattice physics input.
        r : Reactor, optional
            The reactor object.
        externalCodeInterface : interface, optional
            The interface driving the external lattice physics code.
        xsLibrarySuffix : str, optional
            Suffix appended to XS library file names (e.g. for voided/Doppler cases).
        generateExclusiveGammaXS : bool, optional
            If True, generate gamma-only XS; requires gamma XS to be requested in settings.

        Raises
        ------
        TypeError
            If ``xsLibrarySuffix`` is not a string.
        ValueError
            If gamma-only XS are requested but the settings do not enable gamma XS.
        """
        interfaces.InputWriter.__init__(self, r=r, externalCodeInterface=externalCodeInterface)
        self.cs = self.eci.cs
        self.block = representativeBlock
        if not isinstance(xsLibrarySuffix, str):
            raise TypeError("xsLibrarySuffix should be a string; got {}".format(type(xsLibrarySuffix)))
        self.xsLibrarySuffix = xsLibrarySuffix
        self.generateExclusiveGammaXS = generateExclusiveGammaXS
        if self.generateExclusiveGammaXS and not neutronics.gammaXsAreRequested(self.cs):
            raise ValueError("Invalid `{}` setting to generate gamma XS for {}.".format(CONF_GEN_XS, self.block))
        self.xsId = representativeBlock.getMicroSuffix()
        # Per-XS-ID cross section settings are cached on the instance for convenience.
        self.xsSettings = self.cs[CONF_CROSS_SECTION][self.xsId]
        self.mergeIntoClad = self.xsSettings.mergeIntoClad
        self.mergeIntoFuel = self.xsSettings.mergeIntoFuel
        self.driverXsID = self.xsSettings.driverID
        self.numExternalRings = self.xsSettings.numExternalRings
        self.criticalBucklingSearchActive = self.xsSettings.criticalBuckling
        self.ductHeterogeneous = self.xsSettings.ductHeterogeneous
        self.traceIsotopeThreshold = self.xsSettings.traceIsotopeThreshold
        self.executeExclusive = self.xsSettings.xsExecuteExclusive
        self.priority = self.xsSettings.xsPriority
        # 999 effectively means "no atom-number cutoff" when xsMaxAtomNumber is unset.
        self.maxAtomNumberToModelInfDilute = (
            self.xsSettings.xsMaxAtomNumber if self.xsSettings.xsMaxAtomNumber is not None else 999
        )
        # would prefer this in 1D but its used in 0D in _writeSourceComposition
        self.minDriverDensity = self.xsSettings.minDriverDensity
        blockNeedsFPs = representativeBlock.getLumpedFissionProductCollection() is not None

        self.modelFissionProducts = blockNeedsFPs and self.cs[CONF_FP_MODEL] != "noFissionProducts"
        self.explicitFissionProducts = self.cs[CONF_FP_MODEL] == "explicitFissionProducts"
        self.diluteFissionProducts = blockNeedsFPs and self.cs[CONF_FP_MODEL] == "infinitelyDilute"
        self.minimumNuclideDensity = self.cs[CONF_MINIMUM_NUCLIDE_DENSITY]
        self.infinitelyDiluteDensity = self.minimumNuclideDensity
        # Nuclides assigned a fallback fuel temperature because they had no XS-group temperature.
        self._unusedNuclides = set()
        self._allNuclideObjects = None

    def __repr__(self):
        suffix = " with Suffix:`{}`".format(self.xsLibrarySuffix) if self.xsLibrarySuffix else ""
        if self.generateExclusiveGammaXS:
            xsFlag = neutronics.GAMMA
        elif neutronics.gammaXsAreRequested(self.cs) and self._isGammaXSGenerationEnabled:
            xsFlag = neutronics.NEUTRONGAMMA
        else:
            xsFlag = neutronics.NEUTRON
        return "<{} - XS ID {} ({} XS){}>".format(self.__class__.__name__, self.xsId, xsFlag, suffix)

    def _writeTitle(self, fileObj):
        self._writeComment(
            fileObj,
            "ARMI generated case for caseTitle {}, block {}\n".format(self.cs.caseTitle, self.block),
        )

    def write(self):
        raise NotImplementedError

    @property
    def _isSourceDriven(self):
        # A non-empty driver XS ID means this block is driven by another block's spectrum.
        return bool(self.driverXsID)

    @property
    def _isGammaXSGenerationEnabled(self):
        """Gamma transport is not available generically across all lattice physic solvers."""
        return False

    def _getAllNuclidesByTemperatureInC(self, component=None):
        """
        Returns a dictionary where all nuclides in the block are grouped by temperature.

        Some lattice physics codes, like ``SERPENT`` create mixtures of nuclides at similar
        temperatures to construct a problem. The dictionary returned is of the form::

            {temp1: {n1: (d1, temp1, category1), n2: (d2, temp1, category2)}
             temp2: {n3: (d3, temp2, category3), n4: (d4, temp2, category4)}
             ...
            }
        """
        nuclides = self._getAllNuclideObjects(component)
        return _groupNuclidesByTemperature(nuclides)

    def _getAllNuclideObjects(self, component=None):
        """
        Returns a single dictionary of all nuclides in the component.

        Calls :py:meth:`_getAllNuclidesByCategory`, which returns two dictionaries: one with just
        fission products and another with the remaining nuclides. This method just updates
        ``self._allNuclideObjects`` to contain the fission products as well.

        The dictionaries are structured with :py:class:`armi.nucDirectory.nuclideBases.NuclideBase`
        objects, with `(density, temperatureInC, and category)`` tuples for that nuclide object.
        """
        nucs, fissProds = self._getAllNuclidesByCategory(component)
        nucs.update(fissProds)
        return nucs

    def _getAllNuclidesByCategory(self, component=None):
        """
        Determine number densities and temperatures for each nuclide.

        Temperatures are a bit complex due to some special cases:
        Nuclides that build up like Pu239 have zero density at BOL but need cross sections.
        Nuclides like Mo99 are sometimes in structure and sometimes in lumped fission products.
        What temp to use? Nuclides like B-10 are in control blocks but these aren't candidates
        for XS creation. What temperature? To deal with this, we compute (flux-weighted) average
        temperatures of each nuclide based on its current component temperatures.
        """
        dfpDensities = self._getDetailedFPDensities()
        (
            coolantNuclides,
            fuelNuclides,
            structureNuclides,
        ) = self.r.core.getNuclideCategories()
        nucDensities = {}
        subjectObject = component or self.block

        depletableNuclides = nuclideBases.getDepletableNuclides(self.r.blueprints.activeNuclides, self.block)
        objNuclides = subjectObject.getNuclides()

        # If the explicit fission product model is enabled then the number densities
        # on the components will already contain all the nuclides required to be
        # modeled by the lattice physics writer. Otherwise, assume that `allNuclidesInProblem`
        # should be modeled.
        if self.explicitFissionProducts:
            # If detailed axial expansion is active, mapping between blocks occurs on uniform mesh
            # and this can cause blocks to have isotopes that they don't have cross sections for.
            # Fix this by adding all isotopes so they are present in lattice physics.
            if self.cs[CONF_DETAILED_AXIAL_EXPANSION]:
                nuclides = self.r.blueprints.allNuclidesInProblem
            else:
                nuclides = ordered_set.OrderedSet(sorted(objNuclides))
        else:
            nuclides = self.r.blueprints.allNuclidesInProblem

        nuclides = nuclides.union(self.r.blueprints.nucsToForceInXsGen)
        numDensities = subjectObject.getNuclideNumberDensities(nuclides)
        for nucName, dens in zip(nuclides, numDensities):
            nuc = self.r.nuclideBases.byName[nucName]
            if isinstance(nuc, nuclideBases.LumpNuclideBase):
                continue  # skip LFPs here but add individual FPs below.

            if isinstance(subjectObject, components.Component):
                if self.ductHeterogeneous and "Homogenized" in subjectObject.name:
                    # Nuclide temperatures representing heterogeneous model component temperatures
                    nucTemperatureInC = self._getAvgNuclideTemperatureInC(nucName)
                else:
                    # Heterogeneous number densities and temperatures
                    nucTemperatureInC = subjectObject.temperatureInC
            else:
                # Homogeneous number densities and temperatures
                nucTemperatureInC = self._getAvgNuclideTemperatureInC(nucName)

            # Floor every density at the configured minimum so trace nuclides stay in the problem.
            density = max(dens, self.minimumNuclideDensity)
            if nuc in nucDensities:
                # Duplicate entry: warn, sum the densities, and keep the first temperature/category.
                nuclideNameFoundMultipleTimes(nucName)
                dens, nucTemperatureInC, nucCategory = nucDensities[nuc]
                density = dens + density
                nucDensities[nuc] = (density, nucTemperatureInC, nucCategory)
                continue

            nucCategory = ""
            # Remove nuclides from detailed fission product dictionary if they are a part of the core materials
            # (e.g., Zr in the U10Zr which is at fuel temperature and Mo in HT9 which is at structure temp)
            if nuc in dfpDensities:
                density += dfpDensities[nuc]
                nucCategory += self.FISSION_PRODUCT_CATEGORY + self._SEPARATOR
                del dfpDensities[nuc]
            elif nucName in self._unusedNuclides:
                nucCategory += self.UNUSED_CATEGORY + self._SEPARATOR
            elif nucName in fuelNuclides:
                nucCategory += self.FUEL_CATEGORY + self._SEPARATOR
            elif nucName in coolantNuclides:
                nucCategory += self.COOLANT_CATEGORY + self._SEPARATOR
            elif nucName in structureNuclides:
                nucCategory += self.STRUCTURE_CATEGORY + self._SEPARATOR

            # Add additional `attributes` to the nuclide categories
            if nucName in objNuclides:
                nucCategory += self.REPRESENTED + self._SEPARATOR
            else:
                nucCategory += self.INF_DILUTE + self._SEPARATOR

            if nucName in depletableNuclides:
                nucCategory += self.DEPLETABLE
            else:
                nucCategory += self.UNDEPLETABLE

            nucDensities[nuc] = (density, nucTemperatureInC, nucCategory)

        if not self._isSourceDriven:
            nucDensities = self._adjustPuFissileDensity(nucDensities)

        fissionProductDensities = self._getDetailedFissionProducts(dfpDensities)
        if self._unusedNuclides:
            runLog.debug(
                "The following unused nuclides (defined in the loading file) are being added to {} at {} C: {}".format(
                    subjectObject,
                    self._getFuelTemperature(),
                    list(self._unusedNuclides),
                )
            )

        # the sortFunc makes orders the nucideDensities and fissionProductDensities by name.
        sortFunc = lambda nb_data_tuple: nb_data_tuple[0].name
        nucDensities = collections.OrderedDict(sorted(nucDensities.items(), key=sortFunc))
        fissionProductDensities = collections.OrderedDict(sorted(fissionProductDensities.items(), key=sortFunc))
        return nucDensities, fissionProductDensities

    def _getAvgNuclideTemperatureInC(self, nucName):
        """Return the block fuel temperature and the nuclides average temperature in C."""
        # Get the temperature of the nuclide in the block
        xsgm = self.getInterface("xsGroups")
        nucTemperatureInC = xsgm.getNucTemperature(self.xsId, nucName)
        if not nucTemperatureInC or math.isnan(nucTemperatureInC):
            # Assign the fuel temperature to the nuclide if it is None or NaN.
            nucTemperatureInC = self._getFuelTemperature()
            # NBD b/c the nuclide is not in problem.
            self._unusedNuclides.add(nucName)
        return nucTemperatureInC

    def _getFuelTemperature(self):
        """Return the average fuel component temperature in C (block average if no fuel)."""
        fuelComponents = self.block.getComponents(Flags.FUEL)
        if not fuelComponents:
            fuelTemperatureInC = self.block.getAverageTempInC()
        else:
            fuelTemperatureInC = np.mean([fc.temperatureInC for fc in fuelComponents])
        if not fuelTemperatureInC or math.isnan(fuelTemperatureInC):
            # NOTE(review): a legitimate 0.0 C temperature would also trip this check — confirm intended.
            raise ValueError(
                "The fuel temperature of block {0} is {1} and is not valid".format(self.block, fuelTemperatureInC)
            )
        return fuelTemperatureInC

    def _getDetailedFissionProducts(self, dfpDensities):
        """Return a dictionary of fission products not provided in the reactor blueprint nuclides.

        Notes
        -----
        Assumes that all fission products are at the same temperature of the lumped fission product
        of U238 within the block.
        """
        if self.cs[CONF_FP_MODEL] != "noFissionProducts":
            fissProductTemperatureInC = self._getAvgNuclideTemperatureInC("LFP38")
            return {
                fp: (dens, fissProductTemperatureInC, self.FISSION_PRODUCT_CATEGORY)
                for fp, dens in dfpDensities.items()
            }
        return {}

    def _getDetailedFPDensities(self):
        """
        Expands the nuclides in the LFP based on their yields.

        Returns
        -------
        dfpDensities : dict
            Detailed Fission Product Densities. keys are FP names, values are block number
            densities in atoms/bn-cm.

        Raises
        ------
        ValueError
            The lumped fission products were not initialized on the blocks.
        """
        dfpDensities = {}
        if not self.modelFissionProducts:
            return dfpDensities
        lfpCollection = self.block.getLumpedFissionProductCollection()
        if self.diluteFissionProducts:
            if lfpCollection is None:
                raise ValueError("Lumped fission products are not initialized. Did interactAll BOL run?")
            dfps = lfpCollection.getAllFissionProductNuclideBases()
            for individualFpBase in dfps:
                dfpDensities[individualFpBase] = self.minimumNuclideDensity
        else:
            # expand densities and sum
            dfpDensitiesByName = lfpCollection.getNumberDensities(self.block)
            # now, go through the list and make sure that there aren't any values less than the
            # minimumNuclideDensity; we need to keep trace amounts of nuclides in the problem
            for fpName, fpDens in dfpDensitiesByName.items():
                fp = self.r.nuclideBases.byName[fpName]
                dfpDensities[fp] = max(fpDens, self.minimumNuclideDensity)
        return dfpDensities

    def _writeNuclide(self, fileObj, nuclide, density, nucTemperatureInC, category, xsIdSpecified=None):
        raise NotImplementedError

    @property
    def _isCriticalBucklingSearchActive(self):
        return self.criticalBucklingSearchActive

    def _writeComment(self, fileObj, msg):
        raise NotImplementedError()

    def _writeGroupStructure(self, fileObj):
        raise NotImplementedError()

    def _adjustPuFissileDensity(self, nucDensities):
        """
        Checks if the minimum fissile composition is lower than the allowed minimum fissile
        fraction and adds additional Pu-239.

        Notes
        -----
        We're going to increase the Pu-239 density to make the ratio of fissile mass to heavy
        metal mass equal to the target ``CONF_MINIMUM_FISSILE_FRACTION``::

            minFrac = (fiss - old + new) / (hm - old + new)
            minFrac * (hm - old + new) = fiss - old + new
            minFrac * (hm - old) + old - fiss = new * (1 - minFrac)
            new = (minFrac * (hm - old) + old - fiss) / (1 - minFrac)

        where::

            minFrac = ``CONF_MINIMUM_FISSILE_FRACTION`` setting
            fiss = fissile mass of block
            hm = heavy metal mass of block
            old = number density of Pu-239 before adjustment
            new = number density of Pu-239 after adjustment
        """
        minFrac = self.cs[CONF_MINIMUM_FISSILE_FRACTION]
        fiss = sum(dens[0] for nuc, dens in nucDensities.items() if nuc.isFissile())
        hm = sum(dens[0] for nuc, dens in nucDensities.items() if nuc.isHeavyMetal())
        # NOTE(review): raises ZeroDivisionError if the composition has no heavy metal (hm == 0) — confirm
        # callers guarantee hm > 0, and KeyError if PU239 is absent from nucDensities.
        if fiss / hm < minFrac:
            pu239 = self.r.nuclideBases.byName["PU239"]
            old, temp, msg = nucDensities[pu239]
            new = (minFrac * (hm - old) + old - fiss) / (1 - minFrac)
            nucDensities[pu239] = (new, temp, msg)
            runLog.warning(
                f"Adjusting Pu-239 number densities in {self.block} from {old} to {new} "
                f"to meet minimum fissile fraction of {minFrac}."
            )
        return nucDensities

    def _getDriverBlock(self):
        """Return the block that is driving the representative block for this writer."""
        xsgm = self.getInterface("xsGroups")
        driverBlock = xsgm.representativeBlocks.get(self.driverXsID, None)
        if self.driverXsID != "" and driverBlock is None:
            msg = f"No representativeBlock found for driver XS ID {self.driverXsID} to use in {self}!"
            runLog.error(msg)
            raise ValueError(msg)
        return driverBlock


def _groupNuclidesByTemperature(nuclides):
    """
    Creates a dictionary of temperatures and nuclides at those temperatures.

    Nuclides is a dictionary with ``NuclideBase`` objects as keys, and the density, temperature,
    and category of those nuclides as values.

    Notes
    -----
    The temperature will be rounded to a number of digits according to
    ``_NUM_DIGITS_ROUND_TEMPERATURE``, because the average temperature for each nuclide can vary
    down to numerical precision, i.e. 873.15 and 873.15000000001
    """
    tempDict = {}
    for nuclide, values in nuclides.items():
        temperature = round(values[_NUCLIDE_VALUES_TEMPERATURE_INDEX], _NUM_DIGITS_ROUND_TEMPERATURE)
        if temperature not in tempDict:
            tempDict[temperature] = {nuclide: values}
        else:
            tempDict[temperature][nuclide] = values
    return tempDict


================================================
FILE: armi/physics/neutronics/latticePhysics/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


================================================
FILE: armi/physics/neutronics/latticePhysics/tests/test_latticeInterface.py
================================================
# Copyright 2021 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the Lattice Interface."""

import unittest
from collections import OrderedDict

from armi import settings
from armi.nuclearDataIO.cccc import isotxs
from armi.operators.operator import Operator
from armi.physics.neutronics import LatticePhysicsFrequency
from armi.physics.neutronics.crossSectionGroupManager import CrossSectionGroupManager
from armi.physics.neutronics.latticePhysics.latticePhysicsInterface import (
    LatticePhysicsInterface,
)
from armi.physics.neutronics.settings import CONF_GEN_XS, CONF_GLOBAL_FLUX_ACTIVE
from armi.reactor.assemblies import (
    HexAssembly,
    grids,
)
from armi.reactor.reactors import Core, Reactor
from armi.reactor.tests.test_blocks import buildSimpleFuelBlock
from armi.tests import ISOAA_PATH, mockRunLogs


# As an interface, LatticePhysicsInterface must be subclassed to be used
class LatticeInterfaceTester(LatticePhysicsInterface):
    """Minimal concrete subclass of LatticePhysicsInterface for testing."""

    def __init__(self, r, cs):
        self.name = "LatticeInterfaceTester"
        super().__init__(r, cs)

    def _getExecutablePath(self):
        return "/tmp/fake_path"

    def readExistingXSLibraries(self, cycle, node):
        pass


class LatticeInterfaceTesterLibFalse(LatticeInterfaceTester):
    """Subclass setting _newLibraryShouldBeCreated = False."""

    def _newLibraryShouldBeCreated(self, cycle, representativeBlockList, xsIDs):
        # Record that this decision hook ran so tests can verify updateXSLibrary was reached.
        self.testVerification = True
        return False


class TestLatticePhysicsInterfaceBase(unittest.TestCase):
    """Shared fixture: an empty reactor core with a single-block assembly and an XS group manager."""

    @classmethod
    def setUpClass(cls):
        # create empty reactor core
        cls.o = Operator(settings.Settings())
        cls.o.r = Reactor("testReactor", None)
        cls.o.r.core = Core("testCore")
        # add an assembly with a single block
        cls.assembly = HexAssembly("testAssembly")
        cls.assembly.spatialGrid = grids.AxialGrid.fromNCells(1)
        cls.assembly.spatialGrid.armiObject = cls.assembly
        cls.assembly.add(buildSimpleFuelBlock())
        # init and add interfaces
        cls.xsGroupInterface = CrossSectionGroupManager(cls.o.r, cls.o.cs)
        cls.o.addInterface(cls.xsGroupInterface)


class TestLatticePhysicsInterface(TestLatticePhysicsInterfaceBase):
    """Test Lattice Physics Interface."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.latticeInterface = LatticeInterfaceTesterLibFalse(cls.o.r, cls.o.cs)
        cls.o.addInterface(cls.latticeInterface)

    def setUp(self):
        # Sentinel lib value lets each test detect whether the interaction cleared the XS library.
        self.o.r.core.lib = "Nonsense"
        self.latticeInterface.testVerification = False

    def test_includeGammaXS(self):
        """Test that we can correctly flip the switch to calculate gamma XS."""
        # The default operator here turns off Gamma XS generation
        self.assertFalse(self.latticeInterface.includeGammaXS)
        self.assertEqual(self.o.cs[CONF_GLOBAL_FLUX_ACTIVE], "Neutron")

        # but we can create an operator that turns on Gamma XS generation
        cs = settings.Settings().modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: "Neutron and Gamma"})
        newOperator = Operator(cs)
        newLatticeInterface = LatticeInterfaceTesterLibFalse(newOperator.r, cs)
        self.assertTrue(newLatticeInterface.includeGammaXS)
        self.assertEqual(cs[CONF_GLOBAL_FLUX_ACTIVE], "Neutron and Gamma")

    def test_latticePhysicsInterface(self):
        """Super basic test of the LatticePhysicsInterface."""
        self.assertEqual(self.latticeInterface._updateBlockNeutronVelocities, True)
        self.assertEqual(self.latticeInterface.executablePath, "/tmp/fake_path")
        self.assertEqual(self.latticeInterface.executableRoot, "/tmp")
        self.latticeInterface.updateXSLibrary(0)
        self.assertEqual(len(self.latticeInterface._oldXsIdsAndBurnup), 0)

    def test_interactBOL(self):
        """
        Test interactBOL() with different update frequencies.

        Notes
        -----
        Unlike other interactions, self.o.r.core.lib is not set to None at BOC, so this test uses
        self.testVerification instead.
        """
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.never
        self.latticeInterface.interactBOL()
        self.assertFalse(self.latticeInterface.testVerification)
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactBOL()
        self.assertFalse(self.latticeInterface.testVerification)
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOL
        self.latticeInterface.interactBOL()
        self.assertTrue(self.latticeInterface.testVerification)

    def test_interactBOC(self):
        """
        Test interactBOC() with different update frequencies.

        Notes
        -----
        Unlike other interactions, self.o.r.core.lib is not set to None at BOC, so this test uses
        self.testVerification instead.
        """
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOL
        self.latticeInterface.interactBOC()
        self.assertFalse(self.latticeInterface.testVerification)
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactBOC()
        self.assertFalse(self.latticeInterface.testVerification)
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOC
        self.latticeInterface.interactBOC()
        self.assertTrue(self.latticeInterface.testVerification)

    def test_interactEveryNode(self):
        """Test interactEveryNode() with different update frequencies."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOC
        self.latticeInterface.interactEveryNode()
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactEveryNodeWhenCoupled(self):
        """
        Test that the XS lib is not cleared when coupled iterations are turned on and XS will be
        generated during the coupled iterations.
        """
        self.o.couplingIsActive = lambda: True
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactEveryNode()
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.o.couplingIsActive = lambda: False
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactEveryNodeWhenCoupledButNot(self):
        """
        Test that the XS lib is cleared when coupled iterations are turned on but the lattice
        physics frequency is not high enough.
        """
        self.o.couplingIsActive = lambda: True
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactEveryNode()
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactEveryNodeFirstCoupled(self):
        """Test interactEveryNode() with LatticePhysicsFrequency.firstCoupledIteration."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactEveryNodeAll(self):
        """Test interactEveryNode() with LatticePhysicsFrequency.all."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.all
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactFirstCoupledIteration(self):
        """Test interactCoupled() with different update frequencies on first iteration."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactCoupled(iteration=0)
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactCoupled(iteration=0)
        self.assertIsNone(self.o.r.core.lib)

    def test_interactAll(self):
"""Test interactCoupled() with different update frequencies on non-first iteration.""" self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration self.latticeInterface.interactCoupled(iteration=1) self.assertEqual(self.o.r.core.lib, "Nonsense") self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.all self.latticeInterface.interactCoupled(iteration=1) self.assertIsNone(self.o.r.core.lib) def test_getSuffix(self): self.assertEqual(self.latticeInterface._getSuffix(7), "") class TestLatticePhysicsLibraryCreation(TestLatticePhysicsInterfaceBase): """Test variations of _newLibraryShouldBeCreated.""" @classmethod def setUpClass(cls): super().setUpClass() cls.latticeInterface = LatticeInterfaceTester(cls.o.r, cls.o.cs) cls.o.addInterface(cls.latticeInterface) cls.xsGroupInterface.representativeBlocks = OrderedDict({"AA": cls.assembly[0]}) cls.b, cls.xsIDs = cls.latticeInterface._getBlocksAndXsIds() def setUp(self): """Reset representativeBlocks and CONF_GEN_XS.""" self.xsGroupInterface.representativeBlocks = OrderedDict({"AA": self.assembly[0]}) self.assembly[0].p.xsType = "A" self.o.cs[CONF_GEN_XS] = "" self.o.r.core.lib = isotxs.readBinary(ISOAA_PATH) def test_libCreation_NoGenXS(self): """No ISOTXS and xs gen not requested.""" self.o.r.core.lib = None with mockRunLogs.BufferLog() as mock: xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs) self.assertIn("Cross sections will not be generated on cycle 1.", mock.getStdout()) self.assertFalse(xsGen) def test_libCreation_GenXS(self): """No ISOTXS and xs gen requested.""" self.o.cs[CONF_GEN_XS] = "Neutron" self.o.r.core.lib = None with mockRunLogs.BufferLog() as mock: xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs) self.assertIn( "Cross sections will be generated on cycle 1 for the following XS IDs: ['AA']", mock.getStdout(), ) self.assertTrue(xsGen) def test_libCreation_NoGenXS_2(self): """ISOTXS present and 
has all of the necessary information.""" with mockRunLogs.BufferLog() as mock: xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs) self.assertIn( "The generation of XS will be skipped.", mock.getStdout(), ) self.assertFalse(xsGen) def test_libCreation_GenXS_2(self): """ISOTXS present and does not have all of the necessary information.""" self.xsGroupInterface.representativeBlocks = OrderedDict({"BB": self.assembly[0]}) b, xsIDs = self._modifyXSType() with mockRunLogs.BufferLog() as mock: xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, b, xsIDs) self.assertIn( "is not enabled, but will be run to generate these missing cross sections.", mock.getStdout(), ) self.assertTrue(xsGen) def test_libCreation_GenXS_3(self): """ISOTXS present and does not have all of the necessary information.""" self.o.cs[CONF_GEN_XS] = "Neutron" b, xsIDs = self._modifyXSType() with mockRunLogs.BufferLog() as mock: xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, b, xsIDs) self.assertIn("These will be generated on cycle ", mock.getStdout()) self.assertTrue(xsGen) def _modifyXSType(self): self.xsGroupInterface.representativeBlocks = OrderedDict({"BB": self.assembly[0]}) self.assembly[0].p.xsType = "B" return self.latticeInterface._getBlocksAndXsIds() ================================================ FILE: armi/physics/neutronics/latticePhysics/tests/test_latticeWriter.py ================================================ # Copyright 2021 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Test the Lattice Physics Writer.""" import unittest from collections import defaultdict from armi.physics.neutronics.const import CONF_CROSS_SECTION from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import ( CONF_FP_MODEL, ) from armi.physics.neutronics.latticePhysics.latticePhysicsInterface import ( setBlockNeutronVelocities, ) from armi.physics.neutronics.latticePhysics.latticePhysicsWriter import ( LatticePhysicsWriter, ) from armi.physics.neutronics.settings import ( CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION, CONF_XS_BLOCK_REPRESENTATION, ) from armi.testing import loadTestReactor from armi.tests import TEST_ROOT class FakeLatticePhysicsWriter(LatticePhysicsWriter): """LatticePhysicsWriter is abstract, so it must be subclassed to be tested.""" def __init__(self, block, r, eci): self.testOut = "" super(FakeLatticePhysicsWriter, self).__init__(block, r, eci, "", False) def write(self): pass def _writeNuclide(self, fileObj, nuclide, density, nucTemperatureInC, category, xsIdSpecified=None): pass def _writeComment(self, fileObj, msg): self.testOut += "\n" + str(msg) def _writeGroupStructure(self, fileObj): pass class TestLatticePhysicsWriter(unittest.TestCase): """Test Lattice Physics Writer.""" def setUp(self): self.o, self.r = loadTestReactor(TEST_ROOT) self.cs = self.o.cs self.cs[CONF_CROSS_SECTION].setDefaults( self.cs[CONF_XS_BLOCK_REPRESENTATION], self.cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self.block = self.r.core.getFirstBlock() self.w = FakeLatticePhysicsWriter(self.block, self.r, self.o) def test_setBlockNeutronVelocities(self): d = defaultdict(float) d["AA"] = 10.0 setBlockNeutronVelocities(self.r, d) tot = sum([b.p.mgNeutronVelocity for b in self.r.core.iterBlocks()]) self.assertGreater(tot, 3000.0) def test_latticePhysicsWriter(self): """Super basic test of the LatticePhysicsWriter.""" 
self.assertEqual(self.w.xsId, "AA") self.assertFalse(self.w.modelFissionProducts) self.assertEqual(self.w.driverXsID, "") self.assertAlmostEqual(self.w.minimumNuclideDensity, 1e-15, delta=1e-16) self.assertEqual(self.w.testOut, "") self.assertEqual(str(self.w), "<FakeLatticePhysicsWriter - XS ID AA (Neutron XS)>") self.w._writeTitle(None) self.assertIn("ARMI generated case for caseTitle armiRun", self.w.testOut) nucs = self.w._getAllNuclidesByTemperatureInC(None) self.assertEqual(len(nucs.keys()), 1) self.assertAlmostEqual(list(nucs.keys())[0], 450.0, delta=0.1) def test_writeTitle(self): self.w._writeTitle("test_writeTitle") self.assertIn("ARMI generated case for caseTitle", self.w.testOut) def test_isSourceDriven(self): self.assertFalse(self.w._isSourceDriven) self.w.driverXsID = True self.assertTrue(self.w._isSourceDriven) def test_isGammaXSGenerationEnabled(self): self.assertFalse(self.w._isGammaXSGenerationEnabled) def test_getAllNuclidesByTemperatureInCNone(self): nucsByTemp = self.w._getAllNuclidesByTemperatureInC(None) keys0 = list(nucsByTemp.keys()) self.assertEqual(len(keys0), 1) self.assertEqual(keys0[0], 450.0) keys1 = nucsByTemp[keys0[0]] self.assertGreater(len(keys1), 1) names = [k.name for k in keys1] self.assertIn("AM241", names) self.assertIn("U238", names) def test_getAllNuclidesByTemperatureInC(self): self.w.explicitFissionProducts = False c = self.r.core[0][0] nucsByTemp = self.w._getAllNuclidesByTemperatureInC(c) keys0 = list(nucsByTemp.keys()) self.assertEqual(len(keys0), 1) self.assertEqual(keys0[0], 450.0) keys1 = nucsByTemp[keys0[0]] self.assertGreater(len(keys1), 1) names = [k.name for k in keys1] self.assertIn("AM241", names) self.assertIn("U238", names) def test_getAllNuclidesByTempInCExplicitFisProd(self): self.w.explicitFissionProducts = True c = self.r.core[0][0] nucsByTemp = self.w._getAllNuclidesByTemperatureInC(c) keys0 = list(nucsByTemp.keys()) self.assertEqual(len(keys0), 1) self.assertEqual(keys0[0], 450.0) keys1 = 
nucsByTemp[keys0[0]] self.assertGreater(len(keys1), 1) names = [k.name for k in keys1] self.assertIn("AM241", names) self.assertIn("U238", names) def test_getAvgNuclideTemperatureInC(self): temp = self.w._getAvgNuclideTemperatureInC("U238") self.assertAlmostEqual(temp, 450, delta=0.001) temp = self.w._getAvgNuclideTemperatureInC("U235") self.assertAlmostEqual(temp, 450, delta=0.001) def test_getFuelTemperature(self): temp = self.w._getFuelTemperature() self.assertAlmostEqual(temp, 450, delta=0.001) def test_getDetailedFissionProducts(self): dfpDen = defaultdict(int) dfpDen["U238"] = 1.2 dfpDen["U235"] = 2.3 dfpDen["AM241"] = 3.4 prods = self.w._getDetailedFissionProducts(dfpDen) self.assertEqual(len(prods), 3) self.assertIn("U238", prods) self.assertIn("U235", prods) self.assertIn("AM241", prods) def test_getDetailedFissionProductsPass(self): self.cs[CONF_FP_MODEL] = "noFissionProducts" prods = self.w._getDetailedFissionProducts({}) self.assertEqual(len(prods), 0) def test_getDetailedFPDensities(self): self.w.modelFissionProducts = False dens = self.w._getDetailedFPDensities() self.assertEqual(len(dens), 0) self.w.modelFissionProducts = True with self.assertRaises(AttributeError): dens = self.w._getDetailedFPDensities() def test_isCriticalBucklingSearchActive(self): isActive = self.w._isCriticalBucklingSearchActive self.assertTrue(isActive) def test_getDriverBlock(self): self.w.driverXsID = "" b = self.w._getDriverBlock() self.assertIsNone(b) self.w.driverXsID = "AA" with self.assertRaises(ValueError): b = self.w._getDriverBlock() ================================================ FILE: armi/physics/neutronics/macroXSGenerationInterface.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Converts microscopic cross sections to macroscopic cross sections by multiplying by number density.

.. math::

    \Sigma_i = N_i \sigma_i
"""

from armi import context, interfaces, mpiActions, runLog
from armi.nuclearDataIO import xsCollections
from armi.physics.neutronics.settings import CONF_MINIMUM_NUCLIDE_DENSITY
from armi.utils import getBurnSteps, iterables


class MacroXSGenerator(mpiActions.MpiAction):
    """An action that can make macroscopic cross sections, even in parallel."""

    def __init__(
        self,
        blocks,
        lib,
        buildScatterMatrix,
        libType,
        minimumNuclideDensity=0.0,
    ):
        mpiActions.MpiAction.__init__(self)
        self.buildScatterMatrix = buildScatterMatrix
        # libType: block attribute holding the micro XS -- "micros" (neutron) or "gammaXS" (gamma)
        self.libType = libType
        # lib may be None; invokeHook falls back to self.r.core.lib on the root rank
        self.lib = lib
        # blocks may be None; invokeHook falls back to all core blocks on the root rank
        self.blocks = blocks
        self.minimumNuclideDensity = minimumNuclideDensity

    def __reduce__(self):
        # Prevent blocks and lib from being broadcast by passing None to ctor. Although lib must be
        # broadcast, we need to do it explicitly to correctly deal with the default lib=None
        # argument in buildMacros(), which utilizes this action. Default arguments make things more
        # complicated.
        return (
            MacroXSGenerator,
            (
                None,
                None,
                self.buildScatterMatrix,
                self.libType,
                self.minimumNuclideDensity,
            ),
        )

    def invokeHook(self):
        # logic here gets messy due to all the default arguments in the calling method. There
        # exists a large number of permutations to be handled.
        if context.MPI_RANK == 0:
            # only the root rank resolves the None defaults; workers get their share via
            # scatter/bcast below (see __reduce__, which strips blocks and lib on pickling)
            allBlocks = self.blocks
            if allBlocks is None:
                allBlocks = self.r.core.getBlocks()
            lib = self.lib or self.r.core.lib
        else:
            allBlocks = []
            lib = None

        mc = xsCollections.MacroscopicCrossSectionCreator(self.buildScatterMatrix, self.minimumNuclideDensity)

        if context.MPI_SIZE > 1:
            # NOTE: collective-call ordering (scatter -> bcast -> gather) must be identical on
            # every rank; do not reorder these statements.
            myBlocks = self.scatterList(allBlocks)
            lib = context.MPI_COMM.bcast(lib, root=0)
            myMacros = [mc.createMacrosFromMicros(lib, b, libType=self.libType) for b in myBlocks]
            allMacros = self.gatherList(myMacros)
        else:
            allMacros = [mc.createMacrosFromMicros(lib, b, libType=self.libType) for b in allBlocks]

        if context.MPI_RANK == 0:
            # attach results back onto the root rank's blocks; order is preserved by
            # scatterList/gatherList (split then flatten)
            for b, macro in zip(allBlocks, allMacros):
                b.macros = macro

    @staticmethod
    def scatterList(lst):
        """Helper functions for mpi communication: split ``lst`` into MPI_SIZE chunks and scatter them."""
        if context.MPI_RANK == 0:
            chunked = iterables.split(lst, context.MPI_SIZE)
        else:
            chunked = None
        return context.MPI_COMM.scatter(chunked, root=0)

    @staticmethod
    def gatherList(localList):
        """Helper functions for mpi communication: gather per-rank lists and flatten them on root."""
        globalList = context.MPI_COMM.gather(localList, root=0)
        if context.MPI_RANK == 0:
            globalList = iterables.flatten(globalList)
        return globalList


class MacroXSGenerationInterface(interfaces.Interface):
    """
    Builds macroscopic cross sections on all Blocks.

    Notes
    -----
    This probably should not be an interface since it has no interactXYZ methods. It should
    probably be converted to an MpiAction.
    """

    name = "macroXsGen"

    def __init__(self, r, cs):
        interfaces.Interface.__init__(self, r, cs)
        # cumulative time-node index at which macros were last built; set by buildMacros
        self.macrosLastBuiltAt = None
        self.minimumNuclideDensity = cs[CONF_MINIMUM_NUCLIDE_DENSITY]

    def buildMacros(
        self,
        lib=None,
        bListSome=None,
        buildScatterMatrix=True,
        libType="micros",
    ):
        """
        Builds block-level macroscopic cross sections for making diffusion equation matrices.

        This will use MPI if armi.context.MPI_SIZE > 1

        Builds G-vectors of the basic XS ('nGamma','fission','nalph','np','n2n','nd','nt')
        Builds GxG matrices for scatter matrices

        .. impl:: Build macroscopic cross sections for blocks.
            :id: I_ARMI_MACRO_XS
            :implements: R_ARMI_MACRO_XS

            This method builds macroscopic cross sections for a user-specified set of blocks using
            a specified microscopic neutron or gamma cross section library. If no blocks are
            specified, cross sections are calculated for all blocks in the core. If no library is
            specified, the existing r.core.lib is used. The basic arithmetic involved in generating
            macroscopic cross sections consists of multiplying isotopic number densities by
            isotopic microscopic cross sections and summing over all isotopes in a composition.
            The calculation is implemented in :py:func:`computeMacroscopicGroupConstants
            <armi.nuclearDataIO.xsCollections.computeMacroscopicGroupConstants>`. This method uses
            an :py:class:`mpiAction <armi.mpiActions.MpiAction>` to distribute the work of
            calculating macroscopic cross sections across the worker processes.

        Parameters
        ----------
        lib : library object , optional
            If lib is specified, then buildMacros will build macro XS using micro XS data from lib.
            If lib = None, then buildMacros will use the existing library self.r.core.lib. If that
            does not exist, then buildMacros will use a new nuclearDataIO.ISOTXS object.

        bListSome : list of Block, optional
            Blocks to build macros on. If None, all blocks in the core are used (resolved on the
            root rank in MacroXSGenerator.invokeHook).

        buildScatterMatrix : Boolean, optional
            If True, all macro XS will be built, including the time-consuming scatter matrix. If
            False, only the macro XS that are needed for fluxRecon.computePinMGFluxAndPower will be
            built. These include 'transport', 'fission', and a few others. No ng x ng matrices
            (such as 'scatter' or 'chi') will be built. Essentially, this option saves huge runtime
            for the fluxRecon module.

        libType : str, optional
            The block attribute containing the desired microscopic XS for this block: either
            "micros" for neutron XS or "gammaXS" for gamma XS.
        """
        cycle = self.r.p.cycle
        burnSteps = getBurnSteps(self.cs)
        # cumulative node index: one node per burn step plus one per completed cycle, plus the
        # current time node
        self.macrosLastBuiltAt = sum([burnSteps[i] + 1 for i in range(cycle)]) + self.r.p.timeNode

        runLog.important("Building macro XS")
        xsGen = MacroXSGenerator(
            bListSome,
            lib,
            buildScatterMatrix,
            libType,
            self.minimumNuclideDensity,
        )
        # broadcast the (stripped) action to workers, then run it on all ranks
        xsGen.broadcast()
        xsGen.invoke(self.o, self.r, self.cs)


================================================
FILE: armi/physics/neutronics/parameters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parameter definitions for the Neutronics Plugin.

We hope neutronics plugins that compute flux will use ``mgFlux``, etc., which will enable modular
construction of apps.
""" from armi.reactor import parameters from armi.reactor.blocks import Block from armi.reactor.parameters import ParamLocation from armi.reactor.parameters.parameterDefinitions import isNumpyArray from armi.reactor.reactors import Core from armi.utils import units def getNeutronicsParameterDefinitions(): """Return ParameterDefinitionCollections for each appropriate ArmiObject.""" return {Block: _getNeutronicsBlockParams(), Core: _getNeutronicsCoreParams()} def _getNeutronicsBlockParams(): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam( "axMesh", units=units.UNITLESS, description="number of neutronics axial mesh points in this block", default=None, categories=[parameters.Category.retainOnReplacement], ) pb.defParam( "mgFlux", setter=isNumpyArray("mgFlux"), units=f"n*{units.CM}/{units.SECONDS}", description="multigroup volume-integrated flux", location=ParamLocation.VOLUME_INTEGRATED, saveToDB=True, categories=[ parameters.Category.fluxQuantities, parameters.Category.multiGroupQuantities, ], default=None, ) pb.defParam( "adjMgFlux", units=f"n*{units.CM}/{units.SECONDS}", description="multigroup adjoint neutron flux", location=ParamLocation.VOLUME_INTEGRATED, saveToDB=True, categories=[ parameters.Category.fluxQuantities, parameters.Category.multiGroupQuantities, ], default=None, ) pb.defParam( "lastMgFlux", units=f"n*{units.CM}/{units.SECONDS}", description="multigroup volume-integrated flux used for averaging the latest and previous depletion step", location=ParamLocation.VOLUME_INTEGRATED, saveToDB=False, categories=[ parameters.Category.fluxQuantities, parameters.Category.multiGroupQuantities, ], default=None, ) pb.defParam( "mgFluxGamma", units=f"#*{units.CM}/{units.SECONDS}", description="multigroup gamma flux", location=ParamLocation.VOLUME_INTEGRATED, saveToDB=True, categories=[ parameters.Category.fluxQuantities, parameters.Category.multiGroupQuantities, parameters.Category.gamma, ], default=None, ) 
pb.defParam( "mgNeutronVelocity", units=f"{units.CM}/{units.SECONDS}", description="multigroup neutron velocity", location=ParamLocation.AVERAGE, saveToDB=True, categories=[parameters.Category.multiGroupQuantities], default=None, ) pb.defParam( "extSrc", units=f"#/{units.CM}^3/{units.SECONDS}", description="multigroup external source", location=ParamLocation.AVERAGE, saveToDB=False, categories=[parameters.Category.multiGroupQuantities], default=None, ) pb.defParam( "mgGammaSrc", units=f"#/{units.CM}^3/{units.SECONDS}", description="multigroup gamma source", location=ParamLocation.AVERAGE, saveToDB=True, categories=[ parameters.Category.multiGroupQuantities, parameters.Category.gamma, ], default=None, ) pb.defParam( "gammaSrc", units=f"#/{units.CM}^3/{units.SECONDS}", description="gamma source", location=ParamLocation.AVERAGE, saveToDB=True, categories=[parameters.Category.gamma], default=0.0, ) # Not anointing the pin fluxes as a MG quantity, since it has an extra dimension, which # could lead to issues, depending on how the multiGroupQuantities category gets used pb.defParam( "pinMgFluxes", units=f"n/{units.CM}^2/{units.SECONDS}", description=""" The block-level pin multigroup fluxes. pinMgFluxes[i, g] represents the flux in group g for pin i. Flux units are the standard n/cm^2/s. The "ARMI pin ordering" is used, which is counter-clockwise from 3 o'clock. 
""", categories=[parameters.Category.pinQuantities], saveToDB=True, default=None, ) pb.defParam( "pinMgFluxesAdj", units=units.UNITLESS, description="should be a blank 3-D array, but re-defined later (nPins x ng x nAxialSegments)", categories=[parameters.Category.pinQuantities], saveToDB=False, default=None, ) pb.defParam( "pinMgFluxesGamma", units=f"#/{units.CM}^2/{units.SECONDS}", description="should be a blank 3-D array, but re-defined later (nPins x ng x nAxialSegments)", categories=[parameters.Category.pinQuantities, parameters.Category.gamma], saveToDB=False, default=None, ) pb.defParam( "chi", units=units.UNITLESS, description="Energy distribution of fission neutrons", location=ParamLocation.AVERAGE, saveToDB=True, default=None, ) pb.defParam( "linPow", units=f"{units.WATTS}/{units.METERS}", description=( "Pin-averaged linear heat rate, which is calculated by evaluating the block power " "and dividing by the number of pins. If gamma transport is enabled, then this " "represents the combined neutron and gamma heating. If gamma transport is disabled " "then this represents the energy generation in the pin, where gammas are assumed to " "deposit their energy locally. Note that this value does not implicitly account " "for axial and radial peaking factors within the block. Use `linPowByPin` for " "obtaining the pin linear heat rate with peaking factors included." ), location=ParamLocation.AVERAGE, default=0.0, categories=[ parameters.Category.detailedAxialExpansion, parameters.Category.neutronics, ], ) pb.defParam( "linPowByPin", setter=isNumpyArray("linPowByPin"), units=f"{units.WATTS}/{units.CM}", description=( "Pin linear linear heat rate, which is calculated through flux reconstruction and " "accounts for axial and radial peaking factors. This differs from the `linPow` " "parameter, which assumes no axial and radial peaking in the block as this " "information is unavailable without detailed flux reconstruction. 
The same " "application of neutron and gamma heating results applies." ), location=ParamLocation.CHILDREN, categories=[parameters.Category.pinQuantities], default=None, ) # gamma category because linPowByPin is only split by neutron/gamma when gamma is activated pb.defParam( "linPowByPinNeutron", setter=isNumpyArray("linPowByPinNeutron"), units=f"{units.WATTS}/{units.CM}", description="Pin linear neutron heat rate. This is the neutron heating component of `linPowByPin`", location=ParamLocation.CHILDREN, categories=[parameters.Category.pinQuantities, parameters.Category.gamma], default=None, ) pb.defParam( "linPowByPinGamma", setter=isNumpyArray("linPowByPinGamma"), units=f"{units.WATTS}/{units.CM}", description="Pin linear gamma heat rate. This is the gamma heating component of `linPowByPin`", location=ParamLocation.CHILDREN, categories=[parameters.Category.pinQuantities, parameters.Category.gamma], default=None, ) pb.defParam( "reactionRates", units=f"#/{units.SECONDS}", description='List of reaction rates in specified by setting "reactionsToDB"', location=ParamLocation.VOLUME_INTEGRATED, categories=[parameters.Category.fluxQuantities], default=None, ) with pDefs.createBuilder( saveToDB=True, default=None, location=ParamLocation.EDGES, categories=[parameters.Category.detailedAxialExpansion, "depletion"], ) as pb: pb.defParam( "pointsEdgeFastFluxFr", units=units.UNITLESS, description="Fraction of flux above 100keV at edges of the block", ) pb.defParam( "pointsEdgeDpa", setter=isNumpyArray("pointsEdgeDpa"), units=units.DPA, description="displacements per atom at edges of the block", location=ParamLocation.EDGES | ParamLocation.BOTTOM, categories=["cumulative", "detailedAxialExpansion", "depletion"], ) pb.defParam( "pointsEdgeDpaRate", setter=isNumpyArray("pointsEdgeDpaRate"), units=f"{units.DPA}/{units.SECONDS}", description="Current time derivative of the displacement per atoms at edges of the block", location=ParamLocation.EDGES | ParamLocation.BOTTOM, ) with 
pDefs.createBuilder( saveToDB=True, default=None, location=ParamLocation.CORNERS, categories=[ parameters.Category.detailedAxialExpansion, parameters.Category.depletion, ], ) as pb: pb.defParam( "cornerFastFlux", units=f"n/{units.CM}^2/{units.SECONDS}", description="Neutron flux above 100keV at hexagon block corners", ) pb.defParam( "pointsCornerFastFluxFr", units=units.UNITLESS, description="Fraction of flux above 100keV at corners of the block", ) pb.defParam( "pointsCornerDpa", setter=isNumpyArray("pointsCornerDpa"), units=units.DPA, description="displacements per atom at corners of the block", location=ParamLocation.CORNERS | ParamLocation.BOTTOM, categories=["cumulative", "detailedAxialExpansion", "depletion"], ) pb.defParam( "pointsCornerDpaRate", setter=isNumpyArray("pointsCornerDpaRate"), units=f"{units.DPA}/{units.SECONDS}", description="Current time derivative of the displacement per atoms at corners of the block", location=ParamLocation.CORNERS | ParamLocation.BOTTOM, ) with pDefs.createBuilder( default=0.0, location=ParamLocation.AVERAGE, categories=[parameters.Category.detailedAxialExpansion], ) as pb: # Neutronics reaction rate params that are not re-derived in mesh conversion pb.defParam( "rateBalance", units=f"1/{units.CM}^3/{units.SECONDS}", description="Numerical balance between particle production and destruction (should be small)", ) pb.defParam( "rateProdNet", units=f"1/{units.CM}^3/{units.SECONDS}", description="The total neutron production including (n,2n) source and fission source.", ) pb.defParam( "capturePowerFrac", units=units.UNITLESS, description="Fraction of the power produced through capture in a block.", saveToDB="True", ) pb.defParam( "fluence", units=f"#/{units.CM}^2", description="Fluence", categories=["cumulative"], ) pb.defParam( "flux", units=f"n/{units.CM}^2/{units.SECONDS}", description="neutron flux", categories=[ parameters.Category.retainOnReplacement, parameters.Category.fluxQuantities, ], ) pb.defParam("fluxAdj", 
units=units.UNITLESS, description="Adjoint flux") pb.defParam( "pdens", units=f"{units.WATTS}/{units.CM}^3", description="Average volumetric power density", categories=[parameters.Category.neutronics], ) pb.defParam( "pdensDecay", units=f"{units.WATTS}/{units.CM}^3", description="Decay power density from decaying radionuclides", ) pb.defParam( "arealPd", units=f"{units.MW}/{units.METERS}^2", description="Power divided by XY area", ) pb.defParam( "fisDens", units=f"fissions/{units.CM}^3/{units.SECONDS}", description="Fission density in a pin (scaled up from homogeneous)", ) pb.defParam( "fisDensHom", units=f"1/{units.CM}^3/{units.SECONDS}", description="Homogenized fissile density", ) pb.defParam( "fluxGamma", units=f"#/{units.CM}^2/{units.SECONDS}", description="Gamma scalar flux", categories=[ parameters.Category.retainOnReplacement, parameters.Category.fluxQuantities, ], ) pb.defParam( "fluxPeak", units=f"n/{units.CM}^2/{units.SECONDS}", description="Peak neutron flux calculated within the mesh", location=ParamLocation.MAX, ) pb.defParam( "kInf", units=units.UNITLESS, description=( "Neutron production rate in this block/neutron absorption rate in this " "block. Not truly kinf but a reasonable approximation of reactivity." 
), ) pb.defParam("medAbsE", units=units.EV, description="Median neutron absorption energy") pb.defParam( "medFisE", units=units.EV, description="Median energy of neutron causing fission", ) pb.defParam("medFlxE", units=units.EV, description="Median neutron flux energy") pb.defParam( "pdensGamma", units=f"{units.WATTS}/{units.CM}^3", description="Average volumetric gamma power density", categories=[parameters.Category.gamma], ) # gamma category because pdens is only split by neutron/gamma when gamma is activated pb.defParam( "pdensNeutron", units=f"{units.WATTS}/{units.CM}^3", description="Average volumetric neutron power density", categories=[parameters.Category.gamma], ) pb.defParam( "ppdens", units=f"{units.WATTS}/{units.CM}^3", description="Peak power density", location=ParamLocation.MAX, ) pb.defParam( "ppdensGamma", units=f"{units.WATTS}/{units.CM}^3", description="Peak gamma density", categories=[parameters.Category.gamma], location=ParamLocation.MAX, ) # rx rate params that are derived during mesh conversion. # We'd like all things that can be derived from flux and XS to be # in this category to minimize numerical diffusion but it is a WIP. 
with pDefs.createBuilder( default=0.0, location=ParamLocation.AVERAGE, ) as pb: pb.defParam( "rateAbs", units=f"1/{units.CM}^3/{units.SECONDS}", description="Total absorption rate in this block (fisson + capture).", ) pb.defParam( "rateCap", units=f"1/{units.CM}^3/{units.SECONDS}", description="Parasitic capture rate in this block.", ) pb.defParam( "rateProdN2n", units=f"1/{units.CM}^3/{units.SECONDS}", description="Production rate of neutrons from n2n reactions.", ) with pDefs.createBuilder( default=0.0, location=ParamLocation.AVERAGE, categories=[parameters.Category.detailedAxialExpansion], ) as pb: pb.defParam( "rateFis", units=f"1/{units.CM}^3/{units.SECONDS}", description="Fission rate in this block.", ) pb.defParam( "rateProdFis", units=f"1/{units.CM}^3/{units.SECONDS}", description="Production rate of neutrons from fission reactions (nu * fission source / k-eff)", ) with pDefs.createBuilder( default=0.0, location=ParamLocation.VOLUME_INTEGRATED, categories=[parameters.Category.detailedAxialExpansion], ) as pb: pb.defParam( "powerGenerated", units=units.WATTS, description="Generated power. Different than b.p.power only when gamma transport is activated.", categories=[parameters.Category.gamma], ) pb.defParam( "power", units=units.WATTS, description="Total power", categories=[parameters.Category.neutronics], ) pb.defParam( "powerGamma", units=units.WATTS, description="Total gamma power", categories=[parameters.Category.gamma], ) # gamma category because power is only split by neutron/gamma when gamma is activated pb.defParam( "powerNeutron", units=units.WATTS, description="Total neutron power", categories=[parameters.Category.gamma], ) with pDefs.createBuilder(default=0.0) as pb: pb.defParam( "detailedDpaThisCycle", units=units.DPA, location=ParamLocation.AVERAGE, description=( "Displacement per atom accumulated during this cycle. This accumulates " "over a cycle and resets to zero at BOC." 
def _getNeutronicsCoreParams():
    """Build the neutronics parameter definitions that live on the Core object."""
    coreParams = parameters.ParameterDefinitionCollection()
    with coreParams.createBuilder(categories=[parameters.Category.neutronics]) as builder:
        builder.defParam(
            "eigenvalues",
            units=units.UNITLESS,
            description="All available lambda-eigenvalues of reactor.",
            # Will hold a list at runtime; a mutable default is not allowed, so start at None.
            default=None,
            location=ParamLocation.AVERAGE,
        )
        builder.defParam(
            "kInf",
            units=units.UNITLESS,
            description="k-infinity",
            default=0.0,
            location=ParamLocation.AVERAGE,
        )
    return coreParams
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A boilerplate entry for a neutronics physics plugin. The ARMI Framework comes with a neutronics plugin that introduces two independent interfaces: :py:mod:`~armi.physics.neutronics.fissionProductModel` Handles fission product modeling :py:mod:`~armi.physics.neutronics.crossSectionGroupManager` Handles the management of different cross section "groups" """ import numpy as np from armi import plugins, runLog from armi.physics.neutronics.const import CONF_CROSS_SECTION from armi.utils import tabulate class NeutronicsPlugin(plugins.ArmiPlugin): """The built-in neutronics plugin with a few capabilities and a lot of state parameter definitions.""" @staticmethod @plugins.HOOKIMPL def exposeInterfaces(cs): """Collect and expose all of the interfaces that live under the built-in neutronics package.""" from armi.physics.neutronics import crossSectionGroupManager from armi.physics.neutronics.fissionProductModel import fissionProductModel interfaceInfo = [] for mod in (crossSectionGroupManager, fissionProductModel): interfaceInfo += plugins.collectInterfaceDescriptions(mod, cs) return interfaceInfo @staticmethod @plugins.HOOKIMPL def defineParameters(): """Define parameters for the plugin.""" from armi.physics.neutronics import parameters as neutronicsParameters return neutronicsParameters.getNeutronicsParameterDefinitions() @staticmethod @plugins.HOOKIMPL def defineParameterRenames(): return {"buGroup": "envGroup", "buGroupNum": "envGroupNum"} @staticmethod @plugins.HOOKIMPL def defineEntryPoints(): """Define entry points for the plugin.""" from armi.physics.neutronics 
def applyEffectiveDelayedNeutronFractionToCore(core, cs):
    """Process the settings for the delayed neutron fraction and precursor decay constants.

    Parameters
    ----------
    core : Core
        Newly built core; its ``beta``, ``betaComponents``, and ``betaDecayConstants``
        parameters are set from the case settings.
    cs : case settings
        Supplies the ``beta`` and ``decayConstants`` values.

    Raises
    ------
    ValueError
        If ``beta`` and ``decayConstants`` are both lists but of different lengths.
    """
    # Verify and set the core beta parameters based on the user-supplied settings
    beta = cs["beta"]
    decayConstants = cs["decayConstants"]

    # If beta is interpreted as a float, then assign it to the total delayed neutron fraction
    # parameter. Otherwise, setup the group-wise delayed neutron fractions and precursor decay
    # constants.
    reportTableData = []
    if isinstance(beta, float):
        core.p.beta = beta
        reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))
    elif isinstance(beta, list) and isinstance(decayConstants, list):
        if len(beta) != len(decayConstants):
            raise ValueError(
                f"The values for `beta` ({beta}) and `decayConstants` ({decayConstants}) are not consistent lengths."
            )

        core.p.beta = sum(beta)
        core.p.betaComponents = np.array(beta)
        core.p.betaDecayConstants = np.array(decayConstants)

        reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))
        for i, betaComponent in enumerate(core.p.betaComponents):
            reportTableData.append((f"Group {i} Delayed Neutron Fractions", betaComponent))
        for i, decayConstant in enumerate(core.p.betaDecayConstants):
            # BUG FIX: this string was missing its `f` prefix, so the literal text
            # "Group {i}" was reported instead of the actual group number.
            reportTableData.append((f"Group {i} Precursor Decay Constants", decayConstant))

    if reportTableData:
        runLog.extra(
            tabulate.tabulate(
                data=reportTableData,
                headers=["Component", "Value"],
                tableFmt="armi",
            )
        )
    elif beta is not None or decayConstants is not None:
        # Inputs were supplied but in a form that could not be interpreted; report to
        # the user that the values were not applied. (Previously an empty table was
        # logged when both settings were None; now nothing is logged in that case.)
        runLog.warning(
            f"Delayed neutron fraction(s) - {beta} and decay constants - {decayConstants} have not been applied."
        )
"""Some generic neutronics-related settings.""" import os from armi import runLog from armi.physics.neutronics import LatticePhysicsFrequency from armi.physics.neutronics.const import NEUTRON from armi.physics.neutronics.energyGroups import GROUP_STRUCTURE from armi.settings import setting, settingsValidation from armi.settings.fwSettings.globalSettings import ( CONF_DETAILED_AXIAL_EXPANSION, CONF_NON_UNIFORM_ASSEM_FLAGS, CONF_RUN_TYPE, ) from armi.utils import directoryChangers CONF_BOUNDARIES = "boundaries" CONF_DPA_PER_FLUENCE = "dpaPerFluence" CONF_EIGEN_PROB = "eigenProb" CONF_EPS_EIG = "epsEig" CONF_EPS_FSAVG = "epsFSAvg" CONF_EPS_FSPOINT = "epsFSPoint" CONF_GEN_XS = "genXS" # gamma stuff and neutronics plugin/lattice physics CONF_GLOBAL_FLUX_ACTIVE = "globalFluxActive" CONF_GROUP_STRUCTURE = "groupStructure" CONF_INNERS_ = "inners" CONF_LOADING_FILE = "loadingFile" CONF_MCNP_LIB_BASE = "mcnpLibraryVersion" CONF_NEUTRONICS_KERNEL = "neutronicsKernel" CONF_NEUTRONICS_TYPE = "neutronicsType" CONF_OUTERS_ = "outers" CONF_RESTART_NEUTRONICS = "restartNeutronics" # used by global flux interface CONF_ACLP_DOSE_LIMIT = "aclpDoseLimit" CONF_DPA_XS_SET = "dpaXsSet" CONF_GRID_PLATE_DPA_XS_SET = "gridPlateDpaXsSet" CONF_LOAD_PAD_ELEVATION = "loadPadElevation" CONF_LOAD_PAD_LENGTH = "loadPadLength" CONF_OPT_DPA = [ "", "dpa_EBRII_INC600", "dpa_EBRII_INCX750", "dpa_EBRII_HT9", "dpa_EBRII_PE16", "dpa_EBRII_INC625", ] # moved from xsSettings CONF_CLEAR_XS = "clearXS" CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION = "disableBlockTypeExclusionInXsGeneration" CONF_LATTICE_PHYSICS_FREQUENCY = "latticePhysicsFrequency" CONF_MINIMUM_FISSILE_FRACTION = "minimumFissileFraction" CONF_MINIMUM_NUCLIDE_DENSITY = "minimumNuclideDensity" CONF_TOLERATE_BURNUP_CHANGE = "tolerateBurnupChange" CONF_XS_BLOCK_REPRESENTATION = "xsBlockRepresentation" CONF_XS_KERNEL = "xsKernel" def defineSettings(): """Standard function to define settings; for neutronics.""" settings = [ setting.Setting( 
CONF_GROUP_STRUCTURE, default="ANL33", label="Number of Energy Groups", description="Energy group structure to use in neutronics simulations", options=[ "ANL9", "ANL33", "ANL70", "ANL116", "ANL230", "ANL703", "ANL1041", "ANL2082", "ARMI33", "ARMI45", "CINDER63", "348", ], ), setting.Setting( CONF_GLOBAL_FLUX_ACTIVE, default="Neutron", label="Global Flux Calculation", description="Calculate the global flux at each timestep for the selected particle " "type(s) using the specified neutronics kernel.", options=["", "Neutron", "Neutron and Gamma"], ), setting.Setting( CONF_GEN_XS, default="", label="Multigroup Cross Sections Generation", description="Generate multigroup cross sections for the selected particle " "type(s) using the specified lattice physics kernel (see Lattice Physics " "tab). When not set, the XS library will be auto-loaded from an existing " "ISOTXS in the working directory, but fail if there is no ISOTXS.", options=["", "Neutron", "Neutron and Gamma"], ), setting.Setting( CONF_DPA_PER_FLUENCE, default=4.01568627451e-22, label="DPA Per Fluence", description="A quick and dirty conversion that is used to get dpaPeak", ), setting.Setting( CONF_BOUNDARIES, default="Extrapolated", label="Neutronic BCs", description="External Neutronic Boundary Conditions. Reflective does not include axial.", options=[ "Extrapolated", "Reflective", "Infinite", "ZeroSurfaceFlux", "ZeroInwardCurrent", "Generalized", ], enforcedOptions=True, ), setting.Setting( CONF_NEUTRONICS_KERNEL, default="", label="Neutronics Kernel", description="The neutronics / depletion solver for global flux solve.", options=[], enforcedOptions=True, ), setting.Setting( CONF_MCNP_LIB_BASE, default="ENDF/B-VII.1", label="ENDF data library version to use for MCNP Analysis", description=( "This setting controls the nuclides in the problem according to " "the available nuclides in the selected library. For instance, " "some MCNP libraries contain elemental nuclides while others do " f"not. 
Only used when MCNP is selected as {CONF_NEUTRONICS_KERNEL}." ), options=["ENDF/B-V.0", "ENDF/B-VII.0", "ENDF/B-VII.1", "ENDF/B-VIII.0"], ), setting.Setting( CONF_NEUTRONICS_TYPE, default="real", label="Neutronics Type", description="The type of neutronics solution that is desired.", options=["real", "adjoint", "both"], ), setting.Setting( CONF_EIGEN_PROB, default=True, label="Eigenvalue Problem", description="Is this a eigenvalue problem or a fixed source problem?", ), setting.Setting( CONF_EPS_EIG, default=1e-07, label="Eigenvalue Epsilon", description="Convergence criteria for calculating the eigenvalue in the global flux solver", ), setting.Setting( CONF_EPS_FSAVG, default=1e-05, label="FS Avg. epsilon", description="Convergence criteria for average fission source", ), setting.Setting( CONF_EPS_FSPOINT, default=1e-05, label="FS Point epsilon", description="Convergence criteria for point fission source", ), setting.Setting( CONF_LOAD_PAD_ELEVATION, default=0.0, label="Load pad elevation (cm)", description=( "The elevation of the bottom of the above-core load pad (ACLP) in cm from the bottom of the upper grid " "plate. Used for calculating the load pad dose" ), ), setting.Setting( CONF_LOAD_PAD_LENGTH, default=0.0, label="Load pad length (cm)", description="The length of the load pad. Used to compute average and peak dose.", ), setting.Setting( CONF_ACLP_DOSE_LIMIT, default=80.0, label="ALCP dose limit", description="Dose limit in dpa used to position the above-core load pad(if one exists)", ), setting.Setting( CONF_RESTART_NEUTRONICS, default=False, label="Restart neutronics", description="Restart global flux case using outputs from last time as a guess", ), setting.Setting( CONF_OUTERS_, default=100, label="Max Outer Iterations", description="XY and Axial partial current sweep max outer iterations.", ), setting.Setting( CONF_INNERS_, default=0, label="Inner Iterations", description="XY and Axial partial current sweep inner iterations. 
0 lets the neutronics code pick a " "default.", ), setting.Setting( CONF_GRID_PLATE_DPA_XS_SET, default="dpa_EBRII_HT9", label="Grid plate DPA XS", description=("The cross sections to use for grid plate blocks DPA when computing displacements per atom."), options=CONF_OPT_DPA, ), setting.Setting( CONF_DPA_XS_SET, default="dpa_EBRII_HT9", label="DPA Cross Sections", description="The cross sections to use when computing displacements per atom.", options=CONF_OPT_DPA, ), setting.Setting( CONF_CLEAR_XS, default=False, label="Clear XS", description="Delete all cross section libraries before regenerating them.", ), setting.Setting( CONF_MINIMUM_FISSILE_FRACTION, default=0.045, label="Minimum Fissile Fraction", description="Minimum fissile fraction (fissile number densities / heavy metal number densities).", oldNames=[("mc2.minimumFissileFraction", None)], ), setting.Setting( CONF_MINIMUM_NUCLIDE_DENSITY, default=1e-15, label="Minimum nuclide density", description="Density to use for nuclides and fission products at infinite dilution. This is also used as " "the minimum density considered for computing macroscopic cross sections.", ), setting.Setting( CONF_TOLERATE_BURNUP_CHANGE, default=0.0, label="Cross Section Burnup Group Tolerance", description="Burnup window for computing cross sections. If the prior " "cross sections were computed within the window, new cross sections will " "not be generated and the prior calculated cross sections will be used.", ), setting.Setting( CONF_XS_BLOCK_REPRESENTATION, default="Average", label="Cross Section Block Averaging Method", description="The type of averaging to perform when creating cross sections for a group of blocks", options=[ "Median", "Average", "FluxWeightedAverage", "ComponentAverage1DSlab", ], ), setting.Setting( CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION, default=False, label="Which Block types to merge together in XS Generation", description="Control which blocks get merged together by the XSGM. 
def _blueprintsHasOldXSInput(inspector):
    """Return True when the blueprints file contains a top-level ``cross sections:`` section."""
    blueprintsPath = inspector.cs[CONF_LOADING_FILE]
    # Read the blueprints file relative to the case's input directory.
    with directoryChangers.DirectoryChanger(inspector.cs.inputDirectory):
        with open(os.path.expandvars(blueprintsPath)) as stream:
            return any(line.startswith("cross sections:") for line in stream)
""" value = inspector.cs[name0] if value == "True": value = NEUTRON elif value == "False": value = "" inspector.cs = inspector.cs.modified(newSettings={name0: value}) def migrateXSOptionGenXS(): """pass-through to migrateXSOption(), because Query functions cannot take arguments.""" migrateXSOption(CONF_GEN_XS) def migrateXSOptionGlobalFluxActive(): """pass-through to migrateXSOption(), because Query functions cannot take arguments.""" migrateXSOption(CONF_GLOBAL_FLUX_ACTIVE) queries.append( settingsValidation.Query( lambda: inspector.cs[CONF_GEN_XS] in ("True", "False"), "The {0} setting cannot not take `True` or `False` as an exact value any more.", 'Would you like to auto-correct {0} to the correct value? ("" or {1})'.format(CONF_GEN_XS, NEUTRON), migrateXSOptionGenXS, ) ) queries.append( settingsValidation.Query( lambda: inspector.cs[CONF_GLOBAL_FLUX_ACTIVE] in ("True", "False"), "The {0} setting cannot not take `True` or `False` as an exact value any more.", 'Would you like to auto-correct {0} to the correct value? 
("" or {1})'.format( CONF_GLOBAL_FLUX_ACTIVE, NEUTRON ), migrateXSOptionGlobalFluxActive, ) ) def migrateNormalBCSetting(): """The `boundary` setting is migrated from `Normal` to `Extrapolated`.""" inspector.cs = inspector.cs.modified(newSettings={CONF_BOUNDARIES: "Extrapolated"}) queries.append( settingsValidation.Query( lambda: inspector.cs[CONF_BOUNDARIES] == "Normal", "The {0} setting now takes `Extrapolated` instead of `Normal` as a value.".format(CONF_BOUNDARIES), "Would you like to auto-correct {0} from `Normal` to `Extrapolated`?".format(CONF_BOUNDARIES), migrateNormalBCSetting, ) ) def updateXSGroupStructure(): """Trying to migrate to a valid XS group structure name.""" value = inspector.cs[CONF_GROUP_STRUCTURE] newValue = value.upper() if newValue in GROUP_STRUCTURE: runLog.info("Updating the cross section group structure from {} to {}".format(value, newValue)) else: newValue = inspector.cs.getSetting(CONF_GROUP_STRUCTURE).default runLog.info( "Unable to automatically convert the {} setting of {}. 
Defaulting to {}".format( CONF_GROUP_STRUCTURE, value, newValue ) ) inspector.cs = inspector.cs.modified(newSettings={CONF_GROUP_STRUCTURE: newValue}) queries.append( settingsValidation.Query( lambda: inspector.cs[CONF_GROUP_STRUCTURE] not in GROUP_STRUCTURE, "The given group structure {0} was not recognized.".format(inspector.cs[CONF_GROUP_STRUCTURE]), "Would you like to auto-correct the group structure value?", updateXSGroupStructure, ) ) def migrateDpa(name0): """Migrating some common shortened names for dpa XS sets.""" value = inspector.cs[name0] if value == "dpaHT9_33": value = "dpaHT9_ANL33_TwrBol" elif value == "dpa_SS316": value = "dpaSS316_ANL33_TwrBol" inspector.cs = inspector.cs.modified(newSettings={name0: value}) def migrateDpaDpaXsSet(): """Pass-through to migrateDpa(), because Query functions cannot take arguments.""" migrateDpa(CONF_DPA_XS_SET) def migrateDpaGridPlate(): """Pass-through to migrateDpa(), because Query functions cannot take arguments.""" migrateDpa(CONF_GRID_PLATE_DPA_XS_SET) queries.append( settingsValidation.Query( lambda: inspector.cs[CONF_DPA_XS_SET] in ("dpaHT9_33", "dpa_SS316"), "It appears you are using a shortened version of the {0}.".format(CONF_DPA_XS_SET), "Would you like to auto-correct this to the full name?", migrateDpaDpaXsSet, ) ) queries.append( settingsValidation.Query( lambda: inspector.cs[CONF_GRID_PLATE_DPA_XS_SET] in ("dpaHT9_33", "dpa_SS316"), "It appears you are using a shortened version of the {0}.".format(CONF_GRID_PLATE_DPA_XS_SET), "Would you like to auto-correct this to the full name?", migrateDpaGridPlate, ) ) queries.append( settingsValidation.Query( lambda: inspector.cs[CONF_DETAILED_AXIAL_EXPANSION] and inspector.cs[CONF_NON_UNIFORM_ASSEM_FLAGS], f"The use of {CONF_DETAILED_AXIAL_EXPANSION} and {CONF_NON_UNIFORM_ASSEM_FLAGS} is not supported.", "Automatically set non-uniform assembly treatment to its default?", lambda: inspector._assignCS( CONF_NON_UNIFORM_ASSEM_FLAGS, 
inspector.cs.getSetting(CONF_NON_UNIFORM_ASSEM_FLAGS).default, ), ) ) queryMsg = ( "A Snapshots case is selected but the `latticePhysicsFrequency` " "{0} is less than `firstCoupledIteration`. `firstCoupledIteration`" " or `all` is recommended for Snapshots when they involve large changes " "in power or flow compared to the loaded state." ).format(inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY]) queryPrompt = ( "Would you like to update `latticePhysicsFrequency` from " f"{inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY]} to `firstCoupledIteration`?" ) queries.append( settingsValidation.Query( lambda: inspector.cs[CONF_RUN_TYPE] == "Snapshots" and not LatticePhysicsFrequency[inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY]] >= LatticePhysicsFrequency.firstCoupledIteration, queryMsg, queryPrompt, lambda: inspector._assignCS(CONF_LATTICE_PHYSICS_FREQUENCY, "firstCoupledIteration"), ) ) return queries ================================================ FILE: armi/physics/neutronics/tests/ISOXA ================================================ Not a real cross section file; just a placeholder to unit test the file copying function. ================================================ FILE: armi/physics/neutronics/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================ FILE: armi/physics/neutronics/tests/rzmflxYA ================================================ Not a real flux spectrum file; just a placeholder to unit test the file copying function. ================================================ FILE: armi/physics/neutronics/tests/test_crossSectionManager.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test the cross section manager. 
class TestBlockColl(unittest.TestCase):
    """Basic container behavior of the base BlockCollection."""

    def setUp(self):
        # makeBlocks() is a module-level helper (defined later in this file) that pulls
        # blocks out of the standard test reactor.
        self.blockList = makeBlocks()
        self.bc = BlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        self.bc.extend(self.blockList)

    def test_add(self):
        """append/extend behave like the underlying list container."""
        self.bc.append("DummyBlock1")
        self.bc.extend(["DB2", "DB3"])
        self.assertIn("DummyBlock1", self.bc)
        self.assertIn("DB2", self.bc)
        self.assertIn("DB3", self.bc)

    def test_getBlocksInGroup(self):
        """Every block added in setUp is a member of the collection."""
        for b in self.blockList:
            self.assertIn(b, self.bc)

    def test_is_pickleable(self):
        """A round-trip through pickle preserves collection attributes."""
        self.bc.weightingParam = "test"
        buf = BytesIO()
        pickle.dump(self.bc, buf)
        buf.seek(0)
        newBc = pickle.load(buf)
        self.assertEqual(self.bc.weightingParam, newBc.weightingParam)
class TestBlockCollMedian(unittest.TestCase):
    """Representative-block creation via the median burnup block."""

    def setUp(self):
        self.blockList = makeBlocks(5)
        for bi, b in enumerate(self.blockList):
            b.setType("fuel")
            # Burnups 0, 25, 50, 75, 100 percent — median is 50.
            b.p.percentBu = bi / 4.0 * 100
        # Swap two entries so the median is not trivially the middle of a pre-sorted list.
        self.blockList[0], self.blockList[2] = self.blockList[2], self.blockList[0]
        self.bc = MedianBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        self.bc.extend(self.blockList)

    def test_createRepresentativeBlock(self):
        """The representative block carries the median burnup (50%)."""
        avgB = self.bc.createRepresentativeBlock()
        self.assertAlmostEqual(avgB.p.percentBu, 50.0)

    def test_getBlockNuclideTemperature(self):
        # doesn't have to be in median block tests, but this is a simpler test
        nuc = "U235"
        testBlock = self.blockList[0]
        amt, amtWeightedTemp = 0, 0
        # Hand-compute the atom-weighted average temperature over components that
        # actually contain the nuclide, then compare against the helper.
        for c in testBlock:
            dens = c.getNumberDensity(nuc)
            if dens > 0:
                thisAmt = dens * c.getVolume()
                amt += thisAmt
                amtWeightedTemp += thisAmt * c.temperatureInC
        avgTemp = amtWeightedTemp / amt
        self.assertAlmostEqual(avgTemp, crossSectionGroupManager.getBlockNuclideTemperature(testBlock, nuc))
attribute.""" self.bc._checkBlockSimilarity = MagicMock(return_value=True) self.assertTrue(self.bc._performAverageByComponent()) self.bc.averageByComponent = False self.assertFalse(self.bc._performAverageByComponent()) def test_checkBlockSimilarity(self): """Check the block similarity test.""" self.assertTrue(self.bc._checkBlockSimilarity()) self.bc.append(test_blocks.loadTestBlock()) self.assertFalse(self.bc._checkBlockSimilarity()) def test_createRepresentativeBlock(self): """Test creation of a representative block. .. test:: Create representative blocks using a volume-weighted averaging. :id: T_ARMI_XSGM_CREATE_REPR_BLOCKS0 :tests: R_ARMI_XSGM_CREATE_REPR_BLOCKS """ avgB = self.bc.createRepresentativeBlock() self.assertNotIn(avgB, self.bc) # (0 + 1 + 2 + 3 + 4) / 5 = 10/5 = 2.0 # adjust for thermal expansion between input temp (600 C) and average temp (603 C) fuelMat = avgB.getComponent(Flags.FUEL).material expansion = (1.0 + fuelMat.linearExpansionPercent(Tc=603.0) / 100.0) / ( 1.0 + fuelMat.linearExpansionPercent(Tc=600.0) / 100.0 ) self.assertAlmostEqual(avgB.getNumberDensity("U235") / expansion**2, 2.0) # (0 + 1/4 + 2/4 + 3/4 + 4/4) / 5 * 100.0 = 50.0 self.assertEqual(avgB.p.percentBu, 50.0) # check that a new block collection of the representative block has right temperatures # this is required for Doppler coefficient calculations newBc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem) newBc.append(avgB) newBc.calcAvgNuclideTemperatures() self.assertAlmostEqual(newBc.avgNucTemperatures["U235"], 603.0) self.assertAlmostEqual(newBc.avgNucTemperatures["FE56"], 502.0) self.assertAlmostEqual(newBc.avgNucTemperatures["NA23"], 402.0) def test_createRepresentativeBlockDissimilar(self): """Test creation of a representative block from a collection with dissimilar blocks.""" uniqueBlock = test_blocks.loadTestBlock() uniqueBlock.p.percentBu = 50.0 fpFactory = test_lumpedFissionProduct.getDummyLFPFile() 
uniqueBlock.setLumpedFissionProducts(fpFactory.createLFPsFromFile()) uniqueBlock.setNumberDensity("U235", 2.0) uniqueBlock.p.gasReleaseFraction = 1.0 for c in uniqueBlock: if c.hasFlags(Flags.FUEL): c.temperatureInC = 600.0 elif c.hasFlags([Flags.CLAD, Flags.DUCT, Flags.WIRE]): c.temperatureInC = 500.0 elif c.hasFlags([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]): c.temperatureInC = 400.0 self.bc.append(uniqueBlock) with mockRunLogs.BufferLog() as mock: avgB = self.bc.createRepresentativeBlock() self.assertIn("Non-matching block in AverageBlockCollection", mock.getStdout()) self.assertNotIn(avgB, self.bc) # (0 + 1 + 2 + 3 + 4 + 2) / 6.0 = 12/6 = 2.0 self.assertAlmostEqual(avgB.getNumberDensity("U235"), 2.0) # (0 + 1/4 + 2/4 + 3/4 + 4/4) / 5 * 100.0 = 50.0 self.assertAlmostEqual(avgB.p.percentBu, 50.0) # U35 has different average temperature because blocks have different U235 content newBc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem) newBc.append(avgB) newBc.calcAvgNuclideTemperatures() # temps expected to be proportional to volume-fraction weighted temperature # this is a non-physical result, but it demonstrates a problem that exists in the code # when dissimilar blocks are put together in a BlockCollection structureVolume = sum(c.getVolume() for c in avgB.getComponents([Flags.CLAD, Flags.DUCT, Flags.WIRE])) fuelVolume = avgB.getComponent(Flags.FUEL).getVolume() coolantVolume = sum(c.getVolume() for c in avgB.getComponents([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT])) expectedIronTemp = (structureVolume * 500.0 + fuelVolume * 600.0) / (structureVolume + fuelVolume) expectedSodiumTemp = (coolantVolume * 400.0 + fuelVolume * 600.0) / (coolantVolume + fuelVolume) self.assertAlmostEqual(newBc.avgNucTemperatures["U235"], 600.0) self.assertAlmostEqual(newBc.avgNucTemperatures["FE56"], expectedIronTemp) self.assertAlmostEqual(newBc.avgNucTemperatures["NA23"], expectedSodiumTemp) class TestComponentAveraging(unittest.TestCase): 
    @classmethod
    def setUpClass(cls):
        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()
        cls.blockList = makeBlocks(3)
        for bi, b in enumerate(cls.blockList):
            b.setType("fuel")
            b.setLumpedFissionProducts(fpFactory.createLFPsFromFile())
            # put some trace Fe-56 and Na-23 into the fuel
            # zero out all fuel nuclides except U-235 (for mass-weighting of component temperature)
            for nuc in b.getNuclides():
                b.setNumberDensity(nuc, 0.0)
            # densities scale linearly with block index so the middle block equals the average
            b.setNumberDensity("U235", bi)
            b.setNumberDensity("FE56", bi / 2.0)
            b.setNumberDensity("NA23", bi / 3.0)
            for c in b:
                if c.hasFlags(Flags.FUEL):
                    c.temperatureInC = 600.0 + bi
                elif c.hasFlags([Flags.CLAD, Flags.DUCT, Flags.WIRE]):
                    c.temperatureInC = 500.0 + bi
                elif c.hasFlags([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]):
                    c.temperatureInC = 400.0 + bi

    def setUp(self):
        # deep-copy the class-level blocks so each test mutates its own copies
        self.bc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        blockCopies = [copy.deepcopy(b) for b in self.blockList]
        self.bc.extend(blockCopies)

    def test_getAverageComponentNumberDensities(self):
        """Test component number density averaging."""
        # because of the way densities are set up, the middle block (index 1 of 0-2) component
        # densities are equivalent to the average
        b = self.bc[1]
        for compIndex, c in enumerate(b.getComponents()):
            avgDensities = self.bc._getAverageComponentNumberDensities(compIndex)
            compDensities = c.getNumberDensities()
            for nuc in c.getNuclides():
                self.assertAlmostEqual(
                    compDensities[nuc],
                    avgDensities[nuc],
                    msg=f"{nuc} density {compDensities[nuc]} not equal to {avgDensities[nuc]}!",
                )
            self.assertEqual(len(compDensities), len(avgDensities))

    def test_getAverageComponentTemperature(self):
        """Test mass-weighted component temperature averaging."""
        b = self.bc[0]
        # U-235 densities are 0, 1, 2, so weights are 0, 1, 2 and the temperature offsets are
        # 0, 1, 2 -> mass-weighted offset is (0*0 + 1*1 + 2*2) / 3 = 5/3
        massWeightedIncrease = 5.0 / 3.0
        baseTemps = [600, 400, 500, 500, 400, 500, 400]
        expectedTemps = [t + massWeightedIncrease for t in baseTemps]
        for compIndex, c in enumerate(b.getComponents()):
            avgTemp = self.bc._getAverageComponentTemperature(compIndex)
            self.assertAlmostEqual(
                expectedTemps[compIndex],
                avgTemp,
                msg=f"{c} avg temperature {avgTemp} not equal to expected {expectedTemps[compIndex]}!",
            )

    def test_getAvgCompTempVariedWeights(self):
        """Test mass-weighted component temperature averaging with variable weights."""
        # make up a fake weighting with power param
        self.bc.weightingParam = "power"
        for i, b in enumerate(self.bc):
            b.p.power = i
        weightedIncrease = 1.8
        baseTemps = [600, 400, 500, 500, 400, 500, 400]
        expectedTemps = [t + weightedIncrease for t in baseTemps]
        # NOTE(review): `b` here is the last block from the loop above
        for compIndex, c in enumerate(b.getComponents()):
            avgTemp = self.bc._getAverageComponentTemperature(compIndex)
            self.assertAlmostEqual(
                expectedTemps[compIndex],
                avgTemp,
                msg=f"{c} avg temperature {avgTemp} not equal to expected {expectedTemps[compIndex]}!",
            )

    def test_getAvgCompTempNoMass(self):
        """Test component temperature averaging when the components have no mass."""
        # with all densities zeroed, averaging falls back to unweighted behavior
        for b in self.bc:
            for nuc in b.getNuclides():
                b.setNumberDensity(nuc, 0.0)
        unweightedIncrease = 1.0
        baseTemps = [600, 400, 500, 500, 400, 500, 400]
        expectedTemps = [t + unweightedIncrease for t in baseTemps]
        for compIndex, c in enumerate(b.getComponents()):
            avgTemp = self.bc._getAverageComponentTemperature(compIndex)
            self.assertAlmostEqual(
                expectedTemps[compIndex],
                avgTemp,
                msg=f"{c} avg temperature {avgTemp} not equal to expected {expectedTemps[compIndex]}!",
            )


class TestBlockCollCompAvg(unittest.TestCase):
    """Test Block collection component averages."""

    def setUp(self):
        r"""
        First part of setup same as test_Cartesian.

        Second part of setup builds lists/dictionaries of expected values to compare to.
        has expected values for component isotopic atom density and component area.
        """
        self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName="zpprTest.yaml")
        # ndrawer1 lenFuelTypeD1 ndrawer2 lenFuelTypeD2
        EuWeight = float(1 * 60 + 3 * 15)
        otherEUWeight = float(1 * 15 + 3 * 45)
        totalWeight = otherEUWeight + EuWeight
        otherEUWeight /= totalWeight
        EuWeight /= totalWeight
        expectedRepBlanketBlock = [
            {"U238": 0.045},  # DU
            {"NA23": 0.02},  # Na
            {"U238": 0.045},  # DU
        ]
        expectedRepFuelBlock = [
            {"U238": 0.045 * EuWeight + 0.045 * otherEUWeight},  # DU
            {
                "U235": 0.025 * EuWeight + 0.0125 * otherEUWeight,
                "U238": 0.02 * EuWeight + 0.01 * otherEUWeight,
            },
            {"NA23": 0.02},  # Na}
            {
                "FE54": 0.07 * 0.05845,
                "FE56": 0.07 * 0.91754,
                "FE57": 0.07 * 0.02119,
                "FE58": 0.07 * 0.00282,
            },  # Steel
        ]
        # later sorted by density so less massive block first
        self.expectedBlockDensities = [
            expectedRepBlanketBlock,
            expectedRepFuelBlock,
            expectedRepFuelBlock,
        ]
        # NOTE(review): only two area lists vs three density lists — the zip below silently
        # truncates, so the third representative block's areas are never checked. Confirm intent.
        self.expectedAreas = [[1, 6, 1], [1, 2, 1, 4]]

    def test_ComponentAverageRepBlock(self):
        """Tests that the XS group manager calculates the expected component atom density and
        component area correctly.

        Order of components is also checked since in 1D cases the order of the components matters.
        """
        xsgm = self.o.getInterface("xsGroups")
        for _xsID, xsOpt in self.o.cs[CONF_CROSS_SECTION].items():
            self.assertEqual(xsOpt.blockRepresentation, None)
        xsgm.interactBOL()
        # Check that the correct defaults are propagated after the interactBOL
        # from the cross section group manager is called.
        for _xsID, xsOpt in self.o.cs[CONF_CROSS_SECTION].items():
            self.assertEqual(xsOpt.blockRepresentation, self.o.cs[CONF_XS_BLOCK_REPRESENTATION])
        xsgm.createRepresentativeBlocks()
        representativeBlockList = list(xsgm.representativeBlocks.values())
        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())
        self.assertEqual(len(representativeBlockList), len(self.expectedBlockDensities))
        for b, componentDensities, areas in zip(
            representativeBlockList, self.expectedBlockDensities, self.expectedAreas
        ):
            self.assertEqual(len(b), len(componentDensities))
            self.assertEqual(len(b), len(areas))
            for c, compDensity, compArea in zip(b, componentDensities, areas):
                self.assertEqual(compArea, c.getArea())
                cNucs = c.getNuclides()
                self.assertEqual(len(cNucs), len(compDensity), (cNucs, compDensity))
                for nuc in cNucs:
                    self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity[nuc])
        self.assertIn(
            "AC",
            xsgm.representativeBlocks,
            ("Assemblies not in the core should still have XS groups, see _getMissingBlueprintBlocks()"),
        )


class TestBlockCollCompAvg1DCyl(unittest.TestCase):
    """Test Block collection component averages for 1D cylinder."""

    def setUp(self):
        """First part of setup same as test_Cartesian.

        Second part of setup builds lists/dictionaries of expected values to compare to.
        has expected values for component isotopic atom density and component area.
        """
        self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT)
        sodiumDensity = {"NA23": 0.022166571826233578}
        steelDensity = {
            "C": 0.0007685664978992269,
            "V50": 6.795562118653462e-07,
            "V51": 0.0002711429285342731,
            "SI28": 0.0003789374369638149,
            "SI29": 1.924063709833714e-05,
            "SI30": 1.268328992580968e-05,
            "CR50": 0.0004532023742335746,
            "CR52": 0.008739556775111474,
            "CR53": 0.0009909955713678232,
            "CR54": 0.000246679773317009,
            "MN55": 0.0004200803669857142,
            "FE54": 0.004101496663229472,
            "FE56": 0.06438472483061823,
            "FE57": 0.0014869241111006412,
            "FE58": 0.00019788230265709334,
            "NI58": 0.0002944487657779742,
            "NI60": 0.00011342053328927859,
            "NI61": 4.930763373747379e-06,
            "NI62": 1.571788956157717e-05,
            "NI64": 4.005163933412346e-06,
            "MO92": 7.140180476114493e-05,
            "MO94": 4.4505841916481845e-05,
            "MO95": 7.659816252004227e-05,
            "MO96": 8.02548587207478e-05,
            "MO97": 4.594927462728666e-05,
            "MO98": 0.00011610009956095838,
            "MO100": 4.6334190016834624e-05,
            "W182": 3.663619370317025e-05,
            "W183": 1.9783544599711936e-05,
            "W184": 4.235973352562047e-05,
            "W186": 3.9304414603061506e-05,
        }
        linerAdjustment = 1.014188527784268
        cladDensity = {nuc: dens * linerAdjustment for nuc, dens in steelDensity.items()}
        fuelDensity = {
            "AM241": 2.3605999999999997e-05,
            "PU238": 3.7387e-06,
            "PU239": 0.0028603799999999996,
            "PU240": 0.000712945,
            "PU241": 9.823120000000004e-05,
            "PU242": 2.02221e-05,
            "U235": 0.00405533,
            "U238": 0.0134125,
        }
        # expected values in radial (1D cylinder) component order
        self.expectedComponentDensities = [
            fuelDensity,
            sodiumDensity,
            cladDensity,
            steelDensity,
            sodiumDensity,
            steelDensity,
            sodiumDensity,
        ]
        self.expectedComponentAreas = [
            99.54797488948871,
            29.719913442616843,
            30.07759373476877,
            1.365897776727751,
            63.184097853691235,
            17.107013842808822,
            1.9717608091694139,
        ]

    def test_ComponentAverage1DCylinder(self):
        """Tests that the cross-section group manager calculates the expected component atom
        density and component area correctly.

        Order of components is also checked since in 1D cases the order of the components matters.

        .. test:: Create representative blocks using custom cylindrical averaging.
            :id: T_ARMI_XSGM_CREATE_REPR_BLOCKS1
            :tests: R_ARMI_XSGM_CREATE_REPR_BLOCKS
        """
        xsgm = self.o.getInterface("xsGroups")
        xsgm.interactBOL()
        # Check that the correct defaults are propagated after the interactBOL
        # from the cross section group manager is called.
        xsOpt = self.o.cs[CONF_CROSS_SECTION]["ZA"]
        self.assertEqual(xsOpt.blockRepresentation, "ComponentAverage1DCylinder")
        xsgm.createRepresentativeBlocks()
        xsgm.updateNuclideTemperatures()
        representativeBlockList = list(xsgm.representativeBlocks.values())
        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())
        reprBlock = xsgm.representativeBlocks["ZA"]
        self.assertEqual(reprBlock.name, "1D_CYL_AVG_ZA")
        self.assertEqual(reprBlock.p.percentBu, 0.0)
        refTemps = {"fuel": 600.0, "coolant": 450.0, "structure": 462.4565}
        for c, compDensity, compArea in zip(reprBlock, self.expectedComponentDensities, self.expectedComponentAreas):
            self.assertEqual(compArea, c.getArea())
            cNucs = c.getNuclides()
            for nuc in cNucs:
                self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity.get(nuc, 0.0))
            if "fuel" in c.getType():
                compTemp = refTemps["fuel"]
            elif any(sodium in c.getType() for sodium in ["bond", "coolant"]):
                compTemp = refTemps["coolant"]
            else:
                compTemp = refTemps["structure"]
            # NOTE(review): `nuc` is the last nuclide from the loop above — only one nuclide per
            # component is temperature-checked here; confirm that is intended.
            self.assertAlmostEqual(
                compTemp,
                xsgm.avgNucTemperatures["ZA"][nuc],
                2,
                f"{nuc} temperature does not match expected value of {compTemp}",
            )

    def test_ComponentAverageDuctHet1DCylinder(self):
        """
        Tests that the cross-section group manager calculates the expected component atom density,
        component area, and average nuclide temperature correctly for a duct heterogeneous
        cylindrical block collection.
        """
        self.o.cs[CONF_CROSS_SECTION]["ZA"].ductHeterogeneous = True
        xsgm = self.o.getInterface("xsGroups")
        xsgm.interactBOL()
        # Check that the correct defaults are propagated after the interactBOL
        # from the cross section group manager is called.
        xsOpt = self.o.cs[CONF_CROSS_SECTION]["ZA"]
        self.assertEqual(xsOpt.blockRepresentation, "ComponentAverage1DCylinder")
        xsgm.createRepresentativeBlocks()
        xsgm.updateNuclideTemperatures()
        representativeBlockList = list(xsgm.representativeBlocks.values())
        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())
        reprBlock = xsgm.representativeBlocks["ZA"]
        self.assertEqual(reprBlock.name, "1D_CYL_DUCT_HET_AVG_ZA")
        self.assertEqual(reprBlock.p.percentBu, 0.0)
        refTemps = {"fuel": 600.0, "coolant": 450.0, "structure": 462.4565}
        for c, compDensity, compArea in zip(reprBlock, self.expectedComponentDensities, self.expectedComponentAreas):
            self.assertEqual(compArea, c.getArea())
            cNucs = c.getNuclides()
            for nuc in cNucs:
                self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity.get(nuc, 0.0))
            if "fuel" in c.getType():
                compTemp = refTemps["fuel"]
            elif any(sodium in c.getType() for sodium in ["bond", "coolant"]):
                compTemp = refTemps["coolant"]
            else:
                compTemp = refTemps["structure"]
            if any(comp in c.getType() for comp in ["fuel", "bond", "coolant"]):
                # only 1 fuel component, and bond and coolant are both at same temperature
                # the component temp should match the avg nuc temp
                self.assertAlmostEqual(
                    compTemp,
                    xsgm.avgNucTemperatures["ZA"][nuc],
                    2,
                    f"{nuc} temperature does not match expected value of {compTemp} for component {c}",
                )
            else:
                # steel components are at different temperatures
                # the temperatures should be different
                diff = abs(compTemp - xsgm.avgNucTemperatures["ZA"][nuc])
                self.assertGreater(
                    diff,
                    1.0,
                    f"{nuc} temperature should be different from {compTemp} for component {c}",
                )

    def test_checkComponentConsistency(self):
        # exercise _checkComponentConsistency with blocks that match, mismatch in various
        # ways, or differ only negligibly from the reference block
        xsgm = self.o.getInterface("xsGroups")
        xsgm.interactBOL()
        blockCollectionsByXsGroup = xsgm.makeCrossSectionGroups()
        blockCollection = blockCollectionsByXsGroup["ZA"]
        baseComponents = self.r.core.getFirstBlock(Flags.CONTROL).getComponents()
        densities = {
            "control": baseComponents[0].getNumberDensities(),
            "clad": baseComponents[2].getNumberDensities(),
            "coolant": baseComponents[4].getNumberDensities(),
        }
        controlComponent, cladComponent, coolantComponent = self._makeComponents(7, densities)
        # reference block
        refBlock = HexBlock("refBlock")
        refBlock.add(controlComponent)
        refBlock.add(cladComponent)
        refBlock.add(coolantComponent)
        # matching block
        matchingBlock = HexBlock("matchBlock")
        matchingBlock.add(controlComponent)
        matchingBlock.add(cladComponent)
        matchingBlock.add(coolantComponent)
        # unsorted block
        unsortedBlock = HexBlock("unsortedBlock")
        unsortedBlock.add(cladComponent)
        unsortedBlock.add(coolantComponent)
        unsortedBlock.add(controlComponent)
        # non-matching block length
        nonMatchingLengthBlock = HexBlock("blockLengthDiff")
        nonMatchingLengthBlock.add(controlComponent)
        nonMatchingLengthBlock.add(coolantComponent)
        # non-matching component multiplicity
        nonMatchingMultBlock = HexBlock("blockComponentDiff")
        control, clad, coolant = self._makeComponents(19, densities)
        nonMatchingMultBlock.add(control)
        nonMatchingMultBlock.add(clad)
        nonMatchingMultBlock.add(coolant)
        # different nuclides
        nucDiffBlock = HexBlock("blockNucDiff")
        mixedDensities = {
            "clad": baseComponents[0].getNumberDensities(),
            "coolant": baseComponents[2].getNumberDensities(),
            "control": baseComponents[4].getNumberDensities(),
        }
        control, clad, coolant = self._makeComponents(7, mixedDensities)
        nucDiffBlock.add(control)
        nucDiffBlock.add(clad)
        nucDiffBlock.add(coolant)
        # additional non-important nuclides
        negligibleNucDiffBlock = HexBlock("blockNegligibleNucDiff")
        negligibleNuc = {"N14": 1.0e-5}
        modControl = baseComponents[0].getNumberDensities()
        modClad = baseComponents[2].getNumberDensities()
        modCoolant = baseComponents[4].getNumberDensities()
        modControl.update(negligibleNuc)
        modClad.update(negligibleNuc)
        modCoolant.update(negligibleNuc)
        mixedDensities = {
            "control": modControl,
            "clad": modClad,
            "coolant": modCoolant,
        }
        control, clad, coolant = self._makeComponents(7, mixedDensities)
        negligibleNucDiffBlock.add(control)
        negligibleNucDiffBlock.add(clad)
        negligibleNucDiffBlock.add(coolant)
        # nuclides at zero number density should be okay
        zeroNucBlock = HexBlock("blockNucZero")
        mixedDensities = {
            "control": baseComponents[0].getNumberDensities(),
            "clad": baseComponents[2].getNumberDensities(),
            "coolant": baseComponents[4].getNumberDensities(),
        }
        control, clad, coolant = self._makeComponents(7, mixedDensities)
        # set some nuclide number densities to zero
        control.setNumberDensity("U235", 0.0)
        control.setNumberDensity("O16", 0.0)
        clad.setNumberDensity("FE56", 0.0)
        coolant.setNumberDensity("NA23", 0.0)
        coolant.setNumberDensity("PU239", 0.0)
        zeroNucBlock.add(control)
        zeroNucBlock.add(clad)
        zeroNucBlock.add(coolant)
        # these should all be accepted without raising
        blockCollection._checkComponentConsistency(refBlock, matchingBlock)
        blockCollection._checkComponentConsistency(refBlock, unsortedBlock)
        blockCollection._checkComponentConsistency(refBlock, negligibleNucDiffBlock)
        blockCollection._checkComponentConsistency(refBlock, zeroNucBlock)
        # these should all be rejected
        for b in (nonMatchingMultBlock, nonMatchingLengthBlock, nucDiffBlock):
            with self.assertRaises(ValueError):
                blockCollection._checkComponentConsistency(refBlock, b)

    def _makeComponents(self, multiplicity, densities):
        """Build control/clad/coolant Circle components with the given multiplicity and densities."""
        from armi.reactor import components

        baseComponents = self.r.core.getFirstBlock(Flags.CONTROL).getComponents()
        controlComponent = components.Circle(
            "control",
            baseComponents[0].material,
            100.0,
            100.0,
            id=0.0,
            od=0.6,
            mult=multiplicity,
        )
        cladComponent = components.Circle(
            "clad",
            baseComponents[2].material,
            100.0,
            100.0,
            id=0.6,
            od=0.7,
            mult=multiplicity,
        )
        coolantComponent = components.Circle(
            "coolant",
            baseComponents[4].material,
            100.0,
            100.0,
            id=0.7,
            od=0.8,
            mult=multiplicity,
        )
        controlComponent.setNumberDensities(densities["control"])
        cladComponent.setNumberDensities(densities["clad"])
        coolantComponent.setNumberDensities(densities["coolant"])
        return controlComponent, cladComponent, coolantComponent


class TestBlockCollFluxWeightAvg(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()
        cls.blockList = makeBlocks(5)
        for bi, b in enumerate(cls.blockList):
            b.setType("fuel")
            b.p.percentBu = bi / 4.0 * 100
            b.setLumpedFissionProducts(fpFactory.createLFPsFromFile())
            b.setNumberDensity("U235", bi)
            b.p.gasReleaseFraction = bi * 2 / 8.0
            b.p.flux = bi + 1

    def setUp(self):
        self.bc = FluxWeightedAverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        self.bc.extend(self.blockList)

    def test_createRepresentativeBlock(self):
        self.bc[1].p.flux = 1e99  # only the 2nd block values should show up
        avgB = self.bc.createRepresentativeBlock()
        self.assertNotIn(avgB, self.bc)
        self.assertAlmostEqual(avgB.getNumberDensity("U235"), 1.0)
        self.assertEqual(avgB.p.percentBu, 25.0)

    def test_invalidWeights(self):
        # a zero flux weight is invalid for flux-weighted averaging
        self.bc[0].p.flux = 0.0
        with self.assertRaises(ValueError):
            self.bc.createRepresentativeBlock()


class TestXSGM(unittest.TestCase):
    def setUp(self):
        cs = settings.Settings()
        self.blockList = makeBlocks(20)
        self.csm = CrossSectionGroupManager(self.blockList[0].core.r, cs)
        # spread burnups from 0 to 100% across the 20 blocks
        for bi, b in enumerate(self.blockList):
            b.p.percentBu = bi / 19.0 * 100
        self.csm._setBuGroupBounds([3, 10, 30, 100])
        self.csm.interactBOL()

    def test_enableEnvGroupUpdates(self):
        self.csm._envGroupUpdatesEnabled = False
        self.csm.enableEnvGroupUpdates()
        self.assertTrue(self.csm._envGroupUpdatesEnabled)
        # test flipping again keeps true
        self.csm.enableEnvGroupUpdates()
        self.assertTrue(self.csm._envGroupUpdatesEnabled)

    def test_disableEnvGroupUpdates(self):
        self.csm._envGroupUpdatesEnabled = True
        wasEnabled = self.csm.disableEnvGroupUpdates()
        self.assertTrue(wasEnabled)
        self.assertFalse(self.csm._envGroupUpdatesEnabled)
        # disabling twice reports it was already disabled
        wasEnabled = self.csm.disableEnvGroupUpdates()
        self.assertFalse(wasEnabled)
        self.assertFalse(self.csm._envGroupUpdatesEnabled)

    def test_updateBurnupGroups(self):
        self.blockList[1].p.percentBu = 3.1
        self.blockList[2].p.percentBu = 10.0
        self.csm._updateEnvironmentGroups(self.blockList)
        self.assertEqual(self.blockList[0].p.envGroup, "A")
        self.assertEqual(self.blockList[1].p.envGroup, "B")
        self.assertEqual(self.blockList[2].p.envGroup, "B")
        self.assertEqual(self.blockList[-1].p.envGroup, "D")

    def test_setBuGroupBounds(self):
        self.assertAlmostEqual(self.csm._buGroupBounds[2], 30.0)
        # upper bound above 100% burnup is rejected
        with self.assertRaises(ValueError):
            self.csm._setBuGroupBounds([3, 10, 300])
        # negative bounds are rejected
        with self.assertRaises(ValueError):
            self.csm._setBuGroupBounds([-5, 3, 10, 30.0])
        # non-monotonic bounds are rejected
        with self.assertRaises(ValueError):
            self.csm._setBuGroupBounds([1, 5, 3])

    def test_setTempGroupBounds(self):
        # negative temps in C are allowed
        self.csm._setTempGroupBounds([-5, 3, 10, 300])
        self.assertAlmostEqual(self.csm._tempGroupBounds[2], 10.0)
        with self.assertRaises(ValueError):
            self.csm._setTempGroupBounds([1, 5, 3])

    def test_addXsGroupsFromBlocks(self):
        blockCollectionsByXsGroup = {}
        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)
        self.assertEqual(len(blockCollectionsByXsGroup), 4)
        self.assertIn("AB", blockCollectionsByXsGroup)

    def test_getMissingBlueprintBlocks(self):
        """Test the function to get missing blueprints blocks."""
        self.csm._setTempGroupBounds([0, 100, 200])
        blockCollectionsByXsGroup = {}
        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)
        missingBlueprintBlocks = self.csm._getMissingBlueprintBlocks(blockCollectionsByXsGroup)
        envGroups = set(b.p.envGroup for b in missingBlueprintBlocks)
        self.assertGreater(len(envGroups), 1, "Blueprint block environment groups were not updated!")

    def test_calcWeightedBurnup(self):
        self.blockList[1].p.percentBu = 3.1
        self.blockList[2].p.percentBu = 10.0
        self.blockList[3].p.percentBu = 1.5
        for b in self.blockList[4:]:
            b.p.percentBu = 0.0
        self.csm._updateEnvironmentGroups(self.blockList)
        blockCollectionsByXsGroup = {}
        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)
        ABcollection = blockCollectionsByXsGroup["AB"]
        self.assertEqual(blockCollectionsByXsGroup["AA"]._calcWeightedBurnup(), 1 / 12.0)
        self.assertEqual(
            ABcollection.getWeight(self.blockList[1]),
            ABcollection.getWeight(self.blockList[2]),
            "The two blocks in AB do not have the same weighting!",
        )
        self.assertEqual(ABcollection._calcWeightedBurnup(), 6.55)

    def test_getNextAvailableXsType(self):
        blockCollectionsByXsGroup = {}
        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)
        xsType1, xsType2, xsType3 = self.csm.getNextAvailableXsTypes(3)
        self.assertEqual("B", xsType1)
        self.assertEqual("C", xsType2)
        self.assertEqual("D", xsType3)
        # verify that we can get lowercase letters
        xsTypes = self.csm.getNextAvailableXsTypes(26)
        self.assertEqual("Y", xsTypes[-4])
        self.assertEqual("a", xsTypes[-3])
        self.assertEqual("b", xsTypes[-2])
        self.assertEqual("c", xsTypes[-1])
        # requesting enough types to mix upper- and lower-case labels warns on Windows
        if sys.platform.startswith("win"):
            with mockRunLogs.BufferLog() as mock:
                xsTypes = self.csm.getNextAvailableXsTypes(27)
                self.assertIn("Mixing upper and lower-case XS", mock.getStdout())

    def test_getRepresentativeBlocks(self):
        """Test that we can create the representative blocks for a reactor.

        .. test:: Build representative blocks for a reactor.
            :id: T_ARMI_XSGM_CREATE_XS_GROUPS0
            :tests: R_ARMI_XSGM_CREATE_XS_GROUPS
        """
        _o, r = test_reactors.loadTestReactor(TEST_ROOT)
        self.csm.r = r
        # Assumption: All sodium in fuel blocks for this test is 450 C and this is the expected
        # sodium temperature. These lines of code take the first sodium block and decrease the
        # temperature of the block, but change the atom density to approximately zero. Checking
        # later on the nuclide temperature of sodium is asserted to be still 450. This perturbation
        # proves that altering the temperature of an component with near zero atom density does not
        # affect the average temperature of the block collection. This demonstrates that the
        # temperatures of a block collection are atom weighted rather than just the average
        # temperature.
        regularFuel = r.core.getFirstBlock(Flags.FUEL, exact=True)
        intercoolant = regularFuel.getComponent(Flags.INTERCOOLANT)
        intercoolant.setTemperature(100)  # just above melting
        intercoolant.setNumberDensity("NA23", units.TRACE_NUMBER_DENSITY)
        self.csm.createRepresentativeBlocks()
        blocks = list(self.csm.representativeBlocks.values())
        self.assertGreater(len(blocks), 0)
        # Test ability to get average nuclide temperature in block.
        u235 = self.csm.getNucTemperature("AA", "U235")
        fe = self.csm.getNucTemperature("AA", "FE56")
        na = self.csm.getNucTemperature("AA", "NA23")
        self.assertAlmostEqual(na, 450.0, msg="Na temp was {}, not 450".format(na))
        self.assertGreater(u235, fe)
        self.assertGreater(fe, na)
        self.assertTrue(0.0 < na < fe)
        # trace nuclides should also be at fuel temp.
        self.assertAlmostEqual(self.csm.getNucTemperature("AA", "LFP35"), u235)
        # Test that retrieving temperatures fails if a representative block for a given XS ID does not exist
        self.assertEqual(self.csm.getNucTemperature("Z", "U235"), None)
        # Test dimensions
        self.assertEqual(blocks[0].getHeight(), 25.0)
        self.assertEqual(blocks[1].getHeight(), 25.0)
        self.assertAlmostEqual(blocks[0].getVolume(), 6074.356308731789)
        self.assertAlmostEqual(blocks[1].getVolume(), 6074.356308731789)
        # Number densities haven't been calculated yet
        self.assertIsNone(blocks[0].p.detailedNDens)
        self.assertIsNone(blocks[1].p.detailedNDens)

    def test_checkForUnrepresentedXSIDs(self):
        blockCollectionsByXsGroup = self.csm.makeCrossSectionGroups()
        self.csm.createRepresentativeBlocks()
        # set valid flags to something the fuel block would not have to trigger unrepresented block
        fuelXStype = "AD"
        blocksWithType = [b for b in self.csm.r.core.iterBlocks(Flags.FUEL) if b.getMicroSuffix() == fuelXStype]
        fuelCollection = blockCollectionsByXsGroup[fuelXStype]
        fuelCollection._validRepresentativeBlockTypes = Flags.CLAD
        # check for unrepresented XS ID, assert that it is found
        self.csm._checkForUnrepresentedXSIDs(blockCollectionsByXsGroup)
        self.assertListEqual(self.csm._unrepresentedXSIDs, [fuelXStype])
        # modify unrepresented XS ID, assert that first character is the same
        self.csm._modifyUnrepresentedXSIDs(blockCollectionsByXsGroup)
        for b in blocksWithType:
            modifiedType = b.getMicroSuffix()
            self.assertEqual(modifiedType[0], fuelXStype[0])
            self.assertNotEqual(modifiedType[1], fuelXStype[1])

    def _createRepresentativeBlocksUsingExistingBlocks(self, validBlockTypes):
        """Reusable code used in multiple unit tests."""
        o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        # set a few random non-default settings on AA to be copied to the new BA group
        o.cs[CONF_CROSS_SECTION].update(
            {
                "AA": XSModelingOptions(
                    "AA",
                    geometry="0D",
                    averageByComponent=True,
                    xsMaxAtomNumber=60,
                    criticalBuckling=False,
                    xsPriority=2,
                )
            }
        )
        o.cs[CONF_CROSS_SECTION].setDefaults(crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION, validBlockTypes)
        aaSettings = o.cs[CONF_CROSS_SECTION]["AA"]
        self.csm.cs = copy.deepcopy(o.cs)
        self.csm.createRepresentativeBlocks()
        unperturbedReprBlocks = copy.deepcopy(self.csm.representativeBlocks)
        self.assertNotIn("BA", unperturbedReprBlocks)
        block = r.core.getFirstBlock()
        blockXSID = block.getMicroSuffix()
        blockList = [block]
        (
            _bCollect,
            newRepresentativeBlocks,
            origXSIDsFromNew,
        ) = self.csm.createRepresentativeBlocksUsingExistingBlocks(blockList, unperturbedReprBlocks)
        self.assertIn("BA", newRepresentativeBlocks)
        oldReprBlock = unperturbedReprBlocks[blockXSID]
        newReprBlock = newRepresentativeBlocks["BA"]
        self.assertEqual(newReprBlock.getMicroSuffix(), "BA")
        self.assertEqual(newReprBlock.getNumberDensities(), oldReprBlock.getNumberDensities())
        self.assertEqual(origXSIDsFromNew["BA"], "AA")
        # check that settings were copied correctly
        baSettings = self.csm.cs[CONF_CROSS_SECTION]["BA"]
        self.assertEqual(baSettings.xsID, "BA")
        for setting, baSettingValue in baSettings.__dict__.items():
            if setting == "xsID":
                continue
            self.assertEqual(baSettingValue, aaSettings.__dict__[setting])

    def test_createRepBlocksUsingExistingBlocks(self):
        """
        Demonstrates that a new representative block can be generated from an existing
        representative block.

        Notes
        -----
        This tests that the XS ID of the new representative block is correct and that the
        compositions are identical between the original and the new representative blocks.
        """
        self._createRepresentativeBlocksUsingExistingBlocks(["fuel"])

    def test_createRepBlocksDisableValidBlockTypes(self):
        """
        Demonstrates that a new representative block can be generated from an existing
        representative block.

        Notes
        -----
        This tests that the XS ID of the new representative block is correct and that the
        compositions are identical between the original and the new representative blocks.
        """
        self._createRepresentativeBlocksUsingExistingBlocks(True)

    def test_interactBOL(self):
        """Test `BOL` lattice physics update frequency.

        .. test:: The cross-section group manager frequency depends on the LPI frequency at BOL.
            :id: T_ARMI_XSGM_FREQ0
            :tests: R_ARMI_XSGM_FREQ
        """
        self.assertFalse(self.csm.representativeBlocks)
        self.blockList[0].core.r.p.timeNode = 0
        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "BOL"
        self.csm.interactBOL()
        self.assertTrue(self.csm.representativeBlocks)

    def test_interactBOC(self):
        """Test `BOC` lattice physics update frequency.

        .. test:: The cross-section group manager frequency depends on the LPI frequency at BOC.
            :id: T_ARMI_XSGM_FREQ1
            :tests: R_ARMI_XSGM_FREQ
        """
        self.assertFalse(self.csm.representativeBlocks)
        self.blockList[0].core.r.p.timeNode = 0
        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "BOC"
        self.csm.interactBOL()
        self.csm.interactBOC()
        self.assertTrue(self.csm.representativeBlocks)

    def test_interactEveryNode(self):
        """Test `everyNode` lattice physics update frequency.

        .. test:: The cross-section group manager frequency depends on the LPI frequency at every
            time node.

            :id: T_ARMI_XSGM_FREQ2
            :tests: R_ARMI_XSGM_FREQ
        """
        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "BOC"
        self.csm.interactBOL()
        self.csm.interactEveryNode()
        self.assertFalse(self.csm.representativeBlocks)
        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "everyNode"
        self.csm.interactBOL()
        self.csm.interactEveryNode()
        self.assertTrue(self.csm.representativeBlocks)

    def test_interactFirstCoupledIteration(self):
        """Test `firstCoupledIteration` lattice physics update frequency.

        .. test:: The cross-section group manager frequency depends on the LPI frequency during
            first coupled iteration.

            :id: T_ARMI_XSGM_FREQ3
            :tests: R_ARMI_XSGM_FREQ
        """
        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "everyNode"
        self.csm.interactBOL()
        self.csm.interactCoupled(iteration=0)
        self.assertFalse(self.csm.representativeBlocks)
        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "firstCoupledIteration"
        self.csm.interactBOL()
        self.csm.interactCoupled(iteration=0)
        self.assertTrue(self.csm.representativeBlocks)

    def test_interactAllCoupled(self):
        """Test `all` lattice physics update frequency.

        .. test:: The cross-section group manager frequency depends on the LPI frequency during
            coupling.

            :id: T_ARMI_XSGM_FREQ4
            :tests: R_ARMI_XSGM_FREQ
        """
        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "firstCoupledIteration"
        self.csm.interactBOL()
        self.csm.interactCoupled(iteration=1)
        self.assertFalse(self.csm.representativeBlocks)
        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "all"
        self.csm.interactBOL()
        self.csm.interactCoupled(iteration=1)
        self.assertTrue(self.csm.representativeBlocks)

    def test_xsgmIsRunBeforeXS(self):
        """Test that the XSGM is run before the cross sections are calculated.

        .. test:: Test that the cross-section group manager is run before the cross sections are
            calculated.

            :id: T_ARMI_XSGM_FREQ5
            :tests: R_ARMI_XSGM_FREQ
        """
        from armi.interfaces import STACK_ORDER

        self.assertLess(crossSectionGroupManager.ORDER, STACK_ORDER.CROSS_SECTIONS)

    def test_copyPregeneratedFiles(self):
        """
        Tests copying pre-generated cross section and flux files using reactor that is built from a
        case settings file.
        """
        o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        # Need to overwrite the relative paths with absolute
        o.cs[CONF_CROSS_SECTION]["XA"].xsFileLocation = [os.path.join(THIS_DIR, "ISOXA")]
        o.cs[CONF_CROSS_SECTION]["YA"].fluxFileLocation = os.path.join(THIS_DIR, "rzmflxYA")
        csm = CrossSectionGroupManager(r, o.cs)
        with TemporaryDirectoryChanger(root=THIS_DIR):
            csm._copyPregeneratedXSFile("XA")
            csm._copyPregeneratedFluxSolutionFile("YA")
            self.assertTrue(os.path.exists("ISOXA"))
            self.assertTrue(os.path.exists("rzmflxYA"))


class TestXSGMWithTempGrouping(unittest.TestCase):
    def setUp(self):
        cs = settings.Settings()
        cs["tempGroups"] = [300, 400, 500]
        self.blockList = makeBlocks(11)
        # (burnup, fuel component temperature) for each block
        buAndTemps = (
            (1, 340),
            (2, 150),
            (6, 410),
            (10.5, 290),
            (2.5, 360),
            (4, 460),
            (15, 370),
            (16, 340),
            (15, 700),
            (14, 720),
        )
        for b, env in zip(self.blockList, buAndTemps):
            bu, temp = env
            comps = b.getComponents(Flags.FUEL)
            self.assertEqual(len(comps), 1)
            c = next(iter(comps))
            c.setTemperature(temp)
            b.p.percentBu = bu
        core = self.blockList[0].core

        def getBlocks(includeAll=True):
            return self.blockList

        # this sets XSGM to only analyze the blocks in the block list.
        core.getBlocks = getBlocks
        self.csm = CrossSectionGroupManager(self.blockList[0].core.r, cs)
        self.csm._setBuGroupBounds([3, 10, 30, 100])
        self.csm.interactBOL()

    def test_updateEnvironmentGroups(self):
        """Test creation of a cross section groups with temperature grouping.

        .. test:: Create representative blocks using temperature groups.
            :id: T_ARMI_XSGM_CREATE_XS_GROUPS1
            :tests: R_ARMI_XSGM_CREATE_XS_GROUPS, R_ARMI_XSGM_CREATE_REPR_BLOCKS
        """
        self.csm.createRepresentativeBlocks()
        BL = self.blockList
        loners = [BL[1], BL[3]]
        self.assertNotEqual(loners[0].getMicroSuffix(), loners[1].getMicroSuffix())
        sameGroups = [(BL[0], BL[4]), (BL[2], BL[5]), (BL[6], BL[7]), (BL[8], BL[9])]
        # check that likes have like and different are different
        for group in sameGroups:
            b1, b2 = group
            xsSuffix = b1.getMicroSuffix()
            self.assertEqual(xsSuffix, b2.getMicroSuffix())
            for group in sameGroups:
                newb1, newb2 = group
                if b1 is newb1:
                    continue
                self.assertNotEqual(xsSuffix, newb1.getMicroSuffix())
                self.assertNotEqual(xsSuffix, newb2.getMicroSuffix())
            for lone in loners:
                self.assertNotEqual(xsSuffix, lone.getMicroSuffix())
        self.assertNotEqual(loners[0].getMicroSuffix(), loners[1].getMicroSuffix())
        # calculated based on the average of buAndTemps
        expectedIDs = ["AF", "AA", "AL", "AC", "AH", "AR"]
        expectedTemps = [
            (340 + 360) / 2,
            150,
            (410 + 460) / 2,
            290,
            (370 + 340) / 2,
            (700 + 720) / 2,
        ]
        expectedBurnups = (1.75, 2, 5, 10.5, 15.5, 14.5)
        for xsID, expectedTemp, expectedBurnup in zip(expectedIDs, expectedTemps, expectedBurnups):
            b = self.csm.representativeBlocks[xsID]
            thisTemp = self.csm.avgNucTemperatures[xsID]["U238"]
            self.assertAlmostEqual(thisTemp, expectedTemp)
            self.assertAlmostEqual(b.p.percentBu, expectedBurnup)


class TestXSNumberConverters(unittest.TestCase):
    def test_conversion(self):
        # 65 is the ASCII code for "A"
        label = crossSectionGroupManager.getXSTypeLabelFromNumber(65)
        self.assertEqual(label, "A")
        num = crossSectionGroupManager.getXSTypeNumberFromLabel("A")
        self.assertEqual(num, 65)

    def test_conversion_2digit(self):
        # two-character labels pack two ASCII codes
        label = crossSectionGroupManager.getXSTypeLabelFromNumber(6570)
        self.assertEqual(label, "AF")
        num = crossSectionGroupManager.getXSTypeNumberFromLabel("ZZ")
        self.assertEqual(num, 9090)


def makeBlocks(howMany=20):
    """Load the test reactor and return `howMany` of its fuel blocks."""
    _o, r = test_reactors.loadTestReactor(TEST_ROOT)
    # shift y 3 to skip central assemblies 1/3 volume
    return r.core.getBlocks(Flags.FUEL)[3 : howMany + 3]


================================================ FILE: armi/physics/neutronics/tests/test_crossSectionSettings.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XS Settings tests."""

import io
import unittest

import voluptuous as vol
from ruamel.yaml import YAML

from armi import settings
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.crossSectionSettings import (
    CONF_BLOCK_REPRESENTATION,
    CONF_GEOM,
    XSModelingOptions,
    XSSettingDef,
    XSSettings,
    xsSettingsValidator,
)
from armi.physics.neutronics.settings import (
    CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,
    CONF_XS_BLOCK_REPRESENTATION,
)
from armi.physics.neutronics.tests.test_neutronicsPlugin import XS_EXAMPLE
from armi.settings import caseSettings


class TestCrossSectionSettings(unittest.TestCase):
    def test_crossSections(self):
        xsModel = XSModelingOptions(
            xsID="AA",
            geometry="0D",
            criticalBuckling=True,
            blockRepresentation="Median",
            requiredRAM=20.0,
        )
        self.assertEqual("AA", xsModel.xsID)
        self.assertEqual("0D", xsModel.geometry)
        self.assertEqual("Median", xsModel.blockRepresentation)
        self.assertFalse(xsModel.fluxIsPregenerated)
        self.assertFalse(xsModel.xsIsPregenerated)
        self.assertTrue(xsModel.criticalBuckling)
        self.assertEqual(20.0, xsModel.requiredRAM)

    def test_pregeneratedCrossSections(self):
        cs = settings.Settings()
        xs = XSSettings()
        xa = XSModelingOptions("XA", xsFileLocation=["ISOXA"])
        xs["XA"] = xa
        self.assertEqual(["ISOXA"], xa.xsFileLocation)
        self.assertNotIn("XB", xs)
        xs.setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        # Check that the file location of 'XB' still points to the same file location as 'XA'.
        self.assertEqual(xa, xs["XB"])
        self.assertFalse(xa.fluxIsPregenerated)
        self.assertTrue(xa.xsIsPregenerated)
        self.assertFalse(xa.criticalBuckling)

    def test_pregeneratedFluxInputs(self):
        xsModel = XSModelingOptions(
            xsID="AA",
            fluxFileLocation="ISOAA",
            geometry="0D",
            criticalBuckling=True,
            blockRepresentation="Median",
        )
        self.assertEqual("AA", xsModel.xsID)
        self.assertEqual("0D", xsModel.geometry)
        self.assertEqual("ISOAA", xsModel.fluxFileLocation)
        self.assertTrue(xsModel.fluxIsPregenerated)
        self.assertTrue(xsModel.criticalBuckling)
        self.assertEqual("Median", xsModel.blockRepresentation)

    def test_prioritization(self):
        xsModel = XSModelingOptions(
            xsID="AA",
            geometry="0D",
            criticalBuckling=True,
            xsPriority=2,
            xsExecuteExclusive=True,
        )
        self.assertEqual("AA", xsModel.xsID)
        self.assertEqual(True, xsModel.xsExecuteExclusive)
        self.assertEqual(2, xsModel.xsPriority)
        xsModel = XSModelingOptions(
            xsID="AA",
            geometry="0D",
            criticalBuckling=True,
        )
        # defaults work
        xsModel.setDefaults("Average", False)
        self.assertEqual(False, xsModel.xsExecuteExclusive)
        self.assertEqual(5, xsModel.xsPriority)

    def test_homogeneousXsDefaultSettingAssignment(self):
        """
        Make sure the object can whip up an unspecified xsID by default.

        This is used when user hasn't specified anything.
""" cs = settings.Settings() xsModel = XSSettings() xsModel.setDefaults( cs[CONF_XS_BLOCK_REPRESENTATION], cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self.assertNotIn("YA", xsModel) self.assertEqual(xsModel["YA"].geometry, "0D") self.assertEqual(xsModel["YA"].criticalBuckling, True) self.assertEqual(xsModel["YA"].ductHeterogeneous, False) self.assertEqual(xsModel["YA"].traceIsotopeThreshold, 0.0) self.assertEqual(xsModel["YA"].requiredRAM, 0.0) def test_setDefSettingsByLowestEnvGroupHomog(self): # Initialize some micro suffix in the cross sections cs = settings.Settings() xs = XSSettings() jd = XSModelingOptions("JD", geometry="0D", criticalBuckling=False) xs["JD"] = jd xs.setDefaults( cs[CONF_XS_BLOCK_REPRESENTATION], cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self.assertIn("JD", xs) # Check that new micro suffix `JF` with higher burn-up group gets assigned the same settings as `JD` self.assertNotIn("JF", xs) self.assertEqual(xs["JD"], xs["JF"]) self.assertNotIn("JF", xs) # Check that new micro suffix `JG` with higher burn-up group gets assigned the same settings as `JD` self.assertNotIn("JG", xs) self.assertEqual(xs["JG"], xs["JD"]) # Check that new micro suffix `JB` with lower burn-up group does NOT get assigned the same settings as `JD` self.assertNotIn("JB", xs) self.assertNotEqual(xs["JD"], xs["JB"]) def test_setDefSettingsByLowestEnvGroup1D(self): # Initialize some micro suffix in the cross sections cs = settings.Settings() xsModel = XSSettings() rq = XSModelingOptions( "RQ", geometry="1D cylinder", blockRepresentation="ComponentAverage1DCylinder", meshSubdivisionsPerCm=1.0, ) xsModel["RQ"] = rq xsModel.setDefaults( cs[CONF_XS_BLOCK_REPRESENTATION], cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) # Check that new micro suffix `RY` with higher burn-up group gets assigned the same settings as `RQ` self.assertNotIn("RY", xsModel) self.assertEqual(xsModel["RY"], xsModel["RQ"]) # Check that new micro suffix `RZ` with higher 
burn-up group gets assigned the same settings as `RQ` self.assertNotIn("RZ", xsModel) self.assertEqual(xsModel["RZ"], xsModel["RQ"]) # Check that new micro suffix `RA` with lower burn-up group does NOT get assigned the same settings as `RQ` self.assertNotIn("RA", xsModel) self.assertNotEqual(xsModel["RA"], xsModel["RQ"]) def test_optionalKey(self): """Test that optional key shows up with default value.""" cs = settings.Settings() xsModel = XSSettings() da = XSModelingOptions( "DA", geometry="1D cylinder", meshSubdivisionsPerCm=1.0, ductHeterogeneous=True, traceIsotopeThreshold=1.0e-5, ) xsModel["DA"] = da xsModel.setDefaults( cs[CONF_XS_BLOCK_REPRESENTATION], cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self.assertEqual(xsModel["DA"].mergeIntoClad, ["gap"]) self.assertEqual(xsModel["DA"].meshSubdivisionsPerCm, 1.0) self.assertEqual(xsModel["DA"].ductHeterogeneous, True) self.assertEqual(xsModel["DA"].traceIsotopeThreshold, 1.0e-5) self.assertEqual(xsModel["DA"].mergeIntoFuel, []) def test_badCrossSections(self): with self.assertRaises(TypeError): # This will fail because it is not the required # Dict[str: Dict] structure xsSettingsValidator({CONF_GEOM: "4D"}) with self.assertRaises(vol.error.MultipleInvalid): # This will fail because it has an invalid type for ``driverID`` xsSettingsValidator({"AA": {"driverId": 0.0}}) with self.assertRaises(vol.error.MultipleInvalid): # This will fail because it has an invalid value for # the ``blockRepresentation`` xsSettingsValidator({"AA": {CONF_BLOCK_REPRESENTATION: "Invalid"}}) with self.assertRaises(vol.error.MultipleInvalid): # This will fail because the ``xsID`` is not one or two # characters xsSettingsValidator({"AAA": {CONF_BLOCK_REPRESENTATION: "Average"}}) class TestXSSettings(unittest.TestCase): def test_yamlIO(self): """Ensure we can read/write this custom setting object to yaml.""" yaml = YAML() inp = yaml.load(io.StringIO(XS_EXAMPLE)) xs = XSSettingDef("TestSetting") xs.setValue(inp) 
self.assertEqual(xs.value["BA"].geometry, "1D slab") outBuf = io.StringIO() output = xs.dump() yaml.dump(output, outBuf) outBuf.seek(0) inp2 = yaml.load(outBuf) self.assertEqual(inp.keys(), inp2.keys()) def test_caseSettings(self): """ Test the setting of the cross section setting using the case settings object. Notes ----- The purpose of this test is to ensure that the cross sections sections can be removed from an existing case settings object once they have been set. """ def _setInitialXSSettings(): cs = caseSettings.Settings() cs[CONF_CROSS_SECTION] = XSSettings() cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", geometry="0D") cs[CONF_CROSS_SECTION]["BA"] = XSModelingOptions("BA", geometry="0D") self.assertIn("AA", cs[CONF_CROSS_SECTION]) self.assertIn("BA", cs[CONF_CROSS_SECTION]) self.assertNotIn("CA", cs[CONF_CROSS_SECTION]) self.assertNotIn("DA", cs[CONF_CROSS_SECTION]) return cs cs = _setInitialXSSettings() cs[CONF_CROSS_SECTION] = {"AA": {}, "BA": {}} self.assertDictEqual(cs[CONF_CROSS_SECTION], {}) self.assertTrue(isinstance(cs[CONF_CROSS_SECTION], XSSettings)) # Produce an error if the setting is set to # a None value cs = _setInitialXSSettings() with self.assertRaises(TypeError): cs[CONF_CROSS_SECTION] = None cs = _setInitialXSSettings() cs[CONF_CROSS_SECTION] = {"AA": None, "BA": {}} self.assertDictEqual(cs[CONF_CROSS_SECTION], {}) # Test that a new XS setting can be added to an existing # caseSetting using the ``XSModelingOptions`` or using # a dictionary. cs = _setInitialXSSettings() cs[CONF_CROSS_SECTION].update({"CA": XSModelingOptions("CA", geometry="0D"), "DA": {CONF_GEOM: "0D"}}) self.assertIn("AA", cs[CONF_CROSS_SECTION]) self.assertIn("BA", cs[CONF_CROSS_SECTION]) self.assertIn("CA", cs[CONF_CROSS_SECTION]) self.assertIn("DA", cs[CONF_CROSS_SECTION]) # Clear out the settings by setting the value to a None. # This will be interpreted as a empty dictionary. 
cs[CONF_CROSS_SECTION] = {} self.assertDictEqual(cs[CONF_CROSS_SECTION], {}) self.assertTrue(isinstance(cs[CONF_CROSS_SECTION], XSSettings)) # This will fail because the ``setDefaults`` method on the # ``XSSettings`` has not yet been called. with self.assertRaises(ValueError): cs[CONF_CROSS_SECTION]["AA"] cs[CONF_CROSS_SECTION].setDefaults( blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) cs[CONF_CROSS_SECTION]["AA"] self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].geometry, "0D") def test_csBlockRepresentation(self): """ Test that the XS block representation is applied globally, but only to XS modeling options where the blockRepresentation has not already been assigned. """ cs = caseSettings.Settings() cs[CONF_XS_BLOCK_REPRESENTATION] = "FluxWeightedAverage" cs[CONF_CROSS_SECTION] = XSSettings() cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", geometry="0D") cs[CONF_CROSS_SECTION]["BA"] = XSModelingOptions("BA", geometry="0D", blockRepresentation="Average") self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, None) self.assertEqual(cs[CONF_CROSS_SECTION]["BA"].blockRepresentation, "Average") cs[CONF_CROSS_SECTION].setDefaults( cs[CONF_XS_BLOCK_REPRESENTATION], cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, "FluxWeightedAverage") self.assertEqual(cs[CONF_CROSS_SECTION]["BA"].blockRepresentation, "Average") def test_csBlockRepresentationFileLocation(self): """ Test that default blockRepresentation is applied correctly to a XSModelingOption that has the ``xsFileLocation`` attribute defined. 
""" cs = caseSettings.Settings() cs[CONF_XS_BLOCK_REPRESENTATION] = "FluxWeightedAverage" cs[CONF_CROSS_SECTION] = XSSettings() cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", xsFileLocation=[]) # Check FluxWeightedAverage cs[CONF_CROSS_SECTION].setDefaults( cs[CONF_XS_BLOCK_REPRESENTATION], cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, "FluxWeightedAverage") # Check Average cs[CONF_XS_BLOCK_REPRESENTATION] = "Average" cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", xsFileLocation=[]) cs[CONF_CROSS_SECTION].setDefaults( cs[CONF_XS_BLOCK_REPRESENTATION], cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, "Average") # Check Median cs[CONF_XS_BLOCK_REPRESENTATION] = "Average" cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", xsFileLocation=[], blockRepresentation="Median") cs[CONF_CROSS_SECTION].setDefaults( cs[CONF_XS_BLOCK_REPRESENTATION], cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION], ) self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, "Median") def test_xsSettingsSetDefault(self): """Test the configuration options of the ``setDefaults`` method.""" cs = caseSettings.Settings() cs[CONF_XS_BLOCK_REPRESENTATION] = "FluxWeightedAverage" cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=None) self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].validBlockTypes, None) cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=True) self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].validBlockTypes, None) cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=False) self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].validBlockTypes, ["fuel"]) cs[CONF_CROSS_SECTION].setDefaults( blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], 
validBlockTypes=["control", "fuel", "plenum"], ) self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].validBlockTypes, ["control", "fuel", "plenum"]) ================================================ FILE: armi/physics/neutronics/tests/test_crossSectionTable.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for cross section table for depletion.""" import unittest from armi.nuclearDataIO.cccc import isotxs from armi.physics.neutronics.isotopicDepletion import ( crossSectionTable, ) from armi.physics.neutronics.isotopicDepletion import ( isotopicDepletionInterface as idi, ) from armi.physics.neutronics.latticePhysics import ORDER from armi.reactor.tests.test_blocks import loadTestBlock from armi.settings import Settings from armi.testing import loadTestReactor from armi.tests import ISOAA_PATH class TestCrossSectionTable(unittest.TestCase): def test_makeTable(self): """Test making a cross section table. .. test:: Generate cross section table. 
:id: T_ARMI_DEPL_TABLES :tests: R_ARMI_DEPL_TABLES """ obj = loadTestBlock() obj.p.mgFlux = range(33) core = obj.parent.parent core.lib = isotxs.readBinary(ISOAA_PATH) table = crossSectionTable.makeReactionRateTable(obj) self.assertEqual(len(obj.getNuclides()), len(table)) self.assertEqual(obj.getName(), "B0001-000") self.assertEqual(table.getName(), "B0001-000") self.assertTrue(table.hasValues()) xSecTable = table.getXsecTable() self.assertEqual(len(xSecTable), 11) self.assertIn("xsecs", xSecTable[0]) self.assertIn("mcnpId", xSecTable[-1]) def test_isotopicDepletionInterface(self): """ Test isotopic depletion interface. .. test:: ARMI provides a base class to deplete isotopes. :id: T_ARMI_DEPL_ABC :tests: R_ARMI_DEPL_ABC """ _o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") cs = Settings() aid = idi.AbstractIsotopicDepleter(r, cs) self.assertIsNone(aid.efpdToBurn) self.assertEqual(len(aid._depleteByName), 0) self.assertEqual(len(aid.getToDeplete()), 0) self.assertEqual(ORDER, 5.0) ================================================ FILE: armi/physics/neutronics/tests/test_energyGroups.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Energy group tests.""" import unittest from armi.physics.neutronics import energyGroups class TestEnergyGroups(unittest.TestCase): def test_invalidGroupStructureType(self): """Test that the reverse lookup fails on non-existent energy group bounds. .. test:: Check the neutron energy group bounds logic fails correctly for the wrong structure. :id: T_ARMI_EG_NE0 :tests: R_ARMI_EG_NE """ modifier = 1e-5 for groupStructureType in energyGroups.GROUP_STRUCTURE: energyBounds = energyGroups.getGroupStructure(groupStructureType) energyBounds[0] = energyBounds[0] * modifier with self.assertRaises(ValueError): energyGroups.getGroupStructureType(energyBounds) def test_consistenciesBetweenGSAndGSType(self): """Test that the reverse lookup of the energy group structures work. .. test:: Check the neutron energy group bounds for a given group structure. :id: T_ARMI_EG_NE1 :tests: R_ARMI_EG_NE """ for groupStructureType in energyGroups.GROUP_STRUCTURE: self.assertEqual( groupStructureType, energyGroups.getGroupStructureType(energyGroups.getGroupStructure(groupStructureType)), ) def test_getFastFluxGroupCutoff(self): """Test ability to get the ARMI energy group index contained in energy threshold. .. test:: Return the energy group index which contains a given energy threshold. :id: T_ARMI_EG_FE :tests: R_ARMI_EG_FE """ group, frac = energyGroups.getFastFluxGroupCutoff([100002, 100001, 100000, 99999, 0]) self.assertListEqual([group, frac], [2, 0]) ================================================ FILE: armi/physics/neutronics/tests/test_macroXSGenerationInterface.py ================================================ # Copyright 2021 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MacroXSGenerationInterface tests."""

import unittest
from collections import defaultdict

from armi.nuclearDataIO import isotxs
from armi.nuclearDataIO.xsCollections import XSCollection
from armi.physics.neutronics.macroXSGenerationInterface import (
    MacroXSGenerationInterface,
)
from armi.settings import Settings
from armi.testing import loadTestReactor
from armi.tests import ISOAA_PATH


class TestMacroXSGenerationInterface(unittest.TestCase):
    def test_macroXSGenerationInterfaceBasics(self):
        """Test the macroscopic XS generating interfaces.

        .. test:: Build macroscopic cross sections for all blocks in the reactor.
            :id: T_ARMI_MACRO_XS
            :tests: R_ARMI_MACRO_XS
        """
        cs = Settings()
        _o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")

        # Before: verify there are no macro XS on each block
        for b in r.core.iterBlocks():
            self.assertIsNone(b.macros)

        # create the macro XS interface
        i = MacroXSGenerationInterface(r, cs)
        self.assertEqual(i.minimumNuclideDensity, 1e-15)
        self.assertEqual(i.name, "macroXsGen")

        # Mock up a nuclide library.
        # Wrap the ISOTXS nuclide dict in a defaultdict so any nuclide the
        # reactor has that is missing from the library falls back to "CAA"
        # instead of raising KeyError during the macro build.
        mockLib = isotxs.readBinary(ISOAA_PATH)
        mockLib.__dict__["_nuclides"] = defaultdict(
            lambda: mockLib.__dict__["_nuclides"]["CAA"], mockLib.__dict__["_nuclides"]
        )

        # This is the meat of it: build the macro XS
        self.assertIsNone(i.macrosLastBuiltAt)
        i.buildMacros(mockLib, buildScatterMatrix=False)
        # macrosLastBuiltAt reflects the time node of the build (node 0 here).
        self.assertEqual(i.macrosLastBuiltAt, 0)

        # After: verify there are macro XS on each block
        for b in r.core.iterBlocks():
            self.assertIsNotNone(b.macros)
            self.assertTrue(isinstance(b.macros, XSCollection))


================================================
FILE: armi/physics/neutronics/tests/test_neutronicsPlugin.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the neutronics plugin.""" import io import unittest from ruamel.yaml import YAML from armi import getPluginManagerOrFail, settings, tests from armi.physics import neutronics from armi.physics.neutronics.const import CONF_CROSS_SECTION from armi.physics.neutronics.settings import ( CONF_BOUNDARIES, CONF_DPA_XS_SET, CONF_GEN_XS, CONF_GLOBAL_FLUX_ACTIVE, CONF_GRID_PLATE_DPA_XS_SET, CONF_GROUP_STRUCTURE, CONF_INNERS_, CONF_LATTICE_PHYSICS_FREQUENCY, CONF_NEUTRONICS_KERNEL, CONF_OUTERS_, getNeutronicsSettingValidators, ) from armi.settings import caseSettings, settingsValidation from armi.settings.fwSettings.globalSettings import CONF_RUN_TYPE from armi.tests import TEST_ROOT from armi.tests.test_plugins import TestPlugin from armi.utils import directoryChangers XS_EXAMPLE = """AA: geometry: 0D criticalBuckling: true blockRepresentation: Median BA: geometry: 1D slab blockRepresentation: Median """ class TestNeutronicsPlugin(TestPlugin): plugin = neutronics.NeutronicsPlugin def setUp(self): self.td = directoryChangers.TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_customSettingObjectIO(self): """Check specialized settings can build objects as values and write.""" cs = caseSettings.Settings() yaml = YAML() inp = yaml.load(io.StringIO(XS_EXAMPLE)) cs[CONF_CROSS_SECTION] = inp self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].geometry, "0D") fname = "test_setting_obj_io_.yaml" cs.writeToYamlFile(fname) outText = open(fname, "r").read() self.assertIn("geometry: 0D", outText) def test_customSettingRoundTrip(self): """Check specialized settings can go back and forth.""" cs = caseSettings.Settings() yaml = YAML() inp = yaml.load(io.StringIO(XS_EXAMPLE)) cs[CONF_CROSS_SECTION] = inp cs[CONF_CROSS_SECTION] = cs[CONF_CROSS_SECTION] fname = "test_setting_obj_io_round.yaml" cs.writeToYamlFile(fname) outText = open(fname, "r").read() self.assertIn("geometry: 0D", outText) self.assertIn("geometry: 1D", 
outText) def test_neutronicsSettingsLoaded(self): """Check that various special neutronics-specifics settings are loaded.""" cs = caseSettings.Settings() self.assertIn(CONF_INNERS_, cs) self.assertIn(CONF_OUTERS_, cs) self.assertIn(CONF_NEUTRONICS_KERNEL, cs) class NeutronicsReactorTests(unittest.TestCase): @classmethod def setUpClass(cls): # prepare the input files. This is important so the unit tests run from wherever # they need to run from. cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT) cls.directoryChanger.open() @classmethod def tearDownClass(cls): cls.directoryChanger.close() @staticmethod def __getModifiedSettings(customSettings): cs = settings.Settings() newSettings = {} for key, val in customSettings.items(): newSettings[key] = val return cs.modified(newSettings=newSettings) def test_kineticsParameterAssignment(self): """Test that the delayed neutron fraction and precursor decay constants are applied from settings.""" r = tests.getEmptyHexReactor() self.assertIsNone(r.core.p.beta) self.assertIsNone(r.core.p.betaComponents) self.assertIsNone(r.core.p.betaDecayConstants) # Test that the group-wise beta and decay constants are assigned # together given that they are the same length. 
r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={ "beta": [0.0] * 6, "decayConstants": [1.0] * 6, } ) dbLoad = False getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) r.core.setOptionsFromCs(cs) self.assertEqual(r.core.p.beta, sum(cs["beta"])) self.assertListEqual(list(r.core.p.betaComponents), cs["beta"]) self.assertListEqual(list(r.core.p.betaDecayConstants), cs["decayConstants"]) # Test the assignment of total beta as a float r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={"beta": 0.00670}, ) getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) self.assertEqual(r.core.p.beta, cs["beta"]) self.assertIsNone(r.core.p.betaComponents) self.assertIsNone(r.core.p.betaDecayConstants) # Test that nothing is assigned if the beta is specified as a list # without a corresponding decay constants list. r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={ "beta": [0.0] * 6, }, ) getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) self.assertIsNone(r.core.p.beta) self.assertIsNone(r.core.p.betaComponents) self.assertIsNone(r.core.p.betaDecayConstants) # Test that 1 group beta components and decay constants can be assigned. # Since beta is a list, ensure that it's assigned to the `betaComponents` # parameter. r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={"beta": [0.0], "decayConstants": [1.0]}, ) getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) self.assertEqual(r.core.p.beta, sum(cs["beta"])) self.assertListEqual(list(r.core.p.betaComponents), cs["beta"]) self.assertListEqual(list(r.core.p.betaDecayConstants), cs["decayConstants"]) # Test that decay constants are not assigned without a corresponding # group-wise beta input. 
r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={"decayConstants": [1.0] * 6}, ) getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) self.assertIsNone(r.core.p.beta) self.assertIsNone(r.core.p.betaComponents) self.assertIsNone(r.core.p.betaDecayConstants) # Test that decay constants are not assigned without a corresponding # group-wise beta input. This also demonstrates that the total beta # is still assigned. r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={"decayConstants": [1.0] * 6, "beta": 0.0}, ) getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) self.assertEqual(r.core.p.beta, cs["beta"]) self.assertIsNone(r.core.p.betaComponents) self.assertIsNone(r.core.p.betaDecayConstants) # Test the demonstrates that None values are acceptable # and that nothing is assigned. r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={"decayConstants": None, "beta": None}, ) getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) self.assertEqual(r.core.p.beta, cs["beta"]) self.assertIsNone(r.core.p.betaComponents) self.assertIsNone(r.core.p.betaDecayConstants) # Test that an error is raised if the decay constants # and group-wise beta are inconsistent sizes with self.assertRaises(ValueError): r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={"decayConstants": [1.0] * 6, "beta": [0.0]}, ) getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) # Test that an error is raised if the decay constants # and group-wise beta are inconsistent sizes with self.assertRaises(ValueError): r = tests.getEmptyHexReactor() cs = self.__getModifiedSettings( customSettings={"decayConstants": [1.0] * 6, "beta": [0.0] * 5}, ) getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad) @staticmethod def 
__autoCorrectAllQueries(settingsValidator): """Force-Correct (resolve() to "YES") all queries in a Settings Validator.""" for query in settingsValidator: try: query.correction() except FileNotFoundError: # to make testing easier, let's ignore settings that require input files pass def test_neutronicsSettingsValidators(self): # grab the neutronics validators cs = settings.Settings() inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.assertEqual(len(sv), 8) # Test the Query: boundaries are now "Extrapolated", not "Generalized" cs = cs.modified(newSettings={CONF_BOUNDARIES: "Generalized"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_BOUNDARIES], "Extrapolated") # Test the Query: genXS are no longer True/False cs = cs.modified(newSettings={CONF_GEN_XS: "True"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_GEN_XS], "Neutron") cs = cs.modified(newSettings={CONF_GEN_XS: "False"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_GEN_XS], "") # Test the Query: CONF_GLOBAL_FLUX_ACTIVE are no longer True/False cs = cs.modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: "True"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_GLOBAL_FLUX_ACTIVE], "Neutron") cs = cs.modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: "False"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_GLOBAL_FLUX_ACTIVE], "") # Test the Query: try to migrate the Group Structure name cs = 
cs.modified(newSettings={CONF_GROUP_STRUCTURE: "armi45"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_GROUP_STRUCTURE], "ARMI45") cs = cs.modified(newSettings={CONF_GROUP_STRUCTURE: "bad_value"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_GROUP_STRUCTURE], "ANL33") # Test the Query: migrating some common shortened names for dpa XS sets cs = cs.modified(newSettings={CONF_DPA_XS_SET: "dpaHT9_33"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_DPA_XS_SET], "dpaHT9_ANL33_TwrBol") cs = cs.modified(newSettings={CONF_GRID_PLATE_DPA_XS_SET: "dpa_SS316"}) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_GRID_PLATE_DPA_XS_SET], "dpaSS316_ANL33_TwrBol") cs = cs.modified( newSettings={ CONF_RUN_TYPE: "Snapshots", CONF_LATTICE_PHYSICS_FREQUENCY: "BOC", } ) inspector = settingsValidation.Inspector(cs) sv = getNeutronicsSettingValidators(inspector) self.__autoCorrectAllQueries(sv) self.assertEqual(inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY], "firstCoupledIteration") ================================================ FILE: armi/physics/neutronics/tests/test_neutronicsSymmetry.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audit symmetry-aware parameters in neutronics. See Also -------- armi.testing.symmetryTesting """ from armi.physics.neutronics.parameters import getNeutronicsParameterDefinitions from armi.reactor.blocks import Block from armi.reactor.cores import Core from armi.testing import symmetryTesting class TestNeutronicsParamSym(symmetryTesting.BasicArmiSymmetryTestHelper): def setUp(self): pluginParameters = getNeutronicsParameterDefinitions() self.coreParamsToTest = pluginParameters[Core] self.blockParamsToTest = pluginParameters[Block] self.expectedSymmetricBlockParams = [ "mgFlux", "adjMgFlux", "lastMgFlux", "mgFluxGamma", "reactionRates", "power", "powerGamma", "powerNeutron", "powerGenerated", ] super().setUp() ================================================ FILE: armi/physics/safety/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Safety package for generic safety-related code.""" from armi import plugins class SafetyPlugin(plugins.ArmiPlugin): @staticmethod @plugins.HOOKIMPL def defineSettings(): """Define settings for the plugin.""" return [] ================================================ FILE: armi/physics/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/physics/tests/test_executers.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""This module provides tests for the generic Executers.""" import os import subprocess import unittest from armi.physics import executers from armi.reactor import geometry from armi.utils import directoryChangers class MockParams: def __init__(self): self.cycle = 1 self.timeNode = 2 class MockCore: def __init__(self): # just pick a random geomType self.geomType = geometry.GeomType.CARTESIAN self.symmetry = "full" self.p = MockParams() class MockReactor: def __init__(self): self.core = MockCore() self.o = None self.p = MockParams() class TestExecutionOptions(unittest.TestCase): def test_runningDirectoryPath(self): """ Test that the running directory path is set up correctly based on the case title and label provided. """ e = executers.ExecutionOptions(label=None) e.setRunDirFromCaseTitle(caseTitle="test") self.assertEqual(os.path.basename(e.runDir), "508bc04f-0") e = executers.ExecutionOptions(label="label") e.setRunDirFromCaseTitle(caseTitle="test") self.assertEqual(os.path.basename(e.runDir), "b07da087-0") e = executers.ExecutionOptions(label="label2") e.setRunDirFromCaseTitle(caseTitle="test") self.assertEqual(os.path.basename(e.runDir), "9c1c83cb-0") class TestExecuters(unittest.TestCase): def setUp(self): e = executers.ExecutionOptions(label=None) self.executer = executers.DefaultExecuter(e, MockReactor()) def test_collectInputsAndOutputs(self): """Verify that the executer can select to not copy back output.""" self.executer.options.inputFile = "test.inp" self.executer.options.outputFile = "test.out" self.executer.options.copyOutput = False inputs, outputs = self.executer._collectInputsAndOutputs() self.assertEqual("test.inp", inputs[0], "Input file was not successfully identified.") self.assertTrue(outputs == [], "Outputs were returned erroneously!") self.executer.options.copyOutput = True inputs, outputs = self.executer._collectInputsAndOutputs() self.assertEqual("test.inp", inputs[0], "Input file was not successfully identified.") 
self.assertEqual("test.out", outputs[0], "Output file was not successfully identified.") def test_updateRunDir(self): """ Verify that runDir is updated when TemporaryDirectoryChanger is used and not updated when ForcedCreationDirectoryChanger is used. """ self.assertEqual(self.executer.dcType, directoryChangers.TemporaryDirectoryChanger) self.executer._updateRunDir("updatedRunDir") self.assertEqual(self.executer.options.runDir, "updatedRunDir") # change directoryChanger type, runDir not updated self.executer.options.runDir = "runDir" self.executer.dcType = directoryChangers.ForcedCreationDirectoryChanger self.executer._updateRunDir("notThisString") self.assertEqual(self.executer.options.runDir, "runDir") def test_runExternalExecutable(self): """Run an external executable with an Executer. .. test:: Run an external executable with an Executer. :id: T_ARMI_EX :tests: R_ARMI_EX """ filePath = "test_runExternalExecutable.py" outFile = "tmp.txt" label = "printExtraStuff" class MockExecutionOptions(executers.ExecutionOptions): pass class MockExecuter(executers.Executer): def run(self, args): if self.options.label == label: subprocess.run(["python", filePath, "extra stuff"]) else: subprocess.run(["python", filePath, args]) with directoryChangers.TemporaryDirectoryChanger(): # build a mock external program (a little Python script) self.__makeALittleTestProgram(filePath, outFile) # make sure the output file doesn't exist yet self.assertFalse(os.path.exists(outFile)) # set up an executer for our little test program opts = MockExecutionOptions() exe = MockExecuter(opts, None) exe.run("") # make sure the output file exists now self.assertTrue(os.path.exists(outFile)) # run the executer with options testString = "some options" exe.run(testString) # make sure the output file exists now self.assertTrue(os.path.exists(outFile)) newTxt = open(outFile, "r").read() self.assertIn(testString, newTxt) # now prove the options object can affect the execution exe.options.label = label 
exe.run("") newerTxt = open(outFile, "r").read() self.assertIn("extra stuff", newerTxt) @staticmethod def __makeALittleTestProgram(filePath, outFile): """Helper method to write a tiny Python script. We need "an external program" for testing. """ txt = f"""import sys def main(): with open("{outFile}", "w") as f: f.write(str(sys.argv)) if __name__ == "__main__": main() """ with open(filePath, "w") as f: f.write(txt) ================================================ FILE: armi/physics/thermalHydraulics/__init__.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Thermal Hydraulics package.""" from armi.physics.thermalHydraulics.plugin import ThermalHydraulicsPlugin # noqa: F401 ================================================ FILE: armi/physics/thermalHydraulics/const.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# NOTE: This couldn't be packaged with the thermalHydraulics plugin because it # ends up getting imported by the ARMI framework before plugins get imported. ORIFICE_SETTING_ZONE_MAP = "zone map" ================================================ FILE: armi/physics/thermalHydraulics/parameters.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Parameter definitions for thermal hydraulic plugins.""" from armi.reactor import parameters from armi.reactor.assemblies import Assembly from armi.reactor.blocks import Block from armi.reactor.parameters import ParamLocation from armi.utils import units def getParameterDefinitions(): """Return ParameterDefinitionCollections for each appropriate ArmiObject.""" return {Assembly: _getAssemblyParams(), Block: _getBlockParams()} def _getAssemblyParams(): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(default=0.0, categories=["thermal hydraulics"]) as pb: pb.defParam( "THmassFlowRate", units=f"{units.KG}/{units.SECONDS}", description="The nominal assembly flow rate", categories=["broadcast"], location=ParamLocation.VOLUME_INTEGRATED, ) pb.defParam( "THcoolantInletT", units=units.DEGC, description="Assembly inlet temperature in C (cold temperature)", ) with pDefs.createBuilder( default=0.0, location=ParamLocation.AVERAGE, saveToDB=True, categories=["thermal hydraulics"], ) as pb: pb.defParam( "THdeltaPTotal", units=units.PASCALS, 
description="Total pressure difference across the assembly", categories=["broadcast"], ) return pDefs def _getBlockParams(): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(default=0.0, categories=["thInterface"], saveToDB=True) as pb: pb.defParam( "THcoolantOutletT", units=units.DEGC, description="Coolant temperature at the outlet of this block", location=ParamLocation.TOP, ) pb.defParam( "THmassFlowRate", units=f"{units.KG}/{units.SECONDS}", description="Mass flow rate", location=ParamLocation.VOLUME_INTEGRATED, ) pb.defParam( "THcoolantInletT", units=units.DEGC, description="The nominal average bulk coolant inlet temperature into the block.", location=ParamLocation.BOTTOM, ) pb.defParam( "THdeltaPTotal", units=units.PASCALS, description="Total pressure difference in a block", location=ParamLocation.AVERAGE, ) with pDefs.createBuilder(default=None, categories=["thermal hydraulics", "mongoose"], saveToDB=True) as pb: pb.defParam( "THcornTemp", units=units.DEGC, description="Mid-wall duct temperature for assembly corners", location=ParamLocation.BOTTOM | ParamLocation.CORNERS, ) pb.defParam( "THedgeTemp", units=units.DEGC, description="Mid-wall duct temperature for assembly edges", location=ParamLocation.BOTTOM | ParamLocation.EDGES, ) return pDefs ================================================ FILE: armi/physics/thermalHydraulics/plugin.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Generic Thermal/Hydraulics Plugin. Thermal/hydraulics is concerned with temperatures, flows, pressures, and heat transfer. """ from armi import interfaces, plugins ORDER = interfaces.STACK_ORDER.THERMAL_HYDRAULICS class ThermalHydraulicsPlugin(plugins.ArmiPlugin): """Plugin for thermal/hydraulics.""" @staticmethod @plugins.HOOKIMPL def defineParameters(): """Define additional parameters for the reactor data model.""" from armi.physics.thermalHydraulics import parameters return parameters.getParameterDefinitions() ================================================ FILE: armi/physics/thermalHydraulics/tests/__init__.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/physics/thermalHydraulics/tests/test_thermalHydraulicsSymmetry.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Audit symmetry-aware parameters in thermal hydraulics. See Also -------- armi.testing.symmetryTesting """ from armi.physics.thermalHydraulics.parameters import getParameterDefinitions from armi.reactor.blocks import Block from armi.testing import symmetryTesting class TestTHParamSymmetry(symmetryTesting.BasicArmiSymmetryTestHelper): def setUp(self): pluginParameters = getParameterDefinitions() self.blockParamsToTest = pluginParameters[Block] self.expectedSymmetricBlockParams = ["THmassFlowRate"] super().setUp() ================================================ FILE: armi/pluginManager.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Slightly customized version of the stock pluggy ``PluginManager``.""" import pluggy class ArmiPluginManager(pluggy.PluginManager): """ PluginManager implementation with ARMI-specific features. 
The main point of this subclass is to make it possible to detect when the plugin manager has been mutated, allowing for safe caching of expensive results derived from the set of registered plugins. This is done by exposing a counter that is incremented any time the set of registered plugins is modified. If a client caches any results derived from calling plugin hooks, caching this counter along with that data allows for cheaply testing that the cached results are still valid. """ def __init__(self, *args, **kwargs): pluggy.PluginManager.__init__(self, *args, **kwargs) self._counter = 0 @property def counter(self): return self._counter def register(self, *args, **kwargs): self._counter += 1 pluggy.PluginManager.register(self, *args, **kwargs) def unregister(self, *args, **kwargs): pluggy.PluginManager.unregister(self, *args, **kwargs) ================================================ FILE: armi/plugins.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Plugins allow various built-in or external functionality to be brought into the ARMI ecosystem. This module defines the hooks that may be defined within plugins. Plugins are ultimately incorporated into a :py:class:`armi.pluginManager.ArmiPluginManager`, which live inside of a :py:class:`armi.apps.App` object. 
The ``ArmiPluginManager`` is derived from the ``PluginManager`` class provided by the ``pluggy`` package, which provides a registry of known plugins. Rather than create one directly, we use the :py:func:`armi.plugins.getNewPluginManager()` function, which handles some of the setup for us. From a high-altitude perspective, the plugins provide numerous "hooks", which allow for ARMI to be extended in various ways. Some of these extensions are subtle and play a part in how certain ARMI components are initialized or defined. As such, it is necessary to register most plugins before some parts of ARMI are imported or exercised in a meaningful way. These requirements are in flux, and will ultimately constitute part of the specification of the ARMI plugin architecture. For now, to be safe, plugins should be registered as soon as possible. After forming the ``PluginManager``, the plugin hooks can be accessed through the ``hook`` attribute. E.g.:: >>> armi.getPluginManagerOrFail().hook.exposeInterfaces(cs=cs) Don't forget to use the keyword argument form for all arguments to hooks; ``pluggy`` requires them to enforce hook specifications. The :py:class:`armi.apps.App` class serves as the primary storage location of the PluginManager, and also provides some methods to get data out of the plugins more ergonomically than through the hooks themselves. 
Some things you may want to bring in via a plugin includes: - :py:mod:`armi.settings` and their validators - :py:mod:`armi.reactor.components` for custom geometry - :py:mod:`armi.reactor.flags` for custom reactor components - :py:mod:`armi.interfaces` to define new calculation sequences and interactions with new codes - :py:mod:`armi.reactor.parameters` to represent new physical state on the reactor - :py:mod:`armi.materials` for custom materials - Elements of the :py:mod:`armi.gui` - :py:mod:`armi.operators` for adding new operations on reactor models - :py:mod:`armi.cli` for adding new operations on input files Warning ------- The plugin system was developed to support improved collaboration. It is new and should be considered under development. The API is subject to change as the version of the ARMI framework approaches 1.0. Notes ----- Due to the nature of some of these components, there are a couple of restrictions on the order in which things can be imported (lest we endeavor to redesign them considerably). Examples: - Parameters: All parameter definitions must be present before any ``ArmiObject`` objects are instantiated. This is mostly by choice, but also makes the most sense, because the ``ParameterCollection`` s are instance attributes of an ``ArmiObject``, which in turn use ``Parameter`` objects as *class* attributes. We should know what class attributes we have before making instances. - Blueprints: Since blueprints should be extendable with new sections, we must also be able to provide new *class* attributes to extend their behavior. This is because blueprints use the yamlize package, which uses class attributes to define much of the class's behavior through metaclassing. Therefore, we need to be able to import all plugins *before* importing blueprints. Plugins are currently stateless. 
They do not have ``__init__()`` methods, and when they are registered with the PluginManager, the
PluginManager gets the Plugin's class object rather than an instance of that class. Also notice
that all of the hooks are ``@staticmethod``\ s. As a result, they can be called directly off of
the class object, and only have access to the state passed into them to perform their function.
This is a deliberate design choice to keep the plugin system simple and to preclude a large class
of potential bugs. At some point it may make sense to revisit this.

**Other customization points**

While the Plugin API is the main place for ARMI framework customization, there are several other
areas where ARMI may be extended or customized. These typically pre-dated the Plugin-based
architecture, and as the need arises may be migrated to here.

- Component types: Component types are registered dynamically through some metaclass magic, found
  in :py:class:`armi.reactor.components.component.ComponentType` and
  :py:class:`armi.reactor.composites.CompositeModelType`. Simply defining a new Component subclass
  should register it with the appropriate ARMI systems. While this is convenient, it does lead to
  potential issues, as the behavior of ARMI becomes sensitive to module import order and the like;
  the containing module needs to be imported before the registration occurs, which can be
  surprising.

- Interface input files: Interfaces used to be discovered dynamically, rather than explicitly as
  they are now in the :py:meth:`armi.plugins.ArmiPlugin.exposeInterfaces` plugin hook. Essentially
  they functioned as ersatz plugins. One of the ways that they would customize ARMI behavior is
  through the :py:meth:`armi.physics.interface.Interface.specifyInputs` static method, which is
  still used to determine inter-Case dependencies and support cloning and hashing Case inputs.
  Going forward, this approach will likely be deprecated in favor of a plugin hook.
- Fuel handler logic: The :py:class:`armi.physics.fuelCycle.fuelHandlers.FuelHandlerInterface` supports customization through the dynamic loading of fuel handler logic modules, based on user settings. This also predated the plugin infrastructure, and may one day be replaced with plugin-based fuel handler logic. """ from typing import TYPE_CHECKING, Callable, Dict, List, Union import pluggy from armi import pluginManager from armi.utils import flags if TYPE_CHECKING: from armi.reactor.composites import Composite from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger HOOKSPEC = pluggy.HookspecMarker("armi") HOOKIMPL = pluggy.HookimplMarker("armi") class ArmiPlugin: """ An ArmiPlugin exposes a collection of hooks that allow users to add a variety of things to their ARMI application: Interfaces, parameters, settings, flags, and much more. .. impl:: Plugins add code to the application through interfaces. :id: I_ARMI_PLUGIN :implements: R_ARMI_PLUGIN Each plugin has the option of implementing the ``exposeInterfaces`` method, and this will be used as a plugin hook to add one or more Interfaces to the ARMI Application. Interfaces can wrap external executables with nuclear modeling codes in them, or directly implement their logic in Python. But because Interfaces are Python code, they have direct access to read and write from ARMI's reactor data model. This Plugin to multiple Interfaces to reactor data model connection is the primary way that developers add code to an ARMI application and simulation. """ @staticmethod @HOOKSPEC def exposeInterfaces(cs) -> List: """ Function for exposing interface(s) to other code. .. impl:: Plugins can add interfaces to the operator. :id: I_ARMI_PLUGIN_INTERFACES :implements: R_ARMI_PLUGIN_INTERFACES This method takes in a Settings object and returns a list of Interfaces, the position of each Interface in the Interface stack, and a list of arguments to pass to the Interface when initializing it later. 
These Interfaces can then be used to add code to a simulation. Returns ------- list Tuples containing: - The insertion order to use when building an interface stack, - an implementation of the Interface class - a dictionary of kwargs to pass to an Operator when adding an instance of the interface class If no Interfaces should be active given the passed case settings, this should return an empty list. """ @staticmethod @HOOKSPEC def defineParameters() -> Dict: """ Define additional parameters for the reactor data model. .. impl:: Plugins can add parameters to the reactor data model. :id: I_ARMI_PLUGIN_PARAMS :implements: R_ARMI_PLUGIN_PARAMS Through this method, plugin developers can create new Parameters. A parameter can represent any physical property an analyst might want to track. And they can be added at any level of the reactor data model. Through this, the developers can extend ARMI and what physical properties of the reactor they want to calculate, track, and store to the database. .. impl:: Define an arbitrary physical parameter. :id: I_ARMI_PARAM0 :implements: R_ARMI_PARAM Through this method, plugin developers can create new Parameters. A parameter can represent any physical property an analyst might want to track. For example, through this method, a plugin developer can add a new thermodynamic property that adds a thermodynamic parameter to every block in the reactor. Or they could add a neutronics parameter to every fuel assembly. A parameter is quite generic. But these parameters will be tracked in the reactor data model, extend what developers can do with ARMI, and will be saved to the output database. Returns ------- dict Keys should be subclasses of ArmiObject, values being a ParameterDefinitionCollection should be added to the key's parameter definitions. Example ------- >>> pluginBlockParams = parameters.ParameterDefinitionCollection() >>> with pluginBlockParams.createBuilder() as pb: ... pb.defParam("plugBlkP1", ...) ... # ... 
>>> pluginAssemParams = parameters.ParameterDefinitionCollection() >>> with pluginAssemParams.createBuilder() as pb: ... pb.defParam("plugAsmP1", ...) ... # ... >>> return {blocks.Block: pluginBlockParams, assemblies.Assembly: pluginAssemParams} """ @staticmethod @HOOKSPEC def afterConstructionOfAssemblies(assemblies, cs) -> None: """ Function to call after a set of assemblies are constructed. This hook can be used to: - Verify that all assemblies satisfy constraints imposed by active interfaces and plugins - Apply modifications to Assemblies based on modeling options and active interfaces Implementers may alter the state of the passed Assembly objects. Returns ------- None """ @staticmethod @HOOKSPEC def onProcessCoreLoading(core, cs, dbLoad) -> None: """ Function to call whenever a Core object is newly built. This is usually used to set initial parameter values from inputs, either after constructing a Core from Blueprints, or after loading it from a database. """ @staticmethod @HOOKSPEC def beforeReactorConstruction(cs) -> None: """Function to call before the reactor is constructed.""" @staticmethod @HOOKSPEC def defineFlags() -> Dict[str, Union[int, flags.auto]]: """ Add new flags to the reactor data model, and the simulation. .. impl:: Plugins can define new, unique flags to the system. :id: I_ARMI_FLAG_EXTEND1 :implements: R_ARMI_FLAG_EXTEND This method allows a plugin developers to provide novel values for the Flags system. This method returns a dictionary mapping flag names to their desired numerical values. In most cases, no specific value is needed, one can be automatically generated using :py:class:`armi.utils.flags.auto`. (For more information, see :py:mod:`armi.reactor.flags`.) See Also -------- armi.reactor.flags Example ------- >>> def defineFlags(): ... return {"FANCY": armi.utils.flags.auto()} """ @staticmethod @HOOKSPEC def defineBlockTypes() -> List: """ Function for providing novel Block types from a plugin. 
This should return a list of tuples containing ``(compType, blockType)``, where ``blockType`` is a new ``Block`` subclass to register, and ``compType`` is the corresponding ``Component`` type that should activate it. For instance a ``HexBlock`` would be created when the largest component is a ``Hexagon``:: [(Hexagon, HexBlock)] Returns ------- list ``[(compType, BlockType), ...]`` """ @staticmethod @HOOKSPEC def defineAssemblyTypes() -> List: """ Function for providing novel Assembly types from a plugin. This should return a list of tuples containing ``(blockType, assemType)``, where ``assemType`` is a new ``Assembly`` subclass to register, and ``blockType`` is the corresponding ``Block`` subclass that, if present in the assembly, should trigger it to be of the corresponding ``assemType``. Warning ------- There is no guarantee that you will find subclassing ``Assembly`` useful. Example ------- .. code:: [ (HexBlock, HexAssembly), (CartesianBlock, CartesianAssembly), (ThRZBlock, ThRZAssembly), ] Returns ------- list List of new Block&Assembly types """ @staticmethod @HOOKSPEC def defineBlueprintsSections() -> List: """ Return new sections for the blueprints input method. This hook allows plugins to extend the blueprints functionality with their own sections. Returns ------- list (name, section, resolutionMethod) tuples, where: - name : The name of the attribute to add to the Blueprints class; this should be a valid Python identifier. - section : An instance of ``yaml.Attribute`` defining the data that is described by the Blueprints section. - resolutionMethod : A callable that takes a Blueprints object and case settings as arguments. This will be called like an unbound instance method on the passed Blueprints object to initialize the state of the new Blueprints section. Notes ----- Most of the sections that a plugin would want to add may be better served as settings, rather than blueprints sections. 
These sections were added to the blueprints mainly because the schema is more flexible, allowing namespaces and hierarchical collections of settings. Perhaps in the near future it would make sense to enhance the settings system to support these features, moving the blueprints extensions out into settings. This is discussed in more detail in T1671. """ @staticmethod @HOOKSPEC def defineEntryPoints() -> List: """ Return new entry points for the ARMI CLI. This hook allows plugins to provide their own ARMI entry points, which each serve as a command in the command- line interface. Returns ------- list class objects which derive from the base EntryPoint class. """ @staticmethod @HOOKSPEC def defineSettings() -> List: """ Define configuration settings for this plugin. .. impl:: Plugins can add settings to the run. :id: I_ARMI_PLUGIN_SETTINGS :implements: R_ARMI_PLUGIN_SETTINGS This hook allows plugin developers to provide their own configuration settings, which can participate in the :py:class:`armi.settings.caseSettings.Settings`. Plugins may provide entirely new settings to what are already provided by ARMI, as well as new options or default values for existing settings. For instance, the framework provides a ``neutronicsKernel`` setting for selecting which global physics solver to use. Since we wish to enforce that the user specify a valid kernel, the settings validator will check to make sure that the user's requested kernel is among the available options. If a plugin were to provide a new neutronics kernel (let's say MCNP), it should also define a new option to tell the settings system that ``"MCNP"`` is a valid option. Returns ------- list A list of Settings, Options, or Defaults to be registered. 
See Also -------- armi.physics.neutronics.NeutronicsPlugin.defineSettings armi.settings.setting.Setting armi.settings.setting.Option armi.settings.setting.Default """ return [] @staticmethod @HOOKSPEC def defineSettingsValidators(inspector) -> List: """ Define the high-level settings input validators by adding them to an inspector. Parameters ---------- inspector : :py:class:`armi.settings.settingsValidation.Inspector` instance The inspector to add queries to. See note below, this is not ideal. Notes ----- These are higher-level than the input-level SCHEMA defined in :py:meth:`defineSettings` and are intended to be used for more complex cross-plugin info. We would prefer to not manipulate objects passed in directly, but rather have the inspection happen in a measurable hook. This would help find misbehaving plugins. See Also -------- armi.settings.settingsValidation.Inspector : Runs the queries Returns ------- list Query objects to attach """ return [] @staticmethod @HOOKSPEC def defineCaseDependencies(case, suite): r""" Function for defining case dependencies. Some Cases depend on the results of other ``Case``\ s in the same ``CaseSuite``. Which dependencies exist, and how they are discovered depends entirely on the type of analysis and active interfaces, etc. This function allows a plugin to inspect settings and declare dependencies between the passed ``case`` and any other cases in the passed ``suite``. Parameters ---------- case : Case The specific case for which we want to find dependencies. suite : CaseSuite A CaseSuite object to which the Case and other potential dependencies belong. Returns ------- dependencies : set of Cases This should return a set containing ``Case`` objects that are considered dependencies of the passed ``case``. They should be members of the passed ``suite``. """ @staticmethod @HOOKSPEC def defineGuiWidgets() -> List: """ Define which settings should go in the GUI. 
Rather than making widgets here, this simply returns metadata as a nested dictionary saying which tab to put which settings on, and a little bit about how to group them. Returns ------- widgetData : list of dict Each dict is nested. First level contains the tab name (e.g. 'Global Flux'). Second level contains a box name. Third level contains help and a list of setting names See Also -------- armi.gui.submitter.layout.abstractTab.AbstractTab.addSectionsFromPlugin : uses data structure Example ------- >>> widgets = { ... 'Global Flux': { ... 'MCNP Solver Settings': { ... 'help': "Help message" ... 'settings': [ ... "mcnpAddTallies", ... "useSrctp", ... ] ... } ... } ... } """ @staticmethod @HOOKSPEC def getOperatorClassFromRunType(runType: str): """Return an Operator subclass if the runType is recognized by this plugin.""" @staticmethod @HOOKSPEC def defineParameterRenames() -> Dict: """ Return a mapping from old parameter names to new parameter names. Occasionally, it may become necessary to alter the name of an existing parameter. This can lead to frustration when attempting to load from old database files that use the previous name. This hook allows a plugin to define mappings from the old name to the new name, allowing the old database to be read in and translated to the new parameter name. The following rules are followed when applying these renames: * When state is loaded from a database, if the parameter name in the database file is found in the rename dictionary, it will be mapped to that renamed parameter. * If the renamed parameter is found in the renames, then it will be mapped again to new parameter name. This process is repeated until there are no more renames left. This allows for parameters to be renamed multiple times, and for a database from several generations prior to still be readable, so long as the history of renames is intact. 
        * If at the end of the above process, the parameter name is not a defined parameter for
          the appropriate ``ArmiObject`` type, an exception is raised.
        * If any of the ``renames`` keys match any currently-defined parameters, an exception is
          raised.
        * If any of the ``renames`` collide with another plugin's ``renames``, an exception is
          raised.

        Returns
        -------
        renames : dict
            Keys should be an old parameter name, where the corresponding values are the new
            parameter name.

        Example
        -------
        The following would allow databases with values for either ``superOldParam`` or
        ``oldParam`` to be read into ``currentParam``::

            return {"superOldParam": "oldParam", "oldParam": "currentParam"}
        """

    @staticmethod
    @HOOKSPEC
    def mpiActionRequiresReset(cmd) -> bool:
        """
        Flag indicating when a reactor reset is required.

        Commands are sent through operators either as strings (old) or as MpiActions (newer).
        After some are sent, the reactor must be reset. This hook says when to reset. The reset
        operation is a (arguably suboptimal) response to some memory issues in very large and
        long-running cases.

        Parameters
        ----------
        cmd : str or MpiAction
            The ARMI mpi command being sent

        Returns
        -------
        bool

        See Also
        --------
        armi.operators.operatorMPI.OperatorMPI.workerOperate : Handles these flags
        """

    @staticmethod
    @HOOKSPEC
    def getReportContents(r, cs, report, stage, blueprint) -> None:
        """
        To generate a report.

        Parameters
        ----------
        r : Reactor
        cs : Settings
        report : ReportContent
            Report object to add contents to
        stage : ReportStage
            begin/standard/or end (stage of the report for when inserting BOL vs. EOL content)
        blueprint : Blueprint, optional
            for a reactor (if None, only partial contents created)
        """

    @staticmethod
    @HOOKSPEC
    def defineSystemBuilders() -> Dict[str, Callable[[str], "Composite"]]:
        """
        Convert a user-string from the systems section into a valid composite builder.

        Parameters
        ----------
        name : str
            Name of the system type defined by the user, e.g., ``"core"``

        Returns
        -------
        dict
            Dictionary that maps a grid type from the input file (e.g., ``"core"``) to a function
            responsible for building a grid of that type, e.g.,

            .. code::

                {
                    "core": armi.reactor.reactors.Core,
                    "excore": armi.reactor.excoreStructure.ExcoreStructure,
                    "sfp": armi.reactor.spentFuelPool.SpentFuelPool,
                }

        Notes
        -----
        The default :class:`~armi.reactor.ReactorPlugin` defines a ``"core"`` lookup and a
        ``"sfp"`` lookup, triggered to run after all other hooks have been run.
        """

    @staticmethod
    @HOOKSPEC(firstresult=True)
    def getAxialExpansionChanger() -> type["AxialExpansionChanger"]:
        """Produce the class responsible for performing axial expansion.

        Plugins can provide this hook to override or negate axial expansion. Will be used during
        initial construction of the core and assemblies, and can be a class to perform custom
        axial expansion routines. The first object returned that is not ``None`` will be used.

        Plugins are encouraged to add the ``tryfirst=True`` arguments to their ``HOOKIMPL``
        invocations to make sure their specific implementations are earlier in the hook call
        sequence.

        Returns
        -------
        type of :class:`armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger`

        Notes
        -----
        This hook **should not** provide an instance of the class. The construction of the changer
        will be handled by applications and plugins that need it.

        This hook should only be provided by one additional plugin in your application. Otherwise
        the `order of hook execution
        <https://pluggy.readthedocs.io/en/stable/index.html#call-time-order>`_ may not provide the
        behavior you expect.

        Examples
        --------
        >>> class MyPlugin(ArmiPlugin):
        ...     @staticmethod
        ...     @HOOKIMPL(tryfirst=True)
        ...     def getAxialExpansionChanger():
        ...         from myproject.physics import BespokeAxialExpansion
        ...
        ...         return BespokeAxialExpansion
        """


class UserPlugin(ArmiPlugin):
    """
    A variation on the ArmiPlugin meant to be created at runtime, from the ``userPlugins`` setting.

    This is obviously a more limited use-case than the usual ArmiPlugin, as those are meant to be
    defined at import time, instead of run time. As such, this class has some built-in tooling to
    limit how these run-time plugins are used. They are meant to be more limited.

    Notes
    -----
    The usual ArmiPlugin is much more flexible, if the UserPlugin does not support what you want
    to do, just use an ArmiPlugin.
    """

    def __init__(self, *args, **kwargs):
        ArmiPlugin.__init__(self, *args, **kwargs)
        # Immediately verify that this run-time plugin does not try to define parameters,
        # renames, or settings (all of which must exist before settings are read).
        self.__enforceLimitations()

    def __enforceLimitations(self):
        """
        This method enforces that UserPlugins are more limited than regular ArmiPlugins.

        UserPlugins are different from regular plugins in that they can be defined during a run,
        and as such, we want to limit how flexible they are, so we can correctly corral their side
        effects during a run.
        """
        if issubclass(self.__class__, UserPlugin):
            # NOTE(review): these guards use ``assert`` and are therefore stripped when Python
            # runs with -O; a hard ``raise`` would be more robust — confirm before changing,
            # since callers/tests may expect AssertionError.
            assert len(self.__class__.defineParameters()) == 0, (
                "UserPlugins cannot define parameters, consider using an ArmiPlugin."
            )
            assert len(self.__class__.defineParameterRenames()) == 0, (
                "UserPlugins cannot define parameter renames, consider using an ArmiPlugin."
            )
            assert len(self.__class__.defineSettings()) == 0, (
                "UserPlugins cannot define new Settings, consider using an ArmiPlugin."
            )

        # NOTE: These are the methods that we are staunchly _not_ allowing people to change in
        # this class. If you need these, please use a regular ArmiPlugin.
        # (Binding instance attributes here shadows any subclass overrides of these hooks.)
        self.defineParameterRenames = lambda: {}
        self.defineSettings = lambda: []
        self.defineSettingsValidators = lambda: []

    @staticmethod
    @HOOKSPEC
    def defineParameters():
        """
        Prevents defining additional parameters.

        .. warning:: This is not overridable.

        Notes
        -----
        It is a designed limitation of user plugins that they not define parameters.
        Parameters are defined when the App() is read in, which is LONG before the settings file
        has been read. So the parameters are defined before we discover the user plugin. If this
        is a feature you need, just use an ArmiPlugin.
        """
        return {}

    @staticmethod
    @HOOKSPEC
    def defineParameterRenames():
        """
        Prevents parameter renames.

        Warning
        -------
        This is not overridable.

        Notes
        -----
        It is a designed limitation of user plugins that they not generate parameter renames.
        Parameters are defined when the App() is read in, which is LONG before the settings file
        has been read. So the parameters are defined before we discover the user plugin. If this
        is a feature you need, just use a normal Plugin.
        """
        return {}

    @staticmethod
    @HOOKSPEC
    def defineSettings():
        """
        Prevents new settings.

        Warning
        -------
        This is not overridable.

        Notes
        -----
        It is a designed limitation of user plugins that they not define new settings, so that
        they are able to be added to the plugin stack during run time.
        """
        return []

    @staticmethod
    @HOOKSPEC
    def defineSettingsValidators(inspector):
        """
        Prevents new settings validators.

        .. warning:: This is not overridable.

        Notes
        -----
        It is a designed limitation of user plugins that they not define new settings, so that
        they are able to be added to the plugin stack during run time.
        """
        return []


def getNewPluginManager() -> pluginManager.ArmiPluginManager:
    """Return a new plugin manager with all of the hookspecs pre-registered."""
    pm = pluginManager.ArmiPluginManager("armi")
    pm.add_hookspecs(ArmiPlugin)
    return pm


def collectInterfaceDescriptions(mod, cs):
    """
    Adapt old-style ``describeInterfaces`` to the new plugin interface.

    Old describeInterfaces implementations would return an interface class and kwargs for adding
    to an operator. Now we expect an ORDER as well. This takes a module and case settings and
    staples the module's ORDER attribute to the tuple and checks to make sure that a None is
    replaced by an empty list.
""" from armi import interfaces val = mod.describeInterfaces(cs) if val is None: return [] if isinstance(val, list): return [interfaces.InterfaceInfo(mod.ORDER, klass, kwargs) for klass, kwargs in val] klass, kwargs = val return [interfaces.InterfaceInfo(mod.ORDER, klass, kwargs)] class PluginError(RuntimeError): """ Special exception class for use when a plugin appears to be non-conformant. These should always come from some form of programmer error, and indicates conditions such as: - A plugin improperly implementing a hook, when possible to detect. - A collision between components provided by plugins (e.g. two plugins providing the same Blueprints section) """ ================================================ FILE: armi/reactor/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" The reactor package houses the data model used in ARMI to represent the reactor during its simulation. It contains definitions of the reactor, assemblies, blocks, components, etc. See :doc:`/developer/index`. 
""" from typing import TYPE_CHECKING, Callable, Dict, Union from armi import materials, plugins if TYPE_CHECKING: from armi.reactor.excoreStructure import ExcoreStructure from armi.reactor.reactors import Core from armi.reactor.spentFuelPool import SpentFuelPool class ReactorPlugin(plugins.ArmiPlugin): """Plugin exposing built-in reactor components, blocks, assemblies, etc.""" @staticmethod @plugins.HOOKIMPL def beforeReactorConstruction(cs) -> None: """Just before reactor construction, update the material "registry" with user settings, if it is set. Often it is set by the application. """ from armi.settings.fwSettings.globalSettings import ( CONF_MATERIAL_NAMESPACE_ORDER, ) if cs[CONF_MATERIAL_NAMESPACE_ORDER]: materials.setMaterialNamespaceOrder(cs[CONF_MATERIAL_NAMESPACE_ORDER]) @staticmethod @plugins.HOOKIMPL def defineBlockTypes(): from armi.reactor import blocks from armi.reactor.components.basicShapes import Hexagon, Rectangle from armi.reactor.components.volumetricShapes import RadialSegment return [ (Rectangle, blocks.CartesianBlock), (RadialSegment, blocks.ThRZBlock), (Hexagon, blocks.HexBlock), ] @staticmethod @plugins.HOOKIMPL def defineAssemblyTypes(): from armi.reactor.assemblies import CartesianAssembly, HexAssembly, ThRZAssembly from armi.reactor.blocks import CartesianBlock, HexBlock, ThRZBlock return [ (HexBlock, HexAssembly), (CartesianBlock, CartesianAssembly), (ThRZBlock, ThRZAssembly), ] @staticmethod @plugins.HOOKIMPL(trylast=True) def defineSystemBuilders() -> Dict[str, Callable[[str], Union["Core", "ExcoreStructure", "SpentFuelPool"]]]: from armi.reactor.excoreStructure import ExcoreStructure from armi.reactor.reactors import Core from armi.reactor.spentFuelPool import SpentFuelPool return { "core": Core, "excore": ExcoreStructure, "sfp": SpentFuelPool, } @staticmethod @plugins.HOOKIMPL(trylast=True) def getAxialExpansionChanger(): from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger return AxialExpansionChanger 
================================================
FILE: armi/reactor/assemblies.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Assemblies are collections of Blocks.

Generally, Blocks are stacked from bottom to top.
"""

import copy
import math
import pickle
from collections.abc import Iterable
from random import randint
from typing import ClassVar, Optional, Type

import numpy as np
from scipy import interpolate

from armi import runLog
from armi.materials.material import Fluid
from armi.reactor import assemblyParameters, blocks, composites, grids
from armi.reactor.flags import Flags, TypeSpec
from armi.reactor.parameters import ParamLocation
from armi.reactor.spentFuelPool import SpentFuelPool


class Assembly(composites.Composite):
    """
    A single assembly in a reactor made up of blocks built from the bottom up.

    Append blocks to add them up. Index blocks with 0 being the bottom.
    """

    # Subclasses may restrict which Block subclass can live in this Assembly (None = any Block).
    _BLOCK_TYPE: ClassVar[Optional[Type[blocks.Block]]] = None

    pDefs = assemblyParameters.getAssemblyParameterDefinitions()

    # For assemblies coming in from the database, waiting to be loaded to their old position.
    # This is a necessary distinction, since we need to make sure that a bunch of fuel management
    # stuff doesn't treat its re-placement into the core as a new move
    DATABASE = "database"
    # Location-label constants used by getLocation()/lastLocationLabel below.
    LOAD_QUEUE = "LoadQueue"
    SPENT_FUEL_POOL = "SFP"
    DELETE = "Delete"
    NOT_CREATED_YET = "NotCreatedYet"  # used in assembly location history tracking
    NOT_IN_CORE = [LOAD_QUEUE, SPENT_FUEL_POOL, DELETE, NOT_CREATED_YET]

    def __init__(self, typ, assemNum=None):
        """
        Parameters
        ----------
        typ : str
            Name of assembly design (e.g. the name from the blueprints input file).
        assemNum : int, optional
            The unique ID number of this assembly. If None is provided, we generate a random int.
            This makes it clear that it is a placeholder. When an assembly with a negative ID is
            placed into a Reactor, it will be given a new, positive ID.
        """
        # If no assembly number is provided, generate a random number as a placeholder.
        if assemNum is None:
            assemNum = randint(-9000000000000, -1)
        name = self.makeNameFromAssemNum(assemNum)
        composites.Composite.__init__(self, name)
        self.p.assemNum = assemNum
        self.setType(typ)
        self._current = 0  # for iterating
        self.p.buLimit = self.getMaxParam("buLimit")
        self.lastLocationLabel = self.LOAD_QUEUE
        self.p.orientation = np.array((0.0, 0.0, 0.0))
        self.p.ringPosHist = []

    def __repr__(self):
        msg = "<{typeName} Assembly {name} at {loc}>".format(
            name=self.getName(), loc=self.getLocation(), typeName=self.getType()
        )
        return msg

    def __lt__(self, other):
        """
        Compare two assemblies by location.

        Notes
        -----
        As with other ArmiObjects, Assemblies are sorted based on location.

        Assemblies are more permissive in the grid consistency checks to accommodate situations
        where assemblies might be children of the same Core, but not in the same grid as each
        other (like in the spent fuel pool). In these situations, the operator returns ``False``.

        This behavior may lead to some strange sorting behavior when two or more Assemblies are
        being compared that do not live in the same grid.
        See Also
        --------
        armi.reactor.composites.ArmiObject.__lt__
        """
        try:
            return composites.ArmiObject.__lt__(self, other)
        except ValueError:
            # Different grids (e.g. core vs. SFP): comparison is undefined, report not-less-than.
            return False

    def renameBlocksAccordingToAssemblyNum(self):
        """
        Updates the names of all blocks to comply with the assembly number.

        Useful after an assembly number/name has been loaded from a snapshot and you want to
        update all block names to be consistent.

        It may be better to store block numbers on each block as params. A database that can hold
        strings would be even better.

        Notes
        -----
        You must run armi.reactor.reactors.Reactor.regenAssemblyLists after calling this.
        """
        assemNum = self.getNum()
        for bi, b in enumerate(self):
            b.setName(b.makeName(assemNum, bi))

    @staticmethod
    def makeNameFromAssemNum(assemNum):
        """
        Set the name of this assembly (and the containing blocks) based on an assemNum.

        AssemNums are like serial numbers for assemblies.
        """
        # e.g. assemNum 12 -> "A0012" (zero-padded to 4 digits; negative nums keep their sign)
        return "A{0:04d}".format(int(assemNum))

    def renumber(self, newNum):
        """
        Change the assembly number of this assembly.

        And handle the downstream impacts of changing the name of this Assembly and all of the
        Blocks within this Assembly.

        Parameters
        ----------
        newNum : int
            The new Assembly number.
        """
        self.p.assemNum = int(newNum)
        self.name = self.makeNameFromAssemNum(self.p.assemNum)
        self.renameBlocksAccordingToAssemblyNum()

    def makeUnique(self):
        """
        Function to make an assembly unique by getting a new assembly number.

        This also adjusts the assembly's blocks IDs. This is necessary when using ``deepcopy`` to
        get a unique ``assemNum`` since a deepcopy implies it would otherwise have been the same
        object.
        """
        # Default to a random negative assembly number (unique enough)
        self.p.assemNum = randint(-9000000000000, -1)
        self.renumber(self.p.assemNum)

    def _checkPotentialChild(self, obj: blocks.Block, action: str = "add"):
        """An internal helper method to ensure the Block type is valid for this Assembly."""
        if self._BLOCK_TYPE is None or isinstance(obj, self._BLOCK_TYPE):
            # this is the right Block, pass on
            return

        # if we got here, this Block is not the right type for this Assembly
        msg = f"Cannot {action} {obj} to this Assembly, it is not a {self._BLOCK_TYPE}."
        runLog.error(msg)
        raise TypeError(msg)

    def add(self, obj: blocks.Block):
        """
        Add an object to this assembly.

        The simple act of adding a block to an assembly fully defines the location of the block
        in 3-D.

        .. impl:: Assemblies are made up of type Block.
            :id: I_ARMI_ASSEM_BLOCKS
            :implements: R_ARMI_ASSEM_BLOCKS

            Adds a unique Block to the top of the Assembly. If the Block already exists in the
            Assembly, an error is raised in :py:meth:`armi.reactor.composites.Composite.add`. The
            spatialLocator of the Assembly is updated to account for the new Block. In
            ``reestablishBlockOrder``, the Assembly spatialGrid is reinitialized and Block-wise
            spatialLocator and name objects are updated. The axial mesh and other Block geometry
            parameters are updated in ``calculateZCoords``.
""" self._checkPotentialChild(obj, "add") composites.Composite.add(self, obj) obj.spatialLocator = self.spatialGrid[0, 0, len(self) - 1] # more work is needed, make a new mesh self.reestablishBlockOrder() self.calculateZCoords() def insert(self, index, obj): """Insert an object at a given index position with the assembly.""" self._checkPotentialChild(obj, "insert") composites.Composite.insert(self, index, obj) obj.spatialLocator = self.spatialGrid[0, 0, index] def moveTo(self, locator): """Move an assembly somewhere else.""" oldSymmetryFactor = self.getSymmetryFactor() composites.Composite.moveTo(self, locator) if self.lastLocationLabel != self.DATABASE: self.p.numMoves += 1 self.p.daysSinceLastMove = 0.0 self.parent.childrenByLocator[locator] = self # symmetry may have changed (either moving on or off of symmetry line) self.clearCache() self.scaleParamsToNewSymmetryFactor(oldSymmetryFactor) def scaleParamsToNewSymmetryFactor(self, oldSymmetryFactor): scalingFactor = oldSymmetryFactor / self.getSymmetryFactor() if scalingFactor == 1: return blockVolIntegratedParamsToScale = self[0].p.paramDefs.atLocation(ParamLocation.VOLUME_INTEGRATED) for b in self: self._scaleParams(b, blockVolIntegratedParamsToScale, scalingFactor) assemblyVolIntegratedParamsToScale = self.p.paramDefs.atLocation(ParamLocation.VOLUME_INTEGRATED) self._scaleParams(self, assemblyVolIntegratedParamsToScale, scalingFactor) @staticmethod def _scaleParams(obj, params, scalingFactor): for param in params: name = param.name if obj.p[name] is None or isinstance(obj.p[name], str): continue elif isinstance(obj.p[name], Iterable): obj.p[name] = [value * scalingFactor for value in obj.p[name]] else: # numpy array or other obj.p[name] = obj.p[name] * scalingFactor def getNum(self): """Return unique integer for this assembly.""" return int(self.p.assemNum) def getLocation(self): """ Get string label representing this object's location. .. impl:: Assembly location is retrievable. 
:id: I_ARMI_ASSEM_POSI0 :implements: R_ARMI_ASSEM_POSI This method returns a string label indicating the location of an Assembly. There are three options: 1) the Assembly is not within a Core object and is interpreted as in the "load queue"; 2) the Assembly is within the spent fuel pool; 3) the Assembly is within a Core object, so it has a physical location within the Core. """ # just use ring and position, not axial (which is 0) if not self.parent: return self.LOAD_QUEUE elif isinstance(self.parent, SpentFuelPool): return self.SPENT_FUEL_POOL return self.parent.spatialGrid.getLabel(self.spatialLocator.getCompleteIndices()[:2]) def coords(self): """Return the location of the assembly in the plane using cartesian global coordinates. .. impl:: Assembly coordinates are retrievable. :id: I_ARMI_ASSEM_POSI1 :implements: R_ARMI_ASSEM_POSI In this method, the spatialLocator of an Assembly is leveraged to return its physical (x,y) coordinates in cm. """ x, y, _z = self.spatialLocator.getGlobalCoordinates() return (x, y) def getArea(self): """ Return the area of the assembly by looking at its first block. The assumption is that all blocks in an assembly have the same area. Calculate the total assembly volume in cm^3. """ try: return self[0].getArea() except IndexError: runLog.warning(f"{self} has no blocks and therefore no area.") return None def getVolume(self): """Calculate the total assembly volume in cm^3.""" return self.getArea() * self.getTotalHeight() def getPinPlenumVolumeInCubicMeters(self) -> float: """ Return the total volume of the plenum for an assembly in m^3. Notes ----- If there is no plenum blocks in the assembly, a plenum volume of 0.0 is returned. Warning ------- This is a bit design-specific for pinned assemblies. Returns ------- float: Total plenum volume for an assembly. 
""" plenumVolume = 0.0 for b in self.iterChildrenWithFlags(Flags.PLENUM): length = b.getHeight() for c in b.iterChildrenWithFlags(Flags.CLAD): cladId = c.getDimension("id") plenumVolume += math.pi * (cladId / 2.0) ** 2.0 * length # convert vol from cm^3 to m^3 plenumVolume *= 1e-6 return plenumVolume def getAveragePlenumTemperature(self): """Return the average of the plenum block outlet temperatures.""" plenumBlocks = self.iterChildrenWithFlags(Flags.PLENUM) plenumTemps = [b.p.THcoolantOutletT for b in plenumBlocks] # no plenum blocks, use the top block of the assembly for plenum temperature if not plenumTemps: runLog.warning("No plenum blocks exist. Using outlet coolant temperature.") plenumTemps = [self[-1].p.THcoolantOutletT] return sum(plenumTemps) / len(plenumTemps) def adjustResolution(self, refA): """Split the blocks in this assembly to have the same mesh structure as refA.""" newBlockStack = [] newBlocks = 0 # number of new blocks we've added so far. for i, b in enumerate(self): refB = refA[i + newBlocks] # pick the block that is "supposed to" line up with refB. if refB.getHeight() == b.getHeight(): # these blocks line up newBlockStack.append(b) continue elif refB.getHeight() > b.getHeight(): raise RuntimeError( "can't split {0} ({1}cm) into larger blocks to match ref block {2} ({3}cm)".format( b, b.getHeight(), refB, refB.getHeight() ) ) else: # b is larger than refB. Split b up by splitting it into several smaller blocks of # refBs heightToChop = b.getHeight() heightChopped = 0.0 while abs(heightChopped - heightToChop) > 1e-5: # stop when they are equal. floating point. 
# update which ref block we're on (does nothing on the first pass) refB = refA[i + newBlocks] newB = copy.deepcopy(b) newB.setHeight(refB.getHeight()) # make block match ref mesh newBlockStack.append(newB) heightChopped += refB.getHeight() newBlocks += 1 runLog.important(f"Added a new block {newB} of height {newB.getHeight()}") runLog.important(f"Chopped {heightChopped} of {heightToChop}") # subtract one because we eliminated the original b completely. newBlocks -= 1 self.removeAll() self.spatialGrid = grids.AxialGrid.fromNCells(len(newBlockStack)) for b in newBlockStack: self.add(b) self.reestablishBlockOrder() def getAxialMesh(self, centers=False, zeroAtFuel=False): """ Make a list of the block z-mesh tops from bottom to top in cm. Parameters ---------- centers : bool, optional Return centers instead of tops. If centers and zeroesAtFuel the zero point will be center of first fuel. zeroAtFuel : bool, optional If true will make the (bottom or center depending on centers) of the first fuel block be the zero point instead of the bottom of the first block. See Also -------- armi.reactor.assemblies.Assembly.makeAxialSnapList : makes index-based lookup of axial mesh armi.reactor.reactors.Reactor.findAllAxialMeshPoints : gets a global list of all of these, plus finer res. """ bottom = 0.0 meshVals = [] fuelIndex = None for bi, b in enumerate(self): top = bottom + b.getHeight() if centers: center = bottom + (top - bottom) / 2.0 meshVals.append(center) else: meshVals.append(top) bottom = top if fuelIndex is None and b.isFuel(): fuelIndex = bi if zeroAtFuel: # adjust the mesh to put zero at the first fuel block. zeroVal = meshVals[fuelIndex] meshVals = [mv - zeroVal for mv in meshVals] return meshVals def calculateZCoords(self): """ Set the center z-coords of each block and the params for axial expansion. 
        See Also
        --------
        reestablishBlockOrder
        """
        bottom = 0.0
        mesh = [bottom]
        for bi, b in enumerate(self):
            # Block center, bottom, and top elevations in cm, accumulated from the bottom up.
            b.p.z = bottom + (b.getHeight() / 2.0)
            b.p.zbottom = bottom
            top = bottom + b.getHeight()
            b.p.ztop = top
            mesh.append(top)
            bottom = top
            b.spatialLocator = self.spatialGrid[0, 0, bi]

        # also update the 1-D axial assembly level grid (this is intended to replace z,
        # ztop, zbottom, etc.)

        # length of this is numBlocks + 1
        bounds = list(self.spatialGrid._bounds)
        bounds[2] = np.array(mesh)
        self.spatialGrid._bounds = tuple(bounds)

    def getTotalHeight(self, typeSpec=None):
        """
        Determine the height of this assembly in cm.

        Parameters
        ----------
        typeSpec : See :py:meth:`armi.composites.Composite.hasFlags`

        Returns
        -------
        height : float
            the height in cm
        """
        h = 0.0
        for b in self:
            if b.hasFlags(typeSpec):
                h += b.getHeight()
        return h

    def getHeight(self, typeSpec=None):
        # Alias for getTotalHeight, kept for interface symmetry with Blocks.
        return self.getTotalHeight(typeSpec)

    def getReactiveHeight(self, enrichThresh=0.02):
        """
        Returns the zBottom and total height in cm that has fissile enrichment over enrichThresh.
        """
        reactiveH = 0.0
        zBot = None
        z = 0.0
        for b in self:
            h = b.getHeight()
            # Count blocks with non-trivial fissile mass and enrichment above the threshold.
            if b.getFissileMass() > 0.01 and b.getFissileMassEnrich() > enrichThresh:
                if zBot is None:
                    zBot = z
                reactiveH += h
            z += h

        return zBot, reactiveH

    def getElevationBoundariesByBlockType(self, blockType=None):
        """
        Gets of list of elevations, ordered from bottom to top of all boundaries of the block of
        specified type.

        Useful for determining location of the top of the upper grid plate or active fuel, etc by
        using [0] to get the lowest boundary and [-1] to get highest

        Notes
        -----
        The list will have duplicates when blocks of the same type share a boundary. this is
        intentional. It makes it easy to grab pairs off the list and know that the first item in
        a pair is the bottom boundary and the second is the top.

        Parameters
        ----------
        blockType : str
            Block type to find. empty accepts all

        Returns
        -------
        elevation : list of floats
            Every float in the list is an elevation of a block boundary for the block type
            specified (has duplicates)
        """
        elevation, elevationsWithBlockBoundaries = 0.0, []

        # loop from bottom to top, stopping at the first instance of blockType
        for b in self:
            if b.hasFlags(blockType):
                elevationsWithBlockBoundaries.append(elevation)  # bottom Boundary
                elevationsWithBlockBoundaries.append(elevation + b.getHeight())  # top Boundary
            elevation += b.getHeight()

        return elevationsWithBlockBoundaries

    def getElevationsMatchingParamValue(self, param, value):
        """
        Return the elevations (z-coordinates) where the specified param takes the specified
        value.

        Uses linear interpolation, assuming params correspond to block centers

        Parameters
        ----------
        param : str
            Name of param to try and match

        value: float

        Returns
        -------
        heights : list
            z-coordinates where the specified param takes the specified value
        """
        heights = []

        # loop from bottom to top
        for i in range(0, len(self) - 1):
            # Signed distance of each adjacent block's param value from the target.
            diff1 = self[i].p[param] - value
            diff2 = self[i + 1].p[param] - value
            z1 = (self[i].p.zbottom + self[i].p.ztop) / 2
            z2 = (self[i + 1].p.zbottom + self[i + 1].p.ztop) / 2
            if diff1 == diff2:  # params are flat
                if diff1 != 0:  # no match
                    continue
                else:
                    if z1 not in heights:
                        heights.append(z1)
                    if z2 not in heights:
                        heights.append(z2)
            # check if param is bounded by two adjacent blocks
            elif diff1 * diff2 <= 0:
                # Linear interpolation between the two block centers to find the crossing.
                tie = diff1 / (diff1 - diff2)
                z = z1 + tie * (z2 - z1)
                if z not in heights:  # avoid duplicates
                    heights.append(z)

        return heights

    def getAge(self):
        """Gets a height-averaged residence time of this assembly in days."""
        at = 0.0
        for b in self:
            at += b.p.residence * b.getHeight()

        return at / self.getTotalHeight()

    def makeAxialSnapList(self, refAssem=None, refMesh=None, force=False):
        """
        Creates a list of block indices that should track axially with refAssem's.

        When axially expanding, the control rods, shields etc. need to maintain mesh lines with
        the rest of the core.
        To do this, we'll just keep track of which indices of a reference assembly we should
        stick with. This method writes the indices of the top of a block to settings as topIndex.

        Keep in mind that assemblies can have different number of blocks. This is why this
        function is useful. So this makes a list of reference indices that correspond to
        different axial mesh points on this assembly.

        This is the depletion mesh we're returning, useful for snapping after axial extension.
        Note that the neutronics mesh on rebusOutputs might be different.

        See Also
        --------
        setBlockMesh : applies a snap.
        """
        if not force and self[-1].p.topIndex > 0:
            # A snap list already exists; don't rebuild unless forced.
            return

        refMesh = refAssem.getAxialMesh() if refMesh is None else refMesh
        selfMesh = self.getAxialMesh()
        # make a list relating this assemblies axial mesh points to indices of the
        # reference assembly
        z = 0.0
        for b in self:
            top = z + b.getHeight()
            try:
                # Find the reference mesh point matching this block top (within float tolerance).
                b.p.topIndex = np.where(np.isclose(refMesh, top))[0].tolist()[0]
            except IndexError:
                runLog.error(
                    "Height {0} in this assembly ({1} in {4}) is not in the reactor mesh "
                    "list from {2}\nThis has: {3}\nIf you want to run "
                    "a case with non-uniform axial mesh, activate the `detailedAxialExpansion` "
                    "setting".format(top, self, refMesh, selfMesh, self.parent)
                )
                raise
            z = top

    def _shouldMassBeConserved(self, belowFuelColumn, b):
        """
        Determine from a rule set if the mass of a block component should be conserved during
        axial expansion.

        Parameters
        ----------
        belowFuelColumn : boolean
            Determines whether a block is below the fuel column or not in fuel assemblies

        b : armi block
            The block that is being examined for modification

        Returns
        -------
        conserveMass : boolean
            Should the mass be conserved in this block

        conserveComponents : list of components
            What components should have their mass conserved (if any)

        belowFuelColumn : boolean
            Update whether the block is above or below a fuel column

        See Also
        --------
        armi.assemblies.Assembly.setBlockMesh
        """
        if b.hasFlags(Flags.FUEL):
            # fuel block
            conserveMass = True
            conserveComponents = b.getComponents(Flags.FUEL)
        elif self.hasFlags(Flags.FUEL):
            # non-fuel block of a fuel assembly.
            if belowFuelColumn:
                # conserve mass of everything below the fuel so as to not invalidate
                # grid-plate dose calcs.
                conserveMass = True
                # conserve mass of everything except fluids.
                conserveComponents = [comp for comp in b.getComponents() if not isinstance(comp.material, Fluid)]
            else:
                # plenum or above block in fuel assembly. don't conserve mass.
                conserveMass = False
                conserveComponents = []
        else:
            # non fuel block in non-fuel assem. Don't conserve mass.
            conserveMass = False
            conserveComponents = []

        return conserveMass, conserveComponents

    def setBlockMesh(self, blockMesh, conserveMassFlag=False):
        """
        Snaps the axial mesh points of this assembly to correspond with the reference mesh.

        Notes
        -----
        This function only conserves mass on certain conditions:

            1) Fuel Assembly
                a) Structural material below the assembly conserves mass to accurate depict grid
                   plate shielding Sodium is not conserved.
                b) Fuel blocks only conserve mass of the fuel, not the structure since the fuel
                   slides up through the cladding (thus fuel/cladding should be reduced).
                c) Structure above the assemblies (expected to be plenum) do not conserve mass
                   since plenum regions have their height reduced to conserve the total structure
                   mass when the fuel grows in the cladding.
See b) 2) Reflectors, shields, and control rods a) These assemblies do not conserve mass since they should remain uniform to keep radial shielding accurate. This approach should be conservative. b) Control rods do not have their mass conserved and the control rod interface is required to be run after this function is called to correctly place mass of poison axially. Parameters ---------- blockMesh : iterable A list of floats describing the upper mesh points of each block in cm. conserveMassFlag : bool or str Option for how to treat mass conservation when the block mesh changes. Conservation of mass for fuel components is enabled by conserveMassFlag="auto". If not auto, a boolean value should be passed. The default is False, which does not conserve any masses. True conserves mass for all components. See Also -------- makeAxialSnapList : Builds the lookup table used by this method getAxialMesh : builds a mesh compatible with this """ # Just adjust the heights and everything else will fall into place zBottom = 0.0 belowFuelColumn = True if self[-1].p.topIndex == 0: runLog.warning( "Reference uniform mesh not being applied to {}. It was likely " "excluded through the setting `nonUniformAssemFlags`.".format(self.p.type) ) return for b in self: if b.isFuel(): belowFuelColumn = False topIndex = b.p.topIndex if not 0 <= topIndex < len(blockMesh): runLog.warning( "index {0} does not exist in topvals (len:{1}). 0D case? 
Skipping snap".format( topIndex, len(blockMesh) ) ) return newTop = blockMesh[topIndex] if newTop is None: runLog.warning("Skipping axial snapping on {0}".format(self), 1) return if conserveMassFlag == "auto": conserveMass, conserveComponents = self._shouldMassBeConserved(belowFuelColumn, b) else: conserveMass = conserveMassFlag conserveComponents = b.getComponents() oldBlockHeight = b.getHeight() b.setHeight(newTop - zBottom) if conserveMass: heightRatio = oldBlockHeight / b.getHeight() for c in conserveComponents: c.changeNDensByFactor(heightRatio) zBottom = newTop self.calculateZCoords() def setBlockHeights(self, blockHeights): """Set the block heights of all blocks in the assembly.""" mesh = np.cumsum(blockHeights) self.setBlockMesh(mesh) def dump(self, fName=None): """Pickle the assembly and write it to a file.""" if not fName: fName = self.getName() + ".dump.pkl" with open(fName, "w") as pkl: pickle.dump(self, pkl) def iterBlocks(self, typeSpec=None, exact=False): """Produce an iterator over all blocks in this assembly from bottom to top. Parameters ---------- typeSpec : Flags or list of Flags, optional Restrict returned blocks to have these flags. exact : bool, optional If true, only produce blocks that have those exact flags. Returns ------- iterable of Block See Also -------- * :meth:`__iter__` - if no type spec provided, assemblies can be naturally iterated upon. * :meth:`iterChildrenWithFlags` - alternative if you know you have a type spec that isn't ``None``. """ if typeSpec is None: return iter(self) return self.iterChildrenWithFlags(typeSpec, exact) def getBlocks(self, typeSpec=None, exact=False): """ Get blocks in an assembly from bottom to top. Parameters ---------- typeSpec : Flags or list of Flags, optional Restrict returned blocks to those of this type. exact : bool, optional If true, will only return if there's an exact match in typeSpec Returns ------- blocks : list List of blocks. 
""" return list(self.iterBlocks(typeSpec, exact)) def getBlocksAndZ(self, typeSpec=None, returnBottomZ=False, returnTopZ=False): """ Get blocks and their z-coordinates from bottom to top. This method is useful when you need to know the z-coord of a block. Parameters ---------- typeSpec : Flags or list of Flags, optional Block type specification to restrict to returnBottomZ : bool, optional If true, will return bottom coordinates instead of centers. Returns ------- blocksAndCoords, list (block, zCoord) tuples Examples -------- for block, bottomZ in a.getBlocksAndZ(returnBottomZ=True): print({0}'s bottom mesh point is {1}'.format(block, bottomZ)) """ if returnBottomZ and returnTopZ: raise ValueError("Both returnTopZ and returnBottomZ are set to `True`") blocks, zCoords = [], [] bottom = 0.0 for b in self: top = bottom + b.getHeight() mid = (bottom + top) / 2.0 if b.hasFlags(typeSpec): blocks.append(b) if returnBottomZ: val = bottom elif returnTopZ: val = top else: val = mid zCoords.append(val) bottom = top return zip(blocks, zCoords) def hasContinuousCoolantChannel(self): return all(b.containsAtLeastOneChildWithFlags(Flags.COOLANT) for b in self) def getFirstBlock(self, typeSpec: TypeSpec = None, exact: bool = False) -> Optional[blocks.Block]: """Find the first block that matches the spec. Parameters ---------- typeSpec Specification to require on the returned block. exact Require block to exactly match ``typeSpec`` Returns ------- Block or None First block that matches if such a block could be found. """ if typeSpec is None: items = iter(self) else: items = self.iterChildrenWithFlags(typeSpec, exact) try: # Create an iterator and attempt to advance it to the first value. 
return next(items) except StopIteration: # No items found in the iteration -> no blocks match the request return None def getFirstBlockByType(self, typeName: str) -> Optional[blocks.Block]: blocks = filter(lambda b: b.getType() == typeName, self) try: return next(blocks) except StopIteration: return None def getBlockAtElevation(self, elevation: float) -> Optional[blocks.Block]: """ Returns the block at a specified axial dimension elevation (given in cm). If height matches the exact top of the block, the block is considered at that height. Parameters ---------- elevation : float The elevation of interest to grab a block (cm) Returns ------- targetBlock : block or None The block that exists at the specified height in the reactor. ``None`` if a block was not found. """ bottomOfBlock = 0.0 for b in self: topOfBlock = bottomOfBlock + b.getHeight() if ( topOfBlock > elevation or abs(topOfBlock - elevation) / elevation < 1e-10 ) and bottomOfBlock < elevation: return b bottomOfBlock = topOfBlock return None def getBIndexFromZIndex(self, zIndex): """ Returns the ARMI block axial index corresponding to a DIF3D node axial index. Parameters ---------- zIndex : float The axial index (beginning with 0) of a DIF3D node. Returns ------- bIndex : int The axial index (beginning with 0) of the ARMI block containing the DIF3D node corresponding to zIndex. """ zIndexTot = -1 for bIndex, b in enumerate(self): zIndexTot += b.p.axMesh if zIndexTot >= zIndex: return bIndex return -1 # no block index found def getBlocksBetweenElevations(self, zLower, zUpper, eps=1e-10): """ Return block(s) between two axial elevations and their corresponding heights. Parameters ---------- zLower, zUpper : float Elevations in cm where blocks should be found. eps : float, optional Lower bound for relative block height fraction that we care about. Below this bound, small slivers of overlapping block are ignored. 
Returns ------- blockInfo : list list of (blockObj, overlapHeightInCm) tuples Examples -------- If the block structure looks like: 50.0 to 100.0 Block3 25.0 to 50.0 Block2 0.0 to 25.0 Block1 Then, >>> a.getBlocksBetweenElevations(0, 50) [(Block1, 25.0), (Block2, 25.0)] >>> a.getBlocksBetweenElevations(0, 30) [(Block1, 25.0), (Block2, 5.0)] """ blocksHere = [] for b in self: if b.p.ztop >= zLower and b.p.zbottom <= zUpper: # at least some of this block overlaps the window of interest top = min(b.p.ztop, zUpper) bottom = max(b.p.zbottom, zLower) heightHere = top - bottom # Filter out blocks that have an extremely small height fraction if heightHere / b.getHeight() > eps: blocksHere.append((b, heightHere)) return blocksHere def getParamValuesAtZ(self, param, elevations, interpType="linear", fillValue=np.nan): """ Interpolates a param axially to find it at any value of elevation z. By default, assumes that all parameters are for the center of a block. So for parameters such as THoutletTemperature that are defined on the top, this may be off. See the paramDefinedAt parameters. Defaults to linear interpolations. Notes ----- This caches interpolators for each param and must be cleared if new params are set or new heights are set. Warning ------- Fails when requested to extrapolate. With higher order splines it is possible to interpolate non-physical values, for example, a negative flux or dpa. Please use caution when going off default in interpType and be certain that interpolated values are physical. Parameters ---------- param : str the parameter to interpolate elevations : array of float the elevations from the bottom of the assembly in cm at which you want the point. interpType: str or int used in interp1d. 
interp1d documentation: Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic' where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of first, second or third order) or as an integer specifying the order of the spline interpolator to use. Default is 'linear'. fillValue: str Rough pass through to scipy.interpolate.interp1d. If 'extend', then the lower and upper bounds are used as the extended value. If 'extrapolate', then extrapolation is permitted. Returns ------- valAtZ : np.ndarray This will be of the shape (z,data-shape) """ interpolator = self.getParamOfZFunction(param, interpType=interpType, fillValue=fillValue) return interpolator(elevations) def getParamOfZFunction(self, param, interpType="linear", fillValue=np.nan): """ Interpolates a param axially to find it at any value of elevation z. By default, assumes that all parameters are for the center of a block. So for parameters such as THoutletTemperature that are defined on the top, this may be off. See the paramDefinedAt parameters. Defaults to linear interpolations. Notes ----- This caches interpolators for each param and must be cleared if new params are set or new heights are set. Warning ------- Fails when requested to extrapololate. With higher order splines it is possible to interpolate nonphysical values, for example, a negative flux or dpa. Please use caution when going off default in interpType and be certain that interpolated values are physical. Parameters ---------- param : str the parameter to interpolate interpType: str or int used in interp1d. interp1d documentation: Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic' where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of first, second or third order) or as an integer specifying the order of the spline interpolator to use. Default is 'linear'. 
fillValue: float Rough pass through to scipy.interpolate.interp1d. If 'extend', then the lower and upper bounds are used as the extended value. If 'extrapolate', then extrapolation is permitted. Returns ------- valAtZ : np.ndarray This will be of the shape (z,data-shape) """ paramDef = self[0].p.paramDefs[param] if not isinstance(paramDef.location, ParamLocation): raise Exception( "Cannot interpolate on `{}`. The ParamDefinition does not define a " "valid location `{}`.\nValid locations are {}".format( param, paramDef.location, ", ".join([str(pl) for pl in ParamLocation]), ) ) atCenter = bool(paramDef.location & (ParamLocation.CENTROID | ParamLocation.VOLUME_INTEGRATED)) z = self.getAxialMesh(atCenter) if paramDef.location & ParamLocation.BOTTOM: z.insert(0, 0.0) z.pop(-1) z = np.asarray(z) values = self.getChildParamValues(param).transpose() boundsError = None if fillValue == "extend": boundsError = False if values.ndim == 1: fillValue = values[0], values[-1] elif values.ndim == 2: fillValue = values[:, 0], values[:, 1] else: raise Exception( 'Unsupported shape ({}) returned from getChildParamValues("{}").' "Shape must be 1 or 2 dimensions".format(values.shape, param) ) interpolater = interpolate.interp1d( z, values, kind=interpType, fill_value=fillValue, assume_sorted=True, bounds_error=boundsError, ) return interpolater def reestablishBlockOrder(self): """ The block ordering has changed, so the spatialGrid and Block-wise spatialLocator and name objects need updating. See Also -------- calculateZCoords : updates the ztop/zbottom params on each block after reordering. """ # replace grid with one that has the right number of locations self.spatialGrid = grids.AxialGrid.fromNCells(len(self)) self.spatialGrid.armiObject = self for zi, b in enumerate(self): b.spatialLocator = self.spatialGrid[0, 0, zi] # update the name too. NOTE: You must update the history tracker. 
            b.setName(b.makeName(self.p.assemNum, zi))

    def countBlocksWithFlags(self, blockTypeSpec=None):
        """
        Returns the number of blocks of a specified type.

        Parameters
        ----------
        blockTypeSpec : Flags or list
            Restrict to only these types of blocks. If blockTypeSpec is None, count
            all of the blocks.

        Returns
        -------
        blockCounter : int
            number of blocks of this type
        """
        return sum(1 for _ in self.iterBlocks(blockTypeSpec))

    def getDim(self, typeSpec, dimName):
        """
        With a preference for fuel blocks, find the first component in the Assembly
        with flags that match ``typeSpec`` and return dimension as specified by
        ``dimName``.

        Example: getDim(Flags.WIRE, 'od') will return a wire's OD in cm.
        """
        # prefer fuel blocks.
        bList = self.getBlocks(Flags.FUEL)
        if not bList:
            # no fuel blocks. take first block.
            bList = self

        for b in bList:
            dim = b.getDim(typeSpec, dimName)
            # NOTE(review): truthiness test skips a legitimate 0.0 dimension as well
            # as None -- confirm this is intended before relying on zero dims.
            if dim:
                return dim

        # return none if there is nothing to return
        return None

    def getSymmetryFactor(self):
        """Return the symmetry factor of this assembly."""
        return self[0].getSymmetryFactor()

    def rotate(self, rad):
        """Rotates the spatial variables on an assembly by the specified angle.

        Each Block on the Assembly is rotated in turn.

        Parameters
        ----------
        rad : float
            number (in radians) specifying the angle of counter clockwise rotation
        """
        # orientation[2] is rotation about the z-axis, stored in degrees
        self.p.orientation[2] += math.degrees(rad)
        for b in self:
            b.rotate(rad)

    def isOnWhichSymmetryLine(self):
        """Delegate to the parent grid to report which symmetry line (if any) this
        assembly's location overlaps."""
        grid = self.parent.spatialGrid
        return grid.overlapsWhichSymmetryLine(self.spatialLocator.getCompleteIndices())

    def orientBlocks(self, parentSpatialGrid):
        """Add special grids to the blocks inside this Assembly, respecting their orientation.

        Parameters
        ----------
        parentSpatialGrid : Grid
            Spatial Grid of the parent of this Assembly (probably a system-level grid).
""" for b in self: if b.spatialGrid is None: try: b.autoCreateSpatialGrids(parentSpatialGrid) except (ValueError, NotImplementedError) as e: runLog.extra(str(e), single=True) # Do more grid initializations from a manual or auto created grid if b.spatialGrid is not None: b.assignPinIndices() class HexAssembly(Assembly): """An assembly that is hexagonal in cross-section.""" _BLOCK_TYPE = blocks.HexBlock def rotate(self, rad: float): """Rotate an assembly and its children. .. impl:: A hexagonal assembly shall support rotating around the z-axis in 60 degree increments. :id: I_ARMI_ROTATE_HEX_ASSEM :implements: R_ARMI_ROTATE_HEX This method loops through every ``Block`` in this ``HexAssembly`` and rotates it by a given angle (in radians). The rotation angle is positive in the counter-clockwise direction. To perform the ``Block`` rotation, the :meth:`armi.reactor.blocks.HexBlock.rotate` method is called. Parameters ---------- rad : float Counter clockwise rotation in radians. **MUST** be in increments of 60 degrees (PI / 3) Raises ------ ValueError If rotation is not divisible by pi / 3. """ if math.isclose(rad % (math.pi / 3), 0, abs_tol=1e-12): return super().rotate(rad) msg = f"Rotation must be in 60 degree increments, got {math.degrees(rad)} degrees ({rad} radians)." runLog.error(msg) raise ValueError(msg) class CartesianAssembly(Assembly): """An assembly that is rectangular in cross-section.""" _BLOCK_TYPE = blocks.CartesianBlock class RZAssembly(Assembly): """ RZAssembly are assemblies in RZ geometry; they need to be different objects than HexAssembly because they use different locations and need to have Radial Meshes in their setting. Notes ----- ThRZAssemblies should be a subclass of Assemblies because they should have a common place to put information about subdividing the global mesh for transport. This is similar to how blocks have 'AxialMesh' in their blocks. 
    """

    def __init__(self, name, assemNum=None):
        Assembly.__init__(self, name, assemNum)
        # number of radial mesh points used when subdividing the global mesh
        self.p.RadMesh = 1

    def radialOuter(self):
        """Returns the outer radial boundary of this assembly."""
        return self[0].radialOuter()

    def radialInner(self):
        """Returns the inner radial boundary of this assembly."""
        return self[0].radialInner()

    def thetaOuter(self):
        """Returns the outer azimuthal boundary of this assembly."""
        return self[0].thetaOuter()

    def thetaInner(self):
        """Returns the inner azimuthal boundary of this assembly."""
        return self[0].thetaInner()


class ThRZAssembly(RZAssembly):
    """
    ThRZAssembly are assemblies in ThetaRZ geometry, they need to be different
    objects than HexAssembly because they use different locations and need to have
    Radial Meshes in their setting.
    """

    def __init__(self, assemType, assemNum=None):
        RZAssembly.__init__(self, assemType, assemNum)
        # number of azimuthal mesh points used when subdividing the global mesh
        self.p.AziMesh = 1



================================================
FILE: armi/reactor/assemblyParameters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Assembly Parameter Definitions.""" from armi import runLog from armi.reactor import parameters from armi.reactor.parameters import ParamLocation from armi.reactor.parameters.parameterDefinitions import isNumpyArray from armi.utils import units def getAssemblyParameterDefinitions(): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb: pb.defParam( "orientation", units=units.DEGREES, description=( "Triple representing rotations counterclockwise around each spatial axis. " "For example, a hex assembly rotated by 1/6th has orientation (0, 0, 60.0)" ), default=None, ) with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb: pb.defParam( "arealPd", units=f"{units.MW}/{units.METERS}^2", description="Power in assembly divided by its XY cross-sectional area. Related to PCT.", ) pb.defParam( "buLimit", units=units.UNITLESS, description="buLimit", default=parameters.NoDefault, ) pb.defParam( "chargeBu", units=units.PERCENT_FIMA, description="Max block-average burnup in this assembly when it most recently entered " "the core. If the assembly was discharged and then re-charged, this value will only " "reflect the most recent charge.", ) pb.defParam( "chargeCycle", units=units.UNITLESS, description="Cycle number that this assembly most recently entered the core. If the " "assembly was discharged and then re-charged, this value will only reflect the most " "recent charge.", ) pb.defParam( "chargeFis", units=units.KG, description="Fissile mass in assembly when it most recently entered the core. If the " "assembly was discharged and then re-charged, this value will only reflect the most " "recent charge.", ) pb.defParam( "chargeTime", units=units.YEARS, description="Time at which this assembly most recently entered the core. 
If the " "assembly was discharged and then re-charged, this value will only reflect the most " "recent charge.", default=parameters.NoDefault, ) pb.defParam( "multiplicity", units=units.UNITLESS, description="The number of physical assemblies that the associated object represents. " "This is typically 1, but may need to change when the assembly is moved between " "containers with different types of symmetry. For instance, if an assembly moves from " "a Core with 1/3rd symmetry into a spent-fuel pool with full symmetry, rather than " "splitting the assembly into 3, the multiplicity can be set to 3. For now, this is a " "bit of a hack to make fuel handling work; multiplicity in the 1/3 core should be 3 to " "begin with, in which case this parameter could be used as the primary means of " "handling symmetry and fractional domains throughout ARMI. We will probably roll that " "out once the dust settles on some of this SFP work. For now, the Core stores " "multiplicity as 1 always, since the powerMultiplier to adjust to full-core " "quantities.", default=1, ) pb.defParam("daysSinceLastMove", units=units.UNITLESS, description="daysSinceLastMove") pb.defParam("kInf", units=units.UNITLESS, description="kInf") pb.defParam("maxDpaPeak", units=units.DPA, description="maxDpaPeak") pb.defParam("maxPercentBu", units=units.PERCENT, description="maxPercentBu") pb.defParam("numMoves", units=units.UNITLESS, description="numMoves") pb.defParam("timeToLimit", units=units.DAYS, description="timeToLimit", default=1e6) pb.defParam( "guideTubeTopElevation", units=units.CM, description=("Elevation of the top of the guide tube relative to the bottom of the duct."), categories=[parameters.Category.assignInBlueprints], saveToDB=True, ) with pDefs.createBuilder(location=ParamLocation.AVERAGE) as pb: pb.defParam( "detailedNDens", setter=isNumpyArray("detailedNDens"), units=f"atoms/(bn*{units.CM})", description=( "High-fidelity number density vector with up to thousands of nuclides. 
" "Used in high-fi depletion runs where low-fi depletion may also be occurring. " "This param keeps the hi-fi and low-fi depletion values from interfering." ), saveToDB=True, default=None, ) def _enforceNotesRestrictions(self, value): """Enforces that notes can only be of type str with max length of 1000.""" if type(value) is not str: runLog.error("Values stored in the `notes` parameter must be strings of less than 1000 characters!") raise ValueError elif len(value) > 1000: runLog.warning( "Strings stored in the `notes` parameter must be less than 1000 characters. " f"Truncating the note starting with {value[0:15]}... at 1000 characters!" ) self._p_notes = value[0:1000] else: self._p_notes = value pb.defParam( "notes", units=units.UNITLESS, description="A string with notes about the assembly, limited to 1000 characters. This " "parameter is not meant to store data. Needlessly storing large strings on this " "parameter for every assembly is potentially unwise from a memory perspective.", saveToDB=True, default="", setter=_enforceNotesRestrictions, ) with pDefs.createBuilder(location=ParamLocation.NA, default=0.0, categories=["control rods"]) as pb: pb.defParam( "crCriticalFraction", units=units.UNITLESS, description=( "The insertion fraction when the control rod assembly is in its critical " "configuration. Note that the default of -1.0 is a trigger for this value not " "being set yet." ), saveToDB=True, default=-1.0, ) pb.defParam( "crCurrentElevation", units=units.CM, description="The current elevation of the bottom of the moveable section of a control rod assembly.", categories=[parameters.Category.assignInBlueprints], saveToDB=True, ) pb.defParam( "crInsertedElevation", units=units.CM, description=( "The elevation of the furthest-most insertion point of a control rod assembly. For " "a control rod assembly inserted from the top, this will be the lower tip of the " "bottom-most moveable section in the assembly when fully inserted." 
), categories=[parameters.Category.assignInBlueprints], saveToDB=True, ) pb.defParam( "crRodLength", units=units.CM, description="length of the control material within the control rod", saveToDB=True, ) pb.defParam( "crWithdrawnElevation", units=units.CM, description=( "The elevation of the tip of a control rod assembly when it is fully withdrawn. " "For a control rod assembly inserted from the top, this will be the lower tip of " "the bottom-most moveable section in the assembly when fully withdrawn." ), categories=[parameters.Category.assignInBlueprints], saveToDB=True, ) with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0, categories=["thermal hydraulics"]) as pb: pb.defParam( "THcoolantOutletT", units=units.DEGC, description="The nominal average bulk coolant outlet temperature out of the block.", categories=["broadcast"], ) with pDefs.createBuilder() as pb: pb.defParam( "type", units=units.UNITLESS, description="The name of the assembly input on the blueprints input", default="defaultAssemType", saveToDB=True, ) pb.defParam( "ringPosHist", units=units.UNITLESS, description=( "Ring and position history for this assembly written at BOC. Index 1 corresponds to position at BOC1." 
), default=None, saveToDB=True, ) pb.defParam( "nozzleType", units=units.UNITLESS, description="nozzle type for assembly", default="Default", saveToDB=True, categories=[parameters.Category.assignInBlueprints], ) with pDefs.createBuilder(default=0.0) as pb: pb.defParam("assemNum", units=units.UNITLESS, description="Assembly number") pb.defParam( "dischargeTime", units=units.YEARS, description="Time the Assembly was removed from the Reactor.", ) pb.defParam( "hotChannelFactors", units=units.UNITLESS, description="Definition of set of HCFs to be applied to assembly.", default="Default", saveToDB=True, categories=[parameters.Category.assignInBlueprints], ) with pDefs.createBuilder(categories=["radialGeometry"]) as pb: pb.defParam( "AziMesh", units=units.UNITLESS, description="Number of points in the Azimuthal mesh.", saveToDB=False, default=1, ) pb.defParam( "RadMesh", units=units.UNITLESS, description="Number of points in the Radial mesh.", saveToDB=False, default=1, ) return pDefs ================================================ FILE: armi/reactor/blockParameters.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Parameter definitions for Blocks.""" from armi import runLog from armi.physics.neutronics import crossSectionGroupManager from armi.reactor import parameters from armi.reactor.parameters import ParamLocation from armi.reactor.parameters.parameterDefinitions import isNumpyArray from armi.utils import units from armi.utils.units import ASCII_LETTER_A, ASCII_LETTER_Z, ASCII_LETTER_a def getBlockParameterDefinitions(): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb: pb.defParam( "orientation", units=units.DEGREES, description=( "Triple representing rotations counterclockwise around each spatial axis. For " "example, a hex assembly rotated by 1/6th has orientation (0, 0, 60.0)" ), default=None, ) pb.defParam( "detailedNDens", setter=isNumpyArray("detailedNDens"), units=f"atoms/(bn*{units.CM})", description=( "High-fidelity number density vector with up to thousands of nuclides. " "Used in high-fi depletion runs where low-fi depletion may also be occurring. " "This param keeps the hi-fi and low-fi depletion values from interfering." ), location=ParamLocation.AVERAGE, saveToDB=False, default=None, ) with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE, categories=["depletion"]) as pb: pb.defParam( "newDPA", units=units.DPA, description="Dose in DPA accrued during the current time step", ) pb.defParam( "percentBu", units=units.PERCENT_FIMA, description="Percentage of the initial heavy metal atoms that have been fissioned", categories=["cumulative"], ) pb.defParam( "percentBuByPin", units=units.PERCENT_FIMA, description="Percent burnup of the initial heavy metal atoms that have been fissioned for each pin", default=None, saveToDB=False, location=ParamLocation.CHILDREN, ) pb.defParam( "residence", units=units.DAYS, description=( "Duration that a block has been in the core multiplied by the fraction " "of full power generated in that time." 
), categories=["cumulative"], ) with pDefs.createBuilder(default=0.0, location=ParamLocation.VOLUME_INTEGRATED, categories=["depletion"]) as pb: pb.defParam( "molesHmNow", units=f"{units.MOLES}", description="Total number of atoms of heavy metal", ) pb.defParam( "molesHmBOL", units=f"{units.MOLES}", description="Total number of atoms of heavy metal at BOL.", ) pb.defParam( "massHmBOL", units=units.GRAMS, description="Mass of heavy metal at BOL", ) pb.defParam( "initialB10ComponentVol", units=f"{units.CM}^3", description=( "cc's of un-irradiated, cold B10 containing component (includes full volume of any components with B10)" ), ) with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE) as pb: def envGroup(self, envGroupChar): if isinstance(envGroupChar, (int, float)): intValue = int(envGroupChar) runLog.warning( f"Attempting to set `b.p.envGroup` to int value ({envGroupChar})." "Possibly loading from old database", single=True, label="env group as int " + str(intValue), ) self.envGroupNum = intValue return elif not isinstance(envGroupChar, str): raise Exception(f"Wrong type for envGroupChar {envGroupChar}: {type(envGroupChar)}") if envGroupChar.islower(): # if lower case find the distance from lowercase a and add the span of A to Z lowerCaseOffset = ASCII_LETTER_Z - ASCII_LETTER_A + 1 # 26 envGroupNum = ord(envGroupChar) - ASCII_LETTER_a + lowerCaseOffset else: envGroupNum = ord(envGroupChar) - ASCII_LETTER_A self._p_envGroup = envGroupChar self._p_envGroupNum = envGroupNum envGroupNumDef = parameters.ALL_DEFINITIONS["envGroupNum"] envGroupNumDef.assigned = parameters.SINCE_ANYTHING pb.defParam( "envGroup", units=units.UNITLESS, description="The environment group letter of this block", default="A", setter=envGroup, ) def envGroupNum(self, envGroupNum): # support capital and lowercase alpha chars (52= 26*2) if envGroupNum > 52: raise RuntimeError("Invalid env group number ({}): too many groups. 
52 is the max.".format(envGroupNum)) self._p_envGroupNum = envGroupNum lowerCaseOffset = ASCII_LETTER_Z - ASCII_LETTER_A if envGroupNum > lowerCaseOffset: envGroupNum = envGroupNum - (lowerCaseOffset + 1) self._p_envGroup = chr(envGroupNum + ASCII_LETTER_a) else: self._p_envGroup = chr(envGroupNum + ASCII_LETTER_A) envGroupDef = parameters.ALL_DEFINITIONS["envGroup"] envGroupDef.assigned = parameters.SINCE_ANYTHING pb.defParam( "envGroupNum", units=units.UNITLESS, description="An integer representation of the environment group " "(burnup/temperature/etc.). linked to envGroup.", default=0, setter=envGroupNum, ) pb.defParam( "buRate", units=f"{units.PERCENT_FIMA}/{units.DAYS}", # This is very related to power, but normalized to %FIMA. description=( "Current rate of burnup accumulation. Useful for estimating times when burnup limits may be exceeded." ), ) pb.defParam( "buRatePeak", units=f"{units.PERCENT_FIMA}/{units.DAYS}", description="Current rate of burnup accumulation at peak location", location=ParamLocation.MAX, ) pb.defParam( "detailedDpa", units=units.DPA, description="displacements per atom", categories=["cumulative", "detailedAxialExpansion", "depletion"], ) pb.defParam( "detailedDpaPeak", units=units.DPA, description="displacements per atom with peaking factor", categories=["cumulative", "detailedAxialExpansion", "depletion"], location=ParamLocation.MAX, ) pb.defParam( "detailedDpaRate", units=f"{units.DPA}/{units.SECONDS}", description="Current time derivative of average detailed DPA", categories=["detailedAxialExpansion", "depletion"], ) pb.defParam( "displacementX", units=units.METERS, description="Assembly displacement in the x direction", ) pb.defParam( "displacementY", units=units.METERS, description="Assembly displacement in the y direction", ) pb.defParam( "heliumInB4C", units=f"He/{units.SECONDS}/{units.CM}^3", description="Alpha particle production rate in B4C control and shield material.", location=ParamLocation.AVERAGE, ) pb.defParam( 
"powerRx", units=f"{units.WATTS}/{units.CM}^3", description="Power density of the reactor", location=ParamLocation.AVERAGE, ) pb.defParam( "timeToLimit", units=units.DAYS, description="Time unit block violates its burnup limit.", ) pb.defParam( "zbottom", units=units.CM, description="Axial position of the bottom of this block", categories=[parameters.Category.retainOnReplacement], ) pb.defParam( "ztop", units=units.CM, description="Axial position of the top of this block", categories=[parameters.Category.retainOnReplacement], ) pb.defParam( "nHMAtBOL", units=f"atoms/(bn*{units.CM})", description="Ndens of heavy metal at BOL", saveToDB=False, ) pb.defParam( "z", units=units.CM, description="Center axial dimension of this block", categories=[parameters.Category.retainOnReplacement], ) with pDefs.createBuilder() as pb: pb.defParam( "axialExpTargetComponent", units=units.UNITLESS, description=( "The name of the target component used for axial expansion and contraction of solid components." ), default="", saveToDB=True, ) pb.defParam( "topIndex", units=units.UNITLESS, description=( "the axial block index within its parent assembly (0 is bottom block). This index with regard to the " "mesh of the reference assembly so it does not increase by 1 for each block. It is used to keep the " "mesh of the assemblies uniform with axial expansion. See setBlockMesh, makeAxialSnapList", ), default=0, saveToDB=True, categories=[parameters.Category.retainOnReplacement], ) pb.defParam( "eqRegion", units=units.UNITLESS, description="Equilibrium shuffling region. 
Corresponds to how many full cycles fuel here has gone through.", default=0.0, ) pb.defParam( "id", units=units.UNITLESS, description="Inner diameter of the Block.", default=None, ) pb.defParam( "height", units=units.CM, description="the block height", default=None, categories=[parameters.Category.retainOnReplacement], ) def xsType(self, value): self._p_xsType = value self._p_xsTypeNum = crossSectionGroupManager.getXSTypeNumberFromLabel(value) xsTypeNumDef = parameters.ALL_DEFINITIONS["xsTypeNum"] xsTypeNumDef.assigned = parameters.SINCE_ANYTHING pb.defParam( "xsType", units=units.UNITLESS, description="The xs group letter of this block", default="A", setter=xsType, ) def xsTypeNum(self, value): self._p_xsTypeNum = value self._p_xsType = crossSectionGroupManager.getXSTypeLabelFromNumber(value) xsTypeDef = parameters.ALL_DEFINITIONS["xsType"] xsTypeDef.assigned = parameters.SINCE_ANYTHING pb.defParam( "xsTypeNum", units=units.UNITLESS, description="An integer representation of the cross section type, linked to xsType.", default=65, # NOTE: buGroupNum actually starts at 0 setter=xsTypeNum, ) pb.defParam( "type", units=units.UNITLESS, description="string name of the input block", default="defaultType", saveToDB=True, ) with pDefs.createBuilder(default=0.0) as pb: pb.defParam( "assemNum", units=units.UNITLESS, description="Index that refers, nominally, to the assemNum parameter of the containing " "Assembly object. This is stored on the Block to aid in visualizing shuffle patterns " "and the like, and should not be used within the code. 
These are not guaranteed to be " "consistent with the containing Assembly, so they should not be used as a reliable " "means to reconstruct the model.", categories=[parameters.Category.retainOnReplacement], ) pb.defParam( "breedRatio", units=units.UNITLESS, description="Breeding ratio", categories=["detailedAxialExpansion"], location=ParamLocation.AVERAGE, ) pb.defParam("buLimit", units=units.PERCENT_FIMA, description="Burnup limit") pb.defParam( "heightBOL", units=units.CM, description="As-fabricated height of this block (as input). Used in fuel performance. Should be constant.", location=ParamLocation.AVERAGE, categories=[parameters.Category.retainOnReplacement], ) pb.defParam( "intrinsicSource", units=units.UNITLESS, description="Intrinsic neutron source from spontaneous fissions before a decay period", location=ParamLocation.AVERAGE, ) pb.defParam( "kgFis", units=units.KG, description="Mass of fissile material in block", location=ParamLocation.VOLUME_INTEGRATED, ) pb.defParam( "kgHM", units=units.KG, description="Mass of heavy metal in block", location=ParamLocation.VOLUME_INTEGRATED, ) pb.defParam("nPins", units=units.UNITLESS, description="Number of pins") pb.defParam( "percentBuPeak", units=units.PERCENT_FIMA, description="Peak percentage of the initial heavy metal atoms that have been fissioned", location=ParamLocation.MAX, categories=["cumulative", "eq cumulative shift"], ) pb.defParam( "puFrac", units=units.UNITLESS, description="Current Pu number density relative to HM at BOL", location=ParamLocation.AVERAGE, ) pb.defParam( "smearDensity", units=units.UNITLESS, description=( "Smear density of fuel pins in this block. Defined as the ratio of fuel " "area to total space inside cladding." 
), location=ParamLocation.AVERAGE, ) return pDefs ================================================ FILE: armi/reactor/blocks/__init__.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ARMI provides several different Block types for downstream users. The generic Block is meant to be a base class. And then ARMI provides different geometries that might be interesting or useful, such as hexagonal or cartesian blocks. ARMI encourages you to build your own subclass of an ARMI Block type, to simplify your reactor blueprints. """ # ruff: noqa: F401 from armi.reactor.blocks.block import PIN_COMPONENTS, Block from armi.reactor.blocks.cartesianBlock import CartesianBlock from armi.reactor.blocks.hexBlock import HexBlock from armi.reactor.blocks.thRZBlock import ThRZBlock ================================================ FILE: armi/reactor/blocks/block.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ The generic Block base class. This is meant to be the basis of all Blocks you use in your modeling. ARMI encourages you to build your own subclass of an ARMI Block type, to simplify your reactor blueprints. Blocks are axial chunks of assemblies. They contain most of the state variables, including power, flux, and homogenized number densities. Blocks are further divided into components. """ import collections import copy import math from typing import ClassVar, Optional, Tuple, Type import numpy as np from armi import runLog from armi.bookkeeping import report from armi.nuclearDataIO import xsCollections from armi.reactor import ( blockParameters, components, composites, grids, parameters, ) from armi.reactor.components import basicShapes from armi.reactor.flags import Flags from armi.utils import densityTools, units from armi.utils.plotting import plotBlockFlux from armi.utils.units import TRACE_NUMBER_DENSITY PIN_COMPONENTS = [ Flags.CONTROL, Flags.PLENUM, Flags.SHIELD, Flags.FUEL, Flags.CLAD, Flags.PIN, Flags.WIRE, ] _PitchDefiningComponent = Optional[Tuple[Type[components.Component], ...]] class Block(composites.Composite): """ An axial slice of an assembly. Blocks are Composite objects with extra parameter bindings, and utility methods that let them play nicely with their containing Assembly. """ uniqID = 0 # dimension used to determine which component defines the block's pitch PITCH_DIMENSION = "op" # component type that can be considered a candidate for providing pitch PITCH_COMPONENT_TYPE: ClassVar[_PitchDefiningComponent] = None pDefs = blockParameters.getBlockParameterDefinitions() def __init__(self, name: str, height: float = 1.0): """ Builds a new ARMI block. name : str The name of this block height : float, optional The height of the block in cm. Defaults to 1.0 so that ``getVolume`` assumes unit height. 
""" composites.Composite.__init__(self, name) self.p.height = height self.p.heightBOL = height self.p.orientation = np.array((0.0, 0.0, 0.0)) self.points = [] self.macros = None # flag to indicated when DerivedShape children must be updated. self.derivedMustUpdate = False # which component to use to determine block pitch, along with its 'op' self._pitchDefiningComponent = (None, 0.0) # Manually set some parameters at BOL for problemParam in ["THcornTemp", "THedgeTemp"]: self.p[problemParam] = [] def __repr__(self): # be warned, changing this might break unit tests on input file generations return "<{type} {name} at {loc} XS: {xs} ENV GP: {env}>".format( type=self.getType(), name=self.getName(), xs=self.p.xsType, env=self.p.envGroup, loc=self.getLocation(), ) def __deepcopy__(self, memo): """ Custom deepcopy behavior to prevent duplication of macros and _lumpedFissionProducts. We detach the recursive links to the parent and the reactor to prevent blocks carrying large independent copies of stale reactors in memory. If you make a new block, you must add it to an assembly and a reactor. """ # add self to memo to prevent child objects from duplicating the parent block memo[id(self)] = b = self.__class__.__new__(self.__class__) # use __getstate__ and __setstate__ pickle-methods to initialize state = self.__getstate__() # __getstate__ removes parent del state["macros"] del state["_lumpedFissionProducts"] b.__setstate__(copy.deepcopy(state, memo)) # assign macros and LFP b.macros = self.macros b._lumpedFissionProducts = self._lumpedFissionProducts return b def createHomogenizedCopy(self, pinSpatialLocators=False): """ Create a copy of a block. Notes ----- Used to implement a copy function for specific block types that can be much faster than a deepcopy by glossing over details that may be unnecessary in certain contexts. This base class implementation is just a deepcopy of the block, in full detail (not homogenized). 
""" return copy.deepcopy(self) @property def core(self): from armi.reactor.reactors import Core c = self.getAncestor(lambda c: isinstance(c, Core)) return c def makeName(self, assemNum, axialIndex): """ Generate a standard block from assembly number. This also sets the block-level assembly-num param. Once, we used a axial-character suffix to represent the axial index, but this is inherently limited so we switched to a numerical name. The axial suffix needs can be brought in to plugins that require them. Examples -------- >>> makeName(120, 5) 'B0120-005' """ self.p.assemNum = assemNum return "B{0:04d}-{1:03d}".format(assemNum, axialIndex) def getSmearDensity(self, cold=True): """ Compute the smear density of pins in this block. Smear density is the area of the fuel divided by the area of the space available for fuel inside the cladding. Other space filled with solid materials is not considered available. If all the area is fuel, it has 100% smear density. Lower smear density allows more room for swelling. Warning ------- This requires circular fuel and circular cladding. Designs that vary from this will be wrong. It may make sense in the future to put this somewhere a bit more design specific. Notes ----- This only considers circular objects. If you have a cladding that is not a circle, it will be ignored. Negative areas can exist for void gaps in the fuel pin. A negative area in a gap represents overlap area between two solid components. To account for this additional space within the pin cladding the abs(negativeArea) is added to the inner cladding area. Parameters ---------- cold : bool, optional If false, returns the smear density at hot temperatures Returns ------- float The smear density as a fraction. 
""" fuels = self.getComponents(Flags.FUEL) if not fuels: # smear density is not computed for non-fuel blocks return 0.0 elif not self.getNumPins(): # smear density is only defined for pinned blocks return 0.0 circles = self.getComponentsOfShape(components.Circle) if not circles: raise ValueError(f"Cannot get smear density of {self}. There are no circular components.") clads = set(self.getComponents(Flags.CLAD)).intersection(set(circles)) if not clads: raise ValueError(f"Cannot get smear density of {self}. There are no clad components.") # Compute component areas innerCladdingArea = sum( math.pi * clad.getDimension("id", cold=cold) ** 2 / 4.0 * clad.getDimension("mult") for clad in clads ) sortedClads = sorted(clads) sortedCompsInsideClad = self.getSortedComponentsInsideOfComponent(sortedClads.pop()) return self.computeSmearDensity(innerCladdingArea, sortedCompsInsideClad, cold) @staticmethod def computeSmearDensity(innerCladdingArea: float, sortedCompsInsideClad: list[components.Component], cold: bool): """Compute the smear density for a sorted list of components. Parameters ---------- innerCladdingArea : float Circular area inside the cladding. sortedCompsInsideClad : list A sorted list of Components inside the cladding. cold : bool If false, returns the smear density at hot temperatures Returns ------- float The smear density as a fraction. 
""" fuelComponentArea = 0.0 unmovableComponentArea = 0.0 negativeArea = 0.0 for c in sortedCompsInsideClad: componentArea = c.getArea(cold=cold) if c.isFuel(): fuelComponentArea += componentArea elif c.hasFlags(Flags.CLAD): # this is another component's clad; don't count it towards unmoveable area pass elif c.hasFlags([Flags.SLUG, Flags.DUMMY]): # this flag designates that this clad/slug combination isn't fuel and shouldn't be in the average pass else: if c.containsSolidMaterial(): unmovableComponentArea += componentArea elif c.containsVoidMaterial() and componentArea < 0.0: if cold: # will error out soon runLog.error( "{} with id {} and od {} has negative area at cold dimensions".format( c, c.getDimension("id", cold=True), c.getDimension("od", cold=True), ) ) negativeArea += abs(componentArea) if cold and negativeArea: raise ValueError( "Negative component areas found. Check the cold dimensions are properly aligned and no components " "overlap." ) innerCladdingArea += negativeArea # See note 2 of self.getSmearDensity totalMovableArea = innerCladdingArea - unmovableComponentArea if totalMovableArea <= 0.0: return 0.0 else: return fuelComponentArea / totalMovableArea def autoCreateSpatialGrids(self, systemSpatialGrid=None): """ Creates a spatialGrid for a Block. Blocks do not always have a spatialGrid from Blueprints, but some Blocks can have their spatialGrids inferred based on the multiplicity of their components. This would add the ability to create a spatialGrid for a Block and give its children the corresponding spatialLocators if certain conditions are met. Parameters ---------- systemSpatialGrid : Grid, optional Spatial Grid of the system-level parent of this Assembly that contains this Block. Raises ------ ValueError If the multiplicities of the block are not only 1 or N or if generated ringNumber leads to more positions than necessary. 
""" if self.spatialGrid is None: self.spatialGrid = systemSpatialGrid def assignPinIndices(self): pass def getMgFlux(self, adjoint=False, average=False, gamma=False): """ Returns the multigroup neutron flux in [n/cm^2/s]. The first entry is the first energy group (fastest neutrons). Each additional group is the next energy group, as set in the ISOTXS library. It is stored integrated over volume on self.p.mgFlux Parameters ---------- adjoint : bool, optional Return adjoint flux instead of real average : bool, optional If true, will return average flux between latest and previous. Doesn't work for pin detailed yet. gamma : bool, optional Whether to return the neutron flux or the gamma flux. Returns ------- flux : multigroup neutron flux in [n/cm^2/s] """ flux = composites.ArmiObject.getMgFlux(self, adjoint=adjoint, average=False, gamma=gamma) if average and np.any(self.p.lastMgFlux): volume = self.getVolume() lastFlux = self.p.lastMgFlux / volume flux = (flux + lastFlux) / 2.0 return flux def setPinMgFluxes(self, fluxes, adjoint=False, gamma=False): """ Store the pin-detailed multi-group neutron flux. Parameters ---------- fluxes : np.ndarray The block-level pin multigroup fluxes. ``fluxes[i, g]`` represents the flux in group g for pin ``i`` located at ``self.getPinLocations()[i]``. Flux units are the standard n/cm^2/s. adjoint : bool, optional Whether to set real or adjoint data. gamma : bool, optional Whether to set gamma or neutron data. """ if gamma: if adjoint: raise ValueError("Adjoint gamma flux is currently unsupported.") else: self.p.pinMgFluxesGamma = fluxes else: if adjoint: self.p.pinMgFluxesAdj = fluxes else: self.p.pinMgFluxes = fluxes def getMicroSuffix(self): """ Returns the microscopic library suffix (e.g. 'AB') for this block. DIF3D and MC2 are limited to 6 character nuclide labels. ARMI by convention uses the first 4 for nuclide name (e.g. U235, PU39, etc.) and then uses the 5th character for cross-section type and the 6th for burnup group. 
This allows a variety of XS sets to be built modeling substantially different blocks. Notes ----- The single-letter use for xsType and envGroup limit users to 52 groups of each. ARMI will allow 2-letter xsType designations if and only if the `envGroup` setting has length 1 (i.e. no burnup/temp groups are defined). This is useful for high-fidelity XS modeling. """ env = self.p.envGroup if not env: raise RuntimeError( "Cannot get MicroXS suffix because {0} in {1} does not have a environment(env) group".format( self, self.parent ) ) xsType = self.p.xsType if len(xsType) == 1: return xsType + env elif len(xsType) == 2 and ord(env) != ord("A"): # default is "A" so if we got an off default 2 char, there is no way to resolve. raise ValueError("Use of non-default env groups is not allowed with multi-character xs groups!") else: # ignore env group, multi Char XS type to support assigning 2 chars in blueprints return xsType def getHeight(self): """Return the block height.""" return self.p.height def setHeight(self, modifiedHeight, conserveMass=False, adjustList=None): """ Set a new height of the block. Parameters ---------- modifiedHeight : float The height of the block in cm conserveMass : bool, optional Conserve mass of nuclides in ``adjustList``. adjustList : list, optional Nuclides that will be conserved in conserving mass in the block. It is recommended to pass a list of all nuclides in the block. Notes ----- There is a coupling between block heights, the parent assembly axial mesh, and the ztop/zbottom/z params of the sibling blocks. When you set a height, all those things are invalidated. Thus, this method has to go through and update them via ``parent.calculateZCoords``. See Also -------- armi.reactor.reactors.Core.updateAxialMesh May need to be called after this. armi.reactor.assemblies.Assembly.calculateZCoords Recalculates z-coords, automatically called by this. 
""" originalHeight = self.getHeight() # get before modifying if modifiedHeight < 0.0: raise ValueError(f"Cannot set height of block {self} to height of {modifiedHeight} cm") self.p.height = modifiedHeight self.clearCache() if conserveMass: if originalHeight != modifiedHeight: if not adjustList: raise ValueError("Nuclides in ``adjustList`` must be provided to conserve mass.") self.adjustDensity(originalHeight / modifiedHeight, adjustList) if self.parent: self.parent.calculateZCoords() def getWettedPerimeter(self): raise NotImplementedError def getFlowAreaPerPin(self): """ Return the flowing coolant area of the block in cm^2, normalized to the number of pins in the block. NumPins looks for max number of fuel, clad, control, etc. See Also -------- armi.reactor.blocks.Block.getNumPins figures out numPins """ numPins = self.getNumPins() try: return self.getComponent(Flags.COOLANT, exact=True).getArea() / numPins except ZeroDivisionError: raise ZeroDivisionError( f"Block {self} has 0 pins (fuel, clad, control, shield, etc.). Thus, its flow area " "per pin is undefined." ) def getHydraulicDiameter(self): raise NotImplementedError def adjustUEnrich(self, newEnrich): """ Adjust U-235/U-238 mass ratio to a mass enrichment. Parameters ---------- newEnrich : float New U-235 enrichment in mass fraction Notes ----- completeInitialLoading must be run because adjusting the enrichment actually changes the mass slightly and you can get negative burnups, which you do not want. """ fuels = self.getChildrenWithFlags(Flags.FUEL) if fuels: for fuel in fuels: fuel.adjustMassEnrichment(newEnrich) else: # no fuel in this block tU = self.getNumberDensity("U235") + self.getNumberDensity("U238") if tU: self.setNumberDensity("U235", tU * newEnrich) self.setNumberDensity("U238", tU * (1.0 - newEnrich)) self.completeInitialLoading() def getLocation(self): """Return a string representation of the location. .. impl:: Location of a block is retrievable. 
:id: I_ARMI_BLOCK_POSI0 :implements: R_ARMI_BLOCK_POSI If the block does not have its ``core`` attribute set, if the block's parent does not have a ``spatialGrid`` attribute, or if the block does not have its location defined by its ``spatialLocator`` attribute, return a string indicating that it is outside of the core. Otherwise, use the :py:class:`~armi.reactor.grids.Grid.getLabel` static method to convert the block's indices into a string like "XXX-YYY-ZZZ". For hexagonal geometry, "XXX" is the zero-padded hexagonal core ring, "YYY" is the zero-padded position in that ring, and "ZZZ" is the zero-padded block axial index from the bottom of the core. """ if self.core and self.parent.spatialGrid and self.spatialLocator: return self.core.spatialGrid.getLabel(self.spatialLocator.getCompleteIndices()) else: return "ExCore" def coords(self): """ Returns the coordinates of the block. .. impl:: Coordinates of a block are queryable. :id: I_ARMI_BLOCK_POSI1 :implements: R_ARMI_BLOCK_POSI Calls to the :py:meth:`~armi.reactor.grids.locations.IndexLocation.getGlobalCoordinates` method of the block's ``spatialLocator`` attribute, which recursively calls itself on all parents of the block to get the coordinates of the block's centroid in 3D cartesian space. """ return self.spatialLocator.getGlobalCoordinates() def setBuLimitInfo(self): """Sets burnup limit based on igniter, feed, etc.""" if self.p.buRate == 0: # might be cycle 1 or a non-burning block self.p.timeToLimit = 0.0 else: timeLimit = (self.p.buLimit - self.p.percentBu) / self.p.buRate + self.p.residence self.p.timeToLimit = (timeLimit - self.p.residence) / units.DAYS_PER_YEAR def getMaxArea(self): raise NotImplementedError def getArea(self, cold=False): """ Return the area of a block for a full core or a 1/3 core model. Area is consistent with the area in the model, so if you have a central assembly in a 1/3 symmetric model, this will return 1/3 of the total area of the physical assembly. 
This way, if you take the sum of the areas in the core (or count the atoms in the core, etc.), you will have the proper number after multiplying by the model symmetry. Parameters ---------- cold : bool flag to indicate that cold (as input) dimensions are required Notes ----- This might not work for a 1/6 core model (due to symmetry line issues). Returns ------- area : float (cm^2) See Also -------- armi.reactor.blocks.Block.getMaxArea return the full area of the physical assembly disregarding model symmetry """ # this caching requires that you clear the cache every time you adjust anything including # temperature and dimensions. area = self._getCached("area") if area: return area a = 0.0 for c in self: myArea = c.getArea(cold=cold) a += myArea fullArea = a # correct the fullHexArea by the symmetry factor this factor determines if the hex has been # clipped by symmetry lines area = fullArea / self.getSymmetryFactor() self._setCache("area", area) return area def getVolume(self): """ Return the volume of a block. Returns ------- volume : float Block or component volume in cm^3 """ # use symmetryFactor in case the assembly is sitting on a boundary and needs to be cut in half, etc. vol = sum(c.getVolume() for c in self) return vol / self.getSymmetryFactor() def getSymmetryFactor(self): """ Return a scaling factor due to symmetry on the area of the block or its components. Takes into account assemblies that are bisected or trisected by symmetry lines In 1/3 symmetric cases, the central assembly is 1/3 a full area. If edge assemblies are included in a model, the symmetry factor along both edges for overhanging assemblies should be 2.0. However, ARMI runs in most scenarios with those assemblies on the 120-edge removed, so the symmetry factor should generally be just 1.0. 
See Also -------- armi.reactor.converters.geometryConverter.EdgeAssemblyChanger.scaleParamsRelatedToSymmetry """ return 1.0 def adjustDensity(self, frac, adjustList, returnMass=False): """ Adjusts the total density of each nuclide in adjustList by frac. Parameters ---------- frac : float The fraction of the current density that will remain after this operation adjustList : list List of nuclide names that will be adjusted. returnMass : bool If true, will return mass difference. Returns ------- mass : float Mass difference in grams. If you subtract mass, mass will be negative. If returnMass is False (default), this will always be zero. """ self._updateDetailedNdens(frac, adjustList) mass = 0.0 if returnMass: # do this with a flag to enable faster operation when mass is not needed. volume = self.getVolume() numDensities = self.getNuclideNumberDensities(adjustList) for nuclideName, dens in zip(adjustList, numDensities): if not dens: # don't modify zeros. continue newDens = dens * frac # add a little so components remember self.setNumberDensity(nuclideName, newDens + TRACE_NUMBER_DENSITY) if returnMass: mass += densityTools.getMassInGrams(nuclideName, volume, newDens - dens) return mass def _updateDetailedNdens(self, frac, adjustList): """ Update detailed number density which is used by hi-fi depleters such as ORIGEN. Notes ----- This will perturb all number densities so it is assumed that if one of the active densities is perturbed, all of htem are perturbed. """ if self.p.detailedNDens is None: # BOL assems get expanded to a reference so the first check is needed so it won't call # .blueprints on None since BOL assems don't have a core/r return if any(nuc in self.core.r.blueprints.activeNuclides for nuc in adjustList): self.p.detailedNDens *= frac # Other power densities do not need to be updated as they are calculated in the global # flux interface, which occurs after axial expansion on the interface stack. 
self.p.pdensDecay *= frac def completeInitialLoading(self, bolBlock=None): """ Does some BOL bookkeeping to track things like BOL HM density for burnup tracking. This should run after this block is loaded up at BOC (called from Reactor.initialLoading). The original purpose of this was to get the moles HM at BOC for the moles Pu/moles HM at BOL calculation. This also must be called after modifying something like the smear density or zr fraction in an optimization case. In ECPT cases, a BOL block must be passed or else the burnup will try to get based on a pre-burned value. Parameters ---------- bolBlock : Block, optional A BOL-state block of this block type, required for perturbed equilibrium cases. Must have the same enrichment as this block! Returns ------- hmDens : float The heavy metal number density of this block. See Also -------- Reactor.importGeom depletion._updateBlockParametersAfterDepletion """ if bolBlock is None: bolBlock = self hmDens = bolBlock.getHMDens() # total homogenized heavy metal number density self.p.nHMAtBOL = hmDens self.p.molesHmBOL = self.getHMMoles() self.p.puFrac = self.getPuMoles() / self.p.molesHmBOL if self.p.molesHmBOL > 0.0 else 0.0 try: # non-pinned reactors (or ones without cladding) will not use smear density self.p.smearDensity = self.getSmearDensity() except ValueError: pass self.p.enrichmentBOL = self.getFissileMassEnrich() massHmBOL = 0.0 for child in self: hmMass = child.getHMMass() massHmBOL += hmMass # Components have the following parameters but not every composite will massHmBOL, # molesHmBOL, puFrac, enrichmentBOL if isinstance(child, components.Component): child.p.massHmBOL = hmMass child.p.molesHmBOL = child.getHMMoles() if child.p.molesHmBOL: child.p.enrichmentBOL = child.getFissileMassEnrich() self.p.massHmBOL = massHmBOL return hmDens def setB10VolParam(self, heightHot): """ Set the b.p.initialB10ComponentVol param according to the volume of boron-10 containing components. 
Parameters ---------- heightHot : Boolean True if self.height() is cold height """ # exclude fuel components since they could have slight B10 impurity and # this metric is not relevant for fuel. b10Comps = [c for c in self if c.getNumberDensity("B10") and not c.isFuel()] if not b10Comps: return # get the highest density comp dont want to sum all because some comps might have very small # impurities of boron and adding this volume won't be conservative for captures per cc. b10Comp = sorted(b10Comps, key=lambda x: x.getNumberDensity("B10"))[-1] if len(b10Comps) > 1: runLog.warning( f"More than one boron10-containing component found in {self.name}. Only {b10Comp} " f"will be considered for calculation of initialB10ComponentVol Since adding " f"multiple volumes is not conservative for captures. All compos found {b10Comps}", single=True, ) if self.isFuel(): runLog.warning( f"{self.name} has both fuel and initial b10. b10 volume may not be conserved with axial expansion.", single=True, ) # calc volume of boron components coldArea = b10Comp.getArea(cold=True) coldFactor = b10Comp.getThermalExpansionFactor() if heightHot else 1 coldHeight = self.getHeight() / coldFactor self.p.initialB10ComponentVol = coldArea * coldHeight def replaceBlockWithBlock(self, bReplacement): """ Replace the current block with the replacementBlock. Typically used in the insertion of control rods. 
""" paramsToSkip = set(self.p.paramDefs.inCategory(parameters.Category.retainOnReplacement).names) tempBlock = copy.deepcopy(bReplacement) oldParams = self.p newParams = self.p = tempBlock.p for paramName in paramsToSkip: newParams[paramName] = oldParams[paramName] # update synchronization information self.p.assigned = parameters.SINCE_ANYTHING paramDefs = self.p.paramDefs for paramName in set(newParams.keys()) - paramsToSkip: paramDefs[paramName].assigned = parameters.SINCE_ANYTHING newComponents = tempBlock.getChildren() self.setChildren(newComponents) self.clearCache() @staticmethod def plotFlux(core, fName=None, bList=None, peak=False, adjoint=False, bList2=[]): """A simple pass-through method to a utils plotting function. This is here to preserve the API.""" plotBlockFlux(core, fName, bList, peak, adjoint, bList2) def _updatePitchComponent(self, c): """ Update the component that defines the pitch. Given a Component, compare it to the current component that defines the pitch of the Block. If bigger, replace it. We need different implementations of this to support different logic for determining the form of pitch and the concept of "larger". See Also -------- CartesianBlock._updatePitchComponent """ # Some block types don't have a clearly defined pitch (e.g. ThRZ) if self.PITCH_COMPONENT_TYPE is None: return if not isinstance(c, self.PITCH_COMPONENT_TYPE): return try: componentPitch = c.getDimension(self.PITCH_DIMENSION) except parameters.UnknownParameterError: # some components dont have the appropriate parameter return if componentPitch and (componentPitch > self._pitchDefiningComponent[1]): self._pitchDefiningComponent = (c, componentPitch) def add(self, c): composites.Composite.add(self, c) self.derivedMustUpdate = True self.clearCache() try: mult = int(c.getDimension("mult")) if self.p.percentBuByPin is None or len(self.p.percentBuByPin) < mult: # this may be a little wasteful, but we can fix it later... 
                self.p.percentBuByPin = [0.0] * mult
        except AttributeError:
            # maybe adding a Composite of components rather than a single
            pass
        self._updatePitchComponent(c)

    def removeAll(self, recomputeAreaFractions=True):
        # Remove children one at a time, deferring the (expensive) area-fraction
        # recomputation until all removals are done.
        for c in list(self):
            self.remove(c, recomputeAreaFractions=False)
        if recomputeAreaFractions:  # only do this once
            self.getVolumeFractions()

    def remove(self, c, recomputeAreaFractions=True):
        composites.Composite.remove(self, c)
        self.clearCache()

        # if the removed component was the pitch-defining one, find a new one
        if c is self._pitchDefiningComponent[0]:
            self._pitchDefiningComponent = (None, 0.0)
            pc = self.getLargestComponent(self.PITCH_DIMENSION)
            if pc is not None:
                self._updatePitchComponent(pc)

        if recomputeAreaFractions:
            self.getVolumeFractions()

    def getComponentsThatAreLinkedTo(self, comp, dim):
        """
        Determine which dimensions of which components are linked to a specific dimension of a
        particular component.

        Useful for breaking fuel components up into individuals and making sure anything that was
        linked to the fuel mult (like the cladding mult) stays correct.

        Parameters
        ----------
        comp : Component
            The component that the results are linked to
        dim : str
            The name of the dimension that the results are linked to

        Returns
        -------
        linkedComps : list
            A list of (components,dimName) that are linked to this component, dim.
        """
        linked = []
        for c in self.iterComponents():
            for dimName, val in c.p.items():
                if c.dimensionIsLinked(dimName):
                    # linked dimension values are (component, dimName) pairs
                    requiredComponent = val[0]
                    if requiredComponent is comp and val[1] == dim:
                        linked.append((c, dimName))
        return linked

    def getComponentsInLinkedOrder(self, componentList=None):
        """
        Return a list of the components in order of their linked-dimension dependencies.

        Parameters
        ----------
        components : list, optional
            A list of components to consider. If None, this block's components will be used.

        Notes
        -----
        This means that components other components are linked to come first.
""" if componentList is None: componentList = self.getComponents() cList = collections.deque(componentList) orderedComponents = [] # Loop through the components until there are none left. counter = 0 while cList: candidate = cList.popleft() # take first item in list cleared = True # innocent until proven guilty # loop through all dimensions in this component to determine its dependencies for dimName, val in candidate.p.items(): if candidate.dimensionIsLinked(dimName): # In linked dimensions, val = (component, dimName) requiredComponent = val[0] if requiredComponent not in orderedComponents: # this component depends on one that is not in the ordered list yet. # do not add it. cleared = False break # short circuit. One failed lookup is enough to flag this component as dirty. if cleared: # this candidate is free of dependencies and is ready to be added. orderedComponents.append(candidate) else: cList.append(candidate) counter += 1 if counter > 1000: cList.append(candidate) runLog.error( "The component {0} in {1} contains a dimension that is linked to another component, " " but the required component is not present in the block. They may also be other dependency fails. " "The component dims are {2}".format(cList[0], self, cList[0].p) ) raise RuntimeError("Cannot locate linked component.") return orderedComponents def getSortedComponentsInsideOfComponent(self, component): """ Returns a list of components inside of the given component sorted from innermost to outermost. Parameters ---------- component : object Component to look inside of. Notes ----- If you just want sorted components in this block, use ``sorted(self)``. This will never include any ``DerivedShape`` objects. Since they have a derived area they don't have a well- defined dimension. For now we just ignore them. If they are desired in the future some knowledge of their dimension will be required while they are being derived. 
""" sortedComponents = sorted(self) componentIndex = sortedComponents.index(component) sortedComponents = sortedComponents[:componentIndex] return sortedComponents def getNumPins(self): """Return the number of pins in this block. .. impl:: Get the number of pins in a block. :id: I_ARMI_BLOCK_NPINS :implements: R_ARMI_BLOCK_NPINS Uses some simple criteria to infer the number of pins in the block. For every flag in the module list :py:data:`~armi.reactor.blocks.PIN_COMPONENTS`, loop over all components of that type in the block. If the component is an instance of :py:class:`~armi.reactor.components.basicShapes.Circle`, add its multiplicity to a list, and sum that list over all components with each given flag. After looping over all possibilities, return the maximum value returned from the process above, or if no compatible components were found, return zero. """ nPins = [ sum( [ (int(c.getDimension("mult")) if isinstance(c, basicShapes.Circle) else 0) for c in self.iterComponents(compType) ] ) for compType in PIN_COMPONENTS ] return 0 if not nPins else max(nPins) def mergeWithBlock(self, otherBlock, fraction): """ Turns this block into a mixture of this block and some other block. Parameters ---------- otherBlock : Block The block to mix this block with. The other block will not be modified. fraction : float Fraction of the other block to mix in with this block. If 0.1 is passed in, this block will become 90% what it originally was and 10% what the other block is. Notes ----- This merges on a high level (using number densities). Components will not be merged. This is used e.g. for inserting a control block partially to get a very tight criticality control. In this case, a control block would be merged with a duct block. It is also used when a control rod is specified as a certain length but that length does not fit exactly into a full block. 
""" numDensities = self.getNumberDensities() otherBlockDensities = otherBlock.getNumberDensities() newDensities = {} # Make sure to hit all nuclides in union of blocks for nucName in set(numDensities.keys()).union(otherBlockDensities.keys()): newDensities[nucName] = (1.0 - fraction) * numDensities.get( nucName, 0.0 ) + fraction * otherBlockDensities.get(nucName, 0.0) self.setNumberDensities(newDensities) def getComponentAreaFrac(self, typeSpec): """ Returns the area fraction of the specified component(s) among all components in the block. Parameters ---------- typeSpec : Flags or list of Flags Component types to look up Examples -------- >>> b.getComponentAreaFrac(Flags.CLAD) 0.15 Returns ------- float The area fraction of the component. """ tFrac = sum(f for (c, f) in self.getVolumeFractions() if c.hasFlags(typeSpec)) if tFrac: return tFrac else: runLog.warning( f"No component {typeSpec} exists on {self}, so area fraction is zero.", single=True, label=f"{typeSpec} areaFrac is zero", ) return 0.0 def verifyBlockDims(self): """Optional dimension checking.""" return def getDim(self, typeSpec, dimName): """ Search through blocks in this assembly and find the first component of compName. Then, look on that component for dimName. Parameters ---------- typeSpec : Flags or list of Flags Component name, e.g. Flags.FUEL, Flags.CLAD, Flags.COOLANT, ... dimName : str Dimension name, e.g. 'od', ... Returns ------- dimVal : float The dimension in cm. 
        Examples
        --------
        >>> getDim(Flags.WIRE, "od")
        0.01
        """
        for c in self:
            if c.hasFlags(typeSpec):
                # returns the dimension of the FIRST matching component only
                return c.getDimension(dimName.lower())

        raise ValueError(f"Cannot get Dimension because Flag not found: {typeSpec}")

    def getPinCenterFlatToFlat(self, cold=False):
        """Return the flat-to-flat distance between the centers of opposing pins in the outermost ring."""
        raise NotImplementedError  # no geometry can be assumed

    def getWireWrapCladGap(self, cold=False):
        """Return the gap between the wire wrap and the clad."""
        clad = self.getComponent(Flags.CLAD)
        wire = self.getComponent(Flags.WIRE)
        wireOuterRadius = wire.getBoundingCircleOuterDiameter(cold=cold) / 2.0
        wireInnerRadius = wireOuterRadius - wire.getDimension("od", cold=cold)
        cladOuterRadius = clad.getDimension("od", cold=cold) / 2.0
        return wireInnerRadius - cladOuterRadius

    def getPlenumPin(self):
        """Return the plenum pin if it exists."""
        for c in self.iterComponents(Flags.GAP):
            if self.isPlenumPin(c):
                return c
        return None

    def isPlenumPin(self, c):
        """Return True if the specified component is a plenum pin."""
        # This assumes that anything with the GAP flag will have a valid 'id' dimension.
        cIsCenterGapGap = isinstance(c, components.Component) and c.hasFlags(Flags.GAP) and c.getDimension("id") == 0
        return self.hasFlags([Flags.PLENUM, Flags.ACLP]) and cIsCenterGapGap

    def getPitch(self, returnComp=False):
        """
        Return the center-to-center hex pitch of this block.

        Parameters
        ----------
        returnComp : bool, optional
            If true, will return the component that has the maximum pitch as well

        Returns
        -------
        pitch : float or None
            Hex pitch in cm, if well-defined. If there is no clear component for determining pitch,
            returns None
        component : Component or None
            Component that has the max pitch, if returnComp == True. If no component is found to
            define the pitch, returns None.
        Notes
        -----
        The block stores a reference to the component that defines the pitch, making the assumption
        that while the dimensions can change, the component containing the largest dimension will
        not. This lets us skip the search for largest component. We still need to ask the largest
        component for its current dimension in case its temperature changed, or was otherwise
        modified.

        See Also
        --------
        setPitch : sets pitch
        """
        c, _p = self._pitchDefiningComponent
        if c is None:
            raise ValueError("{} has no valid pitch defining component".format(self))

        # ask component for dimensions, since they could have changed due to temperature
        p = c.getPitchData()
        return (p, c) if returnComp else p

    def hasPinPitch(self):
        """Return True if the block has enough information to calculate pin pitch."""
        return self.spatialGrid is not None

    def getPinPitch(self, cold=False):
        """
        Return sub-block pitch in blocks.

        This assumes the spatial grid is defined by unit steps
        """
        return self.spatialGrid.pitch

    def getDimensions(self, dimension):
        """Return dimensional values of the specified dimension."""
        dimVals = set()
        for c in self:
            try:
                dimVal = c.getDimension(dimension)
            except parameters.ParameterError:
                # component does not carry this dimension; skip it
                continue
            if dimVal is not None:
                dimVals.add(dimVal)
        return dimVals

    def getLargestComponent(self, dimension):
        """
        Find the component with the largest dimension of the specified type.

        Parameters
        ----------
        dimension: str
            The name of the dimension to find the largest component of.

        Returns
        -------
        largestComponent: armi.reactor.components.Component
            The component with the largest dimension of the specified type.
        """
        maxDim = -float("inf")
        largestComponent = None
        for c in self:
            try:
                dimVal = c.getDimension(dimension)
            except parameters.ParameterError:
                continue
            if dimVal is not None and dimVal > maxDim:
                maxDim = dimVal
                largestComponent = c
        return largestComponent

    def setPitch(self, val, updateBolParams=False):
        """
        Sets outer pitch to some new value.
        This sets the settingPitch and actually sets the dimension of the outer hexagon.

        During a load (importGeom), the setDimension doesn't usually do anything except set the
        setting See Issue 034

        But during a actual case modification (e.g. in an optimization sweep, then the dimension has
        to be set as well.

        See Also
        --------
        getPitch : gets the pitch
        """
        c, _p = self._pitchDefiningComponent
        if c:
            c.setDimension("op", val)
            self._pitchDefiningComponent = (c, val)
        else:
            raise RuntimeError("No pitch-defining component on block {}".format(self))

        if updateBolParams:
            self.completeInitialLoading()

    def getMfp(self, gamma=False):
        r"""
        Calculate the mean free path for neutron or gammas in this block.

        .. math::

            <\Sigma> = \frac{\sum_E(\phi_e \Sigma_e dE)}{\sum_E (\phi_e dE)} =
            \frac{\sum_E(\phi_e N \sum_{\text{type}}(\sigma_e) dE}{\sum_E (\phi_e dE))}

        Block macro is the sum of macros of all nuclides.

        phi_g = flux*dE already in multigroup method.

        Returns
        -------
        mfp, mfpAbs, diffusionLength : tuple(float, float float)
        """
        lib = self.core.lib
        flux = self.getMgFlux(gamma=gamma)
        # normalize to peak group flux; used only as a spectrum weight below
        flux = [fi / max(flux) for fi in flux]
        mfpNumerator = np.zeros(len(flux))
        absMfpNumerator = np.zeros(len(flux))
        transportNumerator = np.zeros(len(flux))

        numDensities = self.getNumberDensities()

        # build macroscopic cross sections by summing nDen * micro over all nuclides
        for nucName, nDen in numDensities.items():
            nucMc = self.nuclideBases.byName[nucName].label + self.getMicroSuffix()
            if gamma:
                micros = lib[nucMc].gammaXS
            else:
                micros = lib[nucMc].micros
            total = micros.total[:, 0]  # 0th order
            transport = micros.transport[:, 0]  # 0th order, [bn]
            absorb = sum(micros.getAbsorptionXS())
            mfpNumerator += nDen * total  # [cm]
            absMfpNumerator += nDen * absorb
            transportNumerator += nDen * transport

        denom = sum(flux)
        mfp = 1.0 / (sum(mfpNumerator * flux) / denom)
        sigmaA = sum(absMfpNumerator * flux) / denom
        sigmaTr = sum(transportNumerator * flux) / denom
        diffusionCoeff = 1 / (3.0 * sigmaTr)
        mfpAbs = 1 / sigmaA
        diffusionLength = math.sqrt(diffusionCoeff / sigmaA)
        return mfp, mfpAbs, diffusionLength
def setAreaFractionsReport(self): for c, frac in self.getVolumeFractions(): report.setData( c.getName(), ["{0:10f}".format(c.getArea()), "{0:10f}".format(frac)], report.BLOCK_AREA_FRACS, ) # return the group the information went to return report.ALL[report.BLOCK_AREA_FRACS] def getBlocks(self): """ This method returns all the block(s) included in this block its implemented so that methods could iterate over reactors, assemblies or single blocks without checking to see what the type of the reactor-family object is. """ return [self] def updateComponentDims(self): """ This method updates all the dimensions of the components. Notes ----- This is VERY useful for defining a ThRZ core out of differentialRadialSegements whose dimensions are connected together some of these dimensions are derivative and can be updated by changing dimensions in a Parameter Component or other linked components See Also -------- armi.reactor.components.DifferentialRadialSegment.updateDims armi.reactor.components.Parameters armi.physics.optimize.OptimizationInterface.modifyCase (look up 'ThRZReflectorThickness') """ for c in self.getComponentsInLinkedOrder(): try: c.updateDims() except NotImplementedError: runLog.warning("{0} has no updatedDims method -- skipping".format(c)) def getIntegratedMgFlux(self, adjoint=False, gamma=False): """ Return the volume integrated multigroup neutron tracklength in [n-cm/s]. The first entry is the first energy group (fastest neutrons). Each additional group is the next energy group, as set in the ISOTXS library. Parameters ---------- adjoint : bool, optional Return adjoint flux instead of real gamma : bool, optional Whether to return the neutron flux or the gamma flux. 
        Returns
        -------
        integratedFlux : np.ndarray
            multigroup neutron tracklength in [n-cm/s]
        """
        if adjoint:
            if gamma:
                raise ValueError("Adjoint gamma flux is currently unsupported.")
            integratedFlux = self.p.adjMgFlux
        elif gamma:
            integratedFlux = self.p.mgFluxGamma
        else:
            integratedFlux = self.p.mgFlux

        return np.array(integratedFlux)

    def getLumpedFissionProductCollection(self):
        """
        Get collection of LFP objects. Will work for global or block-level LFP models.

        Returns
        -------
        lfps : LumpedFissionProduct
            lfpName keys , lfp object values

        See Also
        --------
        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct : LFP object
        """
        return composites.ArmiObject.getLumpedFissionProductCollection(self)

    def rotate(self, rad):
        """Function for rotating a block's spatially varying variables by a specified angle (radians).

        Parameters
        ----------
        rad: float
            Number (in radians) specifying the angle of counter clockwise rotation.
        """
        # geometry-specific subclasses (e.g. HexBlock) must provide the implementation
        raise NotImplementedError

    def setAxialExpTargetComp(self, targetComponent):
        """Sets the targetComponent for the axial expansion changer.

        .. impl:: Set the target axial expansion components on a given block.
            :id: I_ARMI_MANUAL_TARG_COMP
            :implements: R_ARMI_MANUAL_TARG_COMP

            Sets the ``axialExpTargetComponent`` parameter on the block to the name of the
            Component which is passed in. This is then used by the
            :py:class:`~armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger` class
            during axial expansion.

            This method is typically called from within
            :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` during the
            process of building a Block from the blueprints.
        Parameters
        ----------
        targetComponent: :py:class:`Component <armi.reactor.components.component.Component>` object
            Component specified to be target component for axial expansion changer
        """
        # only the component's name is stored, not the component itself
        self.p.axialExpTargetComponent = targetComponent.name

    def getPinLocations(self) -> list[grids.IndexLocation]:
        """Produce all the index locations for pins in the block.

        Returns
        -------
        list[grids.IndexLocation]
            Integer locations where pins can be found in the block.

        Notes
        -----
        Only components with ``Flags.CLAD`` are considered to define a pin's location.

        See Also
        --------
        :meth:`getPinCoordinates` - companion for this method.
        """
        items = []
        for clad in self.iterChildrenWithFlags(Flags.CLAD):
            # a multiplicity > 1 clad carries a MultiIndexLocation; flatten it
            if isinstance(clad.spatialLocator, grids.MultiIndexLocation):
                items.extend(clad.spatialLocator)
            else:
                items.append(clad.spatialLocator)
        return items

    def getPinCoordinates(self) -> np.ndarray:
        """
        Compute the local centroid coordinates of any pins in this block.

        The pins must have a CLAD-flagged component for this to work.

        Returns
        -------
        localCoords : numpy.ndarray
            ``(N, 3)`` array of coordinates for pins locations. ``localCoords[i]`` contains a
            triplet of the x, y, z location for pin ``i``. Ordered according to how they are
            listed as children

        See Also
        --------
        :meth:`getPinLocations` - companion for this method
        """
        indices = self.getPinLocations()
        coords = [location.getLocalCoordinates() for location in indices]
        return np.array(coords)

    def getTotalEnergyGenerationConstants(self):
        """
        Get the total energy generation group constants for a block.

        Gives the total energy generation rates when multiplied by the multigroup flux.

        Returns
        -------
        totalEnergyGenConstant: np.ndarray
            Total (fission + capture) energy generation group constants (Joules/cm)
        """
        return self.getFissionEnergyGenerationConstants() + self.getCaptureEnergyGenerationConstants()

    def getFissionEnergyGenerationConstants(self):
        """
        Get the fission energy generation group constants for a block.
        Gives the fission energy generation rates when multiplied by the multigroup flux.

        Returns
        -------
        fissionEnergyGenConstant: np.ndarray
            Energy generation group constants (Joules/cm)

        Raises
        ------
        RuntimeError:
            Reports if a cross section library is not assigned to a reactor.
        """
        if not self.core.lib:
            raise RuntimeError(
                "Cannot compute energy generation group constants without a library. Please ensure a library exists."
            )

        return xsCollections.computeFissionEnergyGenerationConstants(
            self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
        )

    def getCaptureEnergyGenerationConstants(self):
        """
        Get the capture energy generation group constants for a block.

        Gives the capture energy generation rates when multiplied by the multigroup flux.

        Returns
        -------
        captureEnergyGenConstant: np.ndarray
            Energy generation group constants (Joules/cm)

        Raises
        ------
        RuntimeError:
            Reports if a cross section library is not assigned to a reactor.
        """
        if not self.core.lib:
            raise RuntimeError(
                "Cannot compute energy generation group constants without a library. Please ensure a library exists."
            )

        return xsCollections.computeCaptureEnergyGenerationConstants(
            self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
        )

    def getNeutronEnergyDepositionConstants(self):
        """
        Get the neutron energy deposition group constants for a block.

        Returns
        -------
        energyDepConstants: np.ndarray
            Neutron energy generation group constants (in Joules/cm)

        Raises
        ------
        RuntimeError:
            Reports if a cross section library is not assigned to a reactor.
        """
        if not self.core.lib:
            raise RuntimeError(
                "Cannot get neutron energy deposition group constants without "
                "a library. Please ensure a library exists."
            )

        return xsCollections.computeNeutronEnergyDepositionConstants(
            self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
        )

    def getGammaEnergyDepositionConstants(self):
        """
        Get the gamma energy deposition group constants for a block.
        Returns
        -------
        energyDepConstants: np.ndarray
            Energy generation group constants (in Joules/cm)

        Raises
        ------
        RuntimeError:
            Reports if a cross section library is not assigned to a reactor.
        """
        if not self.core.lib:
            raise RuntimeError(
                "Cannot get gamma energy deposition group constants without a library. Please ensure a library exists."
            )

        return xsCollections.computeGammaEnergyDepositionConstants(
            self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
        )

    def getBoronMassEnrich(self):
        """Return B-10 mass fraction."""
        b10 = self.getMass("B10")
        b11 = self.getMass("B11")
        total = b11 + b10
        # blocks without boron report zero enrichment rather than dividing by zero
        if total == 0.0:
            return 0.0
        return b10 / total

    def getUraniumMassEnrich(self):
        """Returns fissile mass fraction of uranium."""
        totalU = self.getMass("U")
        # tolerance guard: treat trace uranium as none
        if totalU < 1e-10:
            return 0.0
        fissileU = self.getMass(["U233", "U235"])
        return fissileU / totalU

    def getInputHeight(self) -> float:
        """Determine the input height from blueprints.

        Returns
        -------
        float
            Height for this block pulled from the blueprints.

        Raises
        ------
        AttributeError
            If no ancestor of this block contains the input blueprints. Blueprints are usually
            stored on the reactor object, which is typically an ancestor of the block (block ->
            assembly -> core -> reactor). However, this may be the case when creating blocks from
            scratch in testing where the entire composite tree may not exist.
        """
        ancestorWithBp = self.getAncestor(lambda o: getattr(o, "blueprints", None) is not None)
        if ancestorWithBp is not None:
            bp = ancestorWithBp.blueprints
            assemDesign = bp.assemDesigns[self.parent.getType()]
            heights = assemDesign.height
            # a block's axial index within its parent assembly selects its input height
            myIndex = self.parent.index(self)
            return heights[myIndex]
        raise AttributeError(f"No ancestor of {self} has blueprints")

    def sort(self):
        """Sort the children on this block.

        If there is a spatial grid, the previous pin indices on the components is now invalid
        because the ordering of :meth:`getPinLocations` has maybe changed since the ordering of
        components has changed.
        Reassign the pin indices via :meth:`assignPinIndices` accordingly.
        """
        super().sort()
        if self.spatialGrid is not None:
            self.assignPinIndices()


================================================
FILE: armi/reactor/blocks/cartesianBlock.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cartesian blocks can be square or more generically rectangular in cross section."""

import math

from armi.reactor import components
from armi.reactor.blocks.block import Block
from armi.reactor.flags import Flags


class CartesianBlock(Block):
    """
    A Cartesian Block is a vertical slice of an Assembly which is laid out on a Cartesian grid.

    That is, a grid that is square or rectangular.
    A Cartesian grid can have an origin that is in the middle of a grid cell:

    +---------+--------+--------+
    |         |        |        |
    |  (-1,1) | (0,1)  | (1,1)  |
    |         |        |        |
    +---------+--------+--------+
    |         |        |        |
    |  (-1,0) | (0,0)  | (1,0)  |
    |         |        |        |
    +---------+--------+--------+
    |         |        |        |
    | (-1,-1) | (0,-1) | (1,-1) |
    |         |        |        |
    +---------+--------+--------+

    Or the grid cells can be aligned so the origin is between the grid cells:

    +---------+---------+--------+--------+
    |         |         |        |        |
    |  (-2,1) |  (-1,1) | (0,1)  | (1,1)  |
    |         |         |        |        |
    +---------+---------+--------+--------+
    |         |         |        |        |
    |  (-2,0) |  (-1,0) | (0,0)  | (1,0)  |
    |         |         |        |        |
    +---------+---------+--------+--------+
    |         |         |        |        |
    | (-2,-1) | (-1,-1) | (0,-1) | (1,-1) |
    |         |         |        |        |
    +---------+---------+--------+--------+
    |         |         |        |        |
    | (-2,-2) | (-1,-2) | (0,-2) | (1,-2) |
    |         |         |        |        |
    +---------+---------+--------+--------+
    """

    # the dimension name and component type that define a Cartesian block's pitch
    PITCH_DIMENSION = "widthOuter"
    PITCH_COMPONENT_TYPE = components.Rectangle

    def getMaxArea(self):
        """Get area of this block if it were totally full."""
        xw, yw = self.getPitch()
        return xw * yw

    def setPitch(self, val, updateBolParams=False):
        raise NotImplementedError("Directly setting the pitch of a cartesian block is currently not supported.")

    def getSymmetryFactor(self):
        """Return a factor between 1 and N where 1/N is how much cut-off by symmetry lines this mesh cell is."""
        if self.core is not None:
            indices = self.spatialLocator.getCompleteIndices()
            if self.core.symmetry.isThroughCenterAssembly:
                if indices[0] == 0 and indices[1] == 0:
                    # central location
                    return 4.0
                elif indices[0] == 0 or indices[1] == 0:
                    # edge location
                    return 2.0
        return 1.0

    def getPinCenterFlatToFlat(self, cold=False):
        """Return the flat-to-flat distance between the centers of opposing pins (corner-2-corner) in the outer ring."""
        clad = self.getComponent(Flags.CLAD)
        nRings = self.numRingsToHoldNumCells(clad.getDimension("mult"))
        pinPitch = self.getPinPitch(cold=cold)
        # diagonal (corner-to-corner) distance between neighboring pin centers
        pinPitchDist = math.sqrt(pinPitch[0] ** 2 + pinPitch[1] ** 2)
        if self.core.symmetry.isThroughCenterAssembly:
            return 2 * (nRings - 1) * pinPitchDist
        else:
            return ((2 * nRings) - 1) * pinPitchDist

    def getNumCellsGivenRings(self, nRings: int):
        """Calculate the number of cells in a Cartesian grid with a given number of rings.

        The logic here is separated out into two scenarios: one for when the origin is inside the
        center grid cell and one where the origin is on the line between grid cells.
        """
        if self.core.symmetry.isThroughCenterAssembly:
            # odd-sided square: rings grow around a single central cell
            return (2 * nRings - 1) ** 2
        else:
            # even-sided square: the origin sits between four cells
            return (2 * nRings) ** 2

    def numRingsToHoldNumCells(self, nCells: int):
        """Calculate the number of rings needed in a Cartesian grid to hold a given number of cells.

        The logic here is separated out into two scenarios: one for when the origin is inside the
        center grid cell and one where the origin is on the line between grid cells.
        """
        if self.core.symmetry.isThroughCenterAssembly:
            return math.ceil((math.sqrt(nCells) + 1) / 2.0)
        else:
            return math.ceil(math.sqrt(nCells) / 2.0)


================================================
FILE: armi/reactor/blocks/hexBlock.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The HexBlock is a vertical slice of a hexagon-shaped assembly.
This is a common geometry in reactor design."""

import copy
import functools
import math
import operator
from typing import Callable, ClassVar, Optional, Tuple, Type

import numpy as np

from armi import runLog
from armi.physics.neutronics import GAMMA, NEUTRON
from armi.reactor import components, geometry, grids
from armi.reactor.blocks.block import Block
from armi.reactor.components.basicShapes import Circle, Hexagon
from armi.reactor.components.complexShapes import Helix
from armi.reactor.flags import Flags
from armi.reactor.parameters import ParamLocation
from armi.utils import hexagon, iterables, units

# type alias: the component class(es) whose dimension defines a hex block's pitch
_PitchDefiningComponent = Optional[Tuple[Type[components.Component], ...]]


class HexBlock(Block):
    """
    Defines a Block shaped like a hexagon.

    .. impl:: ARMI has the ability to create hex shaped blocks.
        :id: I_ARMI_BLOCK_HEX
        :implements: R_ARMI_BLOCK_HEX

        This class defines hexagonal-shaped Blocks. It inherits functionality from the parent
        class, Block, and defines hexagonal-specific methods including, but not limited to,
        querying pin pitch, pin linear power densities, hydraulic diameter, and retrieving inner
        and outer pitch.
    """

    PITCH_COMPONENT_TYPE: ClassVar[_PitchDefiningComponent] = (components.Hexagon,)

    def __init__(self, name, height=1.0):
        Block.__init__(self, name, height)

    def coords(self):
        """
        Returns the coordinates of the block.

        .. impl:: Coordinates of a block are queryable.
            :id: I_ARMI_BLOCK_POSI2
            :implements: R_ARMI_BLOCK_POSI

            Calls to the
            :py:meth:`~armi.reactor.grids.locations.IndexLocation.getGlobalCoordinates` method of
            the block's ``spatialLocator`` attribute, which recursively calls itself on all
            parents of the block to get the coordinates of the block's centroid in 3D cartesian
            space.

            Will additionally adjust the x and y coordinates based on the block parameters
            ``displacementX`` and ``displacementY``.
""" x, y, _z = self.spatialLocator.getGlobalCoordinates() x += self.p.displacementX * 100.0 y += self.p.displacementY * 100.0 return ( round(x, units.FLOAT_DIMENSION_DECIMALS), round(y, units.FLOAT_DIMENSION_DECIMALS), ) def createHomogenizedCopy(self, pinSpatialLocators=False): """ Create a new homogenized copy of a block that is less expensive than a full deepcopy. .. impl:: Block compositions can be homogenized. :id: I_ARMI_BLOCK_HOMOG :implements: R_ARMI_BLOCK_HOMOG This method creates and returns a homogenized representation of itself in the form of a new Block. The homogenization occurs in the following manner. A single Hexagon Component is created and added to the new Block. This Hexagon Component is given the :py:class:`armi.materials.mixture._Mixture` material and a volume averaged temperature (``getAverageTempInC``). The number densities of the original Block are also stored on this new Component (:need:`I_ARMI_CMP_GET_NDENS`). Several parameters from the original block are copied onto the homogenized block (e.g., macros, lumped fission products, burnup group, number of pins, and spatial grid). Notes ----- This can be used to improve performance when a new copy of a reactor needs to be built, but the full detail of the block (including component geometry, material, number density, etc.) is not required for the targeted physics solver being applied to the new reactor model. The main use case is for the uniform mesh converter (UMC). Frequently, a deterministic neutronics solver will require a uniform mesh reactor, which is produced by the UMC. Many deterministic solvers for fast spectrum reactors will also treat the individual blocks as homogenized mixtures. 
        Since the neutronics solver does not need to know about the geometric and material details
        of the individual child components within a block, we can save significant effort while
        building the uniform mesh reactor with the UMC by omitting this detailed data and only
        providing the necessary level of detail for the uniform mesh reactor: number densities on
        each block.

        Individual components within a block can have different temperatures, and this can affect
        cross sections. This temperature variation is captured by the lattice physics module. As
        long as temperature distribution is correctly captured during cross section generation, it
        does not need to be transferred to the neutronics solver directly through this copy
        operation. If you make a new block, you must add it to an assembly and a reactor.

        Returns
        -------
        b : A homogenized block containing a single Hexagon Component that contains an average
            temperature and the number densities from the original block.

        See Also
        --------
        armi.reactor.converters.uniformMesh.UniformMeshGeometryConverter.makeAssemWithUniformMesh
        """
        # Build a bare block of the same class/type; contents are replaced below.
        b = self.__class__(self.getName(), height=self.getHeight())
        b.setType(self.getType(), self.p.flags)

        # assign macros and LFP
        b.macros = self.macros
        b._lumpedFissionProducts = self._lumpedFissionProducts
        b.p.envGroup = self.p.envGroup

        # One Hexagon of "_Mixture" material at the volume-averaged temperature replaces all
        # detailed child components; the pitch comes from the pitch-defining component.
        hexComponent = Hexagon(
            "homogenizedHex",
            "_Mixture",
            self.getAverageTempInC(),
            self.getAverageTempInC(),
            self._pitchDefiningComponent[1],
        )
        # Carry the full block composition onto the single homogenized component.
        hexComponent.setNumberDensities(self.getNumberDensities())
        b.add(hexComponent)
        b.p.nPins = self.p.nPins

        if pinSpatialLocators:
            # create a null component with cladding flags and spatialLocator from source block's
            # clad components in case pin locations need to be known for physics solver
            if self.hasComponents(Flags.CLAD):
                cladComponents = self.getComponents(Flags.CLAD)
                for i, clad in enumerate(cladComponents):
                    # Zero-diameter "Void" circle: contributes no material, only pin positions.
                    pinComponent = Circle(
                        f"voidPin{i}",
                        "Void",
                        self.getAverageTempInC(),
                        self.getAverageTempInC(),
                        0.0,
                    )
pinComponent.setType("pin", Flags.CLAD) pinComponent.spatialLocator = copy.deepcopy(clad.spatialLocator) if isinstance(pinComponent.spatialLocator, grids.MultiIndexLocation): for i1, i2 in zip(list(pinComponent.spatialLocator), list(clad.spatialLocator)): i1.associate(i2.grid) pinComponent.setDimension("mult", clad.getDimension("mult")) b.add(pinComponent) if self.spatialGrid is not None: b.spatialGrid = self.spatialGrid return b def getMaxArea(self): """Compute the max area of this block if it was totally full.""" pitch = self.getPitch() if not pitch: return 0.0 return hexagon.area(pitch) def getDuctIP(self): """Returns the duct IP dimension.""" duct = self.getComponent(Flags.DUCT, exact=True) return duct.getDimension("ip") def getDuctOP(self): """Returns the duct OP dimension.""" duct = self.getComponent(Flags.DUCT, exact=True) return duct.getDimension("op") def setPinPowers(self, powers, powerKeySuffix=""): """ Updates the pin linear power densities of this block. The linear densities are represented by the ``linPowByPin`` parameter. It is expected that the ordering of ``powers`` is consistent with :meth:`getPinLocations`. That helps ensure alignment with component-level look ups like :meth:`~armi.reactor.components.Circle.getPinIndices`. The ``linPowByPin`` parameter can be directly assigned to instead of using this method if the multiplicity of the pins in the block is equal to the number of pins in the block. Parameters ---------- powers : list of floats, required The block-level pin linear power densities. ``powers[i]`` represents the average linear power density of pin ``i`` location at ``self.getPinLocations()[i]``. The units of linear power density is watts/cm (i.e., watts produced per cm of pin length). powerKeySuffix: str, optional Must be either an empty string, :py:const:`NEUTRON <armi.physics.neutronics.const.NEUTRON>`, or :py:const:`GAMMA <armi.physics.neutronics.const.GAMMA>`. Defaults to empty string. 
""" numPins = self.getNumPins() if not numPins or numPins != len(powers): raise ValueError( f"Invalid power data for {self} with {numPins} pins. Got {len(powers)} entries in powers: {powers}" ) powerKey = f"linPowByPin{powerKeySuffix}" self.p[powerKey] = powers # If using the *powerKeySuffix* parameter, we also need to set total power, which is sum of neutron and gamma # powers. We assume that a solo gamma calculation to set total power does not make sense. if powerKeySuffix: if powerKeySuffix == GAMMA: if self.p[f"linPowByPin{NEUTRON}"] is None: msg = f"Neutron power has not been set yet. Cannot set total power for {self}." raise UnboundLocalError(msg) self.p.linPowByPin = self.p[f"linPowByPin{NEUTRON}"] + self.p[powerKey] else: self.p.linPowByPin = self.p[powerKey] def rotate(self, rad: float): """ Rotates a block's spatially varying parameters by a specified angle in the counter-clockwise direction. The parameters must have a ParamLocation of either CORNERS or EDGES and must be a Python list of length 6 in order to be eligible for rotation; all parameters that do not meet these two criteria are not rotated. .. impl:: Rotating a hex block updates parameters on the boundary, the orientation parameter, and the spatial coordinates on contained objects. :id: I_ARMI_ROTATE_HEX_BLOCK :implements: R_ARMI_ROTATE_HEX This method rotates a block on a hexagonal grid, conserving the 60-degree symmetry of the grid. It first determines how many rotations the block will undergo based on the 60-degree hex grid. Then it uses that "rotation number" to do a few things: reset the orientation parameter, rotate the children, and rotate the boundary parameters. It also sets the "displacement in X" and "displacement in Y" parameters. Parameters ---------- rad: float, required Angle of counter-clockwise rotation in units of radians. Rotations must be in 60-degree increments (i.e., PI/3, 2 * PI/3, PI, 4 * PI/3, 5 * PI/3, and 2 * PI). 
""" rotNum = round((rad % (2 * math.pi)) / math.radians(60)) self._rotateChildLocations(rad, rotNum) if self.p.orientation is None: self.p.orientation = np.array([0.0, 0.0, 0.0]) self.p.orientation[2] += rotNum * 60.0 self._rotateBoundaryParameters(rotNum) self._rotateDisplacement(rad) def _rotateChildLocations(self, radians: float, rotNum: int): """Update spatial locators for children.""" if self.spatialGrid is None: return locationRotator = functools.partial(self.spatialGrid.rotateIndex, rotations=rotNum) rotationMatrix = np.array([[math.cos(radians), -math.sin(radians)], [math.sin(radians), math.cos(radians)]]) for c in self: if isinstance(c.spatialLocator, grids.MultiIndexLocation): newLocations = list(map(locationRotator, c.spatialLocator)) c.spatialLocator = grids.MultiIndexLocation(self.spatialGrid) c.spatialLocator.extend(newLocations) elif isinstance(c.spatialLocator, grids.CoordinateLocation): oldCoords = c.spatialLocator.getLocalCoordinates() newXY = rotationMatrix.dot(oldCoords[:2]) newLocation = grids.CoordinateLocation(newXY[0], newXY[1], oldCoords[2], self.spatialGrid) c.spatialLocator = newLocation elif isinstance(c.spatialLocator, grids.IndexLocation): c.spatialLocator = locationRotator(c.spatialLocator) elif c.spatialLocator is not None: msg = f"{c} on {self} has an invalid spatial locator for rotation: {c.spatialLocator}" runLog.error(msg) raise TypeError(msg) def _rotateBoundaryParameters(self, rotNum: int): """Rotate any parameters defined on the corners or edge of bounding hexagon. Parameters ---------- rotNum : int Rotation number between zero and five, inclusive, specifying how many rotations have taken place. 
""" names = self.p.paramDefs.atLocation(ParamLocation.CORNERS).names names += self.p.paramDefs.atLocation(ParamLocation.EDGES).names for name in names: original = self.p[name] if isinstance(original, (list, np.ndarray)): if len(original) == 6: # Rotate by making the -rotNum item be first self.p[name] = iterables.pivot(original, -rotNum) elif len(original) == 0: # Hasn't been defined yet, no warning needed. pass else: msg = ( "No rotation method defined for spatial parameters that aren't defined " f"once per hex edge/corner. No rotation performed on {name}" ) runLog.warning(msg) elif isinstance(original, (int, float)): # this is a scalar and there shouldn't be any rotation. pass elif original is None: # param is not set yet. no rotations as well. pass else: raise TypeError( f"b.rotate() method received unexpected data type for {name} on block {self}\n" + f"expected list, np.ndarray, int, or float. received {original}" ) def _rotateDisplacement(self, rad: float): # This specifically uses the .get() functionality to avoid an error if this parameter does not exist. dispx = self.p.get("displacementX") dispy = self.p.get("displacementY") if (dispx is not None) and (dispy is not None): self.p.displacementX = dispx * math.cos(rad) - dispy * math.sin(rad) self.p.displacementY = dispx * math.sin(rad) + dispy * math.cos(rad) def verifyBlockDims(self): """Perform some checks on this type of block before it is assembled.""" try: wireComp = self.getComponent(Flags.WIRE, quiet=True) # Quiet because None case is checked for below ductComps = self.getComponents(Flags.DUCT) cladComp = self.getComponent(Flags.CLAD, quiet=True) # Quiet because None case is checked for below except ValueError: # there are probably more that one clad/wire, so we really dont know what this block looks like runLog.info(f"Block design {self} is too complicated to verify dimensions. 
Make sure they are correct!") return # check wire wrap in contact with clad if cladComp is not None and wireComp is not None: wwCladGap = self.getWireWrapCladGap(cold=True) if round(wwCladGap, 6) != 0.0: runLog.warning( "The gap between wire wrap and clad in block {} was {} cm. Expected 0.0.".format(self, wwCladGap), single=True, ) # check clad duct overlap pinToDuctGap = self.getPinToDuctGap(cold=True) # Allow for some tolerance; user input precision may lead to slight negative gaps if pinToDuctGap is not None and pinToDuctGap < -0.005: raise ValueError( "Gap between pins and duct is {0:.4f} cm in {1}. Make more room.".format(pinToDuctGap, self) ) elif pinToDuctGap is None: # only produce a warning if pin or clad are found, but not all of pin, clad and duct. We may need to tune # this logic a bit ductComp = next(iter(ductComps), None) if (cladComp is not None or wireComp is not None) and any( [c is None for c in (wireComp, cladComp, ductComp)] ): runLog.warning("Some component was missing in {} so pin-to-duct gap not calculated".format(self)) def getPinToDuctGap(self, cold=False): """ Returns the distance in cm between the outer most pin and the duct in a block. Parameters ---------- cold : boolean Determines whether the results should be cold or hot dimensions. Returns ------- pinToDuctGap : float Returns the diameteral gap between the outer most pins in a hex pack to the duct inner face to face in cm. 
""" wire = self.getComponent(Flags.WIRE, quiet=True) # Quiet because None case is checked for below ducts = sorted(self.getChildrenWithFlags(Flags.DUCT)) duct = None if any(ducts): duct = ducts[0] if not isinstance(duct, components.Hexagon): # getPinCenterFlatToFlat only works for hexes # inner most duct might be circle or some other shape duct = None elif isinstance(duct, components.HoledHexagon): # has no ip and is circular on inside so following # code will not work duct = None clad = self.getComponent(Flags.CLAD, quiet=True) # Quiet because None case is checked for below if any(c is None for c in (duct, wire, clad)): return None # NOTE: If nRings was a None, this could be for a non-hex packed fuel assembly see thermal hydraulic design # basis for description of equation pinCenterFlatToFlat = self.getPinCenterFlatToFlat(cold=cold) pinOuterFlatToFlat = ( pinCenterFlatToFlat + clad.getDimension("od", cold=cold) + 2.0 * wire.getDimension("od", cold=cold) ) ductMarginToContact = duct.getDimension("ip", cold=cold) - pinOuterFlatToFlat pinToDuctGap = ductMarginToContact / 2.0 return pinToDuctGap def getRotationNum(self) -> int: """Get index 0 through 5 indicating number of rotations counterclockwise around the z-axis.""" # assume rotation only in Z return np.rint(self.p.orientation[2] / 360.0 * 6) % 6 def setRotationNum(self, rotNum: int): """ Set orientation based on a number 0 through 5 indicating number of rotations counterclockwise around the z-axis. """ self.p.orientation[2] = 60.0 * rotNum def getSymmetryFactor(self): """ Return a factor between 1 and N where 1/N is how much cut-off by symmetry lines this mesh cell is. Reactor-level meshes have symmetry information so we have a reactor for this to work. That is why it is not implemented on the grid/locator level. When edge-assemblies are included on both edges (i.e. MCNP or DIF3D-FD 1/3-symmetric cases), the edge assemblies have symmetry factors of 2.0. 
        Otherwise (DIF3D-nodal) there's a full assembly on the bottom edge (overhanging) and no
        assembly at the top edge so the ones at the bottom are considered full (symmetryFactor=1).

        If this block is not in any grid at all, then there can be no symmetry so return 1.
        """
        try:
            symmetry = self.parent.spatialLocator.grid.symmetry
        except Exception:
            # Broad catch is deliberate: any missing parent/locator/grid means "no symmetry info".
            return 1.0
        if symmetry.domain == geometry.DomainType.THIRD_CORE and symmetry.boundary == geometry.BoundaryType.PERIODIC:
            indices = self.spatialLocator.getCompleteIndices()
            if indices[0] == 0 and indices[1] == 0:
                # central location
                return 3.0
            else:
                symmetryLine = self.core.spatialGrid.overlapsWhichSymmetryLine(indices)
                # Detect if upper edge assemblies are included. Doing this is the only way to know definitively whether
                # or not the edge assemblies are half-assems or full. Seeing the first one is the easiest way to detect
                # them. Check it last in the and statement so we don't waste time doing it.
                upperEdgeLoc = self.core.spatialGrid[-1, 2, 0]
                if symmetryLine in [
                    grids.BOUNDARY_0_DEGREES,
                    grids.BOUNDARY_120_DEGREES,
                ] and bool(self.core.childrenByLocator.get(upperEdgeLoc)):
                    return 2.0
        return 1.0

    def autoCreateSpatialGrids(self, systemSpatialGrid=None):
        """
        Given a block without a spatialGrid, create a spatialGrid and give its children the
        corresponding spatialLocators (if it is a simple block).

        In this case, a simple block would be one that has either multiplicity of components equal
        to 1 or N but no other multiplicities. Also, this should only happen when N fits exactly
        into a given number of hex rings. Otherwise, do not create a grid for this block.

        Parameters
        ----------
        systemSpatialGrid : Grid, optional
            Spatial Grid of the system-level parent of this Assembly that contains this Block.

        Notes
        -----
        When a hex grid has another hex grid nested inside it, the nested grid has the opposite
        orientation (corners vs flats up). This method takes care of that.
        If components inside this block are multiplicity 1, they get a single locator at the
        center of the grid cell. If the multiplicity is greater than 1, all the components are
        added to a multiIndexLocation on the hex grid.

        Raises
        ------
        ValueError
            If the multiplicities of the block are not only 1 or N or if generated ringNumber
            leads to more positions than necessary.
        """
        # not necessary
        if self.spatialGrid is not None:
            return

        # Check multiplicities
        mults = {c.getDimension("mult") for c in self.iterComponents()}

        # Do some validation: Should we try to create a spatial grid?
        multz = {float(m) for m in mults}
        if len(multz) == 1 and 1.0 in multz:
            runLog.extra(
                f"Block {self.p.type} does not need a spatial grid: multiplicities are all 1.",
                single=True,
            )
            return
        elif len(multz) != 2 or 1.0 not in multz:
            # Only the {1, N} pattern is simple enough to auto-grid.
            runLog.extra(
                f"Could not create a spatialGrid for block {self.p.type}, multiplicities are not {{1, N}} "
                f"they are {mults}",
                single=True,
            )
            return

        # build the grid, from pitch and orientation
        if isinstance(systemSpatialGrid, grids.HexGrid):
            # Nested hex grids flip orientation (corners-up vs flats-up) relative to the parent.
            cornersUp = not systemSpatialGrid.cornersUp
        else:
            cornersUp = False

        grid = grids.HexGrid.fromPitch(
            self.getPinPitch(cold=True),
            numRings=0,
            armiObject=self,
            cornersUp=cornersUp,
        )

        # N must fill whole hex rings exactly; otherwise the layout is ambiguous.
        ringNumber = hexagon.numRingsToHoldNumCells(self.getNumPins())
        numLocations = 0
        for ring in range(ringNumber):
            numLocations = numLocations + hexagon.numPositionsInRing(ring + 1)

        if numLocations != self.getNumPins():
            raise ValueError(
                "Cannot create spatialGrid, number of locations in rings {} not equal to pin number {}".format(
                    numLocations, self.getNumPins()
                )
            )

        # set the spatial position of the sub-block components
        spatialLocators = grids.MultiIndexLocation(grid=grid)
        for ring in range(ringNumber):
            for pos in range(grid.getPositionsInRing(ring + 1)):
                i, j = grid.getIndicesFromRingAndPos(ring + 1, pos + 1)
                spatialLocators.append(grid[i, j, 0])

        # finally, fill the spatial grid, and put the sub-block components on it
        if self.spatialGrid is None:
            self.spatialGrid = grid
            for c in self:
                if c.getDimension("mult") > 1:
                    # All N copies of this component share one multi-index locator.
                    c.spatialLocator = spatialLocators
                elif c.getDimension("mult") == 1:
                    # Singletons sit at the center of the grid cell.
                    c.spatialLocator = grids.CoordinateLocation(0.0, 0.0, 0.0, grid)

    def assignPinIndices(self):
        """Assign pin indices for pin components on the block."""
        if self.spatialGrid is None:
            return

        locations = self.getPinLocations()
        if not locations:
            return

        # Clear out any previous values. If your block is built with one ordering
        # and then sorted, things that used to have pin indices may now have invalid
        # pin indices. Wipe them out just to be safe
        for c in self:
            c.p.pinIndices = None

        ijGetter = operator.attrgetter("i", "j")
        # Reference ordering: pin i lives at allIJ[i].
        allIJ: tuple[tuple[int, int]] = tuple(map(ijGetter, locations))

        # Flags for components that we want to set this parameter
        # Usually things are linked to one of these "important" flags, like
        # a cladding component having linked dimensions to a fuel component
        primaryFlags = (Flags.FUEL, Flags.CONTROL, Flags.SHIELD)
        withPinIndices: list[components.Component] = []
        for c in self.iterChildrenWithFlags(primaryFlags):
            if self._setPinIndices(c, ijGetter, allIJ):
                withPinIndices.append(c)

        # Iterate over every other thing on the grid and make sure
        # 1) it share a lattice site with something that has pin indices, or
        # 2) it itself declares the pin indices
        for c in self:
            if c.p.pinIndices is not None:
                continue
            # Does anything with pin indices share this lattice site?
            if any(other.spatialLocator == c.spatialLocator for other in withPinIndices):
                continue
            if self._setPinIndices(c, ijGetter, allIJ):
                withPinIndices.append(c)

    @staticmethod
    def _setPinIndices(
        c: components.Component, ijGetter: Callable[[grids.IndexLocation], tuple[int, int]], allIJ: tuple[int, int]
    ):
        """Set ``c.p.pinIndices`` from its grid locator(s); return True if indices were assigned."""
        localLocations = c.spatialLocator
        if isinstance(localLocations, grids.MultiIndexLocation):
            localIJ = list(map(ijGetter, localLocations))
        # CoordinateLocations do not live on the grid, by definition
        elif isinstance(localLocations, grids.CoordinateLocation):
            return False
        elif isinstance(localLocations, grids.IndexLocation):
            localIJ = [ijGetter(localLocations)]
        else:
            return False
        # Map each (i, j) site to its position in the block's canonical pin ordering.
        localIndices = list(map(allIJ.index, localIJ))
        c.p.pinIndices = localIndices
        return True

    def getPinCenterFlatToFlat(self, cold=False):
        """Return the flat-to-flat distance between the centers of opposing pins in the outermost ring."""
        clad = self.getComponent(Flags.CLAD)
        nRings = hexagon.numRingsToHoldNumCells(clad.getDimension("mult"))
        pinPitch = self.getPinPitch(cold=cold)
        pinCenterCornerToCorner = 2 * (nRings - 1) * pinPitch
        # Hex geometry: flat-to-flat = sqrt(3)/2 * corner-to-corner.
        pinCenterFlatToFlat = math.sqrt(3.0) / 2.0 * pinCenterCornerToCorner
        return pinCenterFlatToFlat

    def hasPinPitch(self):
        """Return True if the block has enough information to calculate pin pitch."""
        try:
            return (self.getComponent(Flags.CLAD, quiet=True) is not None) and (
                self.getComponent(Flags.WIRE, quiet=True) is not None
            )
        except ValueError:
            # not well defined pitch due to multiple pin and/or wire components
            return False

    def getPinPitch(self, cold=False):
        """
        Get the pin pitch in cm.

        Assumes that the pin pitch is defined entirely by contacting cladding tubes and wire
        wraps. Grid spacers not yet supported.
Parameters ---------- cold : boolean Determines whether the dimensions should be cold or hot Returns ------- pinPitch : float pin pitch in cm """ try: clad = self.getComponent(Flags.CLAD, quiet=True) # Quiet because None case is checked for below wire = self.getComponent(Flags.WIRE, quiet=True) # Quiet because None case is checked for below except ValueError: raise ValueError(f"Block {self} has multiple clad and wire components, so pin pitch is not well-defined.") if wire and clad: return clad.getDimension("od", cold=cold) + wire.getDimension("od", cold=cold) else: raise ValueError(f"Cannot get pin pitch in {self} because it does not have a wire and a clad") def getWettedPerimeter(self): """ Return the total wetted perimeter of the block in cm. Notes ----- Please be aware that this method is specific to Fast Reactors, and probably even Sodium Fast Reactors. This is obviously an awkward design choice, and we hope to improve upon it soon. """ # flags pertaining to hexagon components where the interior of the hexagon is wetted wettedHollowHexagonComponentFlags = ( Flags.DUCT, Flags.GRID_PLATE, Flags.INLET_NOZZLE, Flags.HANDLING_SOCKET, Flags.DUCT | Flags.DEPLETABLE, Flags.GRID_PLATE | Flags.DEPLETABLE, Flags.INLET_NOZZLE | Flags.DEPLETABLE, Flags.HANDLING_SOCKET | Flags.DEPLETABLE, ) # flags pertaining to circular pin components where the exterior of the circle is wetted wettedPinComponentFlags = ( Flags.CLAD, Flags.WIRE, ) # flags pertaining to components where both the interior and exterior are wetted wettedHollowComponentFlags = ( Flags.DUCT | Flags.INNER, Flags.DUCT | Flags.INNER | Flags.DEPLETABLE, ) # obtain all wetted components based on type wettedHollowHexagonComponents = [] for flag in wettedHollowHexagonComponentFlags: c = self.getComponent(flag, exact=True) wettedHollowHexagonComponents.append(c) if c else None wettedPinComponents = [] for flag in wettedPinComponentFlags: comps = self.getComponents(flag) wettedPinComponents.extend(comps) 
wettedHollowCircleComponents = [] wettedHollowHexComponents = [] for flag in wettedHollowComponentFlags: c = self.getComponent(flag, exact=True) if isinstance(c, Hexagon): wettedHollowHexComponents.append(c) if c else None else: wettedHollowCircleComponents.append(c) if c else None # calculate wetted perimeters according to their geometries # hollow hexagon = 6 * ip / sqrt(3) wettedHollowHexagonPerimeter = 0.0 for c in wettedHollowHexagonComponents: wettedHollowHexagonPerimeter += 6 * c.getDimension("ip") / math.sqrt(3) if c else 0.0 # solid circle = NumPins * pi * (Comp Diam + Wire Diam) wettedPinPerimeter = 0.0 for c in wettedPinComponents: correctionFactor = 1.0 if isinstance(c, Helix): # account for the helical wire wrap correctionFactor = np.hypot( 1.0, math.pi * c.getDimension("helixDiameter") / c.getDimension("axialPitch"), ) compWettedPerim = c.getDimension("od") * correctionFactor * c.getDimension("mult") * math.pi wettedPinPerimeter += compWettedPerim # hollow circle = (id + od) * pi wettedHollowCirclePerimeter = 0.0 for c in wettedHollowCircleComponents: wettedHollowCirclePerimeter += c.getDimension("id") + c.getDimension("od") if c else 0.0 wettedHollowCirclePerimeter *= math.pi # hollow hexagon = 6 * (ip + op) / sqrt(3) wettedHollowHexPerimeter = 0.0 for c in wettedHollowHexComponents: wettedHollowHexPerimeter += c.getDimension("ip") + c.getDimension("op") if c else 0.0 wettedHollowHexPerimeter *= 6 / math.sqrt(3) return ( wettedHollowHexagonPerimeter + wettedPinPerimeter + wettedHollowCirclePerimeter + wettedHollowHexPerimeter ) def getFlowArea(self): """Return the total flowing coolant area of the block in cm^2.""" area = self.getComponent(Flags.COOLANT, exact=True).getArea() for c in self.getComponents(Flags.INTERDUCTCOOLANT, exact=True): area += c.getArea() return area def getHydraulicDiameter(self): """ Return the hydraulic diameter in this block in cm. Hydraulic diameter is 4A/P where A is the flow area and P is the wetted perimeter. 
In a hex assembly, the wetted perimeter includes the cladding, the wire wrap, and the inside of the duct. The flow area is the inner area of the duct minus the area of the pins and the wire. """ return 4.0 * self.getFlowArea() / self.getWettedPerimeter() ================================================ FILE: armi/reactor/blocks/thRZBlock.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple base class to help define blocks in a Theta-R-Z geometry.""" from armi.reactor.blocks.block import Block class ThRZBlock(Block): def getMaxArea(self): """Return the area of the Theta-R-Z block if it was totally full.""" raise NotImplementedError("Cannot get max area of a TRZ block. 
Fully specify your geometry.") def radialInner(self): """Return a smallest radius of all the components.""" innerRadii = self.getDimensions("inner_radius") smallestInner = min(innerRadii) if innerRadii else None return smallestInner def radialOuter(self): """Return a largest radius of all the components.""" outerRadii = self.getDimensions("outer_radius") largestOuter = max(outerRadii) if outerRadii else None return largestOuter def thetaInner(self): """Return a smallest theta of all the components.""" innerTheta = self.getDimensions("inner_theta") smallestInner = min(innerTheta) if innerTheta else None return smallestInner def thetaOuter(self): """Return a largest theta of all the components.""" outerTheta = self.getDimensions("outer_theta") largestOuter = max(outerTheta) if outerTheta else None return largestOuter def axialInner(self): """Return the lower z-coordinate.""" return self.getDimensions("inner_axial") def axialOuter(self): """Return the upper z-coordinate.""" return self.getDimensions("outer_axial") def verifyBlockDims(self): """Perform dimension checks related to ThetaRZ blocks.""" return ================================================ FILE: armi/reactor/blueprints/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Blueprints describe the geometric and composition details of the objects in the reactor (e.g. fuel assemblies, control rods, etc.). 
Inputs captured within this blueprints module pertain to major design criteria like custom material
properties or basic structures like the assemblies in use.

This is essentially a wrapper for a yaml loader. The given yaml file is expected to rigidly adhere
to given key:value pairings. See the :ref:`blueprints documentation <bp-input-file>` for more
details.

The expected file structure is::

    nuclide flags:
        AM241: {burn: true, xs: true}
        ...

    custom isotopics: {} # optional

    blocks:
        name:
            component name:
                component dimensions
        ...

    assemblies:
        name:
            specifier: ABC
            blocks: [...]
            height: [...]
            axial mesh points: [...]
            xs types: [...]

            # optional
            myMaterialModification1: [...]
            myMaterialModification2: [...]

    # optionally extra settings (note this is probably going to be a removed feature)
    #    hotChannelFactors: TWRPclad

Examples
--------
>>> design = blueprints.Blueprints.load(self.yamlString)
>>> print(design.gridDesigns)

Notes
-----
The blueprints system was built to enable round trip translations between text representations of
input and objects in the code.
""" import copy import io import math import pathlib import traceback import typing import h5py import ordered_set import yamlize import yamlize.objects from ruamel.yaml import RoundTripLoader from armi import ( context, getPluginManager, getPluginManagerOrFail, migration, plugins, runLog, ) from armi.nucDirectory import nuclideBases from armi.physics.neutronics.settings import CONF_LOADING_FILE from armi.reactor import assemblies from armi.reactor.blueprints import isotopicOptions from armi.reactor.blueprints.assemblyBlueprint import AssemblyKeyedList from armi.reactor.blueprints.blockBlueprint import BlockKeyedList from armi.reactor.blueprints.componentBlueprint import ( ComponentGroups, ComponentKeyedList, ) from armi.reactor.blueprints.gridBlueprint import Grids, Triplet from armi.reactor.blueprints.reactorBlueprint import SystemBlueprint, Systems from armi.reactor.converters import axialExpansionChanger from armi.reactor.flags import Flags from armi.settings.fwSettings.globalSettings import ( CONF_ACCEPTABLE_BLOCK_AREA_ERROR, CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP, CONF_DETAILED_AXIAL_EXPANSION, CONF_INPUT_HEIGHTS_HOT, CONF_NON_UNIFORM_ASSEM_FLAGS, ) from armi.utils import tabulate, textProcessors from armi.utils.customExceptions import InputError context.BLUEPRINTS_IMPORTED = True context.BLUEPRINTS_IMPORT_CONTEXT = "".join(traceback.format_stack()) def loadFromCs(cs, roundTrip=False): """Function to load Blueprints based on supplied ``Settings``.""" from armi.utils import directoryChangers with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False): bpPath = pathlib.Path(cs[CONF_LOADING_FILE]) if bpPath.suffix.lower() in (".h5", ".hdf5"): # This is a case settings from a database so the blueprints are also in the database. 
            try:
                db = h5py.File(bpPath, "r")
                # Blueprints YAML is stored as a string dataset inside the database.
                bpString = db["inputs/blueprints"].asstr()[()]
                stream = io.StringIO(bpString)
                stream = Blueprints.migrate(stream)
                bp = Blueprints.load(stream)
            except KeyError:
                # not all reactors need to be created from blueprints, so they may not exist
                bp = None
        else:
            with open(cs[CONF_LOADING_FILE], "r") as bpYaml:
                # Resolve !include-style markup relative to the blueprints file's directory.
                root = bpPath.parent.absolute()
                bpYaml = textProcessors.resolveMarkupInclusions(bpYaml, root)
                try:
                    bp = Blueprints.load(bpYaml, roundTrip=roundTrip)
                except yamlize.yamlizing_error.YamlizingError as err:
                    if "cross sections" in err.args[0]:
                        # Give the user an actionable hint for this known legacy-input failure.
                        runLog.error(
                            "The loading file {} contains invalid `cross sections` input. "
                            "Please run the `modify` entry point on this case to automatically convert."
                            "".format(cs[CONF_LOADING_FILE])
                        )
                    raise
    return bp


class _BlueprintsPluginCollector(yamlize.objects.ObjectType):
    """
    Simple metaclass for adding yamlize.Attributes from plugins to Blueprints.

    This calls the defineBlueprintsSections() plugin hook to discover new class attributes to add
    before the yamlize code fires off to make the root yamlize.Object. Since yamlize.Object itself
    uses a metaclass to define the attributes to turn into yamlize.Attributes, these need to be
    folded in early.
    """

    def __new__(mcs, name, bases, attrs):
        pm = getPluginManager()
        if pm is None:
            # Plugins are not configured yet; proceed without plugin sections but warn loudly.
            runLog.warning(
                "Blueprints were instantiated before the framework was "
                "configured with plugins. Blueprints cannot be imported before "
                "ARMI has been configured."
            )
        else:
            pluginSections = pm.hook.defineBlueprintsSections()
            for plug in pluginSections:
                for attrName, section, resolver in plug:
                    assert isinstance(section, yamlize.Attribute)
                    if attrName in attrs:
                        # Two plugins (or a plugin and the core) must not claim the same section.
                        raise plugins.PluginError(
                            "There is already a section called '{}' in the reactor blueprints".format(attrName)
                        )
                    attrs[attrName] = section
                    # Resolver is invoked later during construction (see _resolveFunctions).
                    attrs["_resolveFunctions"].append(resolver)

        newType = yamlize.objects.ObjectType.__new__(mcs, name, bases, attrs)

        return newType


class Blueprints(yamlize.Object, metaclass=_BlueprintsPluginCollector):
    """Base Blueprints object representing all the subsections in the input file."""

    nuclideFlags = yamlize.Attribute(key="nuclide flags", type=isotopicOptions.NuclideFlags, default=None)
    customIsotopics = yamlize.Attribute(key="custom isotopics", type=isotopicOptions.CustomIsotopics, default=None)
    blockDesigns = yamlize.Attribute(key="blocks", type=BlockKeyedList, default=None)
    assemDesigns = yamlize.Attribute(key="assemblies", type=AssemblyKeyedList, default=None)
    systemDesigns = yamlize.Attribute(key="systems", type=Systems, default=None)
    gridDesigns = yamlize.Attribute(key="grids", type=Grids, default=None)
    componentDesigns = yamlize.Attribute(key="components", type=ComponentKeyedList, default=None)
    componentGroups = yamlize.Attribute(key="component groups", type=ComponentGroups, default=None)

    # These are used to set up new attributes that come from plugins.
    # Resolver callables contributed by plugins (populated by the
    # _BlueprintsPluginCollector metaclass); each is called in _prepConstruction.
    _resolveFunctions = []

    def __new__(cls):
        # yamlizable does not call __init__, so attributes that are not defined above need to be
        # initialized here
        self = yamlize.Object.__new__(cls)
        self.assemblies = {}
        self._prepped = False
        self._assembliesBySpecifier = {}

        # Better for performance since these are used for lookups
        self.allNuclidesInProblem = ordered_set.OrderedSet()
        self.activeNuclides = ordered_set.OrderedSet()
        self.inertNuclides = ordered_set.OrderedSet()
        self.nucsToForceInXsGen = ordered_set.OrderedSet()
        self.elementsToExpand = []
        return self

    def __init__(self):
        # Yamlize does not call __init__; instead we use Blueprints.load, which creates an
        # instance of a Blueprints object and initializes it with values using setattr.
        self._assembliesBySpecifier = {}
        self._prepped = False
        self.systemDesigns = Systems()
        self.assemDesigns = AssemblyKeyedList()
        self.blockDesigns = BlockKeyedList()
        self.assemblies = {}
        self.grids = Grids()
        self.elementsToExpand = []

    def __repr__(self):
        return f"<{self.__class__.__name__} Assemblies:{len(self.assemDesigns)} Blocks:{len(self.blockDesigns)}>"

    def constructAssem(self, cs, name=None, specifier=None, orientation=0.0):
        """
        Construct a new assembly instance from the assembly designs in this Blueprints object.

        Parameters
        ----------
        cs : Settings
            Used to apply various modeling options when constructing an assembly.
        name : str (optional, and should be exclusive with specifier)
            Name of the assembly to construct. This should match the key that was used to define
            the assembly in the Blueprints YAML file.
        specifier : str (optional, and should be exclusive with name)
            Identifier of the assembly to construct. This should match the identifier that was
            used to define the assembly in the Blueprints YAML file.
        orientation : float (optional, is usually just zero)
            Rotate the Assembly at creation.

        Raises
        ------
        ValueError
            If neither name nor specifier are passed

        Notes
        -----
        There is some possibility for "compiling" the logic with closures to make constructing an
        assembly / block / component faster. At this point it is pretty much irrelevant because we
        are currently just deepcopying already constructed assemblies.

        Currently, this method is backward compatible with other code in ARMI and generates the
        `.assemblies` attribute (the BOL assemblies). Eventually, this should be removed.
        """
        self._prepConstruction(cs)

        if name is not None:
            assem = self.assemblies[name]
        elif specifier is not None:
            assem = self._assembliesBySpecifier[specifier]
        else:
            raise ValueError("Must supply assembly name or specifier to construct")

        a = copy.deepcopy(assem)
        # since a deepcopy has the same assembly numbers and block id's, we need to make it unique
        a.makeUnique()
        if orientation:
            a.rotate(math.radians(orientation))
        return a

    def _prepConstruction(self, cs):
        """
        This method initializes a bunch of information within a Blueprints object such as
        assigning assembly and block type numbers, resolving the nuclides in the problem, and
        pre-populating assemblies.

        Ideally, it would not be necessary at all, but the ``cs`` currently contains a bunch of
        information necessary to create the applicable model. If it were possible, it would be
        terrific to override the Yamlizable.from_yaml method to run this code after the instance
        has been created, but we need additional information in order to build the assemblies
        that is not within the YAML file.

        This method should not be called directly, but it is used in testing.
        """
        if not self._prepped:
            self._assignTypeNums()
            # plugin-provided resolvers run before nuclide resolution so their
            # sections are fully populated first
            for func in self._resolveFunctions:
                func(self, cs)
            self._resolveNuclides(cs)
            self._assembliesBySpecifier.clear()
            self.assemblies.clear()
            for aDesign in self.assemDesigns:
                a = aDesign.construct(cs, self)
                self._assembliesBySpecifier[aDesign.specifier] = a
                self.assemblies[aDesign.name] = a

            runLog.header("=========== Verifying Assembly Configurations ===========")
            self._checkAssemblyAreaConsistency(cs)

            if not cs[CONF_DETAILED_AXIAL_EXPANSION]:
                # this is required to set up assemblies so they know how to snap to the reference
                # mesh. They won't know the mesh to conform to otherwise....
                axialExpansionChanger.makeAssemsAbleToSnapToUniformMesh(
                    self.assemblies.values(), cs[CONF_NON_UNIFORM_ASSEM_FLAGS]
                )

            if not cs[CONF_INPUT_HEIGHTS_HOT]:
                runLog.header("=========== Axially expanding all assemblies from Tinput to Thot ===========")
                # expand axial heights from cold to hot so dims and masses are consistent with
                # specified component hot temperatures.
                assemsToSkip = [Flags.fromStringIgnoreErrors(t) for t in cs[CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP]]
                assemsToExpand = list(
                    a for a in list(self.assemblies.values()) if not any(a.hasFlags(f) for f in assemsToSkip)
                )
                axialExpander = getPluginManagerOrFail().hook.getAxialExpansionChanger()
                if axialExpander is not None:
                    axialExpander.expandColdDimsToHot(
                        assemsToExpand,
                        cs[CONF_DETAILED_AXIAL_EXPANSION],
                    )

            getPluginManagerOrFail().hook.afterConstructionOfAssemblies(assemblies=self.assemblies.values(), cs=cs)

        self._prepped = True

    def _assignTypeNums(self):
        # collect every block design referenced by an assembly design; this happens
        # when directly defining assemblies without a top-level `blocks` section.
        if self.blockDesigns is None:
            self.blockDesigns = BlockKeyedList()
            for aDesign in self.assemDesigns:
                for bDesign in aDesign.blocks:
                    if bDesign not in self.blockDesigns:
                        self.blockDesigns.add(bDesign)

    def _resolveNuclides(self, cs):
        """
        Process elements and determine how to expand them to natural isotopics.

        Also builds meta-data about which nuclides are in the problem.

        This system works by building a dictionary in the ``elementsToExpand`` attribute with
        ``Element`` keys and list of ``NuclideBase`` values.

        The actual expansion of elementals to isotopics occurs during
        :py:meth:`Component construction <armi.reactor.blueprints.componentBlueprint.
        ComponentBlueprint._constructMaterial>`.
        """
        from armi import utils

        actives = set()
        inerts = set()
        nuclideFlags = self.nuclideFlags or isotopicOptions.genDefaultNucFlags()
        nucsToForceInXsGen = set()

        # just expanding flags now. ndense gets expanded in comp blueprints
        self.elementsToExpand = []
        for nucFlag in nuclideFlags:
            # this returns any nuclides that are flagged specifically for expansion by input
            (
                expandedElements,
                undefBurnChainActiveNuclides,
            ) = nucFlag.fileAsActiveOrInert(
                actives,
                inerts,
            )
            self.elementsToExpand.extend(expandedElements)

        inerts -= actives
        self.customIsotopics = self.customIsotopics or isotopicOptions.CustomIsotopics()
        eleKeep, eleExpand = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)

        # Flag all elementals for expansion unless they've been flagged otherwise by
        # user input or automatic lattice/datalib rules.
        for nucBase in nuclideBases.instances:
            isAlreadyIsotopic = not isinstance(nucBase, nuclideBases.NaturalNuclideBase)
            if isAlreadyIsotopic:
                # `elemental` may be a NaturalNuclideBase or a NuclideBase
                # skip all NuclideBases (isotopics)
                continue

            # we now know its an elemental
            elemental = nucBase
            if elemental in eleKeep:
                continue

            if elemental.name in actives:
                currentSet = actives
            elif elemental.name in inerts:
                currentSet = inerts
            else:
                # This was not specified in the nuclide flags at all as burn or xs.
                # If a material with this in its composition is brought in it's nice from a user
                # perspective to allow it.
                # But current behavior is that all nuclides in problem must be declared up front.
                continue

            self.elementsToExpand.append(elemental.element)

            if elemental.name in nuclideFlags and nuclideFlags[elemental.element.symbol].expandTo:
                # user-input expandTo has precedence
                newNuclides = [nuclideBases.byName[nn] for nn in nuclideFlags[elemental.element.symbol].expandTo]
            elif elemental in eleExpand and elemental.element.symbol in nuclideFlags:
                # code-specific expansion required based on code and ENDF
                newNuclides = eleExpand[elemental]
                # Overlay code details onto nuclideFlags for other parts of the code that use them.
                # Also, if this element is not in nuclideFlags at all, we just don't add it.
                nuclideFlags[elemental.element.symbol].expandTo = [nb.name for nb in newNuclides]
            else:
                # expand to all possible natural isotopics
                newNuclides = elemental.element.getNaturalIsotopics()

            # remove the elemental and add the isotopic
            currentSet.remove(elemental.name)
            for nb in newNuclides:
                currentSet.add(nb.name)

        # force everything asked for in xsGen
        nucsToForceInXsGen = ordered_set.OrderedSet(sorted(actives.union(inerts)))

        # add all detailed isotopes in ENDF if requested
        isotopicOptions.autoUpdateNuclideFlags(cs, nuclideFlags, inerts)
        self.nuclideFlags = nuclideFlags

        if self.elementsToExpand:
            runLog.info(
                "Will expand {} elementals to have natural isotopics".format(
                    ", ".join(element.symbol for element in self.elementsToExpand)
                )
            )

        self.activeNuclides = ordered_set.OrderedSet(sorted(actives))
        self.inertNuclides = ordered_set.OrderedSet(sorted(inerts))
        self.allNuclidesInProblem = ordered_set.OrderedSet(sorted(actives.union(inerts)))
        self.nucsToForceInXsGen = ordered_set.OrderedSet(sorted(nucsToForceInXsGen))

        # Inform user which nuclides are truncating the burn chain.
        # NOTE(review): undefBurnChainActiveNuclides holds only the result from the *last*
        # nuclideFlags iteration above — confirm whether accumulation was intended.
        if undefBurnChainActiveNuclides and nuclideBases.burnChainImposed:
            runLog.info(
                tabulate.tabulate(
                    [
                        [
                            "Nuclides truncating the burn-chain:",
                            utils.createFormattedStrWithDelimiter(list(undefBurnChainActiveNuclides)),
                        ]
                    ],
                    tableFmt="plain",
                ),
                single=True,
            )

    def _checkAssemblyAreaConsistency(self, cs):
        # Verify all (non-RZ) assemblies share a common area, and that each block within an
        # assembly matches the first block's area within the configured tolerance.
        references = None
        for a in self.assemblies.values():
            if references is None:
                # first assembly encountered is the reference (assembly, area) pair
                references = (a, a.getArea())
                continue

            assemblyArea = a.getArea()
            if isinstance(a, assemblies.RZAssembly):
                # R-Z assemblies by definition have different areas, so skip the check
                continue

            if abs(references[1] - assemblyArea) > 1e-9:
                runLog.error("REFERENCE COMPARISON ASSEMBLY:")
                # NOTE(review): this prints the first *block* of each assembly
                # (`references[0][0]` / `a[0]`) — confirm that assembly-level
                # printContents was not intended here.
                references[0][0].printContents()
                runLog.error("CURRENT COMPARISON ASSEMBLY:")
                a[0].printContents()
                raise InputError(
                    "Assembly {} has a different area {} than assembly {} {}. Check inputs for accuracy".format(
                        a, assemblyArea, references[0], references[1]
                    )
                )

            blockArea = a[0].getArea()
            for b in a[1:]:
                if abs(b.getArea() - blockArea) / blockArea > cs[CONF_ACCEPTABLE_BLOCK_AREA_ERROR]:
                    runLog.error("REFERENCE COMPARISON BLOCK:")
                    a[0].printContents(includeNuclides=False)
                    runLog.error("CURRENT COMPARISON BLOCK:")
                    b.printContents(includeNuclides=False)

                    for c in b:
                        runLog.error(
                            "{0} area {1} effective area {2}".format(c, c.getArea(), c.getVolume() / b.getHeight())
                        )

                    raise InputError(
                        "Block {} has a different area {} than block {} {}. Check inputs for accuracy".format(
                            b, b.getArea(), a[0], blockArea
                        )
                    )

    @classmethod
    def migrate(cls, inp: typing.TextIO):
        """Given a stream representation of a blueprints file, migrate it.

        Parameters
        ----------
        inp : typing.TextIO
            Input stream to migrate.
        """
        # apply every active blueprints-targeted migration in order, chaining streams
        for migI in migration.ACTIVE_MIGRATIONS:
            if issubclass(migI, migration.base.BlueprintsMigration):
                mig = migI(stream=inp)
                inp = mig.apply()
        return inp

    @classmethod
    def load(cls, stream, roundTrip=False):
        """This method is a wrapper around the `yamlize.Object.load()` method."""
        # With the release of ruamel.yaml 0.19.1, we began getting the following error:
        # AttributeError: 'RoundTripLoader' object has no attribute 'max_depth'
        # Setting that attribute to `None` solved the issue. However, it would be prudent to
        # rework blueprints loading to side step the issue entirely. This occurs because of the
        # way `yamlize` works when it calls `get_single_node`.
        # NOTE(review): the ``roundTrip`` parameter is currently unused here; the
        # RoundTripLoader is always used — confirm against callers.
        RoundTripLoader.max_depth = None
        return super().load(stream, Loader=RoundTripLoader)

    def addDefaultSFP(self):
        """Create a default SFP if it's not in the blueprints."""
        if self.systemDesigns is not None:
            if not any(structure.typ == "sfp" for structure in self.systemDesigns):
                sfp = SystemBlueprint("Spent Fuel Pool", "sfp", Triplet())
                sfp.typ = "sfp"
                self.systemDesigns["Spent Fuel Pool"] = sfp
        else:
            runLog.warning(f"Can't add default SFP to {self}, there are no systemDesigns!")


def migrate(bp: Blueprints, cs):
    """
    Apply migrations to the input structure.

    This is a good place to perform migrations that address changes to the system design
    description (settings, blueprints). We have access both here, so we can even move stuff
    between files.
    """
    from armi.reactor.blueprints import gridBlueprint

    if bp.systemDesigns is None:
        bp.systemDesigns = Systems()
    if bp.gridDesigns is None:
        bp.gridDesigns = gridBlueprint.Grids()

    # refuse to clobber an existing user-defined `core` grid/system
    if "core" in [rd.name for rd in bp.gridDesigns]:
        raise ValueError("Cannot auto-create a 2nd `core` grid. Adjust input.")

    if "core" in [rd.name for rd in bp.systemDesigns]:
        raise ValueError("Cannot auto-create a 2nd `core` grid. Adjust input.")

    bp.systemDesigns["core"] = SystemBlueprint("core", "core", Triplet())
def _configureAssemblyTypes():
    """Build the {block class: assembly class} map from the defineAssemblyTypes plugin hook."""
    assemTypes = dict()
    pm = getPluginManagerOrFail()
    for pluginAssemTypes in pm.hook.defineAssemblyTypes():
        for blockType, assemType in pluginAssemTypes:
            assemTypes[blockType] = assemType

    return assemTypes


class Modifications(yamlize.Map):
    """
    The names of material modifications and lists of the modification values for each block in
    the assembly.
    """

    key_type = yamlize.Typed(str)
    value_type = yamlize.Sequence


class ByComponentModifications(yamlize.Map):
    """The name of a component within the block and an associated Modifications object."""

    key_type = yamlize.Typed(str)
    value_type = Modifications


class MaterialModifications(yamlize.Map):
    """
    A yamlize map for reading and holding material modifications.

    A user may specify material modifications directly as keys/values on this class, in which
    case these material modifications will be blanket applied to the entire block.

    If the user wishes to specify material modifications specific to a component within the
    block, they should use the `by component` attribute, specifying the keys/values underneath
    the name of a specific component in the block.

    .. impl:: User-impact on material definitions.
        :id: I_ARMI_MAT_USER_INPUT0
        :implements: R_ARMI_MAT_USER_INPUT

        Defines a yaml map attribute for the assembly portion of the blueprints (see
        :py:class:`~armi.blueprints.assemblyBlueprint.AssemblyBlueprint`) that allows users to
        specify material attributes as lists corresponding to each axial block in the assembly.
        Two types of specifications can be made:

            1. Key-value pairs can be specified directly, where the key is the name of the
               modification and the value is the list of block values.

            2. The "by component" attribute can be used, in which case the user can specify
               material attributes that are specific to individual components in each block. This
               is enabled through the
               :py:class:`~armi.reactor.blueprints.assemblyBlueprint.ByComponentModifications`
               class, which basically just allows for one additional layer of attributes
               corresponding to the component names.

        These material attributes can be used during the resolution of material classes during
        core instantiation (see
        :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` and
        :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`).
    """

    key_type = yamlize.Typed(str)
    value_type = yamlize.Sequence
    byComponent = yamlize.Attribute(
        key="by component",
        type=ByComponentModifications,
        default=ByComponentModifications(),
    )


class AssemblyBlueprint(yamlize.Object):
    """
    A data container for holding information needed to construct an ARMI assembly.

    This class utilizes ``yamlize`` to enable serialization to and from the blueprints YAML file.

    .. impl:: Create assembly from blueprint file.
        :id: I_ARMI_BP_ASSEM
        :implements: R_ARMI_BP_ASSEM

        Defines a yaml construct that allows the user to specify attributes of an assembly from
        within their blueprints file, including a name, flags, specifier for use in defining a
        core map, a list of blocks, a list of block heights, a list of axial mesh points in each
        block, a list of cross section identifiers for each block, and material options (see
        :need:`I_ARMI_MAT_USER_INPUT0`).

        Relies on the underlying infrastructure from the ``yamlize`` package for reading from
        text files, serialization, and internal storage of the data.

        Is implemented as part of a blueprints file by being imported and used as an attribute
        within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class.

        Includes a ``construct`` method, which instantiates an instance of
        :py:class:`~armi.reactor.assemblies.Assembly` with the characteristics as specified in
        the blueprints.
    """
    name = yamlize.Attribute(type=str)
    flags = yamlize.Attribute(type=str, default=None)
    specifier = yamlize.Attribute(type=str)
    blocks = yamlize.Attribute(type=blockBlueprint.BlockList)
    height = yamlize.Attribute(type=yamlize.FloatList)
    axialMeshPoints = yamlize.Attribute(key="axial mesh points", type=yamlize.IntList)
    radialMeshPoints = yamlize.Attribute(key="radial mesh points", type=int, default=None)
    azimuthalMeshPoints = yamlize.Attribute(key="azimuthal mesh points", type=int, default=None)
    materialModifications = yamlize.Attribute(
        key="material modifications",
        type=MaterialModifications,
        default=MaterialModifications(),
    )
    xsTypes = yamlize.Attribute(key="xs types", type=yamlize.StrList)
    # note: yamlizable does not call an __init__ method, instead it uses __new__ and setattr

    # maps block classes to the Assembly subclass that should contain them (from plugins)
    _assemTypes = _configureAssemblyTypes()

    @classmethod
    def getAssemClass(cls, blocks):
        """
        Get the ARMI ``Assembly`` class for the specified blocks.

        Parameters
        ----------
        blocks : list of Blocks
            Blocks for which to determine appropriate containing Assembly type
        """
        blockClasses = {b.__class__ for b in blocks}
        for bType, aType in cls._assemTypes.items():
            if bType in blockClasses:
                return aType
        # NOTE(review): ``cls.name`` here is the class-level yamlize.Attribute descriptor, not
        # an assembly name string — confirm the intended message formatting.
        raise ValueError('Unsupported block geometries in {}: "{}"'.format(cls.name, blocks))

    def construct(self, cs, blueprint):
        """
        Construct an instance of this specific assembly blueprint.

        Parameters
        ----------
        cs : Settings
            Settings object which containing relevant modeling options.
        blueprint : Blueprint
            Root blueprint object containing relevant modeling options.
        """
        runLog.info("Constructing assembly `{}`".format(self.name))
        self._checkParamConsistency()
        a = self._constructAssembly(cs, blueprint)
        a.calculateZCoords()
        return a

    def _constructAssembly(self, cs, blueprint):
        """Construct the current assembly."""
        blocks = []
        for axialIndex, bDesign in enumerate(self.blocks):
            b = self._createBlock(cs, blueprint, bDesign, axialIndex)
            blocks.append(b)

        # the assembly class is chosen based on the classes of the contained blocks
        assemblyClass = self.getAssemClass(blocks)
        a = assemblyClass(self.name)

        flags = None
        if self.flags is not None:
            flags = Flags.fromString(self.flags)
            a.p.flags = flags

        # set a basic grid with the right number of blocks with bounds to be adjusted.
        a.spatialGrid = grids.AxialGrid.fromNCells(len(blocks))
        a.spatialGrid.armiObject = a

        # init submeshes
        radMeshPoints = self.radialMeshPoints or 1
        a.p.RadMesh = radMeshPoints
        aziMeshPoints = self.azimuthalMeshPoints or 1
        a.p.AziMesh = aziMeshPoints

        # Loop a second time because we needed all the blocks before choosing the assembly class.
        for axialIndex, b in enumerate(blocks):
            b.name = b.makeName(a.p.assemNum, axialIndex)
            a.add(b)

        # Assign values for the parameters if they are defined on the blueprints
        for paramDef in a.p.paramDefs.inCategory(parameters.Category.assignInBlueprints):
            val = getattr(self, paramDef.name)
            if val is not None:
                a.p[paramDef.name] = val

        return a
    @staticmethod
    def _shouldMaterialModiferBeApplied(value) -> bool:
        """Determine if a material modifier entry is applicable.

        Two exceptions:

        1. Modifiers that are empty strings are not applied.
        2. Modifiers that are ``None`` are not applied.

        Parameters
        ----------
        value : object
            Entry in a material modifications array

        Returns
        -------
        bool:
            Result of the check
        """
        return bool(value != "" and value is not None)

    def _createBlock(self, cs, blueprint, bDesign, axialIndex):
        """Create a block based on the block design and the axial index."""
        meshPoints = self.axialMeshPoints[axialIndex]
        height = self.height[axialIndex]
        xsType = self.xsTypes[axialIndex]

        # build a {"byBlock": {...}, <componentName>: {...}} dict holding only the
        # modification values applicable to this axial index
        materialInput = {}

        for key, mod in {
            "byBlock": {**self.materialModifications},
            **self.materialModifications.byComponent,
        }.items():
            materialInput[key] = {
                modName: modList[axialIndex]
                for modName, modList in mod.items()
                if self._shouldMaterialModiferBeApplied(modList[axialIndex])
            }

        b = bDesign.construct(cs, blueprint, axialIndex, meshPoints, height, xsType, materialInput)

        b.completeInitialLoading()

        # set b10 volume cc since its a cold dim param
        b.setB10VolParam(cs[CONF_INPUT_HEIGHTS_HOT])
        return b

    def _checkParamConsistency(self) -> None:
        """Check that the number of block params specified is equal to the number of blocks specified."""
        # general things to check
        paramsToCheck = {
            "mesh points": self.axialMeshPoints,
            "heights": self.height,
            "xs types": self.xsTypes,
        }

        # check by-block mat mods
        for modName, modList in self.materialModifications.items():
            paramName = f"mat mod for {modName}"
            paramsToCheck[paramName] = modList

        # check by-component mat mods
        for comp in self.materialModifications.byComponent.values():
            for modName, modList in comp.items():
                paramName = f"material modifications for {modName}"
                paramsToCheck[paramName] = modList

        # perform the check
        for paramName, blockVals in paramsToCheck.items():
            if len(self.blocks) != len(blockVals):
                msg = (
                    f"Assembly {self.name} had {len(self.blocks)} block(s), but {len(blockVals)} "
                    f"'{paramName}'. These numbers should be equal. Check input for errors."
                )
                runLog.error(msg)
                raise ValueError(msg)


# Expose assembly parameters flagged for blueprint assignment as optional yamlize attributes.
for paramDef in parameters.forType(assemblies.Assembly).inCategory(parameters.Category.assignInBlueprints):
    setattr(
        AssemblyBlueprint,
        paramDef.name,
        yamlize.Attribute(name=paramDef.name, default=None),
    )


class AssemblyKeyedList(yamlize.KeyedList):
    """
    Effectively an OrderedDict of assembly items, keyed on the assembly name.

    This uses yamlize KeyedList for YAML serialization.
    """

    item_type = AssemblyBlueprint
    key_attr = AssemblyBlueprint.name
    heights = yamlize.Attribute(type=yamlize.FloatList, default=None)
    axialMeshPoints = yamlize.Attribute(key="axial mesh points", type=yamlize.IntList, default=None)

    # NOTE: yamlize does not call an __init__ method, instead it uses __new__ and setattr

    @property
    def bySpecifier(self):
        """Used by the reactor to ``_loadComposites`` later, specifiers are two character strings."""
        return {aDesign.specifier: aDesign for aDesign in self}
"""This module defines the ARMI input for a block definition, and code for constructing an ARMI ``Block``.""" import collections from inspect import signature from typing import Iterable, Iterator, Set import yamlize from armi import getPluginManagerOrFail, runLog from armi.materials.material import Material from armi.reactor import blocks, parameters from armi.reactor.blueprints import componentBlueprint from armi.reactor.components.component import Component from armi.reactor.composites import Composite from armi.reactor.converters import blockConverters from armi.reactor.flags import Flags from armi.settings.fwSettings.globalSettings import CONF_INPUT_HEIGHTS_HOT def _configureGeomOptions(): blockTypes = dict() pm = getPluginManagerOrFail() for pluginBlockTypes in pm.hook.defineBlockTypes(): for compType, blockType in pluginBlockTypes: blockTypes[compType] = blockType return blockTypes class BlockBlueprint(yamlize.KeyedList): """Input definition for Block. .. impl:: Create a Block from blueprint file. :id: I_ARMI_BP_BLOCK :implements: R_ARMI_BP_BLOCK Defines a yaml construct that allows the user to specify attributes of a block from within their blueprints file, including a name, flags, a radial grid to specify locations of pins, and the name of a component which drives the axial expansion of the block (see :py:mod:`~armi.reactor.converters.axialExpansionChanger`). In addition, the user may specify key-value pairs to specify the components contained within the block, where the keys are component names and the values are component blueprints (see :py:class:`~armi.reactor.blueprints.ComponentBlueprint.ComponentBlueprint`). Relies on the underlying infrastructure from the ``yamlize`` package for reading from text files, serialization, and internal storage of the data. Is implemented into a blueprints file by being imported and used as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class. 
Includes a ``construct`` method, which instantiates an instance of :py:class:`~armi.reactor.blocks.Block` with the characteristics as specified in the blueprints. """ item_type = componentBlueprint.ComponentBlueprint key_attr = componentBlueprint.ComponentBlueprint.name name = yamlize.Attribute(key="name", type=str) gridName = yamlize.Attribute(key="grid name", type=str, default=None) flags = yamlize.Attribute(type=str, default=None) axialExpTargetComponent = yamlize.Attribute(key="axial expansion target component", type=str, default=None) _geomOptions = _configureGeomOptions() def _getBlockClass(self, outerComponent): """ Get the ARMI ``Block`` class for the specified outerComponent. Parameters ---------- outerComponent : Component Largest component in block. """ for compCls, blockCls in self._geomOptions.items(): if isinstance(outerComponent, compCls): return blockCls raise ValueError( "Block input for {} has outer component {} which is " " not a supported Block geometry subclass. Update geometry." "".format(self.name, outerComponent) ) def construct(self, cs, blueprint, axialIndex, axialMeshPoints, height, xsType, materialInput): """ Construct an ARMI ``Block`` to be placed in an ``Assembly``. Parameters ---------- cs : Settings Settings object for the appropriate simulation. blueprint : Blueprints Blueprints object containing various detailed information, such as nuclides to model axialIndex : int The Axial index this block exists within the parent assembly axialMeshPoints : int number of mesh points for use in the neutronics kernel height : float initial height of the block xsType : str String representing the xsType of this block. materialInput : dict Double-layered dict. Top layer groups the by-block material modifications under the `byBlock` key and the by-component material modifications under the component's name. The inner dict under each key contains material modification names and values. 
    def construct(self, cs, blueprint, axialIndex, axialMeshPoints, height, xsType, materialInput):
        """
        Construct an ARMI ``Block`` to be placed in an ``Assembly``.

        Parameters
        ----------
        cs : Settings
            Settings object for the appropriate simulation.
        blueprint : Blueprints
            Blueprints object containing various detailed information, such as nuclides to model
        axialIndex : int
            The Axial index this block exists within the parent assembly
        axialMeshPoints : int
            number of mesh points for use in the neutronics kernel
        height : float
            initial height of the block
        xsType : str
            String representing the xsType of this block.
        materialInput : dict
            Double-layered dict. Top layer groups the by-block material modifications under the
            `byBlock` key and the by-component material modifications under the component's name.
            The inner dict under each key contains material modification names and values.
        """
        runLog.debug("Constructing block {}".format(self.name))
        components = collections.OrderedDict()
        # build grid before components so you can load
        # the components into the grid.
        gridDesign = self._getGridDesign(blueprint)
        if gridDesign:
            spatialGrid = gridDesign.construct()
        else:
            spatialGrid = None

        self._checkByComponentMaterialInput(materialInput)

        allLatticeIds = set()
        for componentDesign in self:
            filteredMaterialInput, byComponentMatModKeys = self._filterMaterialInput(materialInput, componentDesign)
            c = componentDesign.construct(
                blueprint,
                filteredMaterialInput,
                cs[CONF_INPUT_HEIGHTS_HOT],
            )
            components[c.name] = c

            # check that the mat mods for this component are valid options
            # this will only examine by-component mods, block mods are done later
            if isinstance(c, Component):
                # there are other things like composite groups that don't get
                # material modifications -- skip those
                validMatModOptions = self._getMaterialModsFromBlockChildren(c)
                for key in byComponentMatModKeys:
                    if key not in validMatModOptions:
                        raise ValueError(f"{c} in block {self.name} has invalid material modification: {key}")

            if spatialGrid:
                componentLocators = gridDesign.getMultiLocator(spatialGrid, componentDesign.latticeIDs)
                if componentLocators:
                    # this component is defined in the block grid
                    # We can infer the multiplicity from the grid.
                    # Otherwise it's a component that is in a block
                    # with grids but that's not in the grid itself.
                    c.spatialLocator = componentLocators
                    mult = c.getDimension("mult")
                    if mult and mult != 1.0 and mult != len(c.spatialLocator):
                        raise ValueError(
                            f"For {c} in {self.name} there is a conflicting ``mult`` input ({mult}) "
                            f"and number of lattice positions ({len(c.spatialLocator)}). "
                            "Recommend leaving off ``mult`` input when using grids."
                        )
                    elif not mult or mult == 1.0:
                        # learn mult from grid definition
                        c.setDimension("mult", len(c.spatialLocator))

                idsInGrid = list(gridDesign.gridContents.values())
                if componentDesign.latticeIDs:
                    for latticeID in componentDesign.latticeIDs:
                        allLatticeIds.add(str(latticeID))
                        # the user has given this component latticeIDs. check that
                        # each of the ids appears in the grid, otherwise
                        # their blueprints are probably wrong
                        if len([i for i in idsInGrid if i == str(latticeID)]) == 0:
                            raise ValueError(
                                f"latticeID {latticeID} in block blueprint '{self.name}' is expected "
                                "to be present in the associated block grid. "
                                "Check that the component's latticeIDs align with the block's grid."
                            )

        # for every id in grid, confirm that at least one component had it
        if gridDesign:
            idsInGrid = list(gridDesign.gridContents.values())
            for idInGrid in idsInGrid:
                if str(idInGrid) not in allLatticeIds:
                    raise ValueError(
                        f"ID {idInGrid} in grid {gridDesign.name} is not in any components of block {self.name}. "
                        "All IDs in the grid must appear in at least one component."
                    )

        # check that the block level mat mods use valid options in the same way
        # as we did for the by-component mods above
        validMatModOptions = self._getBlockwiseMaterialModifierOptions(components.values())

        if "byBlock" in materialInput:
            for key in materialInput["byBlock"]:
                if key not in validMatModOptions:
                    raise ValueError(f"Block {self.name} has invalid material modification key: {key}")

        # Resolve linked dims after all components in the block are created
        for c in components.values():
            c.resolveLinkedDims(components)

        # the largest (sorted-last) component determines the Block geometry class
        boundingComp = sorted(components.values())[-1]
        # give a temporary name (will be updated by b.makeName as real blocks populate systems)
        b = self._getBlockClass(boundingComp)(name=f"block-bol-{axialIndex:03d}")

        for paramDef in b.p.paramDefs.inCategory(parameters.Category.assignInBlueprints):
            val = getattr(self, paramDef.name)
            if val is not None:
                b.p[paramDef.name] = val

        flags = None
        if self.flags is not None:
            flags = Flags.fromString(self.flags)

        b.setType(self.name, flags)

        if self.axialExpTargetComponent is not None:
            try:
                b.setAxialExpTargetComp(components[self.axialExpTargetComponent])
            except KeyError as noMatchingComponent:
                raise RuntimeError(
                    f"Block {b} --> axial expansion target component {self.axialExpTargetComponent} "
                    "specified in the blueprints does not match any component names. "
                    "Revise axial expansion target component in blueprints "
                    "to match the name of a component and retry."
                ) from noMatchingComponent

        for c in components.values():
            b.add(c)

        b.p.nPins = b.getNumPins()
        b.p.axMesh = _setBlueprintNumberOfAxialMeshes(axialMeshPoints, cs["axialMeshRefinementFactor"])
        b.p.height = height
        b.p.heightBOL = height  # for fuel performance
        b.p.xsType = xsType
        b.setBuLimitInfo()
        b = self._mergeComponents(b)
        b.verifyBlockDims()
        b.spatialGrid = spatialGrid

        return b

    def _getBlockwiseMaterialModifierOptions(self, children: Iterable[Composite]) -> Set[str]:
        """Collect all the material modifiers that exist on a block."""
        validMatModOptions = set()
        for c in children:
            perChildModifiers = self._getMaterialModsFromBlockChildren(c)
            validMatModOptions.update(perChildModifiers)

        return validMatModOptions

    def _getMaterialModsFromBlockChildren(self, c: Composite) -> Set[str]:
        """Collect all the material modifiers from a child of a block."""
        perChildModifiers = set()
        for material in self._getMaterialsInComposite(c):
            for materialParentClass in material.__class__.__mro__:
                # we must loop over parents as well, since applyInputParams
                # could call to Parent.applyInputParams()
                if issubclass(materialParentClass, Material):
                    perChildModifiers.update(signature(materialParentClass.applyInputParams).parameters.keys())

        # self is a parameter to methods, so it gets picked up here
        # but that's obviously not a real material modifier
        perChildModifiers.discard("self")

        return perChildModifiers

    def _getMaterialsInComposite(self, child: Composite) -> Iterator[Material]:
        """Collect all the materials in a composite."""
        # Leaf node, no need to traverse further down
        if isinstance(child, Component):
            yield child.material
            return
        # Don't apply modifications to other things that could reside
        # in a block e.g., component groups

    def _checkByComponentMaterialInput(self, materialInput):
        # Any by-component key (non-"byBlock") must name a component in this block;
        # an empty modification dict is tolerated.
        for component in materialInput:
            if component != "byBlock":
                if component not in [componentDesign.name for componentDesign in self]:
                    if materialInput[component]:  # ensure it is not empty
                        raise ValueError(
                            f"The component '{component}' used to specify a by-component"
                            f" material modification is not in block '{self.name}'."
                        )

    @staticmethod
    def _filterMaterialInput(materialInput, componentDesign):
        """
        Get the by-block material modifications and those specifically for this component.

        If a material modification is specified both by-block and by-component for a given
        component, the by-component value will be used.
        """
        filteredMaterialInput = {}
        byComponentMatModKeys = set()

        # first add the by-block modifications without question
        if "byBlock" in materialInput:
            for modName, modVal in materialInput["byBlock"].items():
                filteredMaterialInput[modName] = modVal

        # then get the by-component modifications as appropriate
        for component, mod in materialInput.items():
            if component == "byBlock":
                pass  # we already added these
            else:
                # these are by-component mods, first test if the component matches
                # before adding. if component matches, add the modifications,
                # overwriting any by-block modifications of the same type
                if component == componentDesign.name:
                    for modName, modVal in mod.items():
                        byComponentMatModKeys.add(modName)
                        filteredMaterialInput[modName] = modVal

        return filteredMaterialInput, byComponentMatModKeys

    def _getGridDesign(self, blueprint):
        """
        Get the appropriate grid design.

        This happens when a lattice input is provided on the block. Otherwise all components are
        ambiguously defined in the block.
        """
        if self.gridName:
            if self.gridName not in blueprint.gridDesigns:
                raise KeyError(
                    f"Lattice {self.gridName} defined on {self} is not defined in the blueprints `lattices` section."
                )
            return blueprint.gridDesigns[self.gridName]
        return None
The merge was valid at hot " "temperature, but the merged component only has the basic thermal expansion factors " "of the component(s) merged into. Expansion properties or dimensions of non hot " "temperature may not be representative of how the original components would have acted had " "they not been merged. It is recommended that merging happen right before " "a physics calculation using a block converter to avoid this." "".format(solventNamesToMergeInto, b.name), single=True, ) for solventName in solventNamesToMergeInto: soluteNames = [] for c in b: if c.p.mergeWith == solventName: soluteNames.append(c.name) converter = blockConverters.MultipleComponentMerger(b, soluteNames, solventName) b = converter.convert() return b for paramDef in parameters.forType(blocks.Block).inCategory(parameters.Category.assignInBlueprints): setattr( BlockBlueprint, paramDef.name, yamlize.Attribute(name=paramDef.name, default=None), ) def _setBlueprintNumberOfAxialMeshes(meshPoints, factor): """Set the blueprint number of axial mesh based on the axial mesh refinement factor.""" if factor <= 0: raise ValueError(f"A positive axial mesh refinement factor must be provided. A value of {factor} is invalid.") if factor != 1: runLog.important( "An axial mesh refinement factor of {} is applied to blueprint based on setting specification.".format( factor ), single=True, ) return int(meshPoints) * factor class BlockKeyedList(yamlize.KeyedList): """ An OrderedDict of BlockBlueprints keyed on the name. Utilizes yamlize for serialization to and from YAML. This is used within the ``blocks:`` main entry of the blueprints. """ item_type = BlockBlueprint key_attr = BlockBlueprint.name class BlockList(yamlize.Sequence): """ A list of BlockBlueprints keyed on the name. Utilizes yamlize for serialization to and from YAML. This is used to define the ``blocks:`` attribute of the assembly definitions. 
""" item_type = BlockBlueprint ================================================ FILE: armi/reactor/blueprints/componentBlueprint.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module defines the ARMI input for a component definition, and code for constructing an ARMI ``Component``. Special logic is required for handling component links. """ import yamlize from armi import materials, runLog from armi.nucDirectory import nuclideBases from armi.reactor import components, composites from armi.reactor.flags import Flags from armi.utils import densityTools COMPONENT_GROUP_SHAPE = "group" class ComponentDimension(yamlize.Object): """ Dummy object for ensuring well-formed component links are specified within the YAML input. This can be either a number (float or int), or a conformation string (``name.dimension``). """ def __init__(self, value): # note: yamlizable does not call an __init__ method, instead it uses __new__ and setattr self.value = value if isinstance(value, str): if not components.COMPONENT_LINK_REGEX.search(value): raise ValueError(f"Bad component link `{value}`, must be in form `name.dimension`") def __repr__(self): return f"<ComponentDimension value: {self.value}>" @classmethod def from_yaml(cls, loader, node, _rtd=None): """ Override the ``Yamlizable.from_yaml`` to inject custom interpretation of component dimension. 
This allows us to create a new object with either a string or numeric value. """ try: val = loader.construct_object(node) self = ComponentDimension(val) loader.constructed_objects[node] = self return self except ValueError as ve: raise yamlize.YamlizingError(str(ve), node) @classmethod def to_yaml(cls, dumper, self, _rtd=None): """ Override the ``Yamlizable.to_yaml`` to remove the object-like behavior, otherwise we'd end up with a ``{value: ...}`` dictionary. This allows someone to programmatically edit the component dimensions without using the ``ComponentDimension`` class. """ if not isinstance(self, cls): self = cls(self) node = dumper.represent_data(self.value) dumper.represented_objects[self] = node return node def __mul__(self, other): return self.value * other def __add__(self, other): return self.value + other def __div__(self, other): return self.value / other def __sub__(self, other): return self.value - other def __eq__(self, other): return self.value == other def __ne__(self, other): return self.value != other def __gt__(self, other): return self.value > other def __ge__(self, other): return self.value >= other def __lt__(self, other): return self.value < other def __le__(self, other): return self.value <= other def __hash__(self): return id(self) class ComponentBlueprint(yamlize.Object): """ This class defines the inputs necessary to build ARMI component objects. It uses ``yamlize`` to enable serialization to and from YAML. .. impl:: Construct component from blueprint file. :id: I_ARMI_BP_COMP :implements: R_ARMI_BP_COMP Defines a yaml construct that allows the user to specify attributes of a component from within their blueprints file, including a name, flags, shape, material and/or isotopic vector, input temperature, corresponding component dimensions, and ID for placement in a block lattice (see :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`). 
Component dimensions that can be defined for a given component are dependent on the component's ``shape`` attribute, and the dimensions defining each shape can be found in the :py:mod:`~armi.reactor.components` module. Limited validation on the inputs is performed to ensure that the component shape corresponds to a valid shape defined by the ARMI application. Relies on the underlying infrastructure from the ``yamlize`` package for reading from text files, serialization, and internal storage of the data. Is implemented as part of a blueprints file by being imported and used as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class. Can also be used within the :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint` class to enable specification of components directly within the "blocks" portion of the blueprint file. Includes a ``construct`` method, which instantiates an instance of :py:class:`~armi.reactor.components.component.Component` with the characteristics specified in the blueprints (see :need:`I_ARMI_MAT_USER_INPUT1`). """ name = yamlize.Attribute(type=str) flags = yamlize.Attribute(type=str, default=None) @name.validator def name(self, name): """Validate component names.""" if name == "cladding": # many users were mixing cladding and clad and it caused issues downstream where physics plugins checked for # clad. raise ValueError(f"Cannot set ComponentBlueprint.name to {name}. 
Prefer 'clad'.") shape = yamlize.Attribute(type=str) @shape.validator def shape(self, shape): normalizedShape = shape.strip().lower() if normalizedShape not in components.ComponentType.TYPES and normalizedShape != COMPONENT_GROUP_SHAPE: raise ValueError(f"Cannot set ComponentBlueprint.shape to unknown shape: {shape}") material = yamlize.Attribute(type=str, default=None) Tinput = yamlize.Attribute(type=float, default=None) Thot = yamlize.Attribute(type=float, default=None) isotopics = yamlize.Attribute(type=str, default=None) latticeIDs = yamlize.Attribute(type=list, default=None) origin = yamlize.Attribute(type=list, default=None) orientation = yamlize.Attribute(type=str, default=None) mergeWith = yamlize.Attribute(type=str, default=None) area = yamlize.Attribute(type=float, default=None) def construct(self, blueprint, matMods, inputHeightsConsideredHot): """Construct a component or group. .. impl:: User-defined on material alterations are applied here. :id: I_ARMI_MAT_USER_INPUT1 :implements: R_ARMI_MAT_USER_INPUT Allows for user input to impact a component's materials by applying the "material modifications" section of a blueprints file (see :need:`I_ARMI_MAT_USER_INPUT0`) to the material during construction. This takes place during lower calls to ``_conformKwargs()`` and subsequently ``_constructMaterial()``, which operate using the component blueprint and associated material modifications from the component's block. Within ``_constructMaterial()``, the material class is resolved into a material object by calling :py:func:`~armi.materials.resolveMaterialClassByName`. The ``applyInputParams()`` method of that material class is then called, passing in the associated material modifications data, which the material class can then use to modify the isotopics as necessary. Parameters ---------- blueprint : Blueprints Blueprints object containing various detailed information, such as nuclides to model matMods : dict Material modifications to apply to the component. 
inputHeightsConsideredHot : bool See the case setting of the same name. """ runLog.debug(f"Constructing component {self.name}") kwargs = self._conformKwargs(blueprint, matMods) shape = self.shape.lower().strip() if shape == COMPONENT_GROUP_SHAPE: group = blueprint.componentGroups[self.name] constructedObject = composites.Composite(self.name) for groupedComponent in group: componentDesign = blueprint.componentDesigns[groupedComponent.name] component = componentDesign.construct(blueprint, {}, inputHeightsConsideredHot) # override free component multiplicity if it's set based on the group definition component.setDimension("mult", groupedComponent.mult) _setComponentFlags(component, self.flags, blueprint) insertDepletableNuclideKeys(component, blueprint) constructedObject.add(component) else: constructedObject = components.factory(shape, [], kwargs) _setComponentFlags(constructedObject, self.flags, blueprint) insertDepletableNuclideKeys(constructedObject, blueprint) constructedObject.p.theoreticalDensityFrac = constructedObject.material.getTD() self._setComponentCustomDensity( constructedObject, blueprint, matMods, inputHeightsConsideredHot, ) if hasattr(constructedObject, "material") and "Custom" in str(constructedObject.material): if len(constructedObject.material.massFrac) == 0: msg = f"Custom material does not have isotopics: {self}" runLog.error(msg, single=True) raise IOError(msg) return constructedObject def _setComponentCustomDensity(self, comp, blueprint, matMods, inputHeightsConsideredHot): """Apply a custom density to a material with custom isotopics but not a 'custom material'.""" if self.isotopics is None: # No custom isotopics specified return densityFromCustomIsotopic = blueprint.customIsotopics[self.isotopics].density if densityFromCustomIsotopic is None: # Nothing to do return if densityFromCustomIsotopic <= 0: runLog.error( "A zero or negative density was specified in a custom isotopics input. 
This is not permitted, if a 0 " f"density material is needed, use 'Void'. The component is {comp} and the isotopics entry is " f"{self.isotopics}." ) raise ValueError("A zero or negative density was specified in the custom isotopics for a component") elif len(matMods): runLog.warning( f"Custom isotopics and material modifications have both been defined for {self.material} for component" f"{comp}. Please consider carefully if these are in conflict.", single=True, label=f"custom iso + mat mods {self.material} {comp}", ) mat = materials.resolveMaterialClassByName(self.material)() if not isinstance(mat, materials.Custom): # check for some problem cases overSpecs = [k for k in matMods if k.endswith("_frac")] if len(overSpecs): runLog.error( f"Both {overSpecs} and a custom isotopic with density {blueprint.customIsotopics[self.isotopics]} " f"have been specified for material {self.material}. This is an overspecification.", single=True, ) if not mat.density(Tc=self.Tinput) > 0: runLog.error( f"A custom density has been assigned to material '{self.material}', which has no baseline density. " "Only materials with a starting density may be assigned a density. This comes up e.g. if isotopics " "are assigned to 'Void'." ) raise ValueError("Cannot apply custom densities to materials without density.") # Apply a density scaling to account for the temperature change between Tinput and Thot if isinstance(mat, materials.Fluid): densityRatio = densityFromCustomIsotopic / mat.density(Tc=comp.inputTemperatureInC) else: # For solids we need to consider if the input heights are hot or cold, in order to get the density # correct. There may be a better place in the initialization to determine if the block height will be # interpreted as hot dimensions, which would allow us to not have to pass the case settings this far. 
dLL = mat.linearExpansionFactor(Tc=comp.temperatureInC, T0=comp.inputTemperatureInC) if inputHeightsConsideredHot: f = 1.0 / (1 + dLL) ** 2 else: f = 1.0 / (1 + dLL) ** 3 scaledDensity = comp.density() / f densityRatio = densityFromCustomIsotopic / scaledDensity comp.changeNDensByFactor(densityRatio) runLog.important( f"A custom material density was specified in the custom isotopics for non-custom material {mat}. The " f"component density has been altered to {comp.density()} at temperature {comp.temperatureInC} C", single=True, ) def _conformKwargs(self, blueprint, matMods): """This method gets the relevant kwargs to construct the component.""" kwargs = {"mergeWith": self.mergeWith or "", "isotopics": self.isotopics or ""} for attr in self.attributes: # yamlize magic val = attr.get_value(self) if attr.name == "shape" or val == attr.default: continue elif attr.name == "material": # value is a material instance value = self._constructMaterial(blueprint, matMods) elif attr.name == "latticeIDs": # Don't pass latticeIDs on to the component constructor. # They're applied during block construction. continue elif attr.name == "flags": # Don't pass these to the component constructor. These are used to # override the flags derived from the type, if present. continue else: value = attr.get_value(self) # Keep digging until the actual value is found. This is a bit of a hack to get around an issue in # yamlize/ComponentDimension where Dimensions can end up chained. 
while isinstance(value, ComponentDimension): value = value.value kwargs[attr.name] = value return kwargs def _constructMaterial(self, blueprint, matMods): nucsInProblem = blueprint.allNuclidesInProblem # make material with defaults mat = materials.resolveMaterialClassByName(self.material)() if self.isotopics is not None: # Apply custom isotopics before processing input mods so # the input mods have the final word blueprint.customIsotopics.apply(mat, self.isotopics) # add mass fraction custom isotopics info, since some material modifications need to see them e.g. in the base # Material.applyInputParams matMods.update({"customIsotopics": {k: v.massFracs for k, v in blueprint.customIsotopics.items()}}) if len(matMods) > 1: # don't apply if only customIsotopics is in there try: # update material with updated input params from blueprints file. mat.applyInputParams(**matMods) except TypeError as ee: errorMessage = ee.args[0] if "got an unexpected keyword argument" in errorMessage: # This component does not accept material modification inputs of the names passed in # Keep going since the modification could work for another component pass else: raise ValueError( f"Something went wrong in applying the material modifications {matMods} " f"to component {self.name}.\nError message is: \n{errorMessage}." ) expandElementals(mat, blueprint) missing = set(mat.massFrac.keys()).difference(nucsInProblem) if missing: raise ValueError( f"The nuclides {missing} are present in material {mat} by compositions, but are not specified in the " "`nuclide flags` section of the input file. They need to be added, or custom isotopics need to be " "applied." ) return mat def expandElementals(mat, blueprint): """ Expand elements to isotopics during material construction. Does so as required by modeling options or user input. See Also -------- armi.reactor.blueprints.Blueprints._resolveNuclides Sets the metadata defining this behavior. 
""" elementExpansionPairs = [] for elementToExpand in blueprint.elementsToExpand: if elementToExpand.symbol not in mat.massFrac: continue nucFlags = blueprint.nuclideFlags.get(elementToExpand.symbol) nuclidesToBecome = ( [nuclideBases.byName[nn] for nn in nucFlags.expandTo] if (nucFlags and nucFlags.expandTo) else None ) elementExpansionPairs.append((elementToExpand, nuclidesToBecome)) densityTools.expandElementalMassFracsToNuclides(mat.massFrac, elementExpansionPairs) def insertDepletableNuclideKeys(c, blueprint): """ Auto update number density keys on all DEPLETABLE components. .. impl:: Insert any depletable blueprint flags onto this component. :id: I_ARMI_BP_NUC_FLAGS0 :implements: R_ARMI_BP_NUC_FLAGS This is called during the component construction process for each component from within :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`. For a given initialized component, check its flags to determine if it has been marked as depletable. If it is, use :py:func:`~armi.nucDirectory.nuclideBases.initReachableActiveNuclidesThroughBurnChain` to apply the user-specifications in the "nuclide flags" section of the blueprints to the Component such that all active isotopes and derivatives of those isotopes in the burn chain are initialized to have an entry in the component's ``nuclides`` array. Note that certain case settings, including ``fpModel`` and ``fpModelLibrary``, may trigger modifications to the active nuclides specified by the user in the "nuclide flags" section of the blueprints. Notes ----- This should be moved to a neutronics/depletion plugin hook but requires some refactoring in how active nuclides and reactors are initialized first. 
See Also -------- armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface.isDepletable : contains design docs describing the ``DEPLETABLE`` flagging situation """ if c.hasFlags(Flags.DEPLETABLE): # depletable components, whether auto-derived or explicitly flagged need expanded nucs ( c.p.nuclides, c.p.numberDensities, ) = nuclideBases.initReachableActiveNuclidesThroughBurnChain( c.p.nuclides, c.p.numberDensities, blueprint.activeNuclides, ) class ComponentKeyedList(yamlize.KeyedList): """ An OrderedDict of ComponentBlueprints keyed on the name. This is used within the ``components:`` main entry of the blueprints. This is *not* (yet) used when components are defined within a block blueprint. That is handled in the blockBlueprint construct method. """ item_type = ComponentBlueprint key_attr = ComponentBlueprint.name class GroupedComponent(yamlize.Object): """ A pointer to a component with a multiplicity to be used in a ComponentGroup. Multiplicity can be a fraction (e.g. to set volume fractions) """ name = yamlize.Attribute(type=str) mult = yamlize.Attribute(type=float) class ComponentGroup(yamlize.KeyedList): """ A single component group containing multiple GroupedComponents. Example ------- triso: kernel: mult: 0.7 buffer: mult: 0.3 """ group_name = yamlize.Attribute(type=str) key_attr = GroupedComponent.name item_type = GroupedComponent class ComponentGroups(yamlize.KeyedList): """ A list of component groups. This is used in the top-level blueprints file. """ key_attr = ComponentGroup.group_name item_type = ComponentGroup # This import-time magic requires all possible components be imported before this module imports. The intent was to make # registration basically automatic. This has proven to be quite problematic and will be replaced with an explicit # plugin-level component registration system. 
for dimName in set([kw for cType in components.ComponentType.TYPES.values() for kw in cType.DIMENSION_NAMES]): setattr( ComponentBlueprint, dimName, yamlize.Attribute(name=dimName, type=ComponentDimension, default=None), ) def _setComponentFlags(component, flags, blueprint): """Update component flags based on user input in blueprint.""" # The component __init__ calls setType(), which gives us our initial guess at what the flags should be. if flags is not None: # override the flags from __init__ with the ones from the blueprint component.p.flags = Flags.fromString(flags) else: # Potentially add the DEPLETABLE flag. Don't do this if we set flags explicitly. # WARNING: If you add flags explicitly, it will turn off depletion so be sure to add depletable to your list of # flags if you expect depletion if any(nuc in blueprint.activeNuclides for nuc in component.getNuclides()): component.p.flags |= Flags.DEPLETABLE ================================================ FILE: armi/reactor/blueprints/gridBlueprint.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Input definitions for Grids. Grids are given names which can be referred to on other input structures (like core maps and pin maps). These are in turn interpreted into concrete things at lower levels. For example: * Core map lattices get turned into :py:mod:`armi.reactor.grids`, which get set to ``core.spatialGrid``. 
* Block pin map lattices get applied to the components to provide some subassembly spatial details. Lattice inputs here are floating in space. Specific dimensions and anchor points are handled by the lower-level objects definitions. This is intended to maximize lattice reusability. See Also -------- armi.utils.asciimaps Description of the ascii maps and their formats. Examples -------- :: grids: control: geom: hex symmetry: full lattice map: | - - - - - - - - - 1 1 1 1 1 1 1 1 1 4 - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1 - - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1 - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 3 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 6 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 sfp: geom: cartesian lattice pitch: x: 25.0 y: 25.0 lattice map: | 2 2 2 2 2 2 1 1 1 2 2 1 3 1 2 2 3 1 1 2 2 2 2 2 2 core: geom: hex symmetry: third periodic origin: x: 0.0 y: 10.1 z: 1.1 lattice map: | - SH SH SH - SH SH SH SH SH RR RR RR SH RR RR RR RR SH RR RR RR RR RR SH RR OC OC RR RR SH OC OC OC RR RR SH OC OC OC OC RR RR OC MC OC OC RR SH MC MC PC OC RR SH MC MC MC OC OC RR MC MC MC OC RR SH PC MC MC OC RR SH MC MC MC MC OC RR IC MC MC OC RR SH IC US MC OC RR IC IC MC OC RR SH IC MC MC OC RR IC IC MC PC RR SH """ import copy import itertools from io import StringIO from typing import Tuple import numpy as np import yamlize from ruamel.yaml import scalarstring from armi import runLog from armi.reactor import blueprints, geometry, grids from armi.utils import asciimaps from armi.utils.customExceptions import InputError from armi.utils.mathematics import isMonotonic class Triplet(yamlize.Object): """A 
x, y, z triplet for coordinates or lattice pitch.""" x = yamlize.Attribute(type=float) y = yamlize.Attribute(type=float, default=0.0) z = yamlize.Attribute(type=float, default=0.0) def __init__(self, x=0.0, y=0.0, z=0.0): self.x = x self.y = y self.z = z class Pitch(yamlize.Object): """A x, y, z triplet or triangular hex pitch for coordinates or lattice pitch for hexagonal grids.""" hex = yamlize.Attribute(type=float, default=0.0) x = yamlize.Attribute(type=float, default=0.0) y = yamlize.Attribute(type=float, default=0.0) z = yamlize.Attribute(type=float, default=0.0) def __init__(self, hexPitch=0.0, x=0.0, y=0.0, z=0.0): """ Parameters ---------- hex : float, optional Triangular/hex lattice pitch x : float, optional Cartesian grid: pitch in the x direction Hexagonal grid: interpreted as hex lattice pitch y : float, optional Cartesian grid: pitch in the y direction z : float, optional Pitch in the z direction Raises ------ InputError * If a `hexPitch` and `x` or `y` pitch are provided simultaneously. * If no non-zero value is provided for any parameter. """ if hexPitch and (x or y): raise InputError("Cannot mix `hex` with `x` and `y` attributes of `latticePitch`.") if not any([hexPitch, x, y, z]): raise InputError("`lattice pitch` must have at least one non-zero attribute! Check the blueprints.") self.hex = hexPitch or x self.x = x self.y = y self.z = z class GridBlueprint(yamlize.Object): """ A grid input blueprint. These directly build Grid objects and contain information about how to populate the Grid with child ArmiObjects for the Reactor Model. The grids get origins either from a parent block (for pin lattices) or from a System (for Cores, SFPs, and other components). .. impl:: Define a lattice map in reactor core. 
:id: I_ARMI_BP_GRID :implements: R_ARMI_BP_GRID Defines a yaml construct that allows the user to specify a grid from within their blueprints file, including a name, geometry, dimensions, symmetry, and a map with the relative locations of components within that grid. Relies on the underlying infrastructure from the ``yamlize`` package for reading from text files, serialization, and internal storage of the data. Is implemented as part of a blueprints file by being used in key-value pairs within the :py:class:`~armi.reactor.blueprints.gridBlueprint.Grid` class, which is imported and used as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class. Includes a ``construct`` method, which instantiates an instance of one of the subclasses of :py:class:`~armi.reactor.grids.structuredgrid.StructuredGrid`. This is typically called from within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`, which then also associates the individual components in the block with locations specified in the grid. Attributes ---------- name : str The grid name geom : str The geometry of the grid (e.g. 'cartesian') latticeMap : str An asciimap representation of the lattice contents latticeDimensions : Pitch An x/y/z Triplet or hex pitch with grid dimensions in cm. This is used to specify a uniform grid, such as Cartesian or Hex. Mutually exclusive with gridBounds. gridBounds : dict A dictionary containing explicit grid boundaries. Specific keys used will depend on the type of grid being defined. Mutually exclusive with latticeDimensions. symmetry : str A string defining the symmetry mode of the grid gridContents : dict A {(i,j): str} dictionary mapping spatialGrid indices in 2-D to string specifiers of what's supposed to be in the grid. orientationBOL : dict A {(i,j): float} dictionary mapping spatialGrid indices in 2-D to the orientation of what's supposed to be in the grid. 
""" name = yamlize.Attribute(key="name", type=str) geom = yamlize.Attribute(key="geom", type=str, default=geometry.HEX) latticeMap = yamlize.Attribute(key="lattice map", type=str, default=None) latticeDimensions = yamlize.Attribute(key="lattice pitch", type=Pitch, default=None) gridBounds = yamlize.Attribute(key="grid bounds", type=dict, default=None) symmetry = yamlize.Attribute( key="symmetry", type=str, default=str(geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)), ) # gridContents is the final form of grid contents information; it is set regardless of how the # input is read. When writing, we attempt to preserve the input mode and write ascii map if that # was what was originally provided. gridContents = yamlize.Attribute(key="grid contents", type=dict, default=None) # allowing us to add custom orientations to the objects on this gritd, at BOL orientationBOL = yamlize.Attribute(key="orientationBOL", type=dict, default=None) @gridContents.validator def gridContents(self, value): if value is None: return True if not all(isinstance(key, tuple) for key in value.keys()): raise InputError("Grid contents Keys need to be like [i, j]. Check the blueprints.") return True @orientationBOL.validator def orientationBOL(self, value): if value is None: return True if not all(isinstance(key, tuple) for key in value.keys()): raise InputError("Orientation BOL Keys need to be like [i, j]. Check the blueprints.") return True def __init__( self, name=None, geom=geometry.HEX, latticeMap=None, symmetry=str(geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)), gridContents=None, orientationBOL=None, gridBounds=None, ): """ A Grid blueprint. Notes ----- yamlize does not call an ``__init__`` method, instead it uses ``__new__`` and setattr this is only needed for when you want to make this object from a non-YAML source. Warning ------- This is a Yamlize object, so ``__init__`` never really gets called. 
Only ``__new__`` does. """ self.name = name self.geom = str(geom) self.latticeMap = latticeMap self._readFromLatticeMap = False self.symmetry = str(symmetry) self.gridContents = gridContents self.orientationBOL = orientationBOL self.gridBounds = gridBounds @property def readFromLatticeMap(self): """ This is implemented as a property, since as a Yamlize object, ``__init__`` is not always called and we have to lazily evaluate its default value. """ return getattr(self, "_readFromLatticeMap", False) @readFromLatticeMap.setter def readFromLatticeMap(self, value): self._readFromLatticeMap = value def construct(self): """Build a Grid from a grid definition.""" self._readGridContents() grid = self._constructSpatialGrid() return grid def _constructSpatialGrid(self): """ Build spatial grid. If you do not enter ``latticeDimensions``, a unit grid will be produced which must be adjusted to the proper dimensions (often by inspection of children) at a later time. """ symmetry = geometry.SymmetryType.fromStr(self.symmetry) if self.symmetry else None geom = self.geom maxIndex = self._getMaxIndex() runLog.extra(f"Creating the spatial grid {self.name}", single=True) if geom in (geometry.RZT, geometry.RZ): if self.gridBounds is None: # This check is regrettably late. It would be nice if we could validate that bounds # are provided if R-Theta mesh is being used. raise InputError( f"Grid bounds must be provided for `{self.name}` to specify a grid with r-theta components." ) for key in ("theta", "r"): if key not in self.gridBounds: raise InputError(f"{key} grid bounds were not provided for `{self.name}`.") # convert to list, otherwise it is a CommentedSeq theta = np.array(self.gridBounds["theta"]) radii = np.array(self.gridBounds["r"]) for lst, name in ((theta, "theta"), (radii, "radii")): if not isMonotonic(lst, "<"): raise InputError( f"Grid bounds for {self.name}:{name} is not sorted or contains duplicates. Check blueprints." 
) spatialGrid = grids.ThetaRZGrid(bounds=(theta, radii, (0.0, 0.0))) if geom in (geometry.HEX, geometry.HEX_CORNERS_UP): if not self.latticeDimensions: pitch = 1.0 else: ld = self.latticeDimensions if ld.hex and (ld.x or ld.y): raise InputError("Cannot mix `hex` with `x` and `y` attributes of `latticePitch`.") if not any([ld.hex, ld.x, ld.y, ld.z]): raise InputError("`lattice pitch` must have at least one non-zero attribute! Check the blueprints.") pitch = ld.hex or ld.x # add 2 for potential dummy assems spatialGrid = grids.HexGrid.fromPitch( pitch, numRings=maxIndex + 2, cornersUp=geom == geometry.HEX_CORNERS_UP, ) elif geom == geometry.CARTESIAN: # if full core or not cut-off, bump the first assembly from the center of the mesh into # the positive values. xw, yw = (self.latticeDimensions.x, self.latticeDimensions.y) if self.latticeDimensions else (1.0, 1.0) # Specifically in the case of grid blueprints, where we have grid contents available, we # can also infer "through center" based on the contents. Note that the "through center" # symmetry check cannot be performed when the grid contents has not been provided (i.e., # None or empty). if self.gridContents and symmetry.domain == geometry.DomainType.FULL_CORE: nx, ny = _getGridSize(self.gridContents.keys()) if nx == ny and nx % 2 == 1: symmetry.isThroughCenterAssembly = True isOffset = symmetry is not None and not symmetry.isThroughCenterAssembly spatialGrid = grids.CartesianGrid.fromRectangle(xw, yw, numRings=maxIndex + 1, isOffset=isOffset) runLog.debug("Built grid: {}".format(spatialGrid)) # set geometric metadata on spatialGrid. This information is needed in various parts of the # code and is best encapsulated on the grid itself rather than on the container state. spatialGrid._geomType: str = str(self.geom) self.symmetry = str(symmetry) spatialGrid._symmetry: str = self.symmetry return spatialGrid def _getMaxIndex(self): """ Find the max index in the grid contents. Used to limit the size of the spatialGrid. 
Used to be called maxNumRings. """ if self.gridContents: return max(itertools.chain(*zip(*self.gridContents.keys()))) else: return 6 def expandToFull(self): """ Unfold the blueprints to represent full symmetry. Notes ----- This relatively rudimentary, and copies entries from the currently-represented domain to their corresponding locations in full symmetry. This may not produce the desired behavior for some scenarios, such as when expanding fuel shuffling paths or the like. Future work may make this more sophisticated. """ if geometry.SymmetryType.fromAny(self.symmetry).domain == geometry.DomainType.FULL_CORE: return # fill the new grid contents grid = self.construct() self._expandToFullOrientationBOL(grid) newContents = copy.copy(self.gridContents) for idx, contents in self.gridContents.items(): equivs = grid.getSymmetricEquivalents(idx) for idx2 in equivs: newContents[idx2] = contents self.gridContents = newContents # set the grid symmetry split = geometry.THROUGH_CENTER_ASSEMBLY in self.symmetry self.symmetry = str( geometry.SymmetryType( geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY, throughCenterAssembly=split, ) ) def _expandToFullOrientationBOL(self, grid): """Set the orientationBOL parameter during expandToFulLCore(). Parameters ---------- grid : Grid Spatial grid for the current ARMI object. """ if self.orientationBOL is None: return newOrientations = copy.copy(self.orientationBOL) for idx, contents in self.gridContents.items(): equivs = grid.getSymmetricEquivalents(idx) angle = 360.0 / (len(equivs) + 1) for count, idx2 in enumerate(equivs): loc = grid.indicesToRingPos(*idx) if loc in self.orientationBOL: loc2 = grid.indicesToRingPos(*idx2) newOrientation = self.orientationBOL[loc] + (count + 1) * angle newOrientations[loc2] = newOrientation % 360.0 self.orientationBOL = newOrientations def _readGridContents(self): """ Read the specifiers as a function of grid position. 
The contents can either be provided as: * A dict mapping indices to specifiers (default output of this) * An asciimap The output will always be stored in ``self.gridContents``. """ if self.gridContents: return elif self.latticeMap: self._readGridContentsLattice() if self.gridContents is None: # Make sure we have at least something; clients shouldn't have to worry about whether # gridContents exist at all. self.gridContents = dict() def _readGridContentsLattice(self): """Read an ascii map of grid contents. This update the gridContents attribute, which is a dict mapping grid i,j,k indices to textual specifiers (e.g. ``IC``)). """ self.readFromLatticeMap = True symmetry = geometry.SymmetryType.fromStr(self.symmetry) geom = geometry.GeomType.fromStr(self.geom) latticeCls = asciimaps.asciiMapFromGeomAndDomain(self.geom, symmetry.domain) asciimap = latticeCls() asciimap.readAscii(self.latticeMap) self.gridContents = dict() iOffset = 0 jOffset = 0 if geom == geometry.GeomType.CARTESIAN and symmetry.domain == geometry.DomainType.FULL_CORE: # asciimaps is not smart about where the center should be, so we need to offset # apropriately to get (0,0) in the middle nx, ny = _getGridSize(asciimap.keys()) # turns out this works great for even and odd cases. love it when integer math works in your favor iOffset = int(-nx / 2) jOffset = int(-ny / 2) for (i, j), spec in asciimap.items(): if spec == "-": # skip placeholders continue self.gridContents[i + iOffset, j + jOffset] = spec def getLocators(self, spatialGrid: grids.Grid, latticeIDs: list): """ Return spatialLocators in grid corresponding to lattice IDs. This requires a fully-populated ``gridContents`` attribute. """ if latticeIDs is None: return [] if self.gridContents is None: return [] # tried using yamlize to coerce ints to strings but failed after much struggle, so we just # auto-convert here to deal with int-like specifications. 
(yamlize.StrList fails to coerce # when ints are provided) latticeIDs = [str(i) for i in latticeIDs] locators = [] for (i, j), spec in self.gridContents.items(): locator = spatialGrid[i, j, 0] if spec in latticeIDs: locators.append(locator) return locators def getMultiLocator(self, spatialGrid, latticeIDs): """Create a MultiIndexLocation based on lattice IDs.""" spatialLocator = grids.MultiIndexLocation(grid=spatialGrid) spatialLocator.extend(self.getLocators(spatialGrid, latticeIDs)) return spatialLocator class Grids(yamlize.KeyedList): item_type = GridBlueprint key_attr = GridBlueprint.name def _getGridSize(idx) -> Tuple[int, int]: """ Return the number of spaces between the min and max of a collection of (int, int) tuples, inclusive. This essentially returns the number of grid locations along the i, and j dimensions, given the (i,j) indices of each occupied location. This is useful for determining certain grid offset behavior. """ nx = max(key[0] for key in idx) - min(key[0] for key in idx) + 1 ny = max(key[1] for key in idx) - min(key[1] for key in idx) + 1 return nx, ny def _filterOutsideDomain(gridBp): """Remove grid contents that lie outside the represented domain. This removes extra objects; ARMI allows the user input specifiers in regions outside of the represented domain, which is fine as long as the contained specifier is consistent with the corresponding region in the represented domain given the symmetry condition. For instance, if we have a 1/3-core hex model, it is typically okay for an assembly to be specified outside of the first 1/3rd of the core, as long as it is the same assembly as would be there when expanding the first 1/3rd into a full-core model. However, we do not really want these hanging around, since editing the represented 1/Nth of the core will probably lead to consistency issues, so we remove them. 
""" grid = gridBp.construct() contentsToRemove = { idx for idx, _contents in gridBp.gridContents.items() if not grid.locatorInDomain(grid[idx + (0,)], symmetryOverlap=False) } for idx in contentsToRemove: symmetrics = grid.getSymmetricEquivalents(idx) for symmetric in symmetrics: if symmetric in gridBp.gridContents: if gridBp.gridContents[symmetric] != gridBp.gridContents[idx]: raise ValueError( "The contents at `{}` (`{}`) in grid `{}` is not the " "same as it's symmetric equivalent at `{}` (`{}`). " "Check your grid blueprints for symmetry.".format( idx, gridBp.gridContents[idx], gridBp.name, symmetric, gridBp.gridContents[symmetric], ) ) del gridBp.gridContents[idx] def saveToStream(stream, bluep, full=False, tryMap=False): """ Save the blueprints to the passed stream. This can save either the entire blueprints, or just the `grids:` section of the blueprints, based on the passed ``full`` argument. Saving just the grid blueprints can be useful when cobbling blueprints together with !include flags. .. impl:: Write a blueprint file from a blueprint object. :id: I_ARMI_BP_TO_DB :implements: R_ARMI_BP_TO_DB First makes a copy of the blueprints that are passed in. Then modifies any grids specified in the blueprints into a canonical lattice map style, if needed. Then uses the ``dump`` method that is inherent to all ``yamlize`` subclasses to write the blueprints to the given ``stream`` object. If called with the ``full`` argument, the entire blueprints is dumped. If not, only the grids portion is dumped. Parameters ---------- stream : file output stream of some kind bluep : armi.reactor.blueprints.Blueprints, or Grids full : bool Is this a full output file, or just a partial/grids? tryMap : bool regardless of input form, attempt to output as a lattice map """ # To save, we want to try our best to output our grid blueprints in the lattice map style. However, we do not want # to wreck the state that the current blueprints are in. 
So we make a copy and do some manipulations to try to # canonicalize it and save that, leaving the original blueprints unmolested. bp = copy.deepcopy(bluep) if isinstance(bp, blueprints.Blueprints): gridDesigns = bp.gridDesigns elif isinstance(bp, blueprints.Grids): gridDesigns = bp else: raise TypeError(f"Expected Blueprints or Grids, got {type(bp)}") for gridDesignType, gridDesign in gridDesigns.items(): # The core equilibrium path should be put into the grid contents rather than a lattice map until we write a # string-> tuple parser for reading it back in. Skip this type of grid. if gridDesignType == "coreEqPath": continue _filterOutsideDomain(gridDesign) if not gridDesign.gridContents: # there is no grid, so there must be lattice, and that goes to output continue if gridDesign.readFromLatticeMap or tryMap: symmetry = geometry.SymmetryType.fromStr(gridDesign.symmetry) aMap = asciimaps.asciiMapFromGeomAndDomain(gridDesign.geom, symmetry.domain)() try: if gridDesign.latticeMap: # Try to use the lattice map first, it was the original source of truth. aMap.readAscii(gridDesign.latticeMap) else: # If there is no original lattice map, use the current grid of data. aMap.asciiLabelByIndices = {(key[0], key[1]): val for key, val in gridDesign.gridContents.items()} aMap.gridContentsToAscii() except Exception as e: runLog.warning( "The `lattice map` for the current assembly arrangement cannot be written. Defaulting to using the " f"`grid contents` dictionary instead. Exception: {e}" ) aMap = None if aMap is not None: # If there is an ascii map available then use it to fill out the contents of the lattice map section of # the grid design. This also clears out the grid contents so there is not duplicate data. 
gridDesign.gridContents = None mapString = StringIO() aMap.writeAscii(mapString) gridDesign.latticeMap = scalarstring.LiteralScalarString(mapString.getvalue()) else: gridDesign.latticeMap = None else: # Grid contents were supplied as a dictionary, so we shouldn't even have a latticeMap, unless it was set # explicitly in code somewhere. Discard if there is one. gridDesign.latticeMap = None toSave = bp if full else gridDesigns # NOTE: type(bp) here used because importing Blueprints causes a circular import type(toSave).dump(toSave, stream) ================================================ FILE: armi/reactor/blueprints/isotopicOptions.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Defines nuclide flags and custom isotopics via input. Nuclide flags control meta-data about nuclides. Custom isotopics allow specification of arbitrary isotopic compositions. 
""" import yamlize from armi import materials, runLog from armi.nucDirectory import elements, nucDir, nuclideBases from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import ( CONF_FISSION_PRODUCT_LIBRARY_NAME, CONF_FP_MODEL, ) from armi.physics.neutronics.settings import ( CONF_MCNP_LIB_BASE, CONF_NEUTRONICS_KERNEL, CONF_XS_KERNEL, ) from armi.utils import densityTools, units from armi.utils.customExceptions import InputError class NuclideFlag(yamlize.Object): """ Defines whether or not each nuclide is included in the burn chain and cross sections. Also controls which nuclides get expanded from elementals to isotopics and which natural isotopics to exclude (if any). Oftentimes, cross section library creators include some natural isotopes but not all. For example, it is common to include O16 but not O17 or O18. Each code has slightly different interpretations of this so we give the user full control here. We also try to provide useful defaults. There are lots of complications that can arise in these choices. It makes reasonable sense to use elemental compositions for things that are typically used without isotopic modifications (Fe, O, Zr, Cr, Na). If we choose to expand some or all of these to isotopics at initialization based on cross section library requirements, a single case will work fine with a given lattice physics option. However, restarting from that case with different cross section needs is challenging. .. impl:: The blueprint object that represents a nuclide flag. :id: I_ARMI_BP_NUC_FLAGS1 :implements: R_ARMI_BP_NUC_FLAGS This class creates a yaml interface for the user to specify in their blueprints which isotopes should be depleted. 
It is incorporated into the "nuclide flags" section of a blueprints file by being included as key-value pairs within the :py:class:`~armi.reactor.blueprints.isotopicOptions.NuclideFlags` class, which is in turn included into the overall blueprints within :py:class:`~armi.reactor.blueprints.Blueprints`. This class includes a boolean ``burn`` attribute which can be specified for any nuclide. This attribute is examined by the :py:meth:`~armi.reactor.blueprints.isotopicOptions.NuclideFlag.fileAsActiveOrInert` method to sort the nuclides into sets of depletable or not, which is typically called during construction of assemblies in :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`. Note that while the ``burn`` attribute can be set by the user in the blueprints, other methods may also set it based on case settings (see, for instance, :py:func:`~armi.reactor.blueprints.isotopicOptions.genDefaultNucFlags`, :py:func:`~armi.reactor.blueprints.isotopicOptions.autoUpdateNuclideFlags`, and :py:func:`~armi.reactor.blueprints.isotopicOptions.getAllNuclideBasesByLibrary`). Attributes ---------- nuclideName : str The name of the nuclide burn : bool True if this nuclide should be added to the burn chain. If True, all reachable nuclides via transmutation and decay must be included as well. xs : bool True if this nuclide should be included in the cross section libraries. Effectively, if this nuclide is in the problem at all, this should be true. expandTo : list of str, optional isotope nuclideNames to expand to. For example, if nuclideName is ``O`` then this could be ``["O16", "O17"]`` to expand it into those two isotopes (but not ``O18``). The nuclides will be scaled up uniformly to account for any missing natural nuclides. 
""" nuclideName = yamlize.Attribute(type=str) @nuclideName.validator def nuclideName(self, value): if value not in nuclideBases.byName and value not in elements.bySymbol: allowedKeys = set(nuclideBases.byName.keys()).update(set(elements.bySymbol.keys())) raise ValueError(f"`{value}` is not a valid nuclide name, must be one of: {allowedKeys}") burn = yamlize.Attribute(type=bool) xs = yamlize.Attribute(type=bool) expandTo = yamlize.Attribute(type=yamlize.StrList, default=None) def __init__(self, nuclideName, burn, xs, expandTo): # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr self.nuclideName = nuclideName self.burn = burn self.xs = xs self.expandTo = expandTo def __repr__(self): return f"<NuclideFlag name:{self.nuclideName} burn:{self.burn} xs:{self.xs}>" def fileAsActiveOrInert(self, activeSet, inertSet): """ Given a nuclide or element name, file it as either active or inert. If isotopic expansions are requested, include the isotopics rather than the NaturalNuclideBase, as the NaturalNuclideBase will never occur in such a problem. """ undefBurnChainActiveNuclides = set() nb = nuclideBases.byName[self.nuclideName] if self.expandTo: nucBases = [nuclideBases.byName[nn] for nn in self.expandTo] expanded = [nb.element] # error to expand non-elements else: nucBases = [nb] expanded = [] for nuc in nucBases: if self.burn: if not nuc.trans and not nuc.decays: # DUMPs and LFPs usually undefBurnChainActiveNuclides.add(nuc.name) activeSet.add(nuc.name) if self.xs: inertSet.add(nuc.name) return expanded, undefBurnChainActiveNuclides class NuclideFlags(yamlize.KeyedList): """An OrderedDict of ``NuclideFlags``, keyed by their ``nuclideName``.""" item_type = NuclideFlag key_attr = NuclideFlag.nuclideName class CustomIsotopic(yamlize.Map): """ User specified, custom isotopics input defined by a name (such as MOX), and key/pairs of nuclide names and numeric values consistent with the ``input format``. .. 
class CustomIsotopic(yamlize.Map):
    """
    User specified, custom isotopics input defined by a name (such as MOX), and key/pairs of
    nuclide names and numeric values consistent with the ``input format``.

    .. impl:: Certain material modifications will be applied using this code.
        :id: I_ARMI_MAT_USER_INPUT2
        :implements: R_ARMI_MAT_USER_INPUT

        Defines a yaml construct that allows the user to define a custom isotopic vector from
        within their blueprints file, including a name and key-value pairs corresponding to nuclide
        names and their concentrations.

        Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
        files, serialization, and internal storage of the data.

        Is implemented as part of a blueprints file by being used in key-value pairs within the
        :py:class:`~armi.reactor.blueprints.isotopicOptions.CustomIsotopics` class, which is
        imported and used as an attribute within the larger
        :py:class:`~armi.reactor.blueprints.Blueprints` class.

        These isotopics are linked to a component during calls to
        :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`, where
        the name specified in the ``isotopics`` attribute of the component blueprint is searched
        against the available ``CustomIsotopics`` defined in the "custom isotopics" section of the
        blueprints.

        Once linked, the
        :py:meth:`~armi.reactor.blueprints.isotopicOptions.CustomIsotopic.apply` method is called,
        which adjusts the ``massFrac`` attribute of the component's material class.
    """

    key_type = yamlize.Typed(str)
    value_type = yamlize.Typed(float)
    name = yamlize.Attribute(type=str)
    inputFormat = yamlize.Attribute(key="input format", type=str)

    @inputFormat.validator
    def inputFormat(self, value):
        """Only allow the three supported input formats."""
        if value not in self._allowedFormats:
            raise ValueError(f"Cannot set `inputFormat` to `{value}`, must be one of: {self._allowedFormats}")

    _density = yamlize.Attribute(key="density", type=float, default=None)

    _allowedFormats = {"number fractions", "number densities", "mass fractions"}

    def __new__(cls, *args):
        self = yamlize.Map.__new__(cls, *args)
        # the density as computed by source number densities
        self._computedDensity = None
        return self

    def __init__(self, name, inputFormat, density):
        # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr
        self._name = None
        self.name = name
        self._inputFormat = None
        self.inputFormat = inputFormat
        self.density = density
        self.massFracs = {}

    def __setitem__(self, key, value):
        if key not in nuclideBases.byName and key not in elements.bySymbol:
            # BUGFIX: ``set(a).update(b)`` returns None, so the original message always printed
            # ``None`` as the allowed keys. Use a set union instead.
            allowedKeys = set(nuclideBases.byName.keys()) | set(elements.bySymbol.keys())
            raise ValueError(f"Key `{key}` is not valid, must be one of: {allowedKeys}")
        yamlize.Map.__setitem__(self, key, value)

    @property
    def density(self):
        # prefer the density computed from number densities, if one exists
        return self._computedDensity or self._density

    @density.setter
    def density(self, value):
        if self._computedDensity is not None:
            raise AttributeError("Density was computed from number densities, and should not be set directly.")
        # BUGFIX: validate before assigning so an invalid (negative) value is never stored.
        if value is not None and value < 0:
            raise ValueError(f"Cannot set `density` to `{value}`, must be non-negative")
        self._density = value

    @classmethod
    def from_yaml(cls, loader, node, rtd):
        """
        Override the ``Yamlizable.from_yaml`` to inject custom data validation logic, and complete
        initialization of the object.
        """
        self = yamlize.Map.from_yaml.__func__(cls, loader, node, rtd)
        try:
            self._initializeMassFracs()
            self._expandElementMassFracs()
        except Exception as ex:
            # use a YamlizingError to get line/column of erroneous input
            raise yamlize.YamlizingError(str(ex), node)
        return self

    @classmethod
    def from_yaml_key_val(cls, loader, key_node, val_node, key_attr, rtd):
        """
        Override the ``Yamlizable.from_yaml`` to inject custom data validation logic, and complete
        initialization of the object.
        """
        self = yamlize.Map.from_yaml_key_val.__func__(cls, loader, key_node, val_node, key_attr, rtd)
        try:
            self._initializeMassFracs()
            self._expandElementMassFracs()
        except Exception as ex:
            # use a YamlizingError to get line/column of erroneous input
            raise yamlize.YamlizingError(str(ex), val_node)
        return self

    def _initializeMassFracs(self):
        """Convert the raw input values into mass fractions, per the declared input format."""
        self.massFracs = dict()  # defaults to 0.0, __init__ is not called

        if any(v < 0.0 for v in self.values()):
            raise ValueError(f"Custom isotopic input for {self.name} is negative")

        valSum = sum(self.values())
        if not abs(valSum - 1.0) < 1e-5 and "fractions" in self.inputFormat:
            raise ValueError(f"Fractional custom isotopic input values must sum to 1.0 in: {self.name}")

        if self.inputFormat == "number fractions":
            sumNjAj = 0.0
            for nuc, nj in self.items():
                if nj:
                    sumNjAj += nj * nucDir.getAtomicWeight(nuc)
            for nuc, value in self.items():
                massFrac = value * nucDir.getAtomicWeight(nuc) / sumNjAj
                self.massFracs[nuc] = massFrac
        elif self.inputFormat == "number densities":
            if self._density is not None:
                # BUGFIX(message): removed the "and but" typo in this user-facing error.
                raise InputError(
                    f"Custom isotopic `{self.name}` is over-specified. It was provided as number densities, but "
                    f"density ({self.density}) was also provided. Is the input format correct?"
                )
            M = {
                nuc: Ni / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * nucDir.getAtomicWeight(nuc)
                for nuc, Ni in self.items()
            }
            densityTotal = sum(M.values())
            if densityTotal < 0:
                raise ValueError("Computed density is negative")
            for nuc, Mi in M.items():
                self.massFracs[nuc] = Mi / densityTotal
            self._computedDensity = densityTotal
        elif self.inputFormat == "mass fractions":
            self.massFracs = dict(self)  # as input
        else:
            raise ValueError(f"Unrecognized custom isotopics input format {self.inputFormat}.")

    def _expandElementMassFracs(self):
        """
        Expand the custom isotopics input entries that are elementals to isotopics.

        This is necessary when the element name is not a elemental nuclide. Most everywhere else
        expects Nuclide objects (or nuclide names). This input allows a user to enter "U" which
        would expand to the naturally occurring uranium isotopics.

        This is different than the isotopic expansion done for meeting user-specified modeling
        options (such as an MC**2, or MCNP expecting elements or isotopes), because it translates
        the user input into something that can be used later on.
        """
        elementsToExpand = []
        for nucName in self.massFracs:
            if nucName not in nuclideBases.byName:
                element = elements.bySymbol.get(nucName)
                if element is not None:
                    runLog.info(f"Expanding custom isotopic `{self.name}` element `{nucName}` to natural isotopics")
                    # include all natural isotopes with None flag
                    elementsToExpand.append((element, None))
                else:
                    raise InputError(f"Unrecognized nuclide/isotope/element in input: {nucName}")

        densityTools.expandElementalMassFracsToNuclides(self.massFracs, elementsToExpand)

    def apply(self, material):
        """
        Apply specific isotopic compositions to a component.

        Generically, materials have composition-dependent bulk properties such as mass density.
        Note that this operation does not update these material properties. Use with care.

        Parameters
        ----------
        material : armi.materials.material.Material
            An ARMI Material instance.
        """
        material.massFrac = dict(self.massFracs)
        if self.density is not None:
            if not isinstance(material, materials.Custom):
                runLog.important(
                    "A custom isotopic with associated density has been specified for non-`Custom` material "
                    f"{material}. The reference density of materials in the materials library will not be changed, but "
                    "the associated components will use the density implied by the custom isotopics.",
                    single=True,
                )
                # specifically, non-Custom materials only use refDensity and dLL, mat.customDensity has no effect
                return
            material.customDensity = self.density
""" material.massFrac = dict(self.massFracs) if self.density is not None: if not isinstance(material, materials.Custom): runLog.important( "A custom isotopic with associated density has been specified for non-`Custom` material " f"{material}. The reference density of materials in the materials library will not be changed, but " "the associated components will use the density implied by the custom isotopics.", single=True, ) # specifically, non-Custom materials only use refDensity and dLL, mat.customDensity has no effect return material.customDensity = self.density class CustomIsotopics(yamlize.KeyedList): """OrderedDict of CustomIsotopic objects, keyed by their name.""" item_type = CustomIsotopic key_attr = CustomIsotopic.name # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr def apply(self, material, customIsotopicsName): """ Apply specific isotopic compositions to a component. Generically, materials have composition-dependent bulk properties such as mass density. Note that this operation does not update these material properties. Use with care. Parameters ---------- material : armi.materials.material.Material Material instance to adjust. customIsotopicName : str String corresponding to the ``CustomIsoptopic.name``. """ if customIsotopicsName not in self: raise KeyError( "The input custom isotopics do not include {}. The only present specifications are {}".format( customIsotopicsName, self.keys() ) ) custom = self[customIsotopicsName] custom.apply(material) def getDefaultNuclideFlags(): """ Return a default set of nuclides to model and deplete. Notes ----- The nuclideFlags input on blueprints has confused new users and is infrequently changed. It will be moved to be a user setting, but in any case a reasonable default should be provided. We will by default model medium-lived and longer actinides between U234 and CM247. We will include B10 and B11 without depletion, sodium, and structural elements. 
We will include LFPs with depletion. """ nuclideFlags = {} actinides = { "U": [234, 235, 236, 238], "NP": [237, 238], "PU": [236] + list(range(238, 243)), "AM": range(241, 244), "CM": range(242, 248), } for el, masses in actinides.items(): for mass in masses: nuclideFlags[f"{el}{mass}"] = {"burn": True, "xs": True, "expandTo": None} for fp in [35, 38, 39, 40, 41]: nuclideFlags[f"LFP{fp}"] = {"burn": True, "xs": True, "expandTo": None} for dmp in [1, 2]: nuclideFlags[f"DUMP{dmp}"] = {"burn": True, "xs": True, "expandTo": None} for boron in [10, 11]: nuclideFlags[f"B{boron}"] = {"burn": False, "xs": True, "expandTo": None} for struct in ["ZR", "C", "SI", "V", "CR", "MN", "FE", "NI", "MO", "W", "NA", "HE", "AL", "CO", "NB"]: nuclideFlags[struct] = {"burn": False, "xs": True, "expandTo": None} return nuclideFlags def eleExpandInfoBasedOnCodeENDF(cs): """ Intelligently choose elements to expand based on code and ENDF version. If settings point to a particular code and library and we know that combo requires certain elementals to be expanded, we flag them here to make the user input as simple as possible. This determines both which elementals to keep and which specific expansion subsets to use. Notes ----- This logic is expected to be moved to respective plugins in time. Returns ------- elementalsToKeep : set Set of NaturalNuclideBase instances to not expand into natural isotopics. expansions : dict Element to list of nuclides for expansion. For example: {oxygen: [oxygen16]} indicates that all oxygen should be expanded to O16, ignoring natural O17 and O18. 
def eleExpandInfoBasedOnCodeENDF(cs):
    """
    Intelligently choose elements to expand based on code and ENDF version.

    If settings point to a particular code and library and we know that combo requires certain
    elementals to be expanded, we flag them here to make the user input as simple as possible.

    This determines both which elementals to keep and which specific expansion subsets to use.

    Notes
    -----
    This logic is expected to be moved to respective plugins in time.

    Returns
    -------
    elementalsToKeep : set
        Set of NaturalNuclideBase instances to not expand into natural isotopics.
    expansions : dict
        Element to list of nuclides for expansion. For example: {oxygen: [oxygen16]} indicates
        that all oxygen should be expanded to O16, ignoring natural O17 and O18. (variables are
        Natural/NuclideBases)
    """
    elementalsToKeep = set()
    oxygenElementals = [nuclideBases.byName["O"]]
    hydrogenElementals = [nuclideBases.byName[name] for name in ["H"]]
    endf70Elementals = [nuclideBases.byName[name] for name in ["C", "V", "ZN"]]
    endf71Elementals = [nuclideBases.byName[name] for name in ["C"]]
    endf80Elementals = []
    elementalsInMC2 = set()
    # maps element symbol -> list of nuclide-name strings; converted to NuclideBases at the end
    expansionStrings = {}
    mc2Expansions = {
        "HE": ["HE4"],  # neglect HE3
        "O": ["O16"],  # neglect O17 and O18
        "W": ["W182", "W183", "W184", "W186"],  # neglect W180
    }
    mcnpExpansions = {"O": ["O16"]}

    for element in elements.byName.values():
        # any NaturalNuclideBase that's available in MC2 libs
        nnb = nuclideBases.byName.get(element.symbol)
        if nnb and nnb.getMcc2Id():
            elementalsInMC2.add(nnb)

    if "MCNP" in cs[CONF_NEUTRONICS_KERNEL]:
        expansionStrings.update(mcnpExpansions)
        if cs[CONF_MCNP_LIB_BASE] == "ENDF/B-V.0":
            # ENDF/B V.0
            elementalsToKeep.update(nuclideBases.instances)  # skip expansion
        elif cs[CONF_MCNP_LIB_BASE] == "ENDF/B-VII.0":
            # ENDF/B VII.0
            elementalsToKeep.update(endf70Elementals)
        elif cs[CONF_MCNP_LIB_BASE] == "ENDF/B-VII.1":
            # ENDF/B VII.1
            elementalsToKeep.update(endf71Elementals)
        elif cs[CONF_MCNP_LIB_BASE] == "ENDF/B-VIII.0":
            # ENDF/B VIII.0
            elementalsToKeep.update(endf80Elementals)
        else:
            raise InputError(
                "Failed to determine nuclides for modeling. The `mcnpLibraryVersion` "
                f"setting value ({cs[CONF_MCNP_LIB_BASE]}) is not supported."
            )
    elif cs[CONF_XS_KERNEL] == "SERPENT":
        elementalsToKeep.update(endf70Elementals)
        expansionStrings.update(mc2Expansions)
    elif cs[CONF_XS_KERNEL] in ["", "MC2v3", "MC2v3-PARTISN"]:
        elementalsToKeep.update(endf71Elementals)
        expansionStrings.update(mc2Expansions)
    elif cs[CONF_XS_KERNEL] == "DRAGON":
        # Users need to use default nuclear lib name. This is documented.
        dragLib = cs["dragonDataPath"]
        # only supports ENDF/B VII/VIII at the moment.
        if "7r0" in dragLib:
            elementalsToKeep.update(endf70Elementals)
        elif "7r1" in dragLib:
            elementalsToKeep.update(endf71Elementals)
        elif "8r0" in dragLib:
            elementalsToKeep.update(endf80Elementals)
            elementalsToKeep.update(hydrogenElementals)
            elementalsToKeep.update(oxygenElementals)
        else:
            raise ValueError(f"Unrecognized DRAGLIB name: {dragLib} Use default file name.")
    elif cs[CONF_XS_KERNEL] == "MC2v2":
        # strip out any NaturalNuclideBase with no getMcc2Id() (not on mcc-nuclides.yaml)
        elementalsToKeep.update(elementalsInMC2)
        expansionStrings.update(mc2Expansions)

    # convert convenient string notation to actual NuclideBase objects
    expansions = {}
    for nnb, nbs in expansionStrings.items():
        expansions[nuclideBases.byName[nnb]] = [nuclideBases.byName[nb] for nb in nbs]

    return elementalsToKeep, expansions


def genDefaultNucFlags():
    """Perform all the yamlize-required type conversions."""
    flagsDict = getDefaultNuclideFlags()
    flags = NuclideFlags()
    for nucName, nucFlags in flagsDict.items():
        flag = NuclideFlag(nucName, nucFlags["burn"], nucFlags["xs"], nucFlags["expandTo"])
        flags[nucName] = flag
    return flags
) for nb in nbs: nuc = nb.name if nuc in nuclideFlags or elements.byZ[nb.z] in nuclideFlags: continue nuclideFlags[nuc] = NuclideFlag(nuc, burn=False, xs=True, expandTo=[]) # inert since burn is False inerts.add(nuc) def getAllNuclideBasesByLibrary(cs): """ Return a list of nuclide bases available for cross section modeling based on the ``CONF_FISSION_PRODUCT_LIBRARY_NAME`` setting. """ nbs = [] if cs[CONF_FP_MODEL] == "explicitFissionProducts": if not cs[CONF_FISSION_PRODUCT_LIBRARY_NAME]: pass if cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] == "MC2-3": nbs = nuclideBases.byMcc3Id.values() else: raise ValueError( "An option to handle the `CONF_FISSION_PRODUCT_LIBRARY_NAME` set to " f"`{cs[CONF_FISSION_PRODUCT_LIBRARY_NAME]}` has not been implemented." ) return nbs ================================================ FILE: armi/reactor/blueprints/reactorBlueprint.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Definitions of top-level reactor arrangements like the Core (default), SFP, etc. See documentation of blueprints in :ref:`bp-input-file` for more context. See example in :py:mod:`armi.reactor.blueprints.tests.test_reactorBlueprints`. This was built to replace the old system that loaded the core geometry from the ``cs['geometry']`` setting. Until the geom file-based input is completely removed, this system will attempt to migrate the core layout from geom files. 
When geom files are used, explicitly specifying a ``core`` system will result in an error. System Blueprints are a big step in the right direction to generalize user input, but was still mostly adapted from the old Core layout input. As such, they still only really support Core-like systems. Future work should generalize the concept of "system" to more varied scenarios. See Also -------- armi.reactor.blueprints.gridBlueprints : Method for storing system assembly layouts. """ import yamlize from armi import context, getPluginManagerOrFail, runLog from armi.reactor import geometry, grids from armi.reactor.blueprints.gridBlueprint import Triplet from armi.utils import tabulate class SystemBlueprint(yamlize.Object): """ The reactor-level structure input blueprint. .. impl:: Build core and spent fuel pool from blueprints :id: I_ARMI_BP_SYSTEMS :implements: R_ARMI_BP_SYSTEMS, R_ARMI_BP_CORE This class creates a yaml interface for the user to define systems with grids, such as cores or spent fuel pools, each having their own name, type, grid, and position in space. It is incorporated into the "systems" section of a blueprints file by being included as key-value pairs within the :py:class:`~armi.reactor.blueprints.reactorBlueprint.Systems` class, which is in turn included into the overall blueprints within :py:class:`~armi.reactor.blueprints.Blueprints`. This class includes a :py:meth:`~armi.reactor.blueprints.reactorBlueprint.SystemBlueprint.construct` method, which is typically called from within :py:func:`~armi.reactor.reactors.factory` during the initialization of the reactor object to instantiate the core and/or spent fuel pool objects. During that process, a spatial grid is constructed based on the grid blueprints specified in the "grids" section of the blueprints (see :need:`I_ARMI_BP_GRID`) and the assemblies needed to fill the lattice are built from blueprints using :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`. 
Notes ----- We use string keys to link grids to objects that use them. This differs from how blocks / assembies are specified, which use YAML anchors. YAML anchors have proven to be problematic and difficult to work with. """ name = yamlize.Attribute(key="name", type=str) typ = yamlize.Attribute(key="type", type=str, default="core") gridName = yamlize.Attribute(key="grid name", type=str) origin = yamlize.Attribute(key="origin", type=Triplet, default=None) def __init__(self, name=None, gridName=None, origin=None): """ A Reactor-level structure like a core, or ex-core like SFP. Notes ----- yamlize does not call an __init__ method, instead it uses __new__ and setattr this is only needed for when you want to make this object from a non-YAML source. """ self.name = name self.gridName = gridName self.origin = origin @staticmethod def _resolveSystemType(typ: str): """Loop over all plugins that could be attached and determine if any tell us how to build a specific systems attribute. """ manager = getPluginManagerOrFail() # Only need this to handle the case we don't find the system we expect seen = set() for options in manager.hook.defineSystemBuilders(): for key, builder in options.items(): # Take the first match we find. This would allow other plugins to define a new core builder before # finding those defined by the ReactorPlugin if key == typ: return builder seen.add(key) raise ValueError( f"Could not determine an appropriate class for handling a system of type `{typ}`. " f"Supported types are {seen}." ) def construct(self, cs, bp, reactor, loadComps=True): """Build a core or ex-core grid and fill it with children. 
Parameters ---------- cs : :py:class:`Settings <armi.settings.Settings>` armi settings to apply bp : :py:class:`Reactor <armi.reactor.blueprints.Blueprints>` armi blueprints to apply reactor : :py:class:`Reactor <armi.reactor.reactors.Reactor>` reactor to fill loadComps : bool, optional whether to fill reactor with assemblies, as defined in blueprints, or not. Is False in :py:class:`UniformMeshGeometryConverter <armi.reactor.converters.uniformMesh.UniformMeshGeometryConverter>` within the initNewReactor() method. Returns ------- Composite A Composite object with a grid, like a Spent Fuel Pool or other ex-core structure. Raises ------ ValueError input error, no grid design provided ValueError objects were added to non-existent grid locations """ runLog.info(f"Constructing the `{self.name}`") if not bp.gridDesigns: raise ValueError("The input must define grids to construct a reactor, but does not. Update input.") gridDesign = bp.gridDesigns.get(self.gridName, None) system = self._resolveSystemType(self.typ)(self.name) # Some systems may not require a prescribed grid design. Only use one if provided if gridDesign is not None: spatialGrid = gridDesign.construct() system.spatialGrid = spatialGrid system.spatialGrid.armiObject = system reactor.add(system) # ensure the reactor is the parent spatialLocator = grids.CoordinateLocation(self.origin.x, self.origin.y, self.origin.z, None) system.spatialLocator = spatialLocator if context.MPI_RANK != 0: # Non-primary nodes get the reactor via DistributeState. return None system = self._constructComposites(cs, bp, loadComps, system, gridDesign) return system def _constructComposites(self, cs, bp, loadComps, system, gridDesign): """Fill a grid with composities, if there are any to fill. Parameters ---------- cs : Settings object. armi settings to apply bp : Blueprints object. 
armi blueprints to apply loadComps : bool whether to fill reactor with composities, as defined in blueprints, or not system : Composite The composite we are building. gridDesign : GridBlueprint The definition of the grid on the object. Returns ------- Composite A Composite object with a grid, like a Spent Fuel Pool or other ex-core structure. """ from armi.reactor.reactors import Core # avoid circular import if loadComps and gridDesign is not None: self._loadComposites(cs, bp, system, gridDesign.gridContents, gridDesign.orientationBOL) if isinstance(system, Core): self._modifyGeometry(system, gridDesign) summarizeMaterialData(system) system.processLoading(cs) return system def _loadComposites(self, cs, bp, container, gridContents, orientationBOL): from armi.reactor.cores import Core runLog.header(f"=========== Adding Composites to {container} ===========") badLocations = set() for locationInfo, aTypeID in gridContents.items(): # handle the hex-grid special case, where the user enters (ring, pos) i, j = locationInfo if isinstance(container, Core) and container.geomType == geometry.GeomType.HEX: loc = container.spatialGrid.indicesToRingPos(i, j) else: loc = locationInfo # correctly rotate the Composite if orientationBOL is None or loc not in orientationBOL: orientation = 0.0 else: orientation = orientationBOL[loc] # create a new Composite to add to the grid newAssembly = bp.constructAssem(cs, specifier=aTypeID, orientation=orientation) # add the Composite to the grid posi = container.spatialGrid[i, j, 0] try: container.add(newAssembly, posi) except LookupError: badLocations.add(posi) if badLocations: raise ValueError(f"Attempted to add objects to non-existent locations on the grid: {badLocations}.") # init position history param on each assembly for a in container: loc = a.getLocation() if loc in a.NOT_IN_CORE: a.p.ringPosHist = [(loc, loc)] else: try: ring, pos, _ = grids.locatorLabelToIndices(a.getLocation()) a.p.ringPosHist = [(ring, pos)] except ValueError: # 
some ex-core structures may not have valid locator label indices a.p.ringPosHist = [(a.NOT_CREATED_YET, a.NOT_CREATED_YET)] def _modifyGeometry(self, container, gridDesign): """Perform post-load geometry conversions like full core, edge assems.""" # all cases should have no edge assemblies. They are added ephemerally when needed from armi.reactor.converters import geometryConverters runLog.header("=========== Applying Geometry Modifications ===========") if not container.isFullCore: runLog.extra("Applying non-full core modifications") converter = geometryConverters.EdgeAssemblyChanger() converter.scaleParamsRelatedToSymmetry(container) converter.removeEdgeAssemblies(container) # now update the spatial grid dimensions based on the populated children (unless specified on input) if not gridDesign.latticeDimensions: runLog.info(f"Updating spatial grid pitch data for {container.geomType} geometry") if container.geomType == geometry.GeomType.HEX: container.spatialGrid.changePitch(container[0][0].getPitch()) elif container.geomType == geometry.GeomType.CARTESIAN: xw, yw = container[0][0].getPitch() container.spatialGrid.changePitch(xw, yw) class Systems(yamlize.KeyedList): item_type = SystemBlueprint key_attr = SystemBlueprint.name def summarizeMaterialData(container): """ Create a summary of the material objects and source data for a reactor container. Parameters ---------- container : Core object Any Core object with Blocks and Components defined. 
""" runLog.header(f"=========== Summarizing Source of Material Data for {container} ===========") materialNames = set() materialData = [] for c in container.iterComponents(): if c.material.name in materialNames: continue materialData.append((c.material.name, c.material.DATA_SOURCE)) materialNames.add(c.material.name) materialData = sorted(materialData) runLog.info(tabulate.tabulate(data=materialData, headers=["Material Name", "Source Location"], tableFmt="armi")) return materialData ================================================ FILE: armi/reactor/blueprints/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/reactor/blueprints/tests/test_assemblyBlueprints.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for Assembly Blueprints.""" import unittest from armi import settings from armi.reactor import blueprints class TestMaterialModifications(unittest.TestCase): twoBlockInput_correct = r""" nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel, *block_fuel] height: [1.0, 1.0] axial mesh points: [1, 1] xs types: [A, A] """ twoBlockInput_wrongMeshPoints = r""" nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel, *block_fuel] height: [1.0, 1.0] axial mesh points: [1] xs types: [A, A] """ twoBlockInput_wrongHeights = r""" nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel, *block_fuel] height: [1.0] axial mesh points: [1, 1] xs types: [A, A] """ twoBlockInput_wrongXSTypes = r""" nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 
assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel, *block_fuel] height: [1.0, 1.0] axial mesh points: [1, 1] xs types: [A] """ twoBlockInput_wrongMatMods = r""" nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel, *block_fuel] height: [1.0, 1.0] axial mesh points: [1, 1] xs types: [A, A] material modifications: U235_wt_frac: [0.5] """ def loadCustomAssembly(self, assemblyInput): yamlString = assemblyInput design = blueprints.Blueprints.load(yamlString) design._prepConstruction(settings.Settings()) return design.assemblies["fuel a"] def test_checkParamConsistency(self): """ Load assembly from a blueprint file. .. test:: Create assembly from blueprint file. 
:id: T_ARMI_BP_ASSEM :tests: R_ARMI_BP_ASSEM """ # make sure a good example doesn't error a = self.loadCustomAssembly(self.twoBlockInput_correct) blockAxialMesh = a.getAxialMesh() blockXSTypes = [a[0].p.xsType, a[1].p.xsType] self.assertAlmostEqual(blockAxialMesh, [1.0, 2.0]) self.assertEqual(blockXSTypes, ["A", "A"]) with self.assertRaises(ValueError): a = self.loadCustomAssembly(self.twoBlockInput_wrongMeshPoints) with self.assertRaises(ValueError): a = self.loadCustomAssembly(self.twoBlockInput_wrongHeights) with self.assertRaises(ValueError): a = self.loadCustomAssembly(self.twoBlockInput_wrongXSTypes) with self.assertRaises(ValueError): a = self.loadCustomAssembly(self.twoBlockInput_wrongMatMods) ================================================ FILE: armi/reactor/blueprints/tests/test_blockBlueprints.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for block blueprints.""" import io import unittest from armi import settings from armi.reactor import blueprints from armi.reactor.flags import Flags from armi.reactor.tests import test_blocks FULL_BP = """ blocks: fuel: &block_fuel grid name: fuelgrid fuel: shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.7 latticeIDs: [1] clad: # same args as test_blocks (except mult) shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: .77 od: .80 latticeIDs: [1,2] coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 16.0 mult: 1.0 op: 16.6 intercoolant: shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: duct.op mult: 1.0 op: 16.75 other fuel: &block_fuel_other grid name: fuelgrid flags: fuel test depletable fuel: shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.67 latticeIDs: [1] clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: .77 od: .80 latticeIDs: [1,2] coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 16.0 mult: 1.0 op: 16.6 intercoolant: shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: duct.op mult: 1.0 op: 16.75 assemblies: fuel: specifier: IC blocks: [*block_fuel, *block_fuel_other] height: [25.0, 25.0] axial mesh points: [1, 1] material modifications: U235_wt_frac: [0.11, 0.11] ZR_wt_frac: [0.06, 0.06] xs types: [A, A] fuel other: flags: fuel test specifier: ID blocks: [*block_fuel, *block_fuel_other] height: [25.0, 25.0] axial mesh points: [1, 1] material modifications: U235_wt_frac: [0.11, 0.11] ZR_wt_frac: [0.06, 0.06] xs types: [A, A] grids: fuelgrid: geom: hex_corners_up symmetry: full lattice map: | - - - 1 1 1 1 - - 1 1 2 1 1 - 1 1 1 1 1 1 1 2 1 2 1 2 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 """ FULL_BP_ERRANT_ID = ( FULL_BP.split("lattice map:")[0] + """lattice map: | - - - 1 1 1 1 - - 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 """ ) FULL_BP_NO_COMP = ( FULL_BP.split("lattice map:")[0] + """lattice map: | - - - 1 1 1 1 - - 1 1 1 1 1 - 1 1 1 1 1 1 1 3 1 1 1 3 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 """ ) FULL_BP_GRID = ( FULL_BP.split("lattice map:")[0] + """grid contents: ? - -3 - 3 : '1' ? - -2 - 3 : '1' ? - -1 - 3 : '1' ? - 0 - 3 : '1' ? - -3 - 2 : '1' ? - -2 - 2 : '1' ? - -1 - 2 : '2' ? - 0 - 2 : '1' ? - 1 - 2 : '1' ? - -3 - 1 : '1' ? - -2 - 1 : '1' ? - -1 - 1 : '1' ? - 0 - 1 : '1' ? - 1 - 1 : '1' ? - 2 - 1 : '1' ? - -3 - 0 : '1' ? - -2 - 0 : '3' ? - -1 - 0 : '1' ? - 0 - 0 : '2' ? - 1 - 0 : '1' ? - 2 - 0 : '3' ? - 3 - 0 : '1' ? - -2 - -1 : '1' ? - -1 - -1 : '1' ? - 0 - -1 : '1' ? - 1 - -1 : '1' ? - 2 - -1 : '1' ? - 3 - -1 : '1' ? - -1 - -2 : '1' ? - 0 - -2 : '1' ? - 1 - -2 : '2' ? - 2 - -2 : '1' ? - 3 - -2 : '1' ? - 0 - -3 : '1' ? - 1 - -3 : '1' ? - 2 - -3 : '1' ? - 3 - -3 : '1' """ ) class TestGriddedBlock(unittest.TestCase): """Tests for a block that has components in a lattice.""" def setUp(self): self.cs = settings.Settings() with io.StringIO(FULL_BP) as stream: self.blueprints = blueprints.Blueprints.load(stream) self.blueprints._prepConstruction(self.cs) def test_constructSpatialGrid(self): """Test intermediate grid construction function.""" bDesign = self.blueprints.blockDesigns["fuel"] gridDesign = bDesign._getGridDesign(self.blueprints) self.assertEqual(gridDesign.gridContents[0, 0], "2") def test_getLocatorsAtLatticePositions(self): """Ensure extraction of specifiers results in locators.""" bDesign = self.blueprints.blockDesigns["fuel"] gridDesign = bDesign._getGridDesign(self.blueprints) grid = gridDesign.construct() locators = gridDesign.getLocators(grid, ["2"]) self.assertEqual(len(locators), 5) self.assertIs(grid[locators[0].getCompleteIndices()], locators[0]) def test_blockLattice(self): """Make sure constructing a block with grid specifiers works as a whole. .. test:: Create block with blueprint file. 
:id: T_ARMI_BP_BLOCK :tests: R_ARMI_BP_BLOCK """ aDesign = self.blueprints.assemDesigns.bySpecifier["IC"] a = aDesign.construct(self.cs, self.blueprints) fuelBlock = a.getFirstBlock(Flags.FUEL) fuel = fuelBlock.getComponent(Flags.FUEL) self.assertTrue(fuel.spatialLocator) seen = False for locator in fuel.spatialLocator: if locator == (1, 0, 0): seen = True self.assertTrue(seen) def test_componentsNotInLattice(self): """ Ensure that we catch cases when a component is expected to be in the grid, but is not. In this case, latticeID "2" is not in the lattice. """ with self.assertRaises(ValueError) as ee: with io.StringIO(FULL_BP_ERRANT_ID) as stream: self.blueprints = blueprints.Blueprints.load(stream) self.blueprints._prepConstruction(self.cs) self.assertIn( "Check that the component's latticeIDs align with the block's grid.", ee.args[0], ) def test_latticeNotInComponents(self): """ Ensure that we catch cases when a latticeID listed in the grid is not present in any of the components on the block. In this case, latticeID "2" is not in the lattice. """ with self.assertRaises(ValueError) as ee: with io.StringIO(FULL_BP_NO_COMP) as stream: self.blueprints = blueprints.Blueprints.load(stream) self.blueprints._prepConstruction(self.cs) self.assertIn( "All IDs in the grid must appear in at least one component.", ee.args[0], ) def test_nonLatticeComponentHasRightMult(self): """Make sure non-grid components in blocks with grids get the right multiplicity.""" aDesign = self.blueprints.assemDesigns.bySpecifier["IC"] a = aDesign.construct(self.cs, self.blueprints) fuelBlock = a.getFirstBlock(Flags.FUEL) duct = fuelBlock.getComponent(Flags.DUCT) self.assertEqual(duct.getDimension("mult"), 1.0) def test_explicitFlags(self): """ Test flags are created from blueprint file. .. test:: Nuc flags can define depletable objects. 
:id: T_ARMI_BP_NUC_FLAGS0 :tests: R_ARMI_BP_NUC_FLAGS """ a1 = self.blueprints.assemDesigns.bySpecifier["IC"].construct(self.cs, self.blueprints) b1 = a1[0] b2 = a1[1] a2 = self.blueprints.assemDesigns.bySpecifier["ID"].construct(self.cs, self.blueprints) self.assertTrue(b1.hasFlags(Flags.FUEL, exact=True)) self.assertTrue(b2.hasFlags(Flags.FUEL | Flags.TEST | Flags.DEPLETABLE, exact=True)) self.assertEqual(a1.p.flags, Flags.FUEL) self.assertTrue(a1.hasFlags(Flags.FUEL, exact=True)) self.assertTrue(a2.hasFlags(Flags.FUEL | Flags.TEST, exact=True)) def test_densConsistentCompConstructor(self): a1 = self.blueprints.assemDesigns.bySpecifier["IC"].construct(self.cs, self.blueprints) fuelBlock = a1[0] clad = fuelBlock.getComponent(Flags.CLAD) # now construct clad programmatically like in test_Blocks programmaticBlock = test_blocks.buildSimpleFuelBlock() programaticClad = programmaticBlock.getComponent(Flags.CLAD) self.assertAlmostEqual( clad.density(), clad.material.density(Tc=clad.temperatureInC), ) self.assertAlmostEqual( clad.density(), programaticClad.density(), ) ================================================ FILE: armi/reactor/blueprints/tests/test_blueprints.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests the blueprints (loading input) file.""" import io import os import pathlib import unittest import yamlize from armi import settings from armi.nucDirectory.nuclideBases import NuclideBases from armi.physics.neutronics.settings import CONF_XS_KERNEL from armi.reactor import blueprints, parameters from armi.reactor.blueprints.componentBlueprint import ComponentBlueprint from armi.reactor.blueprints.gridBlueprint import saveToStream from armi.reactor.blueprints.isotopicOptions import CustomIsotopics, NuclideFlags from armi.reactor.flags import Flags from armi.settings.fwSettings.globalSettings import CONF_INPUT_HEIGHTS_HOT from armi.tests import TEST_ROOT from armi.utils import directoryChangers, textProcessors class TestBlueprints(unittest.TestCase): """Test that the basic functionality of faithfully receiving user input to construct ARMI data model objects works as expected. Try to ensure you test for ideas and not exact matches here, to make the tests more robust. """ @classmethod def setUpClass(cls): cls.cs = settings.Settings() cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT) cls.directoryChanger.open() y = textProcessors.resolveMarkupInclusions(pathlib.Path(os.getcwd()) / "refSmallReactor.yaml") cls.blueprints = blueprints.Blueprints.load(y) cls.blueprints._prepConstruction(cls.cs) @classmethod def tearDownClass(cls): cls.directoryChanger.close() @staticmethod def __stubify(latticeMap): """Little helper method to allow lattie maps to be compared free of whitespace.""" return latticeMap.replace(" ", "").replace("-", "").replace("\n", "") def test_roundTripCompleteBP(self): """Test the round-tip of reading and writing blueprint files. .. test:: Validates the round trip of reading and writing blueprints. 
:id: T_ARMI_BP_TO_DB1 :tests: R_ARMI_BP_TO_DB """ # the correct lattice map latticeMap = """- - SH - SH SH - SH OC SH SH OC OC SH OC IC OC SH OC IC IC OC SH IC IC IC OC SH IC IC PC OC SH IC PC IC IC OC SH LA IC IC IC OC IC IC IC IC SH IC LB IC IC OC IC IC PC IC SH LA IC IC OC IC IC IC IC SH IC IC IC OC IC IC IC PC SH""" latticeMap = self.__stubify(latticeMap) # validate some core elements from the blueprints self.assertEqual(self.blueprints.gridDesigns["core"].symmetry, "third periodic") map0 = self.__stubify(self.blueprints.gridDesigns["core"].latticeMap) self.assertEqual(map0, latticeMap) # save the blueprint to a stream stream = io.StringIO() stream.seek(0) self.blueprints.dump(self.blueprints) saveToStream(stream, self.blueprints, True, True) stream.seek(0) with directoryChangers.TemporaryDirectoryChanger(): # save the stream to a file filePath = "test_roundTripCompleteBP.yaml" with open(filePath, "w") as fout: fout.write(stream.read()) # load the blueprint from that file again bp = blueprints.Blueprints.load(open(filePath, "r").read()) # re-validate some core elements from the blueprints self.assertEqual(bp.gridDesigns["core"].symmetry, "third periodic") map1 = self.__stubify(bp.gridDesigns["core"].latticeMap) self.assertEqual(map1, latticeMap) def test_nuclides(self): """Tests the available sets of nuclides work as expected.""" actives = set(self.blueprints.activeNuclides) inerts = set(self.blueprints.inertNuclides) self.assertEqual(actives.union(inerts), set(self.blueprints.allNuclidesInProblem)) self.assertEqual(actives.intersection(inerts), set()) def test_getAssemblyTypeBySpecifier(self): aDesign = self.blueprints.assemDesigns.bySpecifier["IC"] self.assertEqual(aDesign.name, "igniter fuel") self.assertEqual(aDesign.specifier, "IC") def test_specialIsotopicVectors(self): mox = self.blueprints.customIsotopics["MOX"] allNucsInProblem = set(self.blueprints.allNuclidesInProblem) for a in mox.keys(): self.assertIn(a, allNucsInProblem) self.assertIn("U235", mox) 
self.assertAlmostEqual(mox["PU239"], 0.00286038) def test_componentDimensions(self): """Tests that the user can specify the dimensions of a component with arbitrary fidelity. .. test:: A component can be correctly created from a blueprint file. :id: T_ARMI_BP_COMP :tests: R_ARMI_BP_COMP """ fuelAssem = self.blueprints.constructAssem(self.cs, name="igniter fuel") fuel = fuelAssem.getComponents(Flags.FUEL)[0] self.assertAlmostEqual(fuel.getDimension("od", cold=True), 0.86602) self.assertAlmostEqual(fuel.getDimension("id", cold=True), 0.0) self.assertAlmostEqual(fuel.getDimension("od"), 0.87763665, 4) self.assertAlmostEqual(fuel.getDimension("id"), 0.0) self.assertAlmostEqual(fuel.getDimension("mult"), 169) def test_traceNuclides(self): """Ensure that armi.reactor.blueprints.componentBlueprint.insertDepletableNuclideKeys runs. .. test:: Users marking components as depletable will affect number densities. :id: T_ARMI_BP_NUC_FLAGS1 :tests: R_ARMI_BP_NUC_FLAGS """ fuel = ( self.blueprints.constructAssem(self.cs, "igniter fuel").getFirstBlock(Flags.FUEL).getComponent(Flags.FUEL) ) self.assertIn("AM241", fuel.getNuclides()) self.assertLess(fuel.getNumberDensity("AM241"), 1e-5) class TestBlueprintsSchema(unittest.TestCase): """Test the blueprint schema checks.""" _yamlString = r"""blocks: fuel: &block_fuel fuel: &component_fuel_fuel shape: Hexagon material: UZr Tinput: 25.0 Thot: 600.0 ip: 0.0 mult: 1.0 op: 10.0 fuel2: &block_fuel2 group1: shape: Group duct: shape: Hexagon material: UZr Tinput: 25.0 Thot: 600.0 ip: 9.0 mult: 1.0 op: 10.0 matrix: shape: DerivedShape material: Graphite Tinput: 25.0 Thot: 600.0 components: freefuel: shape: Sphere material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 1.0 od: 4.0 freeclad: shape: Sphere material: HT9 Tinput: 25.0 Thot: 600.0 id: 4.0 mult: 1.0 od: 4.1 component groups: group1: freefuel: mult: 1.0 freeclad: mult: 1.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel] height: [1.0] axial mesh points: [1] xs types: 
[A] fuel b: <<: *assembly_a hotChannelFactors: Reactor fuel c: &assembly_c specifier: OC blocks: [*block_fuel2] height: [1.0] axial mesh points: [1] xs types: [A] grids: pins: geom: cartesian lattice map: | 2 2 2 2 2 2 1 1 1 2 2 1 3 1 2 2 3 1 1 2 2 2 2 2 2 """ def test_noDuplicateKeysInYamlBlueprints(self): """ Prove that if you duplicate a section of a YAML blueprint file, a hard error will be thrown. """ # loop through a few different sections, to test blueprints broadly sections = ["blocks:", "components:", "component groups:"] for sectionName in sections: # modify blueprint YAML to duplicate this section yamlString = str(self._yamlString) i = yamlString.find(sectionName) lenSection = yamlString[i:].find("\n\n") section = yamlString[i : i + lenSection] yamlString = yamlString[:i] + section + yamlString[i : i + lenSection] # validate that this is now an invalid YAML blueprint with self.assertRaises(Exception): _design = blueprints.Blueprints.load(yamlString) def test_assemblyParameters(self): cs = settings.Settings() design = blueprints.Blueprints.load(self._yamlString) fa = design.constructAssem(cs, name="fuel a") fb = design.constructAssem(cs, name="fuel b") for paramDef in fa.p.paramDefs.inCategory(parameters.Category.assignInBlueprints): # Semantics of __iter__() and items() is different now in the parameter system. We use the parameter # definitions (which have a global-ish sense of `assigned`ness), so we can't tell, per-object, whether # they've been set. 
self.assertEqual(paramDef.default, fa.p[paramDef.name]) self.assertIn(paramDef.name, fb.p) self.assertEqual(fa.p.hotChannelFactors, "Default") self.assertEqual(fb.p.hotChannelFactors, "Reactor") def test_nuclidesMc2v2(self): """Tests that ZR is not expanded to its isotopics for this setting.""" cs = settings.Settings() newSettings = {CONF_XS_KERNEL: "MC2v2"} cs = cs.modified(newSettings=newSettings) design = blueprints.Blueprints.load(self._yamlString) design._prepConstruction(cs) self.assertTrue(set({"U238", "U235", "ZR"}).issubset(set(design.allNuclidesInProblem))) assem = design.constructAssem(cs, name="fuel a") self.assertTrue(set(assem.getNuclides()).issubset(set(design.allNuclidesInProblem))) def test_nuclidesMc2v3(self): """Tests that ZR is expanded to its isotopics for MC2v3.""" cs = settings.Settings() newSettings = {CONF_XS_KERNEL: "MC2v3"} cs = cs.modified(newSettings=newSettings) design = blueprints.Blueprints.load(self._yamlString) design._prepConstruction(cs) # 93 and 95 are not naturally occurring. 
zrNucs = {"ZR" + str(A) for A in range(90, 97)} - {"ZR93", "ZR95"} self.assertTrue(set({"U238", "U235"} | zrNucs).issubset(set(design.allNuclidesInProblem))) self.assertTrue(zrNucs.issubset(set(design.inertNuclides))) assem = design.constructAssem(cs, name="fuel a") # the assembly won't get non-naturally occurring nuclides nb = NuclideBases() unnaturalZr = (n.name for n in nb.elements.bySymbol["ZR"].nuclides if n.abundance == 0.0) designNucs = set(design.allNuclidesInProblem).difference(unnaturalZr) self.assertTrue(set(assem.getNuclides()).issubset(designNucs)) def test_merge(self): yamlString = r""" nuclide flags: B10: {burn: true, xs: true} B11: {burn: true, xs: true} DUMP1: {burn: true, xs: true} FE: {burn: true, xs: true} NI: {burn: true, xs: true} C: {burn: true, xs: true} MO: {burn: true, xs: true} SI: {burn: true, xs: true} CR: {burn: true, xs: true} MN: {burn: true, xs: true} NA: {burn: true, xs: true} V: {burn: true, xs: true} W: {burn: true, xs: true} blocks: nomerge block: &unmerged_block A: &comp_a shape: Circle material: B4C Tinput: 50.0 Thot: 500.0 id: 0.0 mult: 1 od: .5 Gap1: &comp_gap shape: Circle material: Void Tinput: 50.0 Thot: 500.0 id: A.od mult: 1 od: B.id B: &gcomp_b shape: Circle material: HT9 Tinput: 20.0 Thot: 600.0 id: .5 mult: 1 od: .75 Gap2: &comp_gap2 shape: Circle material: Void Tinput: 50.0 Thot: 500.0 id: B.od mult: 1 od: Clad.id Clad: &comp_clad shape: Circle material: HT9 Tinput: 20.0 Thot: 700.0 id: .75 mult: 1 od: 1.0 coolant: &comp_coolant shape: DerivedShape material: Sodium Tinput: 600.0 Thot: 600.0 duct: &comp_duct shape: Hexagon material: HT9 Tinput: 20.0 Thot: 500.0 ip: 1.2 mult: 1 op: 1.4 intercoolant: &comp_intercoolant shape: Hexagon material: Sodium Tinput: 500.0 Thot: 500.0 ip: duct.op mult: 1 op: 1.6 merge block: &merged_block A: <<: *comp_a mergeWith: Clad Gap1: *comp_gap B: <<: *gcomp_b mergeWith: Clad Gap2: *comp_gap2 Clad: *comp_clad coolant: *comp_coolant duct: *comp_duct intercoolant: *comp_intercoolant 
assemblies: a: &assembly_a specifier: IC blocks: [*merged_block, *unmerged_block] height: [1.0, 1.0] axial mesh points: [1, 1] xs types: [A, A] """ bp = blueprints.Blueprints.load(yamlString) a = bp.constructAssem(settings.Settings(), name="a") mergedBlock, unmergedBlock = a self.assertNotIn("A", mergedBlock.getComponentNames()) self.assertNotIn("B", mergedBlock.getComponentNames()) self.assertEqual(len(mergedBlock) + 4, len(unmergedBlock)) self.assertAlmostEqual( sum(c.getArea() for c in mergedBlock), sum(c.getArea() for c in unmergedBlock), ) mergedNucs, unmergedNucs = ( mergedBlock.getNumberDensities(), unmergedBlock.getNumberDensities(), ) errorMessage = "" for nucName in set(unmergedNucs) | set(mergedNucs): n1, n2 = unmergedNucs[nucName], mergedNucs[nucName] try: self.assertAlmostEqual(n1, n2) except AssertionError: errorMessage += "\nnuc {} not equal. unmerged: {} merged: {}".format(nucName, n1, n2) self.assertTrue(not errorMessage, errorMessage) self.assertAlmostEqual(mergedBlock.getMass(), unmergedBlock.getMass()) def test_nuclideFlags(self): with self.assertRaises(yamlize.YamlizingError): NuclideFlags.load("{potato: {burn: true, xs: true}}") with self.assertRaises(yamlize.YamlizingError): NuclideFlags.load("{U238: {burn: 12, xs: 0}}") def test_customIsotopics(self): with self.assertRaises(yamlize.YamlizingError): CustomIsotopics.load("MOX: {input format: applesauce}") with self.assertRaises(yamlize.YamlizingError): CustomIsotopics.load("MOX: {input format: number densities, density: -0.1}") with self.assertRaises(yamlize.YamlizingError): CustomIsotopics.load("MOX: {input format: number densities, density: 1.5, FAKENUC234: 0.000286}") def test_components(self): bads = [ # bad shape { "shape": "potato", "name": "name", "material": "HT9", "Tinput": 1.0, "Thot": 1.0, }, # bad merge { "shape": "circle", "name": "name", "material": "HT9", "Tinput": 1.0, "Thot": 1.0, "mergeWith": 6, }, # bad isotopics { "shape": "circle", "name": "name", "material": "HT9", 
"Tinput": 1.0, "Thot": 1.0, "isotopics": 4, }, # bad key { "shape": "circle", "name": "name", "material": "HT9", "Tinput": 1.0, "Thot": 1.0, 5: "od", }, # bad linked dimension { "shape": "circle", "name": "name", "material": "HT9", "Tinput": 1.0, "Thot": 1.0, "mult": "potato,mult", }, ] for bad in bads: with self.assertRaises(yamlize.YamlizingError): ComponentBlueprint.load(repr(bad)) def test_cladding_invalid(self): """Make sure cladding input components are flagged as invalid.""" bad = { "name": "cladding", "shape": "Circle", "material": "HT9", "Tinput": 1.0, "Thot": 1.0, } with self.assertRaises(yamlize.YamlizingError): ComponentBlueprint.load(repr(bad)) def test_withoutBlocks(self): # Some projects use a script to generate an input that has completely unique blocks, # so the blocks: section is not needed yamlWithoutBlocks = """ nuclide flags: U238: {burn: true, xs: true} U235: {burn: true, xs: true} LFP35: {burn: true, xs: true} U236: {burn: true, xs: true} PU239: {burn: true, xs: true} DUMP2: {burn: true, xs: true} DUMP1: {burn: true, xs: true} NP237: {burn: true, xs: true} PU238: {burn: true, xs: true} PU236: {burn: true, xs: true} LFP39: {burn: true, xs: true} PU238: {burn: true, xs: true} LFP40: {burn: true, xs: true} PU241: {burn: true, xs: true} LFP38: {burn: true, xs: true} U234: {burn: true, xs: true} AM241: {burn: true, xs: true} LFP41: {burn: true, xs: true} PU242: {burn: true, xs: true} AM243: {burn: true, xs: true} CM244: {burn: true, xs: true} CM242: {burn: true, xs: true} AM242: {burn: true, xs: true} PU240: {burn: true, xs: true} CM245: {burn: true, xs: true} NP238: {burn: true, xs: true} CM243: {burn: true, xs: true} CM246: {burn: true, xs: true} CM247: {burn: true, xs: true} ZR: {burn: false, xs: true} assemblies: fuel a: &assembly_a specifier: FF blocks: - { name: fuel, fuel: { shape: Hexagon, material: UZr, Tinput: 25.0, Thot: 600.0, ip: 0.0, mult: 1.0, op: 10.0} } height: [1.0] axial mesh points: [1] xs types: [A] fuel b: <<: *assembly_a 
specifier: IF """ cs = settings.Settings() design = blueprints.Blueprints.load(yamlWithoutBlocks) design.constructAssem(cs, name="fuel a") fa = design.constructAssem(cs, name="fuel a") fb = design.constructAssem(cs, name="fuel b") for a in (fa, fb): self.assertEqual(1, len(a)) self.assertEqual(1, len(a[0])) def test_topLevelComponentInput(self): """ Make sure components defined at the top level are loaded. Components can be loaded either within the block blueprint or on their own outside of blocks. This checks the latter form. We specified a 3D component in the test input (sphere) so that it has a height and therefore a volume without requiring a parent. """ cs = settings.Settings() design = blueprints.Blueprints.load(self._yamlString) # The following is needed to prep customisotopics # which is required during construction of a component design._resolveNuclides(cs) componentDesign = design.componentDesigns["freefuel"] topComponent = componentDesign.construct(design, {}, cs[CONF_INPUT_HEIGHTS_HOT]) self.assertEqual(topComponent.getDimension("od", cold=True), 4.0) self.assertGreater(topComponent.getVolume(), 0.0) self.assertGreater(topComponent.getMass("U235"), 0.0) def test_componentGroupInput(self): """Make sure component groups can be input in blueprints.""" design = blueprints.Blueprints.load(self._yamlString) componentGroup = design.componentGroups["group1"] self.assertEqual(componentGroup["freefuel"].name, "freefuel") self.assertEqual(componentGroup["freefuel"].mult, 1.0) ================================================ FILE: armi/reactor/blueprints/tests/test_componentBlueprint.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for testing componentBlueprint.""" import inspect import unittest from armi import settings from armi.reactor import blueprints from armi.reactor.flags import Flags class TestComponentBlueprint(unittest.TestCase): componentString = r""" blocks: block: &block component: flags: {flags} shape: Hexagon material: {material} # This is being used to format a string to allow for different materials to be added {isotopics} # This is being used to format a string to allow for different isotopics to be added Tinput: 25.0 Thot: 600.0 ip: 0.0 mult: 169.0 op: 0.86602 assemblies: assembly: &assembly_a specifier: IC blocks: [*block] height: [1.0] axial mesh points: [1] xs types: [A] """ def test_compInitIncompleteBurnChain(self): nuclideFlagsFuelWithBurn = ( inspect.cleandoc( r""" nuclide flags: U238: {burn: true, xs: true} U235: {burn: true, xs: true} ZR: {burn: false, xs: true} """ ) + "\n" ) bp = blueprints.Blueprints.load( nuclideFlagsFuelWithBurn + self.componentString.format(material="UZr", isotopics="", flags="") ) cs = settings.Settings() with self.assertRaises(ValueError): bp.constructAssem(cs, "assembly") def test_compInitControlCustomIso(self): nuclideFlags = ( inspect.cleandoc( """ nuclide flags: U234: {burn: true, xs: true} U235: {burn: true, xs: true} U238: {burn: true, xs: true} B10: {burn: true, xs: true} B11: {burn: true, xs: true} C: {burn: true, xs: true} DUMP1: {burn: true, xs: true} custom isotopics: B4C: input format: number densities B10: 1.0 B11: 1.0 C: 1.0 """ ) + "\n" ) bp = blueprints.Blueprints.load( nuclideFlags + 
self.componentString.format(material="Custom", isotopics="isotopics: B4C", flags="") ) cs = settings.Settings() _ = bp.constructAssem(cs, "assembly") def test_autoDepletable(self): nuclideFlags = ( inspect.cleandoc( """ nuclide flags: U234: {burn: true, xs: true} U235: {burn: true, xs: true} U238: {burn: true, xs: true} B10: {burn: true, xs: true} B11: {burn: true, xs: true} C: {burn: true, xs: true} DUMP1: {burn: true, xs: true} custom isotopics: B4C: input format: number densities B10: 1.0 B11: 1.0 C: 1.0 """ ) + "\n" ) bp = blueprints.Blueprints.load( nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: B4C", flags="") ) cs = settings.Settings() a = bp.constructAssem(cs, "assembly") expectedNuclides = ["B10", "B11", "C", "DUMP1"] unexpectedNuclides = ["U234", "U325", "U238"] for nuc in expectedNuclides: self.assertIn(nuc, a[0][0].getNuclides()) for nuc in unexpectedNuclides: self.assertNotIn(nuc, a[0][0].getNuclides()) c = a[0][0] # Since we didn't supply flags, we should get the DEPLETABLE flag added # automatically, since this one has depletable nuclides self.assertEqual(c.p.flags, Flags.DEPLETABLE) # More robust test, but worse unittest.py output when it fails self.assertTrue(c.hasFlags(Flags.DEPLETABLE)) # repeat the process with some flags set explicitly bp = blueprints.Blueprints.load( nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: B4C", flags="fuel test") ) cs = settings.Settings() a = bp.constructAssem(cs, "assembly") c = a[0][0] # Since we supplied flags, we should NOT get the DEPLETABLE flag added self.assertEqual(c.p.flags, Flags.FUEL | Flags.TEST) # More robust test, but worse unittest.py output when it fails self.assertTrue(c.hasFlags(Flags.FUEL | Flags.TEST)) def test_compInitAmericiumCustomIso(self): nuclideFlags = ( inspect.cleandoc( r""" nuclide flags: CM242: {burn: true, xs: true} PU241: {burn: true, xs: true} AM242G: {burn: true, xs: true} AM242M: {burn: true, xs: true} AM241: 
{burn: true, xs: true} LFP41: {burn: true, xs: true} PU240: {burn: true, xs: true} AM243: {burn: true, xs: true} NP238: {burn: true, xs: true} PU242: {burn: true, xs: true} CM243: {burn: true, xs: true} PU238: {burn: true, xs: true} DUMP2: {burn: true, xs: true} DUMP1: {burn: true, xs: true} U238: {burn: true, xs: true} CM244: {burn: true, xs: true} LFP40: {burn: true, xs: true} U236: {burn: true, xs: true} PU236: {burn: true, xs: true} U234: {burn: true, xs: true} CM245: {burn: true, xs: true} PU239: {burn: true, xs: true} NP237: {burn: true, xs: true} U235: {burn: true, xs: true} LFP39: {burn: true, xs: true} LFP35: {burn: true, xs: true} LFP38: {burn: true, xs: true} CM246: {burn: true, xs: true} CM247: {burn: true, xs: true} B10: {burn: true, xs: true} B11: {burn: true, xs: true} W186: {burn: true, xs: true} C: {burn: true, xs: true} S: {burn: true, xs: true} P: {burn: true, xs: true} custom isotopics: AM: input format: number densities AM241: 1.0 """ ) + "\n" ) bp = blueprints.Blueprints.load( nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: AM", flags="") ) cs = settings.Settings() a = bp.constructAssem(cs, "assembly") expectedNuclides = [ "AM241", "U238", "AM243", "AM242M", "NP237", "NP238", "U234", "U235", "LFP38", "LFP39", "PU239", "PU238", "LFP35", "U236", "CM247", "CM246", "CM245", "CM244", "PU240", "PU241", "PU242", "PU236", "CM243", "CM242", "DUMP2", "LFP41", "LFP40", ] unexpectedNuclides = ["B10", "B11", "W186", "C", "S", "P"] for nuc in expectedNuclides: self.assertIn(nuc, a[0][0].getNuclides()) for nuc in unexpectedNuclides: self.assertNotIn(nuc, a[0][0].getNuclides()) def test_compInitThoriumBurnCustomIso(self): nuclideFlags = ( inspect.cleandoc( r""" nuclide flags: TH232: {burn: true, xs: true} PA233: {burn: true, xs: true} PA231: {burn: true, xs: true} U232: {burn: true, xs: true} U233: {burn: true, xs: true} CM242: {burn: true, xs: true} PU241: {burn: true, xs: true} AM242G: {burn: true, xs: true} AM242M: 
{burn: true, xs: true} AM241: {burn: true, xs: true} LFP41: {burn: true, xs: true} PU240: {burn: true, xs: true} AM243: {burn: true, xs: true} NP238: {burn: true, xs: true} PU242: {burn: true, xs: true} CM243: {burn: true, xs: true} PU238: {burn: true, xs: true} DUMP2: {burn: true, xs: true} DUMP1: {burn: true, xs: true} U238: {burn: true, xs: true} CM244: {burn: true, xs: true} LFP40: {burn: true, xs: true} U236: {burn: true, xs: true} PU236: {burn: true, xs: true} U234: {burn: true, xs: true} CM245: {burn: true, xs: true} PU239: {burn: true, xs: true} NP237: {burn: true, xs: true} U235: {burn: true, xs: true} LFP39: {burn: true, xs: true} LFP35: {burn: true, xs: true} LFP38: {burn: true, xs: true} CM246: {burn: true, xs: true} CM247: {burn: true, xs: true} custom isotopics: Thorium: input format: number densities TH232: 1.0 """ ) + "\n" ) bp = blueprints.Blueprints.load( nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: Thorium", flags="") ) cs = settings.Settings() a = bp.constructAssem(cs, "assembly") expectedNuclides = ["TH232", "PA233", "PA231", "DUMP2", "LFP35"] for nuc in expectedNuclides: self.assertIn(nuc, a[0][0].getNuclides()) def test_compInitThoriumNoBurnCustomIso(self): nuclideFlags = ( inspect.cleandoc( r""" nuclide flags: TH232: {burn: false, xs: true} custom isotopics: Thorium: input format: number densities TH232: 1.0 """ ) + "\n" ) bp = blueprints.Blueprints.load( nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: Thorium", flags="") ) cs = settings.Settings() a = bp.constructAssem(cs, "assembly") expectedNuclides = ["TH232"] for nuc in expectedNuclides: self.assertIn(nuc, a[0][0].getNuclides()) ================================================ FILE: armi/reactor/blueprints/tests/test_customIsotopics.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file 
except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit test custom isotopics.""" import unittest from logging import DEBUG import numpy as np import yamlize from armi import runLog, settings from armi.materials import Fluid, Sodium from armi.physics.neutronics.settings import ( CONF_MCNP_LIB_BASE, CONF_NEUTRONICS_KERNEL, CONF_XS_KERNEL, ) from armi.reactor import blueprints from armi.reactor.blueprints import isotopicOptions from armi.reactor.flags import Flags from armi.tests import mockRunLogs from armi.utils.customExceptions import InputError from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestCustomIsotopics(unittest.TestCase): yamlPreamble = r""" nuclide flags: U238: {burn: true, xs: true} U235: {burn: true, xs: true} U234: {burn: true, xs: true} ZR: {burn: false, xs: true} AL: {burn: false, xs: true} FE: {burn: false, xs: true} C: {burn: false, xs: true} NA: {burn: false, xs: true} DUMP2: {burn: true, xs: true} DUMP1: {burn: true, xs: true} LFP35: {burn: true, xs: true} PU239: {burn: true, xs: true} NP237: {burn: true, xs: true} LFP38: {burn: true, xs: true} LFP39: {burn: true, xs: true} PU240: {burn: true, xs: true} PU236: {burn: true, xs: true} PU238: {burn: true, xs: true} U236: {burn: true, xs: true} LFP40: {burn: true, xs: true} PU241: {burn: true, xs: true} AM241: {burn: true, xs: true} LFP41: {burn: true, xs: true} PU242: {burn: true, xs: true} AM243: {burn: true, xs: true} CM244: {burn: true, xs: true} CM242: {burn: true, xs: true} AM242: {burn: true, xs: true} CM245: {burn: true, xs: true} NP238: {burn: true, xs: true} CM243: {burn: 
true, xs: true} CM246: {burn: true, xs: true} CM247: {burn: true, xs: true} NI: {burn: true, xs: true} W: {burn: true, xs: true, expandTo: ["W182", "W183", "W184", "W186"]} MN: {burn: true, xs: true} CR: {burn: true, xs: true} V: {burn: true, xs: true} SI: {burn: true, xs: true} MO: {burn: true, xs: true} custom isotopics: uranium isotopic mass fractions: input format: mass fractions U238: 0.992742 U235: 0.007204 U234: 0.000054 density: 19.1 uranium isotopic number fractions: input format: number fractions U238: 0.992650 U235: 0.007295 U234: 0.000055 density: 19.1 uranium isotopic number densities: &u_isotopics input format: number densities U234: 2.6539102e-06 U235: 3.5254048e-04 U238: 4.7967943e-02 bad uranium isotopic mass fractions: input format: mass fractions U238: 0.992742 U235: 0.007204 U234: 0.000054 density: 0 negative uranium isotopic mass fractions: input format: mass fractions U238: 0.992742 U235: 0.007204 U234: 0.000054 density: -1 linked uranium number densities: *u_isotopics steel: input format: mass fractions FE: 0.7 C: 0.3 density: 7.0 sodium custom isotopics: input format: mass fractions NA: 1 density: 666 """ yamlGoodBlocks = r""" blocks: uzr fuel: &block_0 fuel: &basic_fuel shape: Hexagon material: UZr Tinput: 25.0 Thot: 600.0 ip: 0.0 mult: 1.0 op: 10.0 clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 1.0 od: 10.0 sodium1: shape: Circle material: Sodium Tinput: 100 Thot: 600 id: 0 mult: 1 od: 1 sodium2: shape: Circle material: Sodium isotopics: sodium custom isotopics Tinput: 100 Thot: 600 id: 0 mult: 1 od: 1 uranium fuel from isotopic mass fractions : &block_1 fuel: <<: *basic_fuel material: Custom isotopics: uranium isotopic mass fractions wrong material: &block_2 fuel: <<: *basic_fuel isotopics: uranium isotopic mass fractions uranium fuel from number fractions: &block_3 fuel: <<: *basic_fuel material: Custom isotopics: uranium isotopic number fractions uranium fuel from number densities: &block_4 fuel: <<: 
*basic_fuel material: Custom isotopics: uranium isotopic number densities uranium fuel from nd link: &block_5 fuel: <<: *basic_fuel material: Custom isotopics: linked uranium number densities fuel with no modifications: &block_6 # after a custom density has been set fuel: <<: *basic_fuel overspecified fuel: &block_7 fuel: <<: *basic_fuel material: UraniumOxide isotopics: uranium isotopic number densities density set via number density: &block_8 fuel: <<: *basic_fuel isotopics: uranium isotopic number densities steel: &block_9 clad: shape: Hexagon material: Custom isotopics: steel Tinput: 100 Thot: 600.0 ip: 0.0 mult: 169.0 op: 0.86602 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_0, *block_1, *block_2, *block_3, *block_4, *block_5, *block_6, *block_7, *block_8, *block_9] height: [10, 10, 10, 10, 10, 10, 10, 10, 10, 10] axial mesh points: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] xs types: [A, A, A, A, A, A, A, A, A, A] material modifications: TD_frac: ["", "", "", "", "", "", "", 0.1, "", ""] """ yamlBadBlocks = r""" blocks: uzr fuel: &block_0 fuel: &basic_fuel shape: Hexagon material: UZr Tinput: 100 Thot: 600.0 ip: 0.0 mult: 1.0 op: 10.0 clad: shape: Circle material: HT9 Tinput: 100 Thot: 600.0 id: 0.0 mult: 1.0 od: 10.0 custom void: &block_1 fuel: <<: *basic_fuel material: Void isotopics: uranium isotopic number densities steel: &block_2 clad: shape: Hexagon material: Custom isotopics: steel Tinput: 100 Thot: 600.0 ip: 0.0 mult: 169.0 op: 0.86602 no density uo2: &block_3 fuel: <<: *basic_fuel material: UraniumOxide isotopics: uranium isotopic number densities no density uo2: &block_4 fuel: <<: *basic_fuel material: UraniumOxide isotopics: bad uranium isotopic mass fractions no density uo2: &block_5 fuel: <<: *basic_fuel material: UraniumOxide isotopics: bad uranium isotopic mass fractions assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_0, *block_1, *block_2] height: [10, 10, 10] axial mesh points: [1, 1, 1] xs types: [A, A, A] material 
modifications: TD_frac: ["", "", ""] fuel b: &assembly_b specifier: IC blocks: [*block_0, *block_3, *block_2] height: [10, 10, 10] axial mesh points: [1, 1, 1] xs types: [A, A, A] material modifications: TD_frac: ["", "0.0", ""] # set density to 0 to cause error in custom density fuel c: &assembly_c specifier: IC blocks: [*block_0, *block_4, *block_2] height: [10, 10, 10] axial mesh points: [1, 1, 1] xs types: [A, A, A] fuel d: &assembly_d specifier: IC blocks: [*block_0, *block_5, *block_2] height: [10, 10, 10] axial mesh points: [1, 1, 1] xs types: [A, A, A] """ # this yaml is supposed to successfully build yamlString = yamlPreamble + yamlGoodBlocks # This yaml is designed to raise an error when built yamlStringWithError = yamlPreamble + yamlBadBlocks """:meta hide-value:""" @classmethod def setUpClass(cls): cs = settings.Settings() cs = cs.modified( newSettings={ CONF_XS_KERNEL: "MC2v2", "inputHeightsConsideredHot": False, } ) cls.bp = blueprints.Blueprints.load(cls.yamlString) cls.a = cls.bp.constructAssem(cs, name="fuel a") cls.numUZrNuclides = 29 # Number of nuclides defined `nuclide flags` cls.numCustomNuclides = 28 # Number of nuclides defined in `nuclide flags` without Zr def test_unmodified(self): """Ensure that unmodified components have the correct isotopics.""" fuel = self.a[0].getComponent(Flags.FUEL) self.assertEqual(self.numUZrNuclides, len(fuel.p.numberDensities)) # NOTE: This density does not come from the material but is based on number densities. self.assertAlmostEqual(15.5, fuel.density(), 0) # i.e. it is not 19.1 def test_massFractionsAreApplied(self): """Ensure that the custom isotopics can be specified via mass fractions. .. test:: Test that custom isotopics can be specified via mass fractions. 
:id: T_ARMI_MAT_USER_INPUT3 :tests: R_ARMI_MAT_USER_INPUT """ fuel1 = self.a[1].getComponent(Flags.FUEL) fuel2 = self.a[2].getComponent(Flags.FUEL) self.assertEqual(self.numCustomNuclides, len(fuel1.p.numberDensities)) self.assertAlmostEqual(19.1, fuel1.density()) # keys are same keys1 = set([i for i, v in enumerate(fuel1.p.numberDensities) if v == 0.0]) keys2 = set([i for i, v in enumerate(fuel2.p.numberDensities) if v == 0.0]) self.assertEqual(keys1, keys2) def test_densAppliedToNonCustomMats(self): """Ensure that a density can be set in custom isotopics for components using library materials.""" # The template block fuel0 = self.a[0].getComponent(Flags.FUEL) # The block with custom density but not the 'Custom' material fuel2 = self.a[2].getComponent(Flags.FUEL) # A block like the template block, but made after the custom block fuel6 = self.a[6].getComponent(Flags.FUEL) # A block with custom density set via number density fuel8 = self.a[8].getComponent(Flags.FUEL) dLL = fuel2.material.linearExpansionFactor(Tc=600, T0=25) # the exponent here is 3 because inputHeightsConsideredHot = False. 
# if inputHeightsConsideredHot were True, then we would use a factor of 2 instead f = 1 / ((1 + dLL) ** 3) # Check that the density is set correctly on the custom density block, # and that it is not the same as the original self.assertAlmostEqual(19.1 * f, fuel2.density()) self.assertNotAlmostEqual(fuel0.density(), fuel2.density(), places=2) # Check that the custom density block has the correct material self.assertEqual("UZr", fuel2.material.name) # Check that the block with only number densities set has a new density self.assertAlmostEqual(19.1 * f, fuel8.density()) # original material density should not be changed after setting a custom density component, # so a new block without custom isotopics and density should have the same density as the original self.assertAlmostEqual(fuel6.density(), fuel0.density()) self.assertEqual(fuel6.material.name, fuel0.material.name) self.assertEqual("UZr", fuel0.material.name) def test_densAppliedToNonCustomMatsFluid(self): """ Ensure that a density can be set in custom isotopics for components using library materials, specifically in the case of a fluid component. In this case, inputHeightsConsideredHot does not matter, and the material has a zero dLL value. 
""" # The template block sodium1 = self.a[0].getComponentByName("sodium1") sodium2 = self.a[0].getComponentByName("sodium2") self.assertEqual(sodium1.material.name, "Sodium") self.assertEqual(sodium2.material.name, "Sodium") self.assertTrue(isinstance(sodium1.material, Fluid)) self.assertTrue(isinstance(sodium2.material, Fluid)) self.assertEqual(sodium1.p.customIsotopicsName, "") self.assertEqual(sodium2.p.customIsotopicsName, "sodium custom isotopics") # show that, even though the two components have the same material class # and the same temperatures, their densities are different self.assertNotEqual(sodium1.density(), sodium2.density()) # show that sodium1 has a density from the material class, while sodium2 # has a density from the blueprint and adjusted from Tinput -> Thot s = Sodium() self.assertAlmostEqual(sodium1.density(), s.density(Tc=600)) self.assertAlmostEqual(sodium2.density(), s.density(Tc=600) * (666 / s.density(Tc=100))) def test_customDensityLogsAndErrors(self): """Test that the right warning messages and errors are emitted when applying custom densities.""" # Check for warnings when specifying both TD_frac and custom isotopics with mockRunLogs.BufferLog() as mockLog: # we should start with a clean slate self.assertEqual("", mockLog.getStdout()) runLog.LOG.startLog("test_customDensityLogsAndErrors") runLog.LOG.setVerbosity(DEBUG) # rebuild the input to capture the logs cs = settings.Settings() cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v2"}) bp = blueprints.Blueprints.load(self.yamlString) bp.constructAssem(cs, name="fuel a") # Check for log messages streamVal = mockLog.getStdout() self.assertIn("and a custom isotopic with density", streamVal, msg=streamVal) self.assertIn("Custom isotopics and material modifications have both", streamVal, msg=streamVal) self.assertIn("A custom material density was specified", streamVal, msg=streamVal) self.assertIn( "A custom isotopic with associated density has been specified for non-`Custom`", streamVal, 
msg=streamVal, ) # Check that assigning a custom density to the Void material fails cs = settings.Settings() cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v2"}) bp = blueprints.Blueprints.load(self.yamlStringWithError) # Ensure we have some Void self.assertEqual(bp.blockDesigns["custom void"]["fuel"].material, "Void") # Can't have stuff in Void with self.assertRaises(ValueError): bp.constructAssem(cs, name="fuel a") # Try making a 0 density non-Void material by setting TD_frac to 0.0 with self.assertRaises(ValueError): bp.constructAssem(cs, name="fuel b") # Try making a material with mass fractions with a density of 0 with self.assertRaises(ValueError): bp.constructAssem(cs, name="fuel c") # Try making a material with mass fractions with a negative density with self.assertRaises(ValueError): bp.constructAssem(cs, name="fuel d") def test_numberFractions(self): """Ensure that the custom isotopics can be specified via number fractions. .. test:: Test that custom isotopics can be specified via number fractions. :id: T_ARMI_MAT_USER_INPUT4 :tests: R_ARMI_MAT_USER_INPUT """ # fuel blocks 2 and 4 should be the same, one is defined as mass fractions, and the other as number fractions fuel2 = self.a[1].getComponent(Flags.FUEL) fuel4 = self.a[3].getComponent(Flags.FUEL) self.assertAlmostEqual(fuel2.density(), fuel4.density()) keys2 = set([i for i, v in enumerate(fuel2.p.numberDensities) if v == 0.0]) keys4 = set([i for i, v in enumerate(fuel4.p.numberDensities) if v == 0.0]) self.assertEqual(keys2, keys4) np.testing.assert_almost_equal(fuel2.p.numberDensities, fuel4.p.numberDensities) def test_numberDensities(self): """Ensure that the custom isotopics can be specified via number densities. .. test:: Test that custom isotopics can be specified via number fractions. 
:id: T_ARMI_MAT_USER_INPUT5 :tests: R_ARMI_MAT_USER_INPUT """ # fuel blocks 2 and 5 should be the same, one is defined as mass fractions, and the other as number densities fuel2 = self.a[1].getComponent(Flags.FUEL) fuel5 = self.a[4].getComponent(Flags.FUEL) self.assertAlmostEqual(fuel2.density(), fuel5.density()) for i, nuc in enumerate(fuel2.p.nuclides): self.assertIn(nuc, fuel5.p.nuclides) j = np.where(fuel5.p.nuclides == nuc)[0][0] self.assertAlmostEqual(fuel2.p.numberDensities[i], fuel5.p.numberDensities[j]) def test_numberDensitiesAnchor(self): fuel4 = self.a[4].getComponent(Flags.FUEL) fuel5 = self.a[5].getComponent(Flags.FUEL) self.assertAlmostEqual(fuel4.density(), fuel5.density()) np.testing.assert_almost_equal(fuel4.p.numberDensities, fuel5.p.numberDensities) def test_expandedNatural(self): cs = settings.Settings() cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v3"}) bp = blueprints.Blueprints.load(self.yamlString) a = bp.constructAssem(cs, name="fuel a") b = a[-1] c = b.getComponent(Flags.CLAD) self.assertIn("FE56", c.getNumberDensities()) # natural isotopic self.assertNotIn("FE51", c.getNumberDensities()) # un-natural self.assertNotIn("FE", c.getNumberDensities()) def test_infDiluteAreOnlyNatural(self): """Make sure nuclides specified as In-Problem but not actually in any material are only natural isotopics.""" self.assertIn("AL27", self.bp.allNuclidesInProblem) self.assertNotIn("AL26", self.bp.allNuclidesInProblem) def test_getDefaultNuclideFlags(self): # This is a bit of a silly test. 
We are checking what is essentially a hard coded dictionary nucDict = isotopicOptions.getDefaultNuclideFlags() entry = {"burn": True, "xs": True, "expandTo": None} self.assertEqual(nucDict["DUMP1"], entry) self.assertEqual(nucDict["CM244"], entry) self.assertEqual(nucDict["LFP38"], entry) entry = {"burn": False, "xs": True, "expandTo": None} self.assertEqual(nucDict["B10"], entry) self.assertEqual(nucDict["NI"], entry) class TestCustomIsotopicsErrors(unittest.TestCase): def test_densityMustBePositive(self): with self.assertRaises(yamlize.YamlizingError): _ = isotopicOptions.CustomIsotopic.load( r""" name: atom repellent input format: mass fractions U234: 2.6539102e-06 U235: 3.5254048e-04 U238: 4.7967943e-02 density: -0.0001 """ ) def test_nonConformantElementName(self): with self.assertRaises(yamlize.YamlizingError): _ = isotopicOptions.CustomIsotopic.load( r""" name: non-upper case input format: number densities Au: 0.01 """ ) def test_numberDensitiesCannotSpecifyDensity(self): with self.assertRaises(yamlize.YamlizingError): _ = isotopicOptions.CustomIsotopic.load( r""" name: over-specified isotopics input format: number densities AU: 0.01 density: 10.0 """ ) class TestIsotopicsMissingData(unittest.TestCase): """Custom materials must define isotopics.""" yamlBlocksBadIsotopics = r""" blocks: steel: &block_0 clad: shape: Hexagon material: Custom #isotopics: sodium custom isotopics Tinput: 25.0 Thot: 600.0 ip: 0.0 mult: 169.0 op: 0.86602 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_0] height: [10] axial mesh points: [1] xs types: [A] """ def test_customComponentsWithoutComposition(self): cs = settings.Settings() bp = blueprints.Blueprints.load(self.yamlBlocksBadIsotopics) with self.assertRaises(IOError): _a = bp.constructAssem(cs, name="fuel a") class TestNuclideFlagsExpansion(unittest.TestCase): yamlString = r""" nuclide flags: U238: {burn: false, xs: true} U235: {burn: false, xs: true} ZR: {burn: false, xs: true} AL: {burn: false, xs: true} FE: 
{burn: false, xs: true, expandTo: ["FE54"]} C: {burn: false, xs: true} NI: {burn: true, xs: true} MN: {burn: true, xs: true} CR: {burn: true, xs: true} V: {burn: true, xs: true} SI: {burn: true, xs: true} MO: {burn: true, xs: true} W: {burn: true, xs: true} ZN: {burn: true, xs: true} O: {burn: true, xs: true} blocks: uzr fuel: &block_0 fuel: shape: Hexagon material: UZr Tinput: 25.0 Thot: 600.0 mult: 1.0 op: 10.0 clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 1.0 od: 10.0 dummy: shape: Circle material: ZnO Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 1.0 od: 10.0 assemblies: fuel a: specifier: IC blocks: [*block_0] height: [10] axial mesh points: [1] xs types: [A] """ def test_expandedNatural(self): cs = settings.Settings() cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v3"}) bp = blueprints.Blueprints.load(self.yamlString) a = bp.constructAssem(cs, name="fuel a") b = a[-1] c = b.getComponent(Flags.CLAD) nd = c.getNumberDensities() self.assertIn("FE54", nd) # natural isotopic as requested self.assertNotIn("FE56", nd) # natural isotopic not requested self.assertNotIn("FE51", nd) # un-natural self.assertNotIn("FE", nd) def test_eleExpandInfoBasedOnCodeENDF(self): with TemporaryDirectoryChanger(): # Reference elements to expand by library ref_E70_elem = ["C", "V", "ZN"] ref_E71_elem = ["C"] ref_E80_elem = [] # Load settings and set neutronics kernel to MCNP cs = settings.Settings() cs = cs.modified(newSettings={CONF_NEUTRONICS_KERNEL: "MCNP"}) # Set ENDF/B-VII.0 as MCNP cross section library base cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: "ENDF/B-VII.0"}) eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs) E70_elem = [x.label for x in eleToKeep] # Set ENDF/B-VII.1 as MCNP cross section library base cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: "ENDF/B-VII.1"}) eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs) E71_elem = [x.label for x in eleToKeep] # Set ENDF/B-VIII.0 as MCNP cross section 
library base cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: "ENDF/B-VIII.0"}) eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs) E80_elem = [x.label for x in eleToKeep] # Assert equality of returned elements to reference elements self.assertEqual(sorted(E70_elem), sorted(ref_E70_elem)) self.assertEqual(sorted(E71_elem), sorted(ref_E71_elem)) self.assertEqual(sorted(E80_elem), sorted(ref_E80_elem)) # Disallowed inputs not_allowed = ["ENDF/B-VIIII.0", "ENDF/B-VI.0", "JEFF-3.3"] # Assert raise InputError in case of invalid library setting for x in not_allowed: with self.assertRaises(InputError) as context: cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: x}) _ = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs) self.assertTrue("Failed to determine nuclides for modeling" in str(context.exception)) ================================================ FILE: armi/reactor/blueprints/tests/test_gridBlueprints.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for grid blueprints.""" import io import os import unittest from armi import configure, isConfigured if not isConfigured(): configure() from armi.reactor.blueprints import Blueprints from armi.reactor.blueprints.gridBlueprint import Grids, Pitch, saveToStream from armi.utils.customExceptions import InputError from armi.utils.directoryChangers import TemporaryDirectoryChanger LATTICE_BLUEPRINT = """ control: geom: hex_corners_up symmetry: full lattice pitch: hex: 1.2 lattice map: | - - - - - - - - - 1 1 1 1 1 1 1 1 1 4 - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1 - - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1 - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 3 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 6 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 pins: geom: hex symmetry: full lattice pitch: hex: 1.3 lattice map: | - - FP - FP FP - CL CL CL FP FP FP FP FP FP FP FP FP CL CL CL CL FP FP FP FP FP FP FP FP FP CL CL CL CL CL FP FP FP FP FP FP FP FP FP CL CL CL CL FP FP FP FP FP FP FP FP FP CL CL CL FP FP FP sfp: geom: cartesian symmetry: full lattice map: | 2 2 2 2 2 2 1 1 1 2 2 1 3 1 2 2 3 1 1 2 2 2 2 2 2 sfp quarter: geom: cartesian symmetry: quarter through center assembly lattice map: | 2 2 2 2 2 2 1 1 1 2 2 1 3 1 2 2 3 1 1 2 2 2 2 2 2 sfp quarter even: geom: cartesian symmetry: quarter core lattice map: | 2 2 2 2 2 2 1 1 1 2 2 1 3 1 2 2 3 1 1 2 2 2 2 2 2 sfp even: geom: cartesian symmetry: full lattice map: | 1 2 2 2 2 2 1 2 1 1 1 2 1 2 1 4 1 2 1 2 2 1 1 2 1 2 2 2 2 2 1 1 1 1 1 1 """ RZT_BLUEPRINT = """ rzt_core: geom: thetarz symmetry: eighth core periodic grid bounds: r: - 0.0 - 14.2857142857 - 
28.5714285714 - 42.8571428571 - 57.1428571429 - 71.4285714286 - 85.7142857143 - 100.001 - 115.001 - 130.001 theta: - 0.0 - 0.11556368446681414 - 0.2311273689343264 - 0.34669105340061696 - 0.43870710999683127 - 0.5542707944631219 - 0.6698344789311578 - 0.7853981633974483 grid contents: [0,0]: assembly1_1 fuel [0,1]: assembly1_2 fuel [0,2]: assembly1_3 fuel [0,3]: assembly1_4 fuel [0,4]: assembly1_5 fuel [0,5]: assembly1_6 fuel [0,6]: assembly1_7 fuel [1,0]: assembly2_1 fuel [1,1]: assembly2_2 fuel [1,2]: assembly2_3 fuel [1,3]: assembly2_4 fuel [1,4]: assembly2_5 fuel [1,5]: assembly2_6 fuel [1,6]: assembly2_7 fuel [2,0]: assembly3_1 fuel [2,1]: assembly3_2 fuel [2,2]: assembly3_3 fuel [2,3]: assembly3_4 fuel [2,4]: assembly3_5 fuel [2,5]: assembly3_6 fuel [2,6]: assembly3_7 fuel [3,0]: assembly4_1 fuel [3,1]: assembly4_2 fuel [3,2]: assembly4_3 fuel [3,3]: assembly4_4 fuel [3,4]: assembly4_5 fuel [3,5]: assembly4_6 fuel [3,6]: assembly4_7 fuel [4,0]: assembly5_1 fuel [4,1]: assembly5_2 fuel [4,2]: assembly5_3 fuel [4,3]: assembly5_4 fuel [4,4]: assembly5_5 fuel [4,5]: assembly5_6 fuel [4,6]: assembly5_7 fuel [5,0]: assembly6_1 fuel [5,1]: assembly6_2 fuel [5,2]: assembly6_3 fuel [5,3]: assembly6_4 fuel [5,4]: assembly6_5 fuel [5,5]: assembly6_6 fuel [5,6]: assembly6_7 fuel [6,0]: assembly7_1 fuel [6,1]: assembly7_2 fuel [6,2]: assembly7_3 fuel [6,3]: assembly7_4 fuel [6,4]: assembly7_5 fuel [6,5]: assembly7_6 fuel [6,6]: assembly7_7 fuel [7,0]: assembly8_1 fuel [7,1]: assembly8_2 fuel [7,2]: assembly8_3 fuel [7,3]: assembly8_4 fuel [7,4]: assembly8_5 fuel [7,5]: assembly8_6 fuel [7,6]: assembly8_7 fuel [8,0]: assembly9_1 fuel [8,1]: assembly9_2 fuel [8,2]: assembly9_3 fuel [8,3]: assembly9_4 fuel [8,4]: assembly9_5 fuel [8,5]: assembly9_6 fuel [8,6]: assembly9_7 fuel """ SMALL_HEX = """core: geom: hex symmetry: third periodic lattice map: | F F F F F F F pins: geom: hex symmetry: full lattice map: | - - FP - FP FP - CL CL CL FP FP FP FP FP FP FP FP FP CL CL CL CL 
FP FP FP FP FP FP FP FP FP CL CL CL CL CL FP FP FP FP FP FP FP FP FP CL CL CL CL FP FP FP FP FP FP FP FP FP CL CL CL FP FP FP """ TINY_GRID = """core: geom: hex lattice map: grid bounds: symmetry: full grid contents: ? - 0 - 0 : IF """ BIG_FULL_HEX_CORE = """core: geom: hex symmetry: full lattice map: | - - - - - - SS SS - - - - SS SS SS SS SS - - - - SS DD DD DD DD SS - - - SS DD DD DD DD DD SS - - - SS DD DD DD DD DD DD SS - - SS DD DD DD DD DD DD DD SS - - SS DD DD DD DD DD DD DD DD SS - - SS DD DD DD RB DD DD DD SS - - SS DD DD RB RB RB RB DD DD SS - SS DD DD RB RB FF RB RB DD DD SS - SS SS DD RB FF FF FF FF RB DD DD SS - SS DD RB FF FF FF FF FF RB DD RR - SS DD DD FF FF PC PC PC FF DD DD SS SS SS DD RB FF II PC FF FF RB DD DD SS - SS DD RB FF SS II II PC FF RB DD RR SS DD DD FF II II II II II FF DD DD SS - SS DD RB II II II II II II RB DD SS SS DD RB FF RC II SS II II FF RB DD SS SS DD DD FF II II II RC PC II FF DD DD SS SS DD RB II PC II II II PC II RB DD SS SS DD RB FF II II II II II II FF RB DD SS SS DD FF II II WW II II II II FF DD SS SS DD RB FF II II WW XX PC II FF RB DD SS SS DD FF PC II BB AA YY SS DC FF DD SS SS DD RB FF II RC CC ZZ II II FF RB DD SS SS DD FF II II II II II II II FF DD SS SS DD RB FF II II II II II II FF RB DD SS SS DD RB II II II II RC II II RB DD SS SS DD DD FF PC II SS II II PC FF DD DD SS SS DD RB II II II II II II II RB DD SS - SS DD FF II PC II II II II FF DD SS SS DD RB FF II II PC II II FF RB DD SS - SS DD RB FF SS II II PC FF RB DD SS SS SS DD RB FF II II II FF RB DD SS SS - SS DD DD FF FF II II FF FF DD DD SS - SS DD RB FF FF FF FF FF RB DD SS - SS SS DD RB FF FF FF FF RB DD SS SS - SS DD DD RB RB RB RB RB DD DD SS - SS DD DD RB RB RB RB DD DD SS - SS DD DD DD DD DD DD DD SS SS DD DD DD DD DD DD DD DD SS SS DD DD DD DD DD DD DD SS SS DD DD DD DD DD DD SS SS DD DD DD DD DD SS SS DD DD DD DD SS SS SS SS SS SS - SS SS - """ class TestGridBPRoundTrip(unittest.TestCase): def setUp(self): self.grids = Grids.load(SMALL_HEX) def 
test_contents(self): self.assertIn("core", self.grids) def test_roundTrip(self): """ Test saving blueprint data to a stream. .. test:: Grid blueprints can be written to disk. :id: T_ARMI_BP_TO_DB0 :tests: R_ARMI_BP_TO_DB """ stream = io.StringIO() saveToStream(stream, self.grids, False, True) stream.seek(0) gridBp = Grids.load(stream) self.assertIn("third", gridBp["core"].symmetry) def test_tinyMap(self): """ Test that a lattice map can be defined, written, and read in from blueprint file. .. test:: Define a lattice map in reactor core. :id: T_ARMI_BP_GRID1 :tests: R_ARMI_BP_GRID """ grid = Grids.load(TINY_GRID) stream = io.StringIO() saveToStream(stream, grid, full=True, tryMap=True) stream.seek(0) text = stream.read() self.assertIn("IF", text) stream.seek(0) gridBp = Grids.load(stream) self.assertIn("full", gridBp["core"].symmetry) self.assertIn("IF", gridBp["core"].latticeMap) class TestGridBPRoundTripFull(unittest.TestCase): def test_fullMap(self): """ Test that a lattice map can be defined, written, and read in from blueprint file. .. test:: Define a lattice map in reactor core. 
:id: T_ARMI_BP_GRID2 :tests: R_ARMI_BP_GRID """ grid = Grids.load(BIG_FULL_HEX_CORE) gridDesign = grid["core"] _ = gridDesign.construct() # test before the round-trip self.assertEqual(gridDesign.gridContents[0, 0], "AA") self.assertEqual(gridDesign.gridContents[-2, 1], "BB") self.assertEqual(gridDesign.gridContents[-1, 0], "CC") self.assertEqual(gridDesign.gridContents[-1, 1], "WW") self.assertEqual(gridDesign.gridContents[1, 0], "XX") self.assertEqual(gridDesign.gridContents[2, -1], "YY") self.assertEqual(gridDesign.gridContents[1, -1], "ZZ") self.assertEqual(gridDesign.gridContents[-3, 1], "RC") self.assertEqual(gridDesign.gridContents[3, -1], "PC") # perform a roundtrip stream = io.StringIO() saveToStream(stream, grid, full=True, tryMap=True) stream.seek(0) gridBp = Grids.load(stream) gridDesign = gridBp["core"] _ = gridDesign.construct() # test again after the round-trip self.assertEqual(gridDesign.gridContents[0, 0], "AA") self.assertEqual(gridDesign.gridContents[-2, 1], "BB") self.assertEqual(gridDesign.gridContents[-1, 0], "CC") self.assertEqual(gridDesign.gridContents[-1, 1], "WW") self.assertEqual(gridDesign.gridContents[1, 0], "XX") self.assertEqual(gridDesign.gridContents[2, -1], "YY") self.assertEqual(gridDesign.gridContents[1, -1], "ZZ") self.assertEqual(gridDesign.gridContents[-3, 1], "RC") self.assertEqual(gridDesign.gridContents[3, -1], "PC") class TestGridBlueprintsSection(unittest.TestCase): """Tests for lattice blueprint section.""" def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() self.grids = Grids.load(LATTICE_BLUEPRINT.format(self._testMethodName)) def tearDown(self): self.td.__exit__(None, None, None) def test_simpleRead(self): gridDesign = self.grids["control"] grid = gridDesign.construct() self.assertAlmostEqual(grid.pitch, 1.2) self.assertEqual(gridDesign.gridContents[-8, 0], "6") gridDesign = self.grids["pins"] grid = gridDesign.construct() self.assertAlmostEqual(grid.pitch, 1.3) 
self.assertEqual(gridDesign.gridContents[-4, 0], "FP") self.assertEqual(gridDesign.gridContents[-3, 3], "CL") # Cartesian full, odd gridDesign2 = self.grids["sfp"] _ = gridDesign2.construct() self.assertEqual(gridDesign2.gridContents[1, 1], "1") self.assertEqual(gridDesign2.gridContents[0, 0], "3") self.assertEqual(gridDesign2.gridContents[-1, -1], "3") # Cartesian quarter, odd gridDesign3 = self.grids["sfp quarter"] grid = gridDesign3.construct() self.assertEqual(gridDesign3.gridContents[0, 0], "2") self.assertEqual(gridDesign3.gridContents[1, 1], "3") self.assertEqual(gridDesign3.gridContents[2, 2], "3") self.assertEqual(gridDesign3.gridContents[3, 3], "1") self.assertTrue(grid.symmetry.isThroughCenterAssembly) # cartesian quarter, even not through center gridDesign3 = self.grids["sfp quarter even"] grid = gridDesign3.construct() self.assertFalse(grid.symmetry.isThroughCenterAssembly) # Cartesian full, even/odd hybrid gridDesign4 = self.grids["sfp even"] grid = gridDesign4.construct() self.assertEqual(gridDesign4.gridContents[0, 0], "4") self.assertEqual(gridDesign4.gridContents[-1, -1], "2") self.assertEqual(gridDesign4.gridContents[2, 2], "2") self.assertEqual(gridDesign4.gridContents[-3, -3], "1") with self.assertRaises(KeyError): self.assertEqual(gridDesign4.gridContents[-4, -3], "1") def test_pitchBasics(self): # use only hex input p = Pitch(123, 0, 0, 0) self.assertEqual(p.hex, 123) self.assertEqual(p.x, 0) self.assertEqual(p.y, 0) self.assertEqual(p.z, 0) # use only X, Y, Z inputs p = Pitch(0, 1, 2, 3) self.assertEqual(p.hex, 1) self.assertEqual(p.x, 1) self.assertEqual(p.y, 2) self.assertEqual(p.z, 3) def test_pitchEdgeCases(self): with self.assertRaises(InputError): # cannot mix hex with x,y,z pitch Pitch(1, 2, 3, 4) with self.assertRaises(InputError): # SOMETHING needs to be non-zero Pitch(0, 0, 0, 0) def test_simpleReadLatticeMap(self): """Read lattice map and create a grid. .. test:: Define a lattice map in reactor core. 
:id: T_ARMI_BP_GRID0 :tests: R_ARMI_BP_GRID """ from armi.reactor.blueprints.tests.test_blockBlueprints import FULL_BP # Cartesian full, even/odd hybrid gridDesign4 = self.grids["sfp even"] _grid = gridDesign4.construct() # test that we can correctly save this to a YAML bp = Blueprints.load(FULL_BP) filePath = "TestGridBlueprintsSection__test_simpleReadLatticeMap.log" with open(filePath, "w") as stream: saveToStream(stream, bp, True) # test that the output looks valid, and includes a lattice map with open(filePath, "r") as f: outText = f.read() self.assertIn("blocks:", outText) self.assertIn("shape: Circle", outText) self.assertIn("assemblies:", outText) self.assertIn("flags: fuel test", outText) self.assertIn("grid contents:", outText) self.assertIn("lattice map:", outText) before, after = outText.split("lattice map:") self.assertGreater(len(before), 100) self.assertGreater(len(after), 20) self.assertIn("1 2 1 2 1 2 1", after, msg="lattice map not showing up") self.assertNotIn("- -3", after, msg="grid contents are showing up when they shouldn't") self.assertNotIn("readFromLatticeMap", outText) self.assertTrue(os.path.exists(filePath)) def test_simpleReadNoLatticeMap(self): from armi.reactor.blueprints.tests.test_blockBlueprints import FULL_BP_GRID # Cartesian full, even/odd hybrid gridDesign4 = self.grids["sfp even"] _grid = gridDesign4.construct() # test that we can correctly save this to a YAML bp = Blueprints.load(FULL_BP_GRID) filePath = "TestGridBlueprintsSection__test_simpleReadNoLatticeMap.log" with open(filePath, "w") as stream: saveToStream(stream, bp, True) # test that the output looks valid, and includes a lattice map with open(filePath, "r") as f: outText = f.read() self.assertIn("blocks:", outText) self.assertIn("shape: Circle", outText) self.assertIn("assemblies:", outText) self.assertIn("flags: fuel test", outText) self.assertIn("grid contents:", outText) self.assertIn("lattice map:", outText) before, after = outText.split("grid contents:") 
self.assertGreater(len(before), 100) self.assertGreater(len(after), 20) self.assertIn("- -3", after, msg="grid contents not showing up") self.assertNotIn("1 3 1 2 1 3 1", after, msg="lattice map showing up when it shouldn't") self.assertNotIn("readFromLatticeMap", outText) self.assertTrue(os.path.exists(filePath)) class TestRZTGridBlueprint(unittest.TestCase): """Tests for R-Z-Theta grid inputs.""" def setUp(self): self.grids = Grids.load(RZT_BLUEPRINT) def test_construct(self): gridDesign = self.grids["rzt_core"] grid = gridDesign.construct() self.assertEqual(gridDesign.gridContents[2, 2], "assembly3_3 fuel") self.assertEqual( grid.indicesOfBounds(57.1428571429, 71.4285714286, 0.5542707944631219, 0.6698344789311578), (5, 4, 0), ) ================================================ FILE: armi/reactor/blueprints/tests/test_materialModifications.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for material modifications.""" import unittest from numpy.testing import assert_allclose from armi import materials, settings from armi.reactor import blueprints from armi.reactor.blueprints.blockBlueprint import BlockBlueprint class TestMaterialModifications(unittest.TestCase): uZrInput = r""" nuclide flags: U: {burn: false, xs: true} ZR: {burn: false, xs: true} blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel] height: [1.0] axial mesh points: [1] xs types: [A] """ b4cInput = r""" nuclide flags: B: {burn: false, xs: true} C: {burn: false, xs: true} blocks: poison: &block_poison poison: shape: Hexagon material: B4C Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: assem a: &assembly_a specifier: IC blocks: [*block_poison] height: [1.0] axial mesh points: [1] xs types: [A] """ def loadUZrAssembly(self, materialModifications): return self._loadAssembly(self.uZrInput, materialModifications, "fuel a") @staticmethod def _loadAssembly(bpBase: str, materialModifications: str, assem: str): yamlString = bpBase + "\n" + materialModifications design = blueprints.Blueprints.load(yamlString) design._prepConstruction(settings.Settings()) return design.assemblies[assem] def loadB4CAssembly(self, materialModifications: str): return self._loadAssembly(self.b4cInput, materialModifications, "assem a") def test_noMaterialModifications(self): a = self.loadUZrAssembly("") # mass fractions should be whatever UZr is uzr = materials.UZr() fuelComponent = a[0][0] totalMass = fuelComponent.getMass() for nucName in uzr.massFrac: massFrac = fuelComponent.getMass(nucName) / totalMass assert_allclose(uzr.massFrac[nucName], massFrac) def test_u235_wt_frac_modification(self): """Test constructing a component where the 
blueprints specify a material modification for one nuclide. .. test:: A material modification can be applied to all the components in an assembly. :id: T_ARMI_MAT_USER_INPUT0 :tests: R_ARMI_MAT_USER_INPUT """ a = self.loadUZrAssembly( """ material modifications: U235_wt_frac: [0.20] """ ) fuelComponent = a[0][0] u235 = fuelComponent.getMass("U235") u = fuelComponent.getMass("U") assert_allclose(0.20, u235 / u) fuelComponent = a[0][1] u235 = fuelComponent.getMass("U235") u = fuelComponent.getMass("U") assert_allclose(0.20, u235 / u) def test_u235_wt_frac_byComponent_modification1(self): """Test constructing a component where the blueprints specify a material modification for one nuclide, for just one component. .. test:: A material modification can be applied to one component in an assembly. :id: T_ARMI_MAT_USER_INPUT1 :tests: R_ARMI_MAT_USER_INPUT """ a = self.loadUZrAssembly( """ material modifications: by component: fuel1: U235_wt_frac: [0.20] U235_wt_frac: [0.30] """ ) fuelComponent = a[0][0] u235 = fuelComponent.getMass("U235") u = fuelComponent.getMass("U") assert_allclose(0.20, u235 / u) fuelComponent = a[0][1] u235 = fuelComponent.getMass("U235") u = fuelComponent.getMass("U") assert_allclose(0.30, u235 / u) def test_u235_wt_frac_byComponent_modification2(self): """Test constructing a component where the blueprints specify a material modification for one nuclide, for multiple components. .. test:: A material modification can be applied to multiple components in an assembly. 
:id: T_ARMI_MAT_USER_INPUT2 :tests: R_ARMI_MAT_USER_INPUT """ a = self.loadUZrAssembly( """ material modifications: by component: fuel1: U235_wt_frac: [0.20] fuel2: U235_wt_frac: [0.50] U235_wt_frac: [0.30] """ ) fuelComponent = a[0][0] u235 = fuelComponent.getMass("U235") u = fuelComponent.getMass("U") assert_allclose(0.20, u235 / u) fuelComponent = a[0][1] u235 = fuelComponent.getMass("U235") u = fuelComponent.getMass("U") assert_allclose(0.50, u235 / u) def test_materialModificationLength(self): """If the wrong number of material modifications are defined, there is an error.""" with self.assertRaises(ValueError): _a = self.loadUZrAssembly( """ material modifications: by component: fuel1: U235_wt_frac: [0.2] U235_wt_frac: [0.11, 0.22, 0.33, 0.44] """ ) def test_invalidComponentModification(self): with self.assertRaises(ValueError): _a = self.loadUZrAssembly( """ material modifications: by component: invalid component: U235_wt_frac: [0.2] """ ) def test_zrWtFracModification(self): a = self.loadUZrAssembly( """ material modifications: ZR_wt_frac: [0.077] """ ) fuelComponent = a[0][0] totalMass = fuelComponent.getMass() zr = fuelComponent.getMass("ZR") assert_allclose(0.077, zr / totalMass) def test_bothU235ZrWtFracModification(self): a = self.loadUZrAssembly( """ material modifications: ZR_wt_frac: [0.077] U235_wt_frac: [0.20] """ ) fuelComponent = a[0][0] # check u235 enrichment u235 = fuelComponent.getMass("U235") u = fuelComponent.getMass("U") assert_allclose(0.20, u235 / u) # check zr frac totalMass = fuelComponent.getMass() zr = fuelComponent.getMass("ZR") assert_allclose(0.077, zr / totalMass) def test_checkByComponentMaterialInput(self): a = self.loadUZrAssembly("") materialInput = {"fake_material": {"ZR_wt_frac": 0.5}} with self.assertRaises(ValueError): BlockBlueprint._checkByComponentMaterialInput(a, materialInput) def test_filterMaterialInput(self): a = self.loadUZrAssembly("") materialInput = { "byBlock": {"ZR_wt_frac": 0.1, "U235_wt_frac": 0.1}, 
"fuel1": {"U235_wt_frac": 0.2}, "fuel2": {"ZR_wt_frac": 0.3, "U235_wt_frac": 0.3}, } componentDesign = a[0][0] filteredMaterialInput, _ = BlockBlueprint._filterMaterialInput(materialInput, componentDesign) filteredMaterialInput_reference = {"ZR_wt_frac": 0.1, "U235_wt_frac": 0.2} self.assertEqual(filteredMaterialInput, filteredMaterialInput_reference) def test_invalidMatModName(self): """ This test shows that we can detect invalid material modification names when they are specified on an assembly blueprint. We happen to know that ZR_wt_frac is a valid modification for the UZr material class, so we use that in the first call to prove that things initially work fine. """ a = self.loadUZrAssembly( """ material modifications: ZR_wt_frac: [1] by component: fuel2: ZR_wt_frac: [0] """ ) # just to prove that the above works fine before we modify it self.assertAlmostEqual(a[0][0].getMassFrac("ZR"), 1) self.assertAlmostEqual(a[0][1].getMassFrac("ZR"), 0) with self.assertRaises(ValueError): a = self.loadUZrAssembly( """ material modifications: this_is_a_fake_name: [1] by component: fuel2: ZR_wt_frac: [0] """ ) with self.assertRaises(ValueError): a = self.loadUZrAssembly( """ material modifications: ZR_wt_frac: [1] by component: fuel2: this_is_a_fake_name: [0] """ ) def test_invalidMatModType(self): """ This test shows that we can detect material modifications that are invalid because of their values, not just their names. We happen to know that ZR_wt_frac is a valid modification for UZr, so we use that in the first call to prove that things initially work fine. 
""" a = self.loadUZrAssembly( """ material modifications: ZR_wt_frac: [1] """ ) # just to prove that the above works fine before we modify it self.assertAlmostEqual(a[0][0].getMassFrac("ZR"), 1) with self.assertRaises(ValueError) as ee: a = self.loadUZrAssembly( """ material modifications: ZR_wt_frac: [this_is_a_value_of_incompatible_type] """ ) self.assertIn( "Something went wrong in applying the material modifications", ee.args[0], ) def test_matModsUpTheMRO(self): """ Make sure that valid/invalid material modifications are searched up the MRO for a material class. """ _a = self.loadUZrAssembly( """ material modifications: ZR_wt_frac: [1] class1_wt_frac: [1] class1_custom_isotopics: [dummy] class2_custom_isotopics: [dummy] by component: fuel2: ZR_wt_frac: [0] class1_wt_frac: [1] class1_custom_isotopics: [dummy] class2_custom_isotopics: [dummy] custom isotopics: dummy: input format: mass fractions density: 1 U: 1 """ ) with self.assertRaises(ValueError): _a = self.loadUZrAssembly( """ material modifications: ZR_wt_frac: [1] klass1_wt_frac: [1] klass1_custom_isotopics: [dummy] klass2_custom_isotopics: [dummy] by component: fuel2: ZR_wt_frac: [0] klass1_wt_frac: [1] klass1_custom_isotopics: [dummy] klass2_custom_isotopics: [dummy] custom isotopics: dummy: input format: mass fractions density: 1 U: 1 """ ) def test_theoreticalDensity(self): """Test the theoretical density can be loaded from material modifications.""" mods = """ material modifications: TD_frac: [0.5] """ a = self.loadB4CAssembly(mods) comp = a[0][0] mat = comp.material self.assertEqual(mat.getTD(), 0.5) self.assertEqual(comp.p.theoreticalDensityFrac, 0.5) ================================================ FILE: armi/reactor/blueprints/tests/test_reactorBlueprints.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for reactor blueprints.""" import logging import os import unittest from armi import runLog, settings from armi.reactor import blueprints, reactors from armi.reactor.blueprints import gridBlueprint, reactorBlueprint from armi.reactor.blueprints.tests import test_customIsotopics from armi.reactor.composites import Composite from armi.reactor.excoreStructure import ExcoreStructure from armi.reactor.reactors import Core, loadFromCs from armi.reactor.spentFuelPool import SpentFuelPool from armi.settings.caseSettings import Settings from armi.testing import TESTING_ROOT from armi.tests import mockRunLogs CORE_BLUEPRINT = """ core: grid name: core origin: x: 0.0 y: 10.1 z: 1.1 sfp: type: sfp grid name: sfp origin: x: 0.0 y: 12.1 z: 1.1 evst: type: excore grid name: evst origin: x: 0.0 y: 100.0 z: 0.0 """ GRIDS = """ core: geom: hex symmetry: third core periodic grid contents: [0, 0]: IC [1, 1]: IC orientationBOL: [1, 1]: 60.0 [3, 2]: 120.0 sfp: lattice pitch: x: 25.0 y: 25.0 geom: cartesian symmetry: full lattice map: | IC IC IC IC orientationBOL: [0, 0]: 60.0 [0, -1]: 120.0 evst: lattice pitch: x: 32.0 y: 32.0 geom: cartesian symmetry: full lattice map: | IC IC IC IC """ SMALL_YAML = """ systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 sfp: type: sfp grid name: sfp origin: x: 1000.0 y: 1000.0 z: 1000.0 evst: type: excore grid name: evst origin: x: 2000.0 y: 2000.0 z: 2000.0 grids: core: geom: hex symmetry: third core periodic grid contents: [0, 0]: IC [1, 1]: IC sfp: lattice pitch: x: 25.0 y: 25.0 geom: cartesian symmetry: full lattice map: | IC IC IC 
IC evst: lattice pitch: x: 32.0 y: 32.0 geom: hex symmetry: full """ class TestReactorBlueprints(unittest.TestCase): """Tests for reactor blueprints.""" def setUp(self): # add testMethodName to avoid I/O collisions during parallel testing self.systemDesigns = reactorBlueprint.Systems.load(CORE_BLUEPRINT) self.gridDesigns = gridBlueprint.Grids.load(GRIDS) def test_simpleRead(self): self.assertAlmostEqual(self.systemDesigns["core"].origin.y, 10.1) self.assertAlmostEqual(self.systemDesigns["sfp"].origin.y, 12.1) self.assertAlmostEqual(self.systemDesigns["evst"].origin.y, 100) def _setupReactor(self): fnames = [self._testMethodName + n for n in ["geometry.yaml", "sfp-geom.yaml"]] for fn in fnames: with open(fn, "w") as f: f.write(SMALL_YAML) cs = settings.Settings() bp = blueprints.Blueprints.load(test_customIsotopics.TestCustomIsotopics.yamlString) bp.systemDesigns = self.systemDesigns bp.gridDesigns = self.gridDesigns reactor = reactors.Reactor(cs.caseTitle, bp) core = bp.systemDesigns["core"].construct(cs, bp, reactor) sfp = bp.systemDesigns["sfp"].construct(cs, bp, reactor) evst = bp.systemDesigns["evst"].construct(cs, bp, reactor) for fn in fnames: os.remove(fn) return core, sfp, evst def test_construct(self): """Actually construct some reactor systems. .. test:: Create core and spent fuel pool with blueprint. :id: T_ARMI_BP_SYSTEMS :tests: R_ARMI_BP_SYSTEMS .. test:: Create core object with blueprint. 
:id: T_ARMI_BP_CORE :tests: R_ARMI_BP_CORE """ core, sfp, evst = self._setupReactor() self.assertEqual(len(core), 2) self.assertEqual(len(sfp), 4) self.assertEqual(len(evst), 4) self.assertIsInstance(core, Core) self.assertIsInstance(sfp, SpentFuelPool) self.assertIsInstance(evst, ExcoreStructure) def test_materialDataSummary(self): """Test that the material data summary for the core is valid as a printout to the stdout.""" expectedMaterialData = [ ("Custom", "ARMI"), ("HT9", "ARMI"), ("Sodium", "ARMI"), ("UZr", "ARMI"), ] core, _sfp, _evst = self._setupReactor() materialData = reactorBlueprint.summarizeMaterialData(core) for actual, expected in zip(materialData, expectedMaterialData): self.assertEqual(actual, expected) def test_excoreStructure(self): _core, _sfp, evst = self._setupReactor() self.assertIsInstance(evst, ExcoreStructure) self.assertEqual(evst.parent.__class__.__name__, "Reactor") self.assertEqual(evst.spatialGrid.__class__.__name__, "CartesianGrid") # add one composite object and validate comp1 = Composite("thing1") loc = evst.spatialGrid[(0, 0, 0)] self.assertEqual(len(evst.getChildren()), 4) evst.add(comp1, loc) self.assertEqual(len(evst.getChildren()), 5) def test_spentFuelPool(self): _core, sfp, evst = self._setupReactor() self.assertIsInstance(sfp, SpentFuelPool) self.assertEqual(sfp.parent.__class__.__name__, "Reactor") self.assertEqual(sfp.spatialGrid.__class__.__name__, "CartesianGrid") self.assertEqual(sfp.numColumns, 2) # add one assembly and validate self.assertEqual(len(sfp.getChildren()), 4) sfp.add(evst.getChildren()[0]) self.assertEqual(len(sfp.getChildren()), 5) def test_orientationBOL(self): core, sfp, _evst = self._setupReactor() # test for hex core a0 = core.getAssembly(locationString="001-001") self.assertAlmostEqual(a0.p.orientation[2], 60.0, delta=1e-9) a1 = core.getAssembly(locationString="003-002") self.assertAlmostEqual(a1.p.orientation[2], 120.0, delta=1e-9) # test cartesian, non-core a0 = sfp.getAssembly("A0005") 
self.assertAlmostEqual(a0.p.orientation[2], 60.0, delta=1e-9) a1 = sfp.getAssembly("A0003") self.assertAlmostEqual(a1.p.orientation[2], 120.0, delta=1e-9) def test_fullCoreAreNotConverted(self): """Prove that geometries aren't being converted when reading in a full-core BP.""" cs = Settings(os.path.join(TESTING_ROOT, "reactors", "smallHexReactor", "smallHexReactor.yaml")) runLog.setVerbosity(logging.INFO) with mockRunLogs.BufferLog() as log: self.assertEqual("", log.getStdout()) r = loadFromCs(cs) # ensure that, for full core, only the correct parts of the geom modification are hit self.assertIn("Applying Geometry Modifications", log.getStdout()) self.assertIn("Updating spatial grid", log.getStdout()) self.assertNotIn("Applying non-full core", log.getStdout()) a = r.core.getAssemblyWithStringLocation("003-012") self.assertIn("fuel assembly", str(a).lower()) b = a[2] self.assertIn("fuel", str(b).lower()) self.assertEqual(b.p.molesHmBOL, b.getHMMoles()) ================================================ FILE: armi/reactor/components/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Components package contains components and shapes. These objects hold the dimensions, temperatures, composition, and shape of reactor primitives. .. _component-class-diagram: .. 
pyreverse:: armi.reactor.components -A -k --ignore=componentParameters.py :align: center :alt: Component class diagram :width: 100% Class inheritance diagram for :py:mod:`armi.reactor.components`. """ # ruff: noqa: F405, I001 import math import numpy as np from armi import runLog from armi.reactor.components.component import * # noqa: F403 from armi.reactor.components.basicShapes import * # noqa: F403 from armi.reactor.components.complexShapes import * # noqa: F403 from armi.reactor.components.volumetricShapes import * # noqa: F403 def factory(shape, bcomps, kwargs): """ Build a new component object. Parameters ---------- shape : str lowercase string corresponding to the component type name bcomps : list(Component) list of "sibling" components. This list is used to find component links, which are of the form ``<name>.<dimension``. kwargs : dict dictionary of inputs for the Component subclass's ``__init__`` method. """ try: class_ = ComponentType.TYPES[shape] except KeyError: raise ValueError( "Unrecognized component shape: '{}'\nValid component names are {}".format( shape, ", ".join(ComponentType.TYPES.keys()) ) ) _removeDimensionNameSpaces(kwargs) try: return class_(components=bcomps, **kwargs) except TypeError: # TypeError raised when kwarg is missing. We add extra information # to the error to indicate which component needs updating. runLog.error(f"Potentially invalid kwargs {kwargs} for {class_} of shape {shape}. 
Check input.") raise def _removeDimensionNameSpaces(attrs): """Some components use spacing in their dimension names, but can't internally.""" for key in list(attrs.keys()): if " " in key: clean = key.replace(" ", "_") attrs[clean] = attrs.pop(key) # Below are a few component base classes class NullComponent(Component): """Returns zero for all dimensions.""" def __cmp__(self, other): """Be smaller than everything.""" return -1 def __lt__(self, other): return True def __bool__(self): """Handles truth testing.""" return False __nonzero__ = __bool__ # Python2 compatibility def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): return None def getDimension(self, key, Tc=None, cold=False): return 0.0 class UnshapedComponent(Component): """ A component with undefined dimensions. Useful for situations where you just want to enter the area directly. For instance, when you want to model neutronic behavior of an assembly based on only knowing the area fractions of each material in the assembly. See Also -------- DerivedShape : Useful to just fill leftover space in a block with a material """ pDefs = componentParameters.getUnshapedParameterDefinitions() def __init__( self, name, material, Tinput, Thot, area=np.nan, modArea=None, isotopics=None, mergeWith=None, components=None, ): Component.__init__( self, name, material, Tinput, Thot, area=area, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, modArea=modArea) def getComponentArea(self, cold=False, Tc=None): """ Get the area of this component in cm^2. Parameters ---------- cold : bool, optional If True, compute the area with as-input dimensions, instead of thermally-expanded. 
Tc : float, optional Temperature in C to compute the area at """ if cold and Tc is not None: raise ValueError(f"Cannot compute component area at {Tc} and cold dimensions simultaneously.") coldArea = self.p.area if cold: return coldArea if Tc is None: Tc = self.temperatureInC return self.getThermalExpansionFactor(Tc) ** 2 * coldArea def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): """ Approximate it as circular and return the radius. This is the smallest it can possibly be. Since this is used to determine the outer component, it will never be allowed to be the outer one. Parameters ---------- Tc : float Ignored for this component cold : bool, optional If True, compute the area with as-input dimensions, instead of thermally-expanded. Notes ----- Tc is not used in this method for this particular component. """ return 2 * math.sqrt(self.getComponentArea(cold=cold) / math.pi) def getCircleInnerDiameter(self, Tc=None, cold=False): """ Component is unshaped; assume it is circular and there is no ID (return 0.0). Parameters ---------- Tc : float, optional Ignored for this component cold : bool, optional Ignored for this component """ return 0.0 @staticmethod def fromComponent(otherComponent): """ Build a new UnshapedComponent that has area equal to that of another component. This can be used to "freeze" a DerivedShape, among other things. Notes ----- Components created in this manner will not thermally expand beyond the expanded area of the original component, but will retain their hot temperature. """ newC = UnshapedComponent( name=otherComponent.name, material=otherComponent.material, Tinput=otherComponent.temperatureInC, Thot=otherComponent.temperatureInC, area=otherComponent.getComponentArea(), ) return newC class UnshapedVolumetricComponent(UnshapedComponent): """ A component with undefined dimensions. Useful for situations where you just want to enter the volume directly. 
""" is3D = True def __init__( self, name, material, Tinput, Thot, area=np.nan, op=None, isotopics=None, mergeWith=None, components=None, volume=np.nan, ): Component.__init__( self, name, material, Tinput, Thot, area=area, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, op=op, userDefinedVolume=volume) def getComponentArea(self, cold=False, Tc=None): return self.getVolume() / self.parent.getHeight() def getComponentVolume(self): """Get the volume of the component in cm^3.""" return self.getDimension("userDefinedVolume") def setVolume(self, val): self.setDimension("userDefinedVolume", val) self.clearCache() class ZeroMassComponent(UnshapedVolumetricComponent): """ A component that never has mass -- it always returns zero for getMass and getNumberDensity. Useful for situations where you want to give a block integrated flux, but ensure mass is never added to it See Also -------- armi.reactor.batch.makeMgFluxBlock """ def getNumberDensity(self, *args, **kwargs): """Always return 0 because this component has not mass.""" return 0.0 def setNumberDensity(self, *args, **kwargs): """Never add mass.""" pass class PositiveOrNegativeVolumeComponent(UnshapedVolumetricComponent): """ A component that may have negative mass for removing mass from batches. See Also -------- armi.reactor.batch.makeMassAdditionComponent """ def _checkNegativeVolume(self, volume): """Allow negative areas.""" pass class DerivedShape(UnshapedComponent): """ This a component that does have specific dimensions, but they're complicated. Notes ----- - This component type is "derived" through the addition or subtraction of other shaped components (e.g. Coolant) - Because its area and volume are defined by other components, a DerivedShape's area and volume may change as the other components thermally expand. 
However the DerivedShape cannot drive thermal expansion itself, even if it is a solid component with non-zero thermal expansion coefficient """ def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): """ The bounding circle for a derived component. Notes ----- This is used to sort components relative to one another. There can only be one derived component per block, this is generally the coolant inside a duct. Under most circumstances, the volume (or area) of coolant will be greater than any other (single) component (i.e. a single pin) within the assembly. So, sorting based on the Dh of the DerivedShape will result in somewhat expected results. """ if self.parent is None: # since this is only used for comparison, and it must be smaller than at # least one component, make it 0 instead of infinity. return 0.0 else: # area = pi r**2 = pi d**2 / 4 => d = sqrt(4*area/pi) return math.sqrt(4.0 * self.getComponentArea() / math.pi) def computeVolume(self): """Cannot compute volume until it is derived. .. impl:: The volume of a DerivedShape depends on the solid shapes surrounding them. :id: I_ARMI_COMP_FLUID0 :implements: R_ARMI_COMP_FLUID Computing the volume of a ``DerivedShape`` means looking at the solid materials around it, and finding what shaped space is left over in between them. This method calls the method ``_deriveVolumeAndArea``, which makes use of the fact that the ARMI reactor data model is hierarchical. It starts by finding the parent of this object, and then finding the volume of all the other objects at this level. Whatever is left over, is the volume of this object. Obviously, you can only have one ``DerivedShape`` child of any parent for this logic to work. """ return self._deriveVolumeAndArea() def getMaxVolume(self): """ The maximum volume of the parent Block. Returns ------- vol : float volume in cm^3. 
""" return self.parent.getMaxArea() * self.parent.getHeight() def _deriveVolumeAndArea(self): """ Derive the volume and area of a ``DerivedShape``. Notes ----- If a parent exists, this will iterate over it and then determine both the volume and area based on its context within the scope of the parent object by considering the volumes and areas of the surrounding components. Since some components are volumetric shapes, this must consider the volume so that it wraps around in all three dimensions. But there are also situations where we need to handle zero-height blocks with purely 2D components. Thus we track area and volume fractions here when possible. """ if self.parent is None: raise ValueError(f"Cannot compute volume/area of {self} without a parent object.") # Determine the volume/areas of the non-derived shape components within the parent. siblingVolume = 0.0 siblingArea = 0.0 for sibling in self.parent: if sibling is self: continue elif not self and isinstance(sibling, DerivedShape): raise ValueError(f"More than one ``DerivedShape`` component in {self.parent} is not allowed.") siblingVolume += sibling.getVolume() try: if siblingArea is not None: siblingArea += sibling.getArea() except Exception: siblingArea = None remainingVolume = self.getMaxVolume() - siblingVolume if siblingArea: remainingArea = self.parent.getMaxArea() - siblingArea # Check for negative if remainingVolume < 0: msg = ( f"The component areas in {self.parent} exceed the maximum " "allowable volume based on the geometry. Check that the " "geometry is defined correctly.\n" f"Maximum allowable volume: {self.getMaxVolume()} " f"cm^3\nVolume of all non-derived shape components: {siblingVolume} cm^3\n" ) runLog.error(msg) raise ValueError(f"Negative area/volume errors occurred for {self.parent}. 
Check log for errors.") height = self.parent.getHeight() if not height: # special handling for 0-height blocks if not remainingArea: raise ValueError(f"Cannot derive area in 0-height block {self.parent}") self.p.area = remainingArea else: self.p.area = remainingVolume / height return remainingVolume def getVolume(self): """ Get volume of derived shape. The DerivedShape must pay attention to all of the companion objects, because if they change, this changes. However it's inefficient to always recompute the derived volume, so we have to rely on the parent to know if anything has changed. Since each parent is only allowed one DerivedShape, we can reset the update flag here. Returns ------- float volume of component in cm^3. """ if self.parent.derivedMustUpdate: # tell _updateVolume to update it during the below getVolume call self.p.volume = None self.parent.derivedMustUpdate = False vol = UnshapedComponent.getVolume(self) return vol def getComponentArea(self, cold=False, Tc=None): """ Get the area of this component in cm^2. Parameters ---------- cold : bool, optional If True, compute the area with as-input dimensions, instead of thermally-expanded. 
Tc : float, optional Temperature in C to compute the area at """ if cold and Tc is not None: raise ValueError(f"Cannot compute component area at {Tc} and cold dimensions simultaneously.") if cold: # At cold temp, the DerivedShape has the area of the parent minus the other siblings parentArea = self.parent.getMaxArea() # NOTE: Here we assume there is one-and-only-one DerivedShape in each Component siblings = sum([c.getArea(cold=True) for c in self.parent if not isinstance(c, DerivedShape)]) return parentArea - siblings if Tc is not None: # The DerivedShape has the area of the parent minus the other siblings parentArea = self.parent.getMaxArea() # NOTE: Here we assume there is one-and-only-one DerivedShape in each Component siblings = sum([c.getArea(Tc=Tc) for c in self.parent if not isinstance(c, DerivedShape)]) return parentArea - siblings if self.parent.derivedMustUpdate: self.computeVolume() return self.p.area ================================================ FILE: armi/reactor/components/basicShapes.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Components represented by basic shapes. Many reactor components can be described in 2D by circles, hexagons, rectangles, etc. These are defined in this subpackage. """ import math from armi.reactor.components import ShapedComponent, componentParameters class Circle(ShapedComponent): """A Circle. .. 
impl:: Circle shaped Component :id: I_ARMI_COMP_SHAPES0 :implements: R_ARMI_COMP_SHAPES This class provides the implementation of a Circle Component. This includes setting key parameters such as its material, temperature, and dimensions. It also includes a method to retrieve the area of a Circle Component via the ``getComponentArea`` method. """ is3D = False THERMAL_EXPANSION_DIMS = {"od", "id"} pDefs = componentParameters.getCircleParameterDefinitions() def __init__( self, name, material, Tinput, Thot, od, id=0.0, mult=1.0, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, od=od, id=id, mult=mult, modArea=modArea) def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): return max(self.getDimension("id", Tc, cold), self.getDimension("od", Tc, cold)) def getCircleInnerDiameter(self, Tc=None, cold=False): return min(self.getDimension("id", Tc, cold), self.getDimension("od", Tc, cold)) def getComponentArea(self, cold=False, Tc=None): """Computes the area for the circle component in cm^2.""" idiam = self.getDimension("id", cold=cold, Tc=Tc) od = self.getDimension("od", cold=cold, Tc=Tc) mult = self.getDimension("mult", cold=cold, Tc=Tc) area = math.pi * (od**2 - idiam**2) / 4.0 area *= mult return area def isEncapsulatedBy(self, other): """Return True if this ring lies completely inside the argument component.""" otherID, otherOD = other.getDimension("id"), other.getDimension("od") myID, myOD = self.getDimension("id"), self.getDimension("od") return otherID <= myID < otherOD and otherID < myOD <= otherOD class Hexagon(ShapedComponent): """A Hexagon. This hexagonal shape has a hexagonal hole cut out of the center of it. By default, that inner hole has a diameter of zero, making this a solid object with no hole. .. 
impl:: Hexagon shaped Component :id: I_ARMI_COMP_SHAPES1 :implements: R_ARMI_COMP_SHAPES This class provides the implementation of a hexagonal Component. This includes setting key parameters such as its material, temperature, and dimensions. It also includes methods for retrieving geometric dimension information unique to hexagons such as the ``getPitchData`` method. """ is3D = False pDefs = componentParameters.getHexagonParameterDefinitions() THERMAL_EXPANSION_DIMS = {"ip", "op"} def __init__( self, name, material, Tinput, Thot, op, ip=0.0, mult=1.0, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, op=op, ip=ip, mult=mult, modArea=modArea) def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): sideLength = self.getDimension("op", Tc, cold) / math.sqrt(3) return 2.0 * sideLength def getCircleInnerDiameter(self, Tc=None, cold=False): sideLength = self.getDimension("ip", Tc, cold) / math.sqrt(3) return 2.0 * sideLength def getComponentArea(self, cold=False, Tc=None): """Computes the area for the hexagon component in cm^2.""" op = self.getDimension("op", cold=cold, Tc=Tc) ip = self.getDimension("ip", cold=cold, Tc=Tc) mult = self.getDimension("mult") area = math.sqrt(3.0) / 2.0 * (op**2 - ip**2) area *= mult return area def getPitchData(self): """ Return the pitch data that should be used to determine block pitch. Notes ----- This pitch data should only be used if this is the pitch defining component in a block. The block is responsible for determining which component in it is the pitch defining component. """ return self.getDimension("op") class Rectangle(ShapedComponent): """A Rectangle. .. impl:: Rectangle shaped Component :id: I_ARMI_COMP_SHAPES2 :implements: R_ARMI_COMP_SHAPES This class provides the implementation for a rectangular Component. 
This includes setting key parameters such as its material, temperature, and dimensions. It also includes methods for computing geometric information related to rectangles, such as the ``getBoundingCircleOuterDiameter`` and ``getPitchData`` methods. """ is3D = False THERMAL_EXPANSION_DIMS = {"lengthInner", "lengthOuter", "widthInner", "widthOuter"} pDefs = componentParameters.getRectangleParameterDefinitions() def __init__( self, name, material, Tinput, Thot, lengthOuter=None, lengthInner=0.0, widthOuter=None, widthInner=0.0, mult=None, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, lengthOuter=lengthOuter, lengthInner=lengthInner, widthOuter=widthOuter, widthInner=widthInner, mult=mult, modArea=modArea, ) def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): lengthO = self.getDimension("lengthOuter", Tc, cold=cold) widthO = self.getDimension("widthOuter", Tc, cold=cold) return math.sqrt(widthO**2 + lengthO**2) def getCircleInnerDiameter(self, Tc=None, cold=False): lengthI = self.getDimension("lengthInner", Tc, cold=cold) widthI = self.getDimension("widthInner", Tc, cold=cold) return math.sqrt(widthI**2 + lengthI**2) def getComponentArea(self, cold=False, Tc=None): """Computes the area of the rectangle in cm^2.""" lengthO = self.getDimension("lengthOuter", cold=cold, Tc=Tc) widthO = self.getDimension("widthOuter", cold=cold, Tc=Tc) lengthI = self.getDimension("lengthInner", cold=cold, Tc=Tc) widthI = self.getDimension("widthInner", cold=cold, Tc=Tc) mult = self.getDimension("mult") area = mult * (lengthO * widthO - lengthI * widthI) return area def isLatticeComponent(self): """Return true if the component is a `lattice component` containing void material and zero area.""" return self.containsVoidMaterial() and self.getArea() == 0.0 def getPitchData(self): """ Return the 
pitch data that should be used to determine block pitch. Notes ----- For rectangular components there are two pitches, one for each dimension. This pitch data should only be used if this is the pitch defining component in a block. The block is responsible for determining which component in it is the pitch defining component. """ return (self.getDimension("lengthOuter"), self.getDimension("widthOuter")) class SolidRectangle(Rectangle): """Solid rectangle component.""" is3D = False THERMAL_EXPANSION_DIMS = {"lengthOuter", "widthOuter"} def __init__( self, name, material, Tinput, Thot, lengthOuter=None, widthOuter=None, mult=None, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, lengthOuter=lengthOuter, widthOuter=widthOuter, mult=mult, modArea=modArea, ) # these need to be set so that we don't try to write NoDefaults to the database. # Ultimately, it makes more sense to have the non-Solid Rectangle inherit from # this (and probably be called a HollowRectangle or RectangularShell or # whatever), since a solid rectangle is more generic of the two. Then the # Parameter definitions for the hollow rectangle could inherit from the ones, # adding the inner dimensions so that we wouldn't need to do this here. self.p.lengthInner = 0 self.p.widthInner = 0 def getComponentArea(self, cold=False, Tc=None): """Computes the area of the solid rectangle in cm^2.""" lengthO = self.getDimension("lengthOuter", cold=cold, Tc=Tc) widthO = self.getDimension("widthOuter", cold=cold, Tc=Tc) mult = self.getDimension("mult") area = mult * (lengthO * widthO) return area class Square(Rectangle): """Square component that can be solid or hollow. .. impl:: Square shaped Component :id: I_ARMI_COMP_SHAPES3 :implements: R_ARMI_COMP_SHAPES This class provides the implementation for a square Component. 
This class subclasses the ``Rectangle`` class because a square is a type of rectangle. This includes setting key parameters such as its material, temperature, and dimensions. """ is3D = False def __init__( self, name, material, Tinput, Thot, widthOuter=None, widthInner=0.0, mult=None, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, lengthOuter=widthOuter, widthOuter=widthOuter, widthInner=widthInner, lengthInner=widthInner, mult=mult, modArea=modArea, ) def getComponentArea(self, cold=False, Tc=None): """Computes the area of the square in cm^2.""" widthO = self.getDimension("widthOuter", cold=cold, Tc=Tc) widthI = self.getDimension("widthInner", cold=cold, Tc=Tc) mult = self.getDimension("mult") area = mult * (widthO * widthO - widthI * widthI) return area def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): widthO = self.getDimension("widthOuter", Tc, cold=cold) return math.sqrt(widthO**2 + widthO**2) def getCircleInnerDiameter(self, Tc=None, cold=False): widthI = self.getDimension("widthInner", Tc, cold=cold) return math.sqrt(widthI**2 + widthI**2) def getPitchData(self): """ Return the pitch data that should be used to determine block pitch. Notes ----- For rectangular components there are two pitches, one for each dimension. This pitch data should only be used if this is the pitch defining component in a block. The block is responsible for determining which component in it is the pitch defining component. """ # both dimensions are the same for a square. return (self.getDimension("widthOuter"), self.getDimension("widthOuter")) class Triangle(ShapedComponent): """ Triangle with defined base and height. .. impl:: Triangle shaped Component :id: I_ARMI_COMP_SHAPES4 :implements: R_ARMI_COMP_SHAPES This class provides the implementation for defining a triangular Component. 
This includes setting key parameters such as its material, temperature, and dimensions. It also includes providing a method for retrieving the area of a Triangle Component via the ``getComponentArea`` method. Notes ----- The exact angles of the triangle are undefined. The exact side lengths and angles are not critical to calculation of component area, so area can still be calculated. """ is3D = False THERMAL_EXPANSION_DIMS = {"base", "height"} pDefs = componentParameters.getTriangleParameterDefinitions() def __init__( self, name, material, Tinput, Thot, base=None, height=None, mult=None, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, base=base, height=height, mult=mult, modArea=modArea) def getComponentArea(self, cold=False, Tc=None): """Computes the area of the triangle in cm^2.""" base = self.getDimension("base", cold=cold, Tc=Tc) height = self.getDimension("height", cold=cold, Tc=Tc) mult = self.getDimension("mult") area = mult * base * height / 2.0 return area ================================================ FILE: armi/reactor/components/complexShapes.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Components represented by complex shapes, and typically less widely used.""" import math from armi.reactor.components import ShapedComponent, basicShapes, componentParameters class HoledHexagon(basicShapes.Hexagon): """Hexagon with n uniform circular holes hollowed out of it. .. impl:: Holed hexagon shaped Component :id: I_ARMI_COMP_SHAPES5 :implements: R_ARMI_COMP_SHAPES This class provides an implementation for a holed hexagonal Component. This includes setting key parameters such as its material, temperature, and dimensions. It also provides the capability to retrieve the diameter of the inner hole via the ``getCircleInnerDiameter`` method. """ THERMAL_EXPANSION_DIMS = {"op", "holeOD", "holeRadFromCenter"} pDefs = componentParameters.getHoledHexagonParameterDefinitions() def __init__( self, name, material, Tinput, Thot, op, holeOD, nHoles, holeRadFromCenter=0.0, mult=1.0, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, op=op, holeOD=holeOD, nHoles=nHoles, holeRadFromCenter=holeRadFromCenter, mult=mult, modArea=modArea, ) def getComponentArea(self, cold=False, Tc=None): """Computes the area for the hexagon with n number of circular holes in cm^2.""" op = self.getDimension("op", cold=cold, Tc=Tc) holeOD = self.getDimension("holeOD", cold=cold, Tc=Tc) nHoles = self.getDimension("nHoles", cold=cold, Tc=Tc) mult = self.getDimension("mult") hexArea = math.sqrt(3.0) / 2.0 * (op**2) circularArea = nHoles * math.pi * ((holeOD / 2.0) ** 2) area = mult * (hexArea - circularArea) return area def getCircleInnerDiameter(self, Tc=None, cold=False): """ For the special case of only one single hole, returns the diameter of that hole. For any other case, returns 0.0 because an "circle inner diameter" becomes undefined. 
""" if self.getDimension("nHoles") == 1: return self.getDimension("holeOD", Tc, cold) else: return 0.0 class HexHoledCircle(basicShapes.Circle): """Circle with a single uniform hexagonal hole hollowed out of it.""" THERMAL_EXPANSION_DIMS = {"od", "holeOP"} pDefs = componentParameters.getHexHoledCircleParameterDefinitions() def __init__( self, name, material, Tinput, Thot, od, holeOP, mult=1.0, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, od=od, holeOP=holeOP, mult=mult, modArea=modArea) def getComponentArea(self, cold=False, Tc=None): r"""Computes the area for the circle with one hexagonal hole.""" od = self.getDimension("od", cold=cold, Tc=Tc) holeOP = self.getDimension("holeOP", cold=cold, Tc=Tc) mult = self.getDimension("mult") hexArea = math.sqrt(3.0) / 2.0 * (holeOP**2) circularArea = math.pi * ((od / 2.0) ** 2) area = mult * (circularArea - hexArea) return area def getCircleInnerDiameter(self, Tc=None, cold=False): """Returns the diameter of the hole equal to the hexagon outer pitch.""" return self.getDimension("holeOP", Tc, cold) class FilletedHexagon(basicShapes.Hexagon): """ A hexagon with a hexagonal hole cut out of the center of it, where the corners of both the outer and inner hexagons are rounded, with independent radii of curvature. By default, the inner hole has a diameter of zero, making this a solid object with no hole. 
""" THERMAL_EXPANSION_DIMS = {"iR", "oR", "ip", "op"} pDefs = componentParameters.getFilletedHexagonParameterDefinitions() def __init__( self, name, material, Tinput, Thot, op, ip=0.0, iR=0.0, oR=0.0, mult=1.0, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, op=op, ip=ip, iR=iR, oR=oR, mult=mult, modArea=modArea) @staticmethod def _area(D, r): """Helper function, to calculate the area of a hexagon with rounded corners.""" if D <= 0.0: return 0.0 area = 1.0 - (1.0 - (math.pi / (2.0 * math.sqrt(3)))) * (2 * r / D) ** 2 area *= (math.sqrt(3.0) / 2.0) * D**2 return area def getComponentArea(self, cold=False, Tc=None): """Computes the area for the rounded hexagon component in cm^2.""" op = self.getDimension("op", cold=cold, Tc=Tc) ip = self.getDimension("ip", cold=cold, Tc=Tc) oR = self.getDimension("oR", cold=cold, Tc=Tc) iR = self.getDimension("iR", cold=cold, Tc=Tc) mult = self.getDimension("mult") area = self._area(op, oR) - self._area(ip, iR) area *= mult return area class HoledRectangle(basicShapes.Rectangle): """Rectangle with one circular hole in it.""" THERMAL_EXPANSION_DIMS = {"lengthOuter", "widthOuter", "holeOD"} pDefs = componentParameters.getHoledRectangleParameterDefinitions() def __init__( self, name, material, Tinput, Thot, holeOD, lengthOuter=None, widthOuter=None, mult=1.0, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, lengthOuter=lengthOuter, widthOuter=widthOuter, holeOD=holeOD, mult=mult, modArea=modArea, ) def getComponentArea(self, cold=False, Tc=None): """Computes the area (in cm^2) for the the rectangle with one hole in it.""" length = self.getDimension("lengthOuter", 
cold=cold, Tc=Tc) width = self.getDimension("widthOuter", cold=cold, Tc=Tc) rectangleArea = length * width holeOD = self.getDimension("holeOD", cold=cold, Tc=Tc) circularArea = math.pi * ((holeOD / 2.0) ** 2) mult = self.getDimension("mult") area = mult * (rectangleArea - circularArea) return area def getCircleInnerDiameter(self, Tc=None, cold=False): """Returns the ``holeOD``.""" return self.getDimension("holeOD", Tc, cold) class HoledSquare(basicShapes.Square): """Square with one circular hole in it. .. impl:: Holed square shaped Component :id: I_ARMI_COMP_SHAPES6 :implements: R_ARMI_COMP_SHAPES This class provides an implementation for a holed square Component. This includes setting key parameters such as its material, temperature, and dimensions. It also includes methods to retrieve geometric dimension information unique to holed squares via the ``getComponentArea`` and ``getCircleInnerDiameter`` methods. """ THERMAL_EXPANSION_DIMS = {"widthOuter", "holeOD"} pDefs = componentParameters.getHoledRectangleParameterDefinitions() def __init__( self, name, material, Tinput, Thot, holeOD, widthOuter=None, mult=1.0, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, widthOuter=widthOuter, holeOD=holeOD, mult=mult, modArea=modArea) def getComponentArea(self, cold=False, Tc=None): """Computes the area (in cm^2) for the the square with one hole in it.""" width = self.getDimension("widthOuter", cold=cold, Tc=Tc) rectangleArea = width**2 holeOD = self.getDimension("holeOD", cold=cold, Tc=Tc) circularArea = math.pi * ((holeOD / 2.0) ** 2) mult = self.getDimension("mult") area = mult * (rectangleArea - circularArea) return area def getCircleInnerDiameter(self, Tc=None, cold=False): """Returns the ``holeOD``.""" return self.getDimension("holeOD", Tc, cold) class Helix(ShapedComponent): """A 
spiral wire component used to model a pin wire-wrap. .. impl:: Helix shaped Component :id: I_ARMI_COMP_SHAPES7 :implements: R_ARMI_COMP_SHAPES This class provides the implementation for a helical Component. This includes setting key parameters such as its material, temperature, and dimensions. It also includes the ``getComponentArea`` method to retrieve the area of a helix. Helixes can be used for wire wrapping around fuel pins in fast reactor designs. Notes ----- http://mathworld.wolfram.com/Helix.html In a single rotation with an axial climb of P, the length of the helix will be a factor of 2*pi*sqrt(r^2+c^2)/2*pi*c longer than vertical length L. P = 2*pi*c. - od: outer diameter of the helix wire - id: inner diameter of the helix wire (if non-zero, helix wire is annular.) - axialPitch: vertical distance between wraps. Is also the axial distance required to complete a full 2*pi rotation. - helixDiameter: The helix diameter is the distance from the center of the wire-wrap on one side to the center of the wire-wrap on the opposite side (can be visualized if the axial pitch is 0.0 - creates a circle). 
""" is3D = False THERMAL_EXPANSION_DIMS = {"od", "id", "axialPitch", "helixDiameter"} pDefs = componentParameters.getHelixParameterDefinitions() def __init__( self, name, material, Tinput, Thot, od, axialPitch, helixDiameter, mult=1.0, id=0.0, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, od=od, axialPitch=axialPitch, mult=mult, helixDiameter=helixDiameter, id=id, modArea=modArea, ) def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): """The diameter of a circle which is encompassed by the exterior of the wire-wrap.""" return self.getDimension("helixDiameter", Tc, cold=cold) + self.getDimension("od", Tc, cold) def getCircleInnerDiameter(self, Tc=None, cold=False): """The diameter of a circle which is encompassed by the interior of the wire-wrap. This should be equal to the outer diameter of the pin in which the wire is wrapped around. """ return self.getDimension("helixDiameter", Tc, cold=cold) - self.getDimension("od", Tc, cold) def getComponentArea(self, cold=False, Tc=None): """Computes the area for the helix in cm^2.""" ap = self.getDimension("axialPitch", cold=cold, Tc=Tc) hd = self.getDimension("helixDiameter", cold=cold, Tc=Tc) id = self.getDimension("id", cold=cold, Tc=Tc) od = self.getDimension("od", cold=cold, Tc=Tc) mult = self.getDimension("mult") c = ap / (2.0 * math.pi) helixFactor = math.sqrt((hd / 2.0) ** 2 + c**2) / c area = mult * math.pi * ((od / 2.0) ** 2 - (id / 2.0) ** 2) * helixFactor return area ================================================ FILE: armi/reactor/components/component.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Components represent geometric objects within an assembly such as fuel, bond, coolant, ducts,
wires, etc.

This module contains the abstract definition of a Component.
"""

import copy
import re
from typing import Union

import numpy as np

from armi import materials, runLog
from armi.bookkeeping import report
from armi.materials import custom, material, void
from armi.reactor import composites, flags, parameters
from armi.reactor.components import componentParameters
from armi.utils import densityTools
from armi.utils.units import C_TO_K

# matches "componentName . dimensionName" (whitespace-tolerant) for dimension links
COMPONENT_LINK_REGEX = re.compile(r"^\s*(.+?)\s*\.\s*(.+?)\s*$")

# Human-readable labels for common dimension names (presumably for reports/plots;
# consumers are outside this view).
_NICE_DIM_NAMES = {
    "id": "Inner Diameter (cm)",
    "od": "Outer Diameter (cm)",
    "ip": "Inner Pitch (cm)",
    "op": "Outer Pitch (cm)",
    "mult": "Multiplicity",
    "axialPitch": "Axial Pitch (cm)",
    "helixDiameter": "Helix Diameter (cm)",
    "length": "Length (cm)",
    "height": "Height (cm)",
    "width": "Width (cm)",
    "areaMod": "Area Mod. Factor",
}


class _DimensionLink(tuple):
    """
    A linked dimension, where one component uses a dimension from another.

    Useful when the boundaries are physically shared and should move together.

    The tuple contains (linkedComponent, linkedDimensionName).

    In equating two components, we need the linked dimensions to resolve responsibly/precisely.
    """

    def getLinkedComponent(self):
        """Return the linked component."""
        return self[0]

    def resolveDimension(self, Tc=None, cold=False):
        """Return the current value of the linked dimension."""
        linkedComponent = self[0]
        dimID = self[1]
        return linkedComponent.getDimension(dimID, Tc=Tc, cold=cold)

    # NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    # (Python sets __hash__ to None) -- confirm no caller relies on hashing links.
    def __eq__(self, other):
        otherDimension = other.resolveDimension() if isinstance(other, _DimensionLink) else other
        return self.resolveDimension() == otherDimension

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        """Return a string representation of a dimension link.

        These look like ``otherComponentName.otherDimensionName``. For example, if a link were to
        a ``fuel`` component's ``od`` param, the link would render as ``fuel.od``.
        """
        return f"{self[0].name}.{self[1]}"


class ComponentType(composites.CompositeModelType):
    """
    ComponentType is a metaclass for storing and initializing Component subclass types.

    The construction of Component subclasses is being done through factories for ease of user
    input. As a consequence, the ``__init__`` methods' arguments need to be known in order to
    conform them to the correct format. Additionally, the constructors arguments can be used to
    determine the Component subclasses dimensions.

    Warning
    -------
    The import-time metaclass-based component subclass registration was a good idea, but in
    practice has caused significant confusion and trouble. We will replace this soon with an
    explicit plugin-based component subclass registration system.
    """

    # registry of all Component subclasses keyed by lowercased class name
    TYPES = dict()  #: :meta hide-value:

    # __init__ arguments that are NOT geometric dimensions
    NON_DIMENSION_NAMES = (
        "Tinput",
        "Thot",
        "isotopics",
        "mergeWith",
        "material",
        "name",
        "components",
        "area",
    )

    def __new__(cls, name, bases, attrs):
        newType = composites.CompositeModelType.__new__(cls, name, bases, attrs)
        ComponentType.TYPES[name.lower()] = newType
        # the co_varnames attribute contains arguments and then locals so we must
        # restrict it to just the arguments.
        signature = newType.__init__.__code__.co_varnames[1 : newType.__init__.__code__.co_argcount]
        # INIT_SIGNATURE and DIMENSION_NAMES are in the same order as the method signature
        newType.INIT_SIGNATURE = tuple(signature)
        newType.DIMENSION_NAMES = tuple(k for k in newType.INIT_SIGNATURE if k not in ComponentType.NON_DIMENSION_NAMES)
        return newType


class Component(composites.Composite, metaclass=ComponentType):
    """
    A primitive object in a reactor that has definite area/volume, material and composition.

    Could be fuel pins, cladding, duct, wire wrap, etc. One component object may represent
    multiple physical components via the ``multiplicity`` mechanism.

    .. impl:: Define a physical piece of a reactor.
        :id: I_ARMI_COMP_DEF
        :implements: R_ARMI_COMP_DEF

        The primitive object in an ARMI reactor is a Component. A Component is comprised of a
        shape and composition. This class serves as a base class which all Component types within
        ARMI are built upon. All primitive shapes (such as a square, circle, holed hexagon, helix
        etc.) are derived from this base class.

        Fundamental capabilities of this class include the ability to store parameters and
        attributes which describe the physical state of each Component within the ARMI data
        model.

    .. impl:: Order Components by their outermost diameter (using the < operator).
        :id: I_ARMI_COMP_ORDER
        :implements: R_ARMI_COMP_ORDER

        Determining Component order by outermost diameters is implemented via the ``__lt__()``
        method, which is used to control ``sort()`` as the standard approach in Python. However,
        ``__lt__()`` does not show up in the API.

    Attributes
    ----------
    temperatureInC : float
        Current temperature of component in celsius.
    inputTemperatureInC : float
        Reference temperature in C at which dimension definitions were input
    temperatureInC : float
        Temperature in C to which dimensions were thermally-expanded upon input.
    material : str or material.Material
        The material object that makes up this component and gives it its thermo-mechanical
        properties.
""" DIMENSION_NAMES = tuple() # will be assigned by ComponentType INIT_SIGNATURE = tuple() # will be assigned by ComponentType is3D = False # flag to show that area is 2D by default _COMP_REPORT_GROUPS = { "intercoolant": report.INTERCOOLANT_DIMS, "bond": report.BOND_DIMS, "duct": report.DUCT_DIMS, "coolant": report.COOLANT_DIMS, "clad": report.CLAD_DIMS, "fuel": report.FUEL_DIMS, "wire": report.WIRE_DIMS, "liner": report.LINER_DIMS, "gap": report.GAP_DIMS, } _TOLERANCE = 1e-10 THERMAL_EXPANSION_DIMS = set() pDefs = componentParameters.getComponentParameterDefinitions() material: materials.Material def __init__( self, name, material, Tinput, Thot, area=None, isotopics="", mergeWith="", components=None, ): if components and name in components: raise ValueError(f"Non-unique component name {name} repeated in same block.") composites.Composite.__init__(self, str(name)) self.p.area = area self.inputTemperatureInC = Tinput self.temperatureInC = Thot self.material = None self.setProperties(material) self.applyMaterialMassFracsToNumberDensities() # not necessary when duplicating self.setType(name) self.p.mergeWith = mergeWith self.p.customIsotopicsName = isotopics @property def temperatureInC(self): """Return the hot temperature in Celsius.""" return self.p.temperatureInC @temperatureInC.setter def temperatureInC(self, value): """Set the hot temperature in Celsius.""" self.p.temperatureInC = value @property def temperatureInK(self): """Current hot temperature in Kelvin.""" return self.temperatureInC + C_TO_K def __lt__(self, other): """ True if a circle encompassing this object has a smaller diameter than one encompassing another component. If the bounding circles for both components have identical size, then revert to checking the inner diameter of each component for sorting. This allows sorting because the Python sort functions only use this method. 
""" thisOD = self.getBoundingCircleOuterDiameter(cold=True) thatOD = other.getBoundingCircleOuterDiameter(cold=True) try: if thisOD == thatOD: thisID = self.getCircleInnerDiameter(cold=True) thatID = other.getCircleInnerDiameter(cold=True) return thisID < thatID else: return thisOD < thatOD except (NotImplementedError, Exception) as e: if isinstance(e, NotImplementedError): raise NotImplementedError(f"getCircleInnerDiameter not implemented for at least one of {self}, {other}") else: raise ValueError( f"Components 1 ({self} with OD {thisOD}) and 2 ({other} and OD {thatOD}) cannot be ordered because " "their bounding circle outer diameters are not comparable." ) def __setstate__(self, state): composites.Composite.__setstate__(self, state) self.material.parent = self def _linkAndStoreDimensions(self, components, **dims): """Link dimensions to another component.""" for key, val in dims.items(): self.setDimension(key, val) if components: self.resolveLinkedDims(components) def resolveLinkedDims(self, components): """Convert dimension link strings to actual links. .. impl:: The volume of some defined shapes depend on the solid components surrounding them. :id: I_ARMI_COMP_FLUID1 :implements: R_ARMI_COMP_FLUID Some Components are fluids and are thus defined by the shapes surrounding them. This method cycles through each dimension defining the border of this Component and converts the name of that Component to a link to the object itself. This series of links is then used downstream to resolve dimensional information. """ for dimName in self.DIMENSION_NAMES: value = self.p[dimName] if not isinstance(value, str): continue match = COMPONENT_LINK_REGEX.search(value) if match: try: name = match.group(1) comp = components[name] linkedKey = match.group(2) self.p[dimName] = _DimensionLink((comp, linkedKey)) except Exception: if value.count(".") > 1: raise ValueError( f"Name of {self} has a period in it. 
" f"Components cannot not have periods in their names: `{value}`" ) else: raise KeyError(f"Bad component link `{dimName}` defined as `{value}` in {self}") def setLink(self, key, otherComp, otherCompKey): """Set the dimension link.""" self.p[key] = _DimensionLink((otherComp, otherCompKey)) def setProperties(self, properties): """Apply thermo-mechanical properties of a Material.""" if isinstance(properties, str): mat = materials.resolveMaterialClassByName(properties)() # note that the material will not be expanded to natural isotopics # here because the user-input blueprints information is not available else: mat = properties self.material = mat self.material.parent = self self.clearLinkedCache() def applyMaterialMassFracsToNumberDensities(self): """ Set the hot number densities for the component based on material mass fractions/density. Notes ----- - the density returned accounts for the expansion of the component due to the difference in self.inputTemperatureInC and self.temperatureInC - After the expansion, the density of the component should reflect the 3d density of the material """ # note, that this is not the actual material density, but rather 2D expanded # `density` is 3D density # call getProperty to cache and improve speed density = self.material.getProperty("pseudoDensity", Tc=self.temperatureInC) self.p.numberDensities = densityTools.getNDensFromMasses(density, self.material.massFrac) # Sometimes material thermal expansion depends on its parent's composition (e.g. Pu frac) so # setting number densities can sometimes change thermal expansion behavior. Call again so # the material has access to its parent's comp when providing the reference initial density. 
densityBasedOnParentComposition = self.material.getProperty("pseudoDensity", Tc=self.temperatureInC) self.p.nuclides, self.p.numberDensities = densityTools.getNDensFromMasses( densityBasedOnParentComposition, self.material.massFrac ) # material needs to be expanded from the material's cold temp to hot, # not components cold temp, so we don't use mat.linearExpansionFactor or # component.getThermalExpansionFactor. # Materials don't typically define the temperature for which their references # density is defined so linearExpansionPercent must be called coldMatAxialExpansionFactor = 1.0 + self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100 self.changeNDensByFactor(1.0 / coldMatAxialExpansionFactor) def adjustDensityForHeightExpansion(self, newHot): """ Change the densities in cases where height of the block/component is changing with expansion. Notes ----- Call before setTemperature since we need old hot temp. This works well if there is only 1 solid component. If there are multiple components expanding at different rates during thermal expansion this becomes more complicated and, and axial expansion should be used. Multiple expansion rates cannot trivially be accommodated. """ self.changeNDensByFactor(1.0 / self.getHeightFactor(newHot)) def getHeightFactor(self, newHot): """ Return the factor by which height would change by if we did 3D expansion. Notes ----- Call before setTemperature since we need old hot temp. """ return self.getThermalExpansionFactor(Tc=newHot, T0=self.temperatureInC) def getProperties(self): """Return the active Material object defining thermo-mechanical properties. .. impl:: Material properties are retrievable. :id: I_ARMI_COMP_MAT0 :implements: R_ARMI_COMP_MAT This method returns the material object that is assigned to the Component. .. impl:: Components have one-and-only-one material. :id: I_ARMI_COMP_1MAT :implements: R_ARMI_COMP_1MAT This method returns the material object that is assigned to the Component. 
""" return self.material @property def liquidPorosity(self): return self.parent.p.liquidPorosity @liquidPorosity.setter def liquidPorosity(self, porosity): self.parent.p.liquidPorosity = porosity @property def gasPorosity(self): return self.parent.p.gasPorosity @gasPorosity.setter def gasPorosity(self, porosity): self.parent.p.gasPorosity = porosity def __copy__(self): """Duplicate a component, used for breaking fuel into separate components.""" linkedDims = self._getLinkedDimsAndValues() newC = copy.deepcopy(self) self._restoreLinkedDims(linkedDims) newC._restoreLinkedDims(linkedDims) return newC def setLumpedFissionProducts(self, lfpCollection): """Sets lumped fission product collection on a lfp compatible material if possible.""" try: self.getProperties().setLumpedFissionProducts(lfpCollection) except AttributeError: # This material doesn't setLumpedFissionProducts because it's a regular # material, not a lumpedFissionProductCompatable material pass def getArea(self, cold=False, Tc=None): """ Get the area of a Component in cm^2. .. impl:: Get a dimension of a Component. :id: I_ARMI_COMP_VOL0 :implements: R_ARMI_COMP_VOL This method returns the area of a Component. See Also -------- block.getVolumeFractions: component coolant is typically the "leftover" and is calculated and set here """ area = self.getComponentArea(cold=cold, Tc=Tc) if self.p.get("modArea", None): comp, arg = self.p.modArea if arg == "sub": area -= comp.getComponentArea(cold=cold, Tc=Tc) elif arg == "add": area += comp.getComponentArea(cold=cold, Tc=Tc) else: raise ValueError(f"Option {arg} does not exist") self._checkNegativeArea(area, cold) return area def getVolume(self): """ Return the volume [cm^3] of the Component. .. impl:: Get a dimension of a Component. :id: I_ARMI_COMP_VOL1 :implements: R_ARMI_COMP_VOL This method returns the volume of a Component. 
Notes ----- ``self.p.volume`` is not set until this method is called, so under most circumstances it is probably not safe to access ``self.p.volume`` directly. This is because not all components (e.g., ``DerivedShape``) can compute their volume during initialization. """ if self.p.volume is None: self._updateVolume() if self.p.volume is None: raise ValueError(f"{self} has undefined volume.") return self.p.volume def clearCache(self): """ Invalidate the volume so that it will be recomputed from current dimensions upon next access. The updated value will be based on its shape and current dimensions. If there is a parent container and that container contains a DerivedShape, then that must be updated as well since its volume may be changing. See Also -------- clearLinkedCache: Clears cache of components that depend on this component's dimensions. """ self.p.volume = None if self.parent: self.parent.derivedMustUpdate = True def _updateVolume(self): """Recompute and store volume.""" self.p.volume = self.computeVolume() def computeVolume(self): """Compute volume.""" if not self.is3D: volume = self.getArea() * self.parent.getHeight() else: volume = self.getComponentVolume() self._checkNegativeVolume(volume) return volume def _checkNegativeArea(self, area, cold): """ Check for negative area and warn/error when appropriate. Negative component area is allowed for Void materials (such as gaps) which may be placed between components that will overlap during thermal expansion (such as liners and cladding and annular fuel). Overlapping is allowed to maintain conservation of atoms while sticking close to the as-built geometry. Modules that need true geometries will have to handle this themselves. """ if np.isnan(area): return if area < 0.0: if (cold and not self.containsVoidMaterial()) or self.containsSolidMaterial(): negAreaFailure = ( f"Component {self} with {self.material} has cold negative area of {area} cm^2. 
" "This can be caused by component overlap with component dimension linking or by invalid inputs." ) raise ArithmeticError(negAreaFailure) def _checkNegativeVolume(self, volume): """Check for negative volume. See Also -------- self._checkNegativeArea """ if np.isnan(volume): return if volume < 0.0 and self.containsSolidMaterial(): negVolFailure = ( f"Component {self} with {self.material} has cold negative volume of {volume} cm^3. " "This can be caused by component overlap with component dimension linking or by invalid inputs." ) raise ArithmeticError(negVolFailure) def containsVoidMaterial(self): """Returns True if component material is void.""" return isinstance(self.material, void.Void) def containsSolidMaterial(self): """Returns True if the component material is a solid.""" return not isinstance(self.material, material.Fluid) def getComponentArea(self, cold=False, Tc=None): """ Get the area of this component in cm^2. Parameters ---------- cold : bool, optional Compute the area with as-input dimensions instead of thermally-expanded Tc : float, optional Temperature to compute the area at """ raise NotImplementedError def getComponentVolume(self): return self.p.volume def setVolume(self, val): raise NotImplementedError def setArea(self, val): raise NotImplementedError def setTemperature(self, temperatureInC): r""" Adjust temperature of this component. This will cause thermal expansion or contraction of solid or liquid components and will accordingly adjust number densities to conserve mass. Liquids still have a number density adjustment, but some mass tends to expand in or out of the bounding area. Since some composites have multiple materials in them that thermally expand differently, the axial dimension is generally left unchanged. Hence, this a 2-D thermal expansion. Number density change is proportional to mass density change :math:`\frac{d\rho}{\rho}`. A multiplicative factor :math:`f_N` to apply to number densities when going from T to T' is as follows: .. 
        math:: N^{\prime} = N \cdot f_N \\ \frac{dN}{N} = f_N - 1

        Since :math:`\frac{dN}{N} \sim\frac{d\rho}{\rho}`, we have:

        .. math:: f_N = \frac{d\rho}{\rho} + 1 = \frac{\rho^{\prime}}{\rho}
        """
        prevTemp, self.temperatureInC = self.temperatureInC, float(temperatureInC)
        f = self.material.getThermalExpansionDensityReduction(prevTemp, self.temperatureInC)
        self.changeNDensByFactor(f)
        self.clearLinkedCache()

    def getNuclides(self):
        """
        Return nuclides in this component.

        This includes anything that has been specified in here, including trace nuclides.
        """
        if self.p.nuclides is None:
            return []
        # stored as byte strings (dtype "S6"); decode for callers
        return [nucName.decode() for nucName in self.p.nuclides]

    def getNumberDensity(self, nucName):
        """
        Get the number density of nucName, return zero if it does not exist here.

        Parameters
        ----------
        nucName : str
            Nuclide name

        Returns
        -------
        number density : float
            number density in atoms/bn-cm.
        """
        i = np.where(self.p.nuclides == nucName.encode())[0]
        if i.size > 0:
            return self.p.numberDensities[i[0]]
        else:
            return 0.0

    def getNuclideNumberDensities(self, nucNames: list[str]) -> list[float]:
        """Return a list of number densities for the nuc names requested."""
        if isinstance(nucNames, (list, tuple, np.ndarray)):
            byteNucs = np.asanyarray(nucNames, dtype="S6")
        else:
            # assumes nucNames is some other iterable of str names -- TODO confirm
            byteNucs = [nucName.encode() for nucName in nucNames]

        if self.p.numberDensities is None:
            return np.zeros(len(byteNucs), dtype=np.float64)

        # trivial case where nucNames is the full set of nuclides in the same order
        if np.array_equal(byteNucs, self.p.nuclides):
            return np.array(self.p.numberDensities)

        # small subset: per-nuclide index lookup beats building the whole dict
        if len(byteNucs) < len(self.p.nuclides) / 10:
            return self._getNumberDensitiesArray(byteNucs)

        nDensDict = dict(zip(self.p.nuclides, self.p.numberDensities))
        return [nDensDict.get(nuc, 0.0) for nuc in byteNucs]

    def _getNumberDensitiesArray(self, byteNucs):
        """
        Get number densities using direct array lookup.

        When only a small subset of nuclide number densities are requested, it is likely faster
        to lookup the index for each nuclide than to recreate the entire dictionary for a lookup.

        Parameters
        ----------
        byteNucs : np.ndarray, dtype="S6"
            List of nuclides for which to retrieve number densities, as encoded byte strings
        """
        ndens = np.zeros(len(byteNucs), dtype=np.float64)
        nuclides = self.p.nuclides
        numberDensities = self.p.numberDensities
        # if it's just a small subset of nuclides, use np.where for direct index lookup
        for i, nuc in enumerate(byteNucs):
            j = np.where(nuclides == nuc)[0]
            if j.size > 0:
                ndens[i] = numberDensities[j[0]]
        return ndens

    def _getNdensHelper(self):
        nucs = self.getNuclides()
        return dict(zip(nucs, self.p.numberDensities)) if len(nucs) > 0 else {}

    def setName(self, name):
        """Components use name for type and name."""
        composites.Composite.setName(self, name)
        self.setType(name)

    def setNumberDensity(self, nucName, val):
        """
        Set heterogeneous number density.

        .. impl:: Setting nuclide fractions.
            :id: I_ARMI_COMP_NUCLIDE_FRACS0
            :implements: R_ARMI_COMP_NUCLIDE_FRACS

            The method allows a user or plugin to set the number density of a Component. It also
            indicates to other processes that may depend on a Component's status about this
            change via the ``assigned`` attribute.

        Parameters
        ----------
        nucName : str
            nuclide to modify
        val : float
            Number density to set in atoms/bn-cm (heterogeneous)
        """
        self.updateNumberDensities({nucName: val})

    def setNumberDensities(self, numberDensities):
        """
        Set one or more multiple number densities. Clears out any number density not listed.

        .. impl:: Setting nuclide fractions.
            :id: I_ARMI_COMP_NUCLIDE_FRACS1
            :implements: R_ARMI_COMP_NUCLIDE_FRACS

            The method allows a user or plugin to set the number densities of a Component. In
            contrast to the ``setNumberDensity`` method, it sets all densities within a
            Component.

        Parameters
        ----------
        numberDensities : dict
            nucName: ndens pairs.
def updateNumberDensities(self, numberDensities, wipe=False):
    """
    Set one or more multiple number densities. Leaves unlisted number densities alone.

    Parameters
    ----------
    numberDensities : dict
        nucName: ndens pairs.
    wipe : bool, optional
        Controls whether the old number densities are wiped. Any nuclide densities not
        provided in numberDensities will be effectively set to 0.0.

    Notes
    -----
    Sometimes volume/dimensions change due to number density change when the material thermal
    expansion depends on the component's composition (e.g. its plutonium fraction). In this
    case, changing the density will implicitly change the area/volume. Since it is difficult to
    predict the new dimensions, and perturbation/depletion calculations almost exclusively
    assume constant volume, the densities sent are automatically adjusted to conserve mass with
    the original dimensions. That is, the component's densities are not exactly as passed, but
    whatever they would need to be to preserve volume integrated number densities (moles) from
    the pre-perturbed component's volume/dimensions.

    This has no effect if the material thermal expansion has no dependence on component
    composition. If this is not desired, `self.p.numberDensities` and `self.p.nuclides` can be
    set directly.
    """
    # prepare to change the densities with knowledge that dims could change due to material
    # thermal expansion dependence on composition
    if self.p.numberDensities is not None and self.p.numberDensities.size > 0:
        dLLprev = self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100.0
        materialExpansion = True
    else:
        dLLprev = 0.0
        materialExpansion = False

    try:
        vol = self.getVolume()
    except (AttributeError, TypeError):
        # Either no parent to get height or parent's height is None. Which would be
        # AttributeError and TypeError respectively, but other errors could be possible.
        vol = None
        area = self.getArea()

    # change the densities
    if wipe:
        # replace both parallel arrays wholesale; anything not listed is dropped
        self.p.nuclides = np.asanyarray(list(numberDensities.keys()), dtype="S6")
        self.p.numberDensities = np.array(list(numberDensities.values()))
    else:
        # update existing entries in-place; collect genuinely new nuclides to append once
        newNucs = []
        newNumDens = []
        nucs = self.p.nuclides
        ndens = self.p.numberDensities
        for nucName, dens in numberDensities.items():
            i = np.where(nucs == nucName.encode())[0]
            if i.size > 0:
                ndens[i[0]] = dens
            else:
                newNucs.append(nucName.encode())
                newNumDens.append(dens)
        self.p.nuclides = np.append(nucs, newNucs)
        self.p.numberDensities = np.append(ndens, newNumDens)

    # check if thermal expansion changed
    dLLnew = self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100.0
    if dLLprev != dLLnew and materialExpansion:
        # the thermal expansion changed so the volume change is happening at same time as
        # density change was requested. Attempt to make mass consistent with old dims (since the
        # density change was for the old volume and otherwise mass wouldn't be conserved).
        self.clearLinkedCache()  # enable recalculation of volume, otherwise it uses cached
        if vol is not None:
            factor = vol / self.getVolume()
        else:
            # ``area`` was bound in the except-branch above; vol is None only on that path
            factor = area / self.getArea()
        self.changeNDensByFactor(factor)

    # since we are updating the object the param points to but not the param itself, we have to
    # inform the param system to flag it as modified so it syncs during ``syncMpiState``.
    self.p.assigned = parameters.SINCE_ANYTHING
    self.p.paramDefs["numberDensities"].assigned = parameters.SINCE_ANYTHING
self.p.assigned = parameters.SINCE_ANYTHING self.p.paramDefs["numberDensities"].assigned = parameters.SINCE_ANYTHING def changeNDensByFactor(self, factor): """Change the number density of all nuclides within the object by a multiplicative factor.""" if self.p.numberDensities is not None: self.p.numberDensities *= factor self._changeOtherDensParamsByFactor(factor) def _changeOtherDensParamsByFactor(self, factor): """Change the number density of all nuclides within the object by a multiplicative factor.""" if self.p.detailedNDens is not None: self.p.detailedNDens *= factor # Update pinNDens if self.p.pinNDens is not None: self.p.pinNDens *= factor def getEnrichment(self): """Get the mass enrichment of this component, as defined by the material.""" return self.getMassEnrichment() def getMassEnrichment(self): """ Get the mass enrichment of this component, as defined by the material. Notes ----- Getting mass enrichment on any level higher than this is ambiguous because you may have enriched boron in one pin and uranium in another and blending those doesn't make sense. """ if self.material.enrichedNuclide is None: raise ValueError(f"Cannot get enrichment of {self.material} because `enrichedNuclide` is not defined.") enrichedNuclide = self.nuclideBases.byName[self.material.enrichedNuclide] baselineNucNames = [nb.name for nb in enrichedNuclide.element.nuclides] massFracs = self.getMassFracs() massFracEnrichedElement = sum( massFrac for nucName, massFrac in massFracs.items() if nucName in baselineNucNames ) try: return massFracs.get(self.material.enrichedNuclide, 0.0) / massFracEnrichedElement except ZeroDivisionError: return 0.0 def getMass(self, nuclideNames: Union[None, str, list[str]] = None) -> float: r""" Determine the mass in grams of nuclide(s) and/or elements in this object. .. 
def setDimension(self, key, val, retainLink=False, cold=True):
    """
    Set a single dimension on the component.

    .. impl:: Set a Component dimension, considering thermal expansion.
        :id: I_ARMI_COMP_EXPANSION1
        :implements: R_ARMI_COMP_EXPANSION

        Dimensions should be set considering the impact of thermal expansion. This method
        allows for a user or plugin to set a dimension and indicate if the dimension is for a
        cold configuration or not. If it is not for a cold configuration, the thermal expansion
        factor is considered when setting the dimension.

        If the ``retainLink`` argument is ``True``, any Components linked to this one will also
        have its dimensions changed consistently. After a dimension is updated, the
        ``clearLinkedCache`` method is called which sets the volume of this Component to
        ``None``. This ensures that when the volume is next accessed it is recomputed using the
        updated dimensions.

    Parameters
    ----------
    key : str
        The dimension key (op, ip, mult, etc.)
    val : float
        The value to set on the dimension
    retainLink : bool, optional
        If True, the val will be applied to the dimension of linked component which indirectly
        changes this component's dimensions.
    cold : bool, optional
        If True sets the component cold dimension to the specified value.
    """
    if not key:
        return
    if retainLink and self.dimensionIsLinked(key):
        # forward the change to the upstream component; our dimension follows via the link
        linkedComp, linkedDimName = self.p[key]
        linkedComp.setDimension(linkedDimName, val, cold=cold)
    else:
        if not cold:
            # dimensions are stored cold; convert the hot value back to cold
            expansionFactor = self.getThermalExpansionFactor() if key in self.THERMAL_EXPANSION_DIMS else 1.0
            val /= expansionFactor
        self.p[key] = val

    # invalidate cached volumes (ours and dependents') so they are recomputed on next access
    self.clearLinkedCache()

def getDimension(self, key, Tc=None, cold=False):
    """
    Return a specific dimension at temperature as determined by key.

    .. impl:: Retrieve a dimension at a specified temperature.
        :id: I_ARMI_COMP_DIMS
        :implements: R_ARMI_COMP_DIMS

        Due to thermal expansion, Component dimensions depend on their temperature. This method
        retrieves a dimension from the Component at a particular temperature, if provided. If
        the Component is a LinkedComponent then the dimensions are resolved to ensure that any
        thermal expansion that has occurred to the Components that the LinkedComponent depends
        on is reflected in the returned dimension.

    Parameters
    ----------
    key : str
        The dimension key (op, ip, mult, etc.)
    Tc : float
        Temperature in C. If None, the current temperature of the component is used.
    cold : bool, optional
        If true, will return cold (input) value of the requested dimension
    """
    dimension = self.p[key]

    # links must be resolved against the other component's (possibly expanded) dims
    if isinstance(dimension, _DimensionLink):
        return dimension.resolveDimension(Tc=Tc, cold=cold)

    # falsy (0/None), cold, or non-expanding dims are returned as stored
    if not dimension or cold or key not in self.THERMAL_EXPANSION_DIMS:
        return dimension

    return self.getThermalExpansionFactor(Tc) * dimension
def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
    """Abstract bounding circle method that should be overwritten by each shape subclass."""
    raise NotImplementedError

def getCircleInnerDiameter(self, Tc=None, cold=False):
    """Abstract inner circle method that should be overwritten by each shape subclass.

    Notes
    -----
    The inner circle is meaningful for annular shapes, i.e., circle with non-zero ID, hexagon
    with non-zero IP, etc. For shapes with corners (e.g., hexagon, rectangle, etc) the inner
    circle intersects the corners of the inner bound, opposed to intersecting the "flats".
    """
    raise NotImplementedError

def dimensionIsLinked(self, key):
    """True if the specified dimension is linked to another dimension."""
    return key in self.p and isinstance(self.p[key], _DimensionLink)

def getDimensionNamesLinkedTo(self, otherComponent):
    """Find dimension names linked to the other component in this component."""
    return [
        (dimName, self.p[dimName][1])
        for dimName in self.DIMENSION_NAMES
        if self.dimensionIsLinked(dimName) and self.p[dimName].getLinkedComponent() is otherComponent
    ]

def clearLinkedCache(self):
    """Clear this cache and any other dependent volumes."""
    self.clearCache()
    if self.parent:
        # changes in dimensions can affect cached variables such as pitch
        self.parent.cached = {}
    for dependent in self.getLinkedComponents():
        # no clearCache since parent already updated derivedMustUpdate in self.clearCache()
        dependent.p.volume = None

def getLinkedComponents(self):
    """Find other components that are linked to this component."""
    return [
        sibling
        for sibling in self.parent
        for dimName in sibling.DIMENSION_NAMES
        if sibling.dimensionIsLinked(dimName) and sibling.p[dimName].getLinkedComponent() is self
    ]
def getThermalExpansionFactor(self, Tc=None, T0=None):
    """
    Retrieves the material thermal expansion fraction.

    .. impl:: Calculates radial thermal expansion factor.
        :id: I_ARMI_COMP_EXPANSION0
        :implements: R_ARMI_COMP_EXPANSION

        This method enables the calculation of the thermal expansion factor for a given
        material. If the material is solid, the difference between ``T0`` and ``Tc`` is used to
        calculate the thermal expansion factor. If a solid material does not have a linear
        expansion factor defined and the temperature difference is greater than a predetermined
        tolerance, an error is raised. Thermal expansion of fluids or custom materials is
        neglected, currently.

    Parameters
    ----------
    Tc : float, optional
        Adjusted temperature to get the thermal expansion factor at relative to the reference
        temperature. Defaults to the component's current temperature.
    T0 : float, optional
        Reference temperature. Defaults to the component's input (cold) temperature.

    Returns
    -------
    Thermal expansion factor as a percentage (1.0 + dLL), where dLL is the linear expansion
    factor.

    Raises
    ------
    RuntimeError
        If the material has no linear expansion implemented and the temperature difference
        exceeds the tolerance.
    """
    if isinstance(self.material, (material.Fluid, custom.Custom)):
        return 1.0  # No thermal expansion of fluids or custom materials

    if T0 is None:
        T0 = self.inputTemperatureInC
    if Tc is None:
        Tc = self.temperatureInC

    dLL = self.material.linearExpansionFactor(Tc=Tc, T0=T0)
    if not dLL and abs(Tc - T0) > self._TOLERANCE:
        runLog.error(
            f"Linear expansion percent may not be implemented in the {self.material} material class.\n"
            "This method needs to be implemented on the material to allow thermal expansion."
            f".\nReference temperature: {T0}, Adjusted temperature: {Tc}, Temperature difference: {(Tc - T0)}, "
            f"Specified tolerance: {self._TOLERANCE}",
            single=True,
        )
        raise RuntimeError(
            f"Linear expansion percent may not be implemented in the {self.material} material class."
        )
    return 1.0 + dLL

def printContents(self, includeNuclides=True):
    """Print a listing of the dimensions and composition of this component."""
    runLog.important(self)
    runLog.important(self.setDimensionReport())
    if includeNuclides:
        for nuc in self.getNuclides():
            runLog.important(f"{nuc:10s} {self.getNumberDensity(nuc):.7e}")

def setDimensionReport(self):
    """Gives a report of the dimensions of this component.

    Returns
    -------
    The populated report group, or a message string when no report group matches this
    component's name.
    """
    reportGroup = None
    for componentType, componentReport in self._COMP_REPORT_GROUPS.items():
        if componentType in self.getName():
            reportGroup = componentReport
            break
    if not reportGroup:
        return f"No report group designated for {self.getName()} component."
    reportGroup.header = [
        "",
        f"Tcold ({self.inputTemperatureInC})",
        f"Thot ({self.temperatureInC})",
    ]

    dimensions = {
        k: self.p[k] for k in self.DIMENSION_NAMES if k not in ("modArea", "area") and self.p[k] is not None
    }  # py3 cannot format None

    # Set component name and material
    report.setData("Name", [self.getName(), ""], reportGroup)
    report.setData("Material", [self.getProperties().name, ""], reportGroup)

    for dimName in dimensions:
        niceName = _NICE_DIM_NAMES.get(dimName, dimName)
        refVal = self.getDimension(dimName, cold=True)
        hotVal = self.getDimension(dimName)
        try:
            report.setData(niceName, [refVal, hotVal], reportGroup)
        except ValueError:
            runLog.warning(f"{self} has an invalid dimension for {dimName}. refVal: {refVal} hotVal: {hotVal}")

    # calculate thickness if applicable (annular shapes expose id/od or ip/op pairs)
    suffix = None
    if "id" in dimensions:
        suffix = "d"
    elif "ip" in dimensions:
        suffix = "p"

    if suffix:
        coldIn = self.getDimension(f"i{suffix}", cold=True)
        hotIn = self.getDimension(f"i{suffix}")
        coldOut = self.getDimension(f"o{suffix}", cold=True)
        hotOut = self.getDimension(f"o{suffix}")
        # Nested under ``if suffix`` (instead of re-testing ``suffix`` at the outer level) so
        # the conditionally-bound locals above are only read when they actually exist.
        if coldIn > 0.0:
            hotThick = (hotOut - hotIn) / 2.0
            coldThick = (coldOut - coldIn) / 2.0
            vals = (
                "Thickness (cm)",
                f"{coldThick:.7f}",
                f"{hotThick:.7f}",
            )
            report.setData(vals[0], [vals[1], vals[2]], reportGroup)

    return report.ALL[reportGroup]
def updateDims(self, key="", val=None):
    """Update a single dimension; thin wrapper around :meth:`setDimension`."""
    self.setDimension(key, val)

def mergeNuclidesInto(self, compToMergeWith):
    """
    Set another component's number densities to reflect this one merged into it.

    You must also modify the geometry of the other component and remove this component to
    conserve atoms.
    """
    # record pre-merged number densities and areas
    aMe = self.getArea()
    # if negative-area gap, treat is as 0.0 and return
    if aMe <= 0.0:
        return
    # NOTE(review): if compToMergeWith has zero area this divides by zero — confirm callers
    # guarantee a positive target area.
    aMerge = compToMergeWith.getArea()
    meNDens = {nucName: aMe / aMerge * self.getNumberDensity(nucName) for nucName in self.getNuclides()}
    mergeNDens = {nucName: compToMergeWith.getNumberDensity(nucName) for nucName in compToMergeWith.getNuclides()}

    # set the new homogenized number densities from both. Allow overlapping nuclides.
    for nucName in set(meNDens) | set(mergeNDens):
        compToMergeWith.setNumberDensity(nucName, (meNDens.get(nucName, 0.0) + mergeNDens.get(nucName, 0.0)))

def iterComponents(self, typeSpec=None, exact=False):
    """Yield this component itself when it matches ``typeSpec``."""
    if self.hasFlags(typeSpec, exact):
        yield self

def backUp(self):
    """
    Create and store a backup of the state.

    This needed to be overridden due to linked components which actually have a parameter value
    of another ARMI component.
    """
    # temporarily strip link objects so the generic backup machinery doesn't copy them
    linkedDims = self._getLinkedDimsAndValues()
    composites.Composite.backUp(self)
    self._restoreLinkedDims(linkedDims)

def restoreBackup(self, paramsToApply):
    """
    Restore the parameters from previously created backup.

    This needed to be overridden due to linked components which actually have a parameter value
    of another ARMI component.
    """
    linkedDims = self._getLinkedDimsAndValues()
    composites.Composite.restoreBackup(self, paramsToApply)
    self._restoreLinkedDims(linkedDims)
This needed to be overridden due to linked components which actually have a parameter value of another ARMI component. """ linkedDims = self._getLinkedDimsAndValues() composites.Composite.restoreBackup(self, paramsToApply) self._restoreLinkedDims(linkedDims) def _getLinkedDimsAndValues(self): linkedDims = [] for dimName in self.DIMENSION_NAMES: # backUp and restore are called in tight loops, getting the value and checking here is # faster than calling self.dimensionIsLinked because that requires and extra # p.__getitem__ try: val = self.p[dimName] except Exception: raise RuntimeError( f"Could not find parameter {dimName} defined for {self}. Is the desired Component class?" ) if isinstance(val, _DimensionLink): linkedDims.append((self.p.paramDefs[dimName].fieldName, val)) del self.p[dimName] return linkedDims def _restoreLinkedDims(self, linkedDims): # force update without setting the ".assigned" flag for fieldName, value in linkedDims: setattr(self.p, fieldName, value) def adjustMassEnrichment(self, massFraction): """ Change the mass fraction of this component. The nuclides to adjust are defined by the material. This changes whichever nuclides are to be enriched vs. the baseline nuclides of that element while holding mass constant. For example it might adjust boron or uranium enrichment. Conceptually, you could hold number of atoms, volume, or mass constant during this operation. Historically ARMI adjusted mass fractions which was meant to keep mass constant. If you have 20 mass % Uranium and adjust the enrichment, you will still have 20% Uranium mass. But, the actual mass actually might change a bit because the enriched nuclide weighs less. 
def getMgFlux(self, adjoint=False, average=False, gamma=False):
    """
    Return the multigroup neutron flux in [n/cm^2/s].

    The first entry is the first energy group (fastest neutrons). Each additional group is the
    next energy group, as set in the ISOTXS library.

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    average : bool, optional
        If True, will return average flux between latest and previous. Does not work for pin
        detailed.
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    flux : np.ndarray
        multigroup neutron flux in [n/cm^2/s]
    """
    if average:
        # fixed garbled message: was "-- tryusing blocks"
        raise NotImplementedError("Component has no method for producing average MG flux -- try using blocks")

    volume = self.getVolume() / self.parent.getSymmetryFactor()
    return self.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma) / volume

def getIntegratedMgFlux(self, adjoint=False, gamma=False):
    """
    Return the multigroup neutron tracklength in [n-cm/s].

    The first entry is the first energy group (fastest neutrons). Each additional group is the
    next energy group, as set in the ISOTXS library.

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    integratedFlux : multigroup neutron tracklength in [n-cm/s]
    """
    if self.p.pinNum is None:
        # no pin-level flux is available
        if not self.parent:
            return np.zeros(1)
        # apportion the parent's integrated flux by this component's volume fraction
        volumeFraction = (self.getVolume() / self.parent.getSymmetryFactor()) / self.parent.getVolume()
        return volumeFraction * self.parent.getIntegratedMgFlux(adjoint, gamma)

    # pin-level flux is available. Note that it is NOT integrated on the param level.
    if gamma:
        if adjoint:
            raise ValueError("Adjoint gamma flux is currently unsupported.")
        else:
            pinFluxes = self.parent.p.pinMgFluxesGamma
    else:
        if adjoint:
            pinFluxes = self.parent.p.pinMgFluxesAdj
        else:
            pinFluxes = self.parent.p.pinMgFluxes

    return pinFluxes[self.p.pinNum - 1] * self.getVolume() / self.parent.getSymmetryFactor()

def getPinMgFluxes(self, adjoint: bool = False, gamma: bool = False) -> np.ndarray[tuple[int, int], float]:
    """Retrieves the pin multigroup fluxes for the component.

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    np.ndarray
        A ``(N, nGroup)`` array of pin multigroup fluxes, where ``N`` is the equivalent to the
        multiplicity of the component (``self.p.mult``) and ``nGroup`` is the number of energy
        groups of the flux.

    Raises
    ------
    ValueError
        If the location(s) of the component are not aligned with pin indices from the block.
        This would happen if this component is not actually a pin.
    """
    # If we get a None, for a non-pin thing, the exception block at the bottom will catch
    # that and inform the user. so we don't need to add extra guard rails here
    indexMap = self.getPinIndices()

    # Get the parameter name we are trying to retrieve
    if gamma:
        if adjoint:
            raise ValueError("Adjoint gamma flux is currently unsupported.")
        else:
            param = "pinMgFluxesGamma"
    else:
        if adjoint:
            param = "pinMgFluxesAdj"
        else:
            param = "pinMgFluxes"

    try:
        return self.parent.p[param][indexMap]
    except Exception as ee:
        msg = f"Failure getting {param} from {self} via parent {self.parent}"
        runLog.error(msg)
        runLog.error(ee)
        raise ValueError(msg) from ee
def getPinIndices(self) -> np.ndarray[tuple[int], np.uint16]:
    """Find the indices for the locations where this component can be found in the block.

    Returns
    -------
    np.array[int]
        The indices in various Block-level pin methods, e.g.,
        :meth:`armi.reactor.blocks.Block.getPinLocations`, that correspond to this component.

    Raises
    ------
    ValueError
        If this does not have pin indices. This can be the case for components that live on
        blocks without spatial grids, or if they do not share lattice sites, via
        ``spatialLocator`` with other pins.

    See Also
    --------
    :meth`:armi.reactor.blocks.HexBlock.assignPinIndices`
    """
    ownIndices = self.p.pinIndices
    if isinstance(ownIndices, np.ndarray):
        return ownIndices

    # Fall back to a sibling that has pin indices and occupies the same lattice sites as us.
    for sibling in self.parent:
        if sibling is self or sibling.p.pinIndices is None:
            continue
        if sibling.spatialLocator == self.spatialLocator:
            return sibling.p.pinIndices

    raise ValueError(f"{self} on {self.parent} has no pin indices.")
def density(self) -> float:
    """Returns the mass density of the object in g/cc."""
    density = composites.Composite.density(self)

    if not density and not isinstance(self.material, void.Void):
        # possible that there are no nuclides in this component yet. In that case,
        # defer to Material. Material.density is wrapped to warn if it's attached
        # to a parent. Avoid that by calling the inner function directly
        density = self.material.density.__wrapped__(self.material, Tc=self.temperatureInC)

    return density

def getLumpedFissionProductCollection(self):
    """
    Get collection of LFP objects. Will work for global or block-level LFP models.

    Returns
    -------
    lfps : LumpedFissionProduct
        lfpName keys , lfp object values

    See Also
    --------
    armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct
    """
    if self.parent:
        return self.parent.getLumpedFissionProductCollection()
    else:
        return composites.ArmiObject.getLumpedFissionProductCollection(self)

def getMicroSuffix(self):
    """Return the micro suffix by delegating to this component's parent."""
    return self.parent.getMicroSuffix()

def getPitchData(self):
    """
    Return the pitch data that should be used to determine block pitch.

    Notes
    -----
    This pitch data should only be used if this is the pitch defining component in a block. The
    block is responsible for determining which component in it is the pitch defining component.
    """
    raise NotImplementedError(
        f"Method not implemented on component {self}. "
        "Please implement if this component type can be a pitch defining component."
    )

def getFuelMass(self) -> float:
    """Return the mass in grams if this is a fueled component."""
    return self.getMass() if self.hasFlags(flags.Flags.FUEL) else 0.0

def finalizeLoadingFromDB(self):
    """Apply any final actions after creating the component from database.

    This should **only** be called internally by the database loader. Otherwise some properties
    could be doubly applied.

    This exists because the theoretical density is initially defined as a material
    modification, and then stored as a Material attribute. When reading from blueprints, the
    blueprint loader sets the theoretical density parameter from the Material attribute.
    Component parameters are also set when reading from the database. But, we need to set the
    Material attribute so routines that fetch a material's density property account for the
    theoretical density.
    """
    self.material.adjustTD(self.p.theoreticalDensityFrac)


class ShapedComponent(Component):
    """A component with well-defined dimensions."""


================================================
FILE: armi/reactor/components/componentParameters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Component parameter definitions.""" import numpy as np from armi.reactor import parameters from armi.reactor.parameters import ParamLocation from armi.reactor.parameters.parameterDefinitions import isNumpyArray, isNumpyF32Array from armi.utils import units def getComponentParameterDefinitions(): """Return the base Component parameters.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("volume", units=f"{units.CM}^3", description="Volume of this object.") pb.defParam( "area", units=f"{units.CM}^2", description="Cross sectional area of this component.", ) pb.defParam( "mult", units=units.UNITLESS, description="The multiplicity of this component, i.e. how many of them there are.", default=1, ) pb.defParam( "mergeWith", units=units.UNITLESS, description="Label of other component to merge with", ) pb.defParam( "type", units=units.UNITLESS, description="The name of this object as input on the blueprints", ) pb.defParam( "temperatureInC", units=units.DEGC, description="Component temperature in {}".format(units.DEGC), ) pb.defParam( "numberDensities", setter=isNumpyArray("numberDensities"), units=f"#/(bn*{units.CM})", description="Number densities of each nuclide.", ) pb.defParam( "nuclides", setter=isNumpyArray("nuclides"), units=units.UNITLESS, description="Nuclide names corresponding to numberDensities array.", ) pb.defParam( "detailedNDens", setter=isNumpyArray("detailedNDens"), units=f"atoms/(bn*{units.CM})", description=( "High-fidelity number density vector with up to thousands of nuclides. " "Used in high-fi depletion runs where low-fi depletion may also be occurring. " "This param keeps the hi-fi and low-fi depletion values from interfering." 
), saveToDB=True, default=None, ) pb.defParam( "pinNDens", setter=isNumpyF32Array("pinNDens"), units=f"atoms/(bn*{units.CM})", description="Pin-wise number densities of each nuclide.", location=ParamLocation.AVERAGE, saveToDB=True, categories=["depletion", parameters.Category.pinQuantities], default=None, ) pb.defParam( "percentBu", units=f"{units.PERCENT_FIMA}", description="Burnup as a percentage of initial (heavy) metal atoms.", default=0.0, ) pb.defParam( "pinPercentBu", setter=isNumpyArray("pinPercentBu"), units=units.PERCENT_FIMA, description="Pin-wise burnup as a percentage of initial (heavy) metal atoms.", default=None, ) pb.defParam( "buRate", units=f"{units.PERCENT_FIMA}/{units.DAYS}", # This is very related to power, but normalized to %FIMA. description=( "Current rate of burnup accumulation. Useful for estimating times when burnup limits may be exceeded." ), ) pb.defParam( "enrichmentBOL", units=units.UNITLESS, description="Enrichment during fabrication (mass fraction)", default=0.0, ) pb.defParam( "massHmBOL", units=units.GRAMS, description="Mass of heavy metal at BOL", default=None, ) pb.defParam( "customIsotopicsName", units=units.UNITLESS, description="Label of isotopics applied to this component.", ) pb.defParam( "modArea", units=units.UNITLESS, description="A (component, operation) tuple used to add/subtract area (in " "cm^2) from another components area. See c.getArea()", ) pb.defParam( "zrFrac", units=units.UNITLESS, description="Original Zr frac of this, used for material properties.", ) pb.defParam( "pinNum", units=units.UNITLESS, description="Pin number of this component in some mesh. 
Starts at 1.", default=None, ) def _assignTDFrac(self, val): if val > 1 or val < 0: raise ValueError(f"Theoretical density fraction must be in range [0,1], got {val}") self._p_theoreticalDensityFrac = val pb.defParam( "theoreticalDensityFrac", description=( "Fractional value between zero and one, inclusive, for the theoretical density " "of the material stored on this component." ), units=units.UNITLESS, default=1, setter=_assignTDFrac, ) pb.defParam( "molesHmBOL", units=units.MOLES, default=0.0, description="Total number of moles of heavy metal at BOL.", ) def _validatePinIndices(self, val): if val is not None: # holds [0, 65_535] so at most, 65_535 pins per block self._p_pinIndices = np.array(val, dtype=np.uint16) else: self._p_pinIndices = None pb.defParam( "pinIndices", default=None, description=( "Indices within data arrays where values for this component are stored. " "The array is zero indexed and structured such that the j-th pin on this " "component can be found at ``Block.getPinLocations()[pinIndices[j]]``. 
" ), units=units.UNITLESS, setter=_validatePinIndices, ) return pDefs def getCircleParameterDefinitions(): """Return parameters for Circle.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("od", units=units.CM, description="Outer diameter") pb.defParam("id", units=units.CM, description="Inner diameter", default=0.0) pb.defParam("op", units=units.CM, description="Outer pitch") return pDefs def getHexagonParameterDefinitions(): """Return parameters for Hexagon.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("ip", units=units.CM, description="Inner pitch", default=0.0) pb.defParam("op", units=units.CM, description="Outer pitch") return pDefs def getHoledHexagonParameterDefinitions(): """Return parameters for HoledHexagon.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("holeOD", units=units.CM, description="Diameter of interior hole(s)") pb.defParam("nHoles", units=units.UNITLESS, description="Number of interior holes") pb.defParam( "holeRadFromCenter", units=units.CM, description="Distance from the center of the hexagon to the center of the holes assuming the hole centers " "all lie on a circle.", default=0.0, ) return pDefs def getHexHoledCircleParameterDefinitions(): """Return parameters for HexHoledCircle.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("holeOP", units=units.CM, description="Pitch of interior hole") return pDefs def getFilletedHexagonParameterDefinitions(): """Return parameters for FilletedHexagon.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("iR", units=units.CM, 
description="Radius of curvature of the inner corners") pb.defParam("oR", units=units.CM, description="Radius of curvature of the outer corners") return pDefs def getHoledRectangleParameterDefinitions(): """Return parameters for HoledRectangle.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("holeOD", units=units.CM, description="Diameter of interior hole") return pDefs def getHelixParameterDefinitions(): """Return parameters for Helix.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("od", units=units.CM, description="Outer diameter") pb.defParam("id", units=units.CM, description="Inner diameter", default=0.0) pb.defParam("op", units=units.CM, description="Outer pitch") pb.defParam( "axialPitch", units=units.CM, description="Axial pitch of helix in helical shapes.", ) pb.defParam("helixDiameter", units=units.CM, description="Diameter of helix") return pDefs def getRectangleParameterDefinitions(): """Return parameters for Rectangle.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("lengthInner", units=units.CM, description="Inner length") pb.defParam("lengthOuter", units=units.CM, description="Outer length") pb.defParam("widthInner", units=units.CM, description="Inner width") pb.defParam("widthOuter", units=units.CM, description="Outer width") return pDefs def getCubeParameterDefinitions(): """Return parameters for Cube.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam( "lengthInner", units=units.CM, default=0.0, description="Inner length dimension (if the cube is hollow).", ) pb.defParam("lengthOuter", units=units.CM, description="Outermost length dimension") pb.defParam( "widthInner", 
units=units.CM, default=0.0, description="Inner width dimension (if the cube is hollow).", ) pb.defParam("widthOuter", units=units.CM, description="Outermost width dimension") pb.defParam( "heightInner", units=units.CM, default=0.0, description="Inner height dimension (if the cube is hollow).", ) pb.defParam("heightOuter", units=units.CM, description="Outermost height dimension") return pDefs def getTriangleParameterDefinitions(): """Return parameters for Triangle.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("base", units=units.CM, description="Length of the base of the triangle") pb.defParam("height", units=units.CM, description="Height of the triangle") return pDefs def getUnshapedParameterDefinitions(): """Return parameters for UnshapedComponent.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam("op", units=units.CM, description="Outer pitch") pb.defParam( "userDefinedVolume", units=f"{units.CM}^3", description="Volume of this object.", ) return pDefs def getRadialSegmentParameterDefinitions(): """Return parameters for RadialSegment.""" pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb: pb.defParam( "inner_theta", units=units.RADIANS, description="Starting axial position, in radians.", ) pb.defParam( "outer_theta", units=units.RADIANS, description="Ending axial position, in radians.", ) pb.defParam( "inner_radius", units=units.CM, description="Starting radial position; this can be zero.", ) pb.defParam("outer_radius", units=units.CM, description="Ending radial position.") pb.defParam("height", units=units.CM, description="Height of the 3D radial segment.") pb.defParam( "azimuthal_differential", units=units.RADIANS, description="Perturbation in the azimuthal dimension (see inner_theta and 
outer_theta).", ) pb.defParam( "radius_differential", units=units.UNITLESS, description="Perturbation in the radial dimension (see inner_radius and outer_radius).", ) pb.defParam( "inner_axial", units=units.UNITLESS, description="Perturbation in the axial dimension (picture outer_axial = inner_axial + height).", ) pb.defParam( "outer_axial", units=units.UNITLESS, description="Perturbation result in the axial dimension (picture outer_axial = inner_axial + height).", ) return pDefs ================================================ FILE: armi/reactor/components/tests/__init__.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/reactor/components/tests/test_basicShapes.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit testing file for basic shapes."""

import math
import unittest

from armi.materials import resolveMaterialClassByName
from armi.reactor.components.basicShapes import (
    Circle,
    Hexagon,
    Rectangle,
    SolidRectangle,
    Square,
    Triangle,
)


class TestBasicShapes(unittest.TestCase):
    """Class for testing basic shapes.

    Each test follows the same pattern: build a component with cold (Tinput=20) and hot
    (Thot=300) temperatures, check the cold area against the analytic formula, then check
    the hot area against the same formula evaluated with the thermally-expanded dimensions
    reported by ``getDimension``. ``mult=2`` everywhere, so every analytic area is doubled.
    """

    @classmethod
    def setUpClass(cls):
        # One shared material instance for all tests; only its thermal expansion behavior
        # is exercised here.
        cls.material = resolveMaterialClassByName("HT9")()

    def test_circleArea(self):
        od = 2.0
        id = 1.5
        comp = Circle("Test", material=self.material, Tinput=20, Thot=300, od=od, id=id, mult=2)
        # Annulus: pi*(od^2 - id^2)/4, times mult.
        self.assertAlmostEqual(comp.getComponentArea(cold=True), math.pi * (od**2 / 4 - id**2 / 4) * 2)
        # Asking for the input temperature explicitly must match the cold result.
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        odHot = comp.getDimension("od")
        idHot = comp.getDimension("id")
        self.assertAlmostEqual(
            comp.getComponentArea(cold=False),
            math.pi * (odHot**2 / 4 - idHot**2 / 4) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))

    def test_hexagonArea(self):
        op = 2.0
        ip = 1.5
        comp = Hexagon("Test", material=self.material, Tinput=20, Thot=300, op=op, ip=ip, mult=2)
        # Hex area is sqrt(3)/2 * pitch^2; with mult=2 the factor becomes sqrt(3).
        self.assertAlmostEqual(comp.getComponentArea(cold=True), math.sqrt(3.0) * (op**2 - ip**2))
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        opHot = comp.getDimension("op")
        ipHot = comp.getDimension("ip")
        self.assertAlmostEqual(
            comp.getComponentArea(cold=False),
            math.sqrt(3.0) * (opHot**2 - ipHot**2),
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))

    def test_rectangleArea(self):
        lo = 2.0
        li = 1.5
        wo = 2.5
        wi = 1.25
        comp = Rectangle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            lengthOuter=lo,
            lengthInner=li,
            widthOuter=wo,
            widthInner=wi,
            mult=2,
        )
        # Hollow rectangle: outer area minus inner cutout, times mult.
        self.assertAlmostEqual(comp.getComponentArea(cold=True), 2 * (lo * wo - li * wi))
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        loHot = comp.getDimension("lengthOuter")
        liHot = comp.getDimension("lengthInner")
        woHot = comp.getDimension("widthOuter")
        wiHot = comp.getDimension("widthInner")
        self.assertAlmostEqual(comp.getComponentArea(cold=False), 2 * (loHot * woHot - liHot * wiHot))
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))

    def test_solidRectangleArea(self):
        lo = 2.0
        wo = 2.5
        comp = SolidRectangle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            lengthOuter=lo,
            widthOuter=wo,
            mult=2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=True), 2 * lo * wo)
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        loHot = comp.getDimension("lengthOuter")
        woHot = comp.getDimension("widthOuter")
        self.assertAlmostEqual(comp.getComponentArea(cold=False), 2 * loHot * woHot)
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))

    def test_squareArea(self):
        wo = 2.5
        wi = 1.25
        comp = Square(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            widthOuter=wo,
            widthInner=wi,
            mult=2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=True), 2 * (wo**2 - wi**2))
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        woHot = comp.getDimension("widthOuter")
        wiHot = comp.getDimension("widthInner")
        self.assertAlmostEqual(comp.getComponentArea(cold=False), 2 * (woHot**2 - wiHot**2))
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))

    def test_triangleArea(self):
        base = 2.5
        height = 1.25
        comp = Triangle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            base=base,
            height=height,
            mult=2,
        )
        # Triangle area is base*height/2; mult=2 cancels the 1/2.
        self.assertAlmostEqual(comp.getComponentArea(cold=True), base * height)
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        baseHot = comp.getDimension("base")
        heightHot = comp.getDimension("height")
        self.assertAlmostEqual(comp.getComponentArea(cold=False), baseHot * heightHot)
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))


================================================
FILE: armi/reactor/components/tests/test_complexShapes.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit testing file for complex shapes."""

import math
import unittest

from armi.materials import resolveMaterialClassByName
from armi.reactor.components.complexShapes import (
    HexHoledCircle,
    HoledHexagon,
    HoledRectangle,
    HoledSquare,
)


class TestComplexShapes(unittest.TestCase):
    """Class for testing complex shapes.

    Same cold/hot pattern as TestBasicShapes; the analytic reference areas are built from
    the small static helpers below (circle, hexagon, rectangle).
    """

    @classmethod
    def setUpClass(cls):
        cls.material = resolveMaterialClassByName("HT9")()

    @staticmethod
    def circArea(d):
        # Area of a circle of diameter d.
        return math.pi * (d / 2) ** 2

    @staticmethod
    def hexArea(op):
        # Area of a hexagon of outer (flat-to-flat) pitch op.
        return math.sqrt(3.0) / 2.0 * op**2

    @staticmethod
    def rectArea(l, w):
        return l * w

    def test_holedHexagon(self):
        op = 2.0
        holeOD = 0.5
        nHoles = 2
        comp = HoledHexagon(
            "TestHoledHexagon",
            material=self.material,
            Tinput=20,
            Thot=300,
            op=op,
            holeOD=holeOD,
            nHoles=nHoles,
            mult=2,
        )
        self.assertAlmostEqual(
            comp.getComponentArea(cold=True),
            (self.hexArea(op) - nHoles * self.circArea(holeOD)) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        opHot = comp.getDimension("op")
        holeODHot = comp.getDimension("holeOD")
        self.assertAlmostEqual(
            comp.getComponentArea(cold=False),
            (self.hexArea(opHot) - nHoles * self.circArea(holeODHot)) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))

        # Test that holeRadFromCenter does not change the area.
        comp2 = HoledHexagon(
            "TestHoledHexagonHoleRadFromCenter",
            material=self.material,
            Tinput=20,
            Thot=300,
            op=op,
            holeOD=holeOD,
            nHoles=nHoles,
            holeRadFromCenter=(op + holeOD) / 2,
            mult=2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp2.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp2.getComponentArea(cold=False))

        compHoleRadFromCenter = HoledHexagon(
            "TestHoledHexagon33",
            material=self.material,
            Tinput=20,
            Thot=300,
            op=op,
            holeOD=holeOD,
            nHoles=nHoles,
            holeRadFromCenter=0.5,
            mult=2,
        )
        # Cold query returns the input value; hot query includes thermal expansion, so it
        # must exceed the cold value.
        self.assertEqual(compHoleRadFromCenter.getDimension("holeRadFromCenter", cold=True, Tc=500), 0.5)
        self.assertGreater(compHoleRadFromCenter.getDimension("holeRadFromCenter", cold=False, Tc=500), 0.5)

    def test_holedRectangle(self):
        lo = 2.0
        wo = 3.0
        holeOD = 0.5
        comp = HoledRectangle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            lengthOuter=lo,
            widthOuter=wo,
            holeOD=holeOD,
            mult=2,
        )
        self.assertAlmostEqual(
            comp.getComponentArea(cold=True),
            (self.rectArea(lo, wo) - self.circArea(holeOD)) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        loHot = comp.getDimension("lengthOuter")
        woHot = comp.getDimension("widthOuter")
        holeODHot = comp.getDimension("holeOD")
        self.assertAlmostEqual(
            comp.getComponentArea(cold=False),
            (self.rectArea(loHot, woHot) - self.circArea(holeODHot)) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))

    def test_holedSquare(self):
        wo = 3.0
        holeOD = 0.5
        comp = HoledSquare(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            widthOuter=wo,
            holeOD=holeOD,
            mult=2,
        )
        self.assertAlmostEqual(
            comp.getComponentArea(cold=True),
            (self.rectArea(wo, wo) - self.circArea(holeOD)) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        woHot = comp.getDimension("widthOuter")
        holeODHot = comp.getDimension("holeOD")
        self.assertAlmostEqual(
            comp.getComponentArea(cold=False),
            (self.rectArea(woHot, woHot) - self.circArea(holeODHot)) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))

    def test_hexHoledCircle(self):
        od = 3.0
        holeOP = 0.5
        comp = HexHoledCircle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            od=od,
            holeOP=holeOP,
            mult=2,
        )
        self.assertAlmostEqual(
            comp.getComponentArea(cold=True),
            (self.circArea(od) - self.hexArea(holeOP)) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))
        odHot = comp.getDimension("od")
        holeOPHot = comp.getDimension("holeOP")
        self.assertAlmostEqual(
            comp.getComponentArea(cold=False),
            (self.circArea(odHot) - self.hexArea(holeOPHot)) * 2,
        )
        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))


================================================
FILE: armi/reactor/components/volumetricShapes.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Three-dimensional shapes."""

import math

from armi.reactor.components import ShapedComponent, componentParameters


class Sphere(ShapedComponent):
    """A spherical component."""

    is3D = True

    THERMAL_EXPANSION_DIMS = {}

    # Just usurp the Circle parameters. This may lead to issues at some point in things like the DB
    # interface, but for now, they are the same params, so why not?
pDefs = componentParameters.getCircleParameterDefinitions() def __init__( self, name, material, Tinput, Thot, od=None, id=None, mult=None, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions(components, od=od, id=id, mult=mult, modArea=modArea) def getBoundingCircleOuterDiameter(self, Tc=None, cold=False): """Abstract bounding circle method that should be overwritten by each shape subclass.""" return self.getDimension("od") def getComponentArea(self, cold=False, Tc=None): """Compute an average area over the height.""" from armi.reactor.blocks import Block # avoid circular import if Tc is not None: raise NotImplementedError(f"Cannot calculate area at specified temperature: {Tc}") block = self.getAncestor(lambda c: isinstance(c, Block)) return self.getComponentVolume(cold) / block.getHeight() def getComponentVolume(self, cold=False): """Computes the volume of the sphere in cm^3.""" od = self.getDimension("od", cold=cold) iD = self.getDimension("id", cold=cold) mult = self.getDimension("mult") vol = mult * 4.0 / 3.0 * math.pi * ((od / 2.0) ** 3 - (iD / 2.0) ** 3) return vol class Cube(ShapedComponent): """More correctly, a rectangular cuboid. Optionally, there may be a centric cuboid volume cut out of center of this shape. 
""" is3D = True THERMAL_EXPANSION_DIMS = {} pDefs = componentParameters.getCubeParameterDefinitions() def __init__( self, name, material, Tinput, Thot, lengthOuter=None, lengthInner=None, widthOuter=None, widthInner=None, heightOuter=None, heightInner=None, mult=None, modArea=None, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, lengthOuter=lengthOuter, lengthInner=lengthInner, widthOuter=widthOuter, widthInner=widthInner, heightOuter=heightOuter, heightInner=heightInner, mult=mult, modArea=modArea, ) def getComponentArea(self, cold=False, Tc=None): raise NotImplementedError("Cannot compute area of a cube component.") def getComponentVolume(self): """Computes the volume of the cube in cm^3.""" lengthO = self.getDimension("lengthOuter") widthO = self.getDimension("widthOuter") heightO = self.getDimension("heightOuter") lengthI = self.getDimension("lengthInner") widthI = self.getDimension("widthInner") heightI = self.getDimension("heightInner") mult = self.getDimension("mult") vol = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI) return vol class RadialSegment(ShapedComponent): r"""A RadialSegement represents a volume element with thicknesses in the azimuthal, radial and axial directions. This a 3D projection of a 2D shape that is an angular slice of a ring or circle. 
The 2D shape is like the one below, with an inner and outer position for the theta and the radius: Image:: Y ^ - | - | -XXXX\ | - \XXXXXXX\ | theta |XXXXXXX| |-----------------------> radius, X | | """ is3D = True THERMAL_EXPANSION_DIMS = {} pDefs = componentParameters.getRadialSegmentParameterDefinitions() def __init__( self, name, material, Tinput, Thot, inner_radius=None, outer_radius=None, height=None, mult=None, inner_theta=0, outer_theta=math.pi * 2, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, inner_radius=inner_radius, outer_radius=outer_radius, height=height, mult=mult, inner_theta=inner_theta, outer_theta=outer_theta, ) def getComponentArea(self, refVolume=None, refHeight=None, cold=False, Tc=None): if Tc is not None: raise NotImplementedError(f"Cannot calculate area at specified temperature: {Tc}") if refHeight: return ( (self.getDimension("height", cold=cold) / refHeight) * self.getDimension("mult") * ( math.pi * ( self.getDimension("outer_radius", cold=cold) ** 2 - self.getDimension("inner_radius", cold=cold) ** 2 ) * ( (self.getDimension("outer_theta", cold=cold) - self.getDimension("inner_theta", cold=cold)) / (math.pi * 2.0) ) ) ) if refVolume: return (self.getComponentVolume() / refVolume) / self.getDimension("height") else: return self.getComponentVolume() / self.getDimension("height") def getComponentVolume(self): mult = self.getDimension("mult") outerRad = self.getDimension("outer_radius") innerRad = self.getDimension("inner_radius") outerTheta = self.getDimension("outer_theta") innerTheta = self.getDimension("inner_theta") height = self.getDimension("height") radialArea = math.pi * (outerRad**2 - innerRad**2) aziFraction = (outerTheta - innerTheta) / (math.pi * 2.0) vol = mult * radialArea * aziFraction * height return vol def getBoundingCircleOuterDiameter(self, 
Tc=None, cold=False): return 2.0 * self.getDimension("outer_radius", Tc, cold) def getCircleInnerDiameter(self, Tc=None, cold=False): return 2.0 * self.getDimension("inner_radius", Tc, cold) class DifferentialRadialSegment(RadialSegment): """ This component class represents a volume element with thicknesses in the azimuthal, radial and axial directions. Furthermore it has dependent dimensions: (outer theta, outer radius, outer axial) that can be updated depending on the 'differential' in the corresponding directions. This component class is super useful for defining ThRZ reactors and perturbing its dimensions using the optimization modules See Also -------- geometry purturbation: armi.physics.optimize.OptimizationInterface.modifyCase (ThRZReflectorThickness,ThRZActiveHeight,ThRZActiveRadius) mesh updating: armi.reactor.reactors.Reactor.importGeom """ is3D = True THERMAL_EXPANSION_DIMS = {} def __init__( self, name, material, Tinput, Thot, inner_radius=None, radius_differential=None, inner_axial=None, height=None, inner_theta=0, azimuthal_differential=2 * math.pi, mult=1, isotopics=None, mergeWith=None, components=None, ): ShapedComponent.__init__( self, name, material, Tinput, Thot, isotopics=isotopics, mergeWith=mergeWith, components=components, ) self._linkAndStoreDimensions( components, inner_radius=inner_radius, radius_differential=radius_differential, inner_axial=inner_axial, height=height, inner_theta=inner_theta, azimuthal_differential=azimuthal_differential, mult=mult, ) self.updateDims() def updateDims(self, key="", val=None): """ Update the dimensions of differential radial segment component. Notes ----- Can be used to update any dimension on the component, but outer_radius, outer_axial, and outer_theta are always updated. 
See Also -------- armi.reactor.blocks.Block.updateComponentDims """ self.setDimension(key, val) self.setDimension( "outer_radius", self.getDimension("inner_radius") + self.getDimension("radius_differential"), ) self.setDimension( "outer_axial", self.getDimension("inner_axial") + self.getDimension("height"), ) self.setDimension( "outer_theta", self.getDimension("inner_theta") + self.getDimension("azimuthal_differential"), ) def getComponentArea(self, refVolume=None, refHeight=None, cold=False, Tc=None): if Tc is not None: raise NotImplementedError(f"Cannot calculate area at specified temperature: {Tc}") self.updateDims() return RadialSegment.getComponentArea(self, refVolume=None, refHeight=None, cold=False) def getComponentVolume(self): self.updateDims() return RadialSegment.getComponentVolume(self) ================================================ FILE: armi/reactor/composites.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains the basic composite pattern underlying the reactor package. This follows the principles of the `Composite Design Pattern <https://en.wikipedia.org/wiki/Composite_pattern>`_ to allow the construction of a part/whole hierarchy representing a physical nuclear reactor. The composite objects act somewhat like lists: they can be indexed, iterated over, appended, extended, inserted, etc. 
Each member of the hierarchy knows its children and its parent, so full access to the hierarchy
is available from everywhere. This design was chosen because of the close analogy of the model
to the physical nature of nuclear reactors.

Warning
-------
Because each member of the hierarchy is linked to the entire tree, it is often unsafe to save
references to individual members; it can cause large and unexpected memory inefficiencies.

See Also
--------
:doc:`/developer/index`.
"""

import collections
import itertools
import operator
import timeit
from typing import (
    TYPE_CHECKING,
    Callable,
    Dict,
    Iterator,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)

import numpy as np

from armi import context, runLog, utils
from armi.nucDirectory import nucDir, nuclideBases
from armi.physics.neutronics.fissionProductModel import fissionProductModel
from armi.reactor import grids, parameters
from armi.reactor.flags import Flags, TypeSpec
from armi.reactor.parameters import resolveCollections
from armi.utils import densityTools, tabulate, units
from armi.utils.densityTools import calculateNumberDensity
from armi.utils.flags import auto

if TYPE_CHECKING:
    from armi.reactor.components.component import Component


class FlagSerializer(parameters.Serializer):
    """
    Serializer implementation for Flags.

    This operates by converting each set of Flags (too large to fit in a uint64) into a sequence
    of enough uint8 elements to represent all flags. These constitute a dimension of a 2-D numpy
    array containing all Flags for all objects provided to the ``pack()`` function.
    """

    # Bump this whenever the on-disk representation changes; unpack() refuses mismatches.
    version = "1"

    @staticmethod
    def pack(data):
        """
        Flags are represented as a 2D numpy array of uint8 (single-byte, unsigned integers),
        where each row contains the bytes representing a single Flags instance. We also store
        the list of field names so that we can verify that the reader and the writer can agree
        on the meaning of each bit.

        Under the hood, this calls the private implementation providing the
        :py:class:`armi.reactor.flags.Flags` class as the target output class.
        """
        return FlagSerializer._packImpl(data, Flags)

    @staticmethod
    def _packImpl(data, flagCls: Type[utils.Flag]):
        """
        Implement the pack operation given a target output Flag class.

        This is kept separate from the public interface to permit testing of the functionality
        without having to do unholy things to ARMI's actual set of ``reactor.flags.Flags``.
        """
        # Flatten every instance's bytes into one stream, then reshape to
        # (n instances) x (bytes per flag set).
        npa = np.array([b for f in data for b in f.to_bytes()], dtype=np.uint8).reshape((len(data), flagCls.width()))

        return npa, {"flag_order": flagCls.sortedFields()}

    @staticmethod
    def _remapBits(inp: int, mapping: Dict[int, int]):
        """
        Given an input bitfield, map each bit to the appropriate new bit position based on the
        passed mapping.

        Parameters
        ----------
        inp : int
            input bitfield

        mapping : dict
            dictionary mapping from old bit position -> new bit position
        """
        f = 0
        for bit in itertools.count():
            # No set bits remain at or above this position; done.
            if (1 << bit) > inp:
                break
            if (1 << bit) & inp:
                f = f | (1 << mapping[bit])

        return f

    @classmethod
    def unpack(cls, data, version, attrs):
        """
        Reverse the pack operation.

        This will allow for some degree of conversion from old flags to a new set of flags, as
        long as all of the source flags still exist in the current set of flags.

        Under the hood, this calls the private implementation providing the
        :py:class:`armi.reactor.flags.Flags` class as the target output class.
        """
        return cls._unpackImpl(data, version, attrs, Flags)

    @classmethod
    def _unpackImpl(cls, data, version, attrs, flagCls: Type[utils.Flag]):
        """
        Implement the unpack operation given a target output Flag class.

        This is kept separate from the public interface to permit testing of the functionality
        without having to do unholy things to ARMI's actual set of ``reactor.flags.Flags``.

        If the set of flags for the currently-configured App match the input set of flags, they
        are read in directly, which is good and cheap. However, if the set of flags differ from
        the input and the current App, we will try to convert them (as long as all of the input
        flags exist in the current App). Conversion is done by forming a map from all input bit
        positions to the current-App bit positions of the same meaning. E.g., if FUEL flag used
        to be the 3rd bit position, but now it is the 6th bit position, the map will contain
        ``map[3] = 6``. Then for each bitfield that is read in, each bit position is queried and
        if present, mapped to the proper corresponding new bit position. The result of this
        mapping is used to construct the Flags object.
        """
        flagOrderPassed = attrs["flag_order"]
        flagOrderNow = flagCls.sortedFields()

        if version != cls.version:
            raise ValueError(
                f"The FlagSerializer version used to pack the data ({version}) does not match "
                f"the current version ({cls.version})! This database either needs to be migrated, "
                "or on-the-fly inter-version conversion needs to be implemented."
            )

        flagSetIn = set(flagOrderPassed)
        flagSetNow = set(flagOrderNow)

        # Make sure that all of the old flags still exist
        if not flagSetIn.issubset(flagSetNow):
            missingFlags = flagSetIn - flagSetNow
            runLog.warning(
                "The set of flags in the database includes unknown flags. For convenience, we will "
                f"add these to the system: {missingFlags}"
            )
            # Register the unknown flags so every stored bitfield can still be represented,
            # then refresh the field ordering to include them.
            flagCls.extend({k: auto() for k in missingFlags})
            flagOrderNow = flagCls.sortedFields()

        if all(i == j for i, j in zip(flagOrderPassed, flagOrderNow)):
            # Bit layouts agree; decode each row directly.
            out = [flagCls.from_bytes(row.tobytes()) for row in data]
        else:
            # Layouts differ: build old-bit -> new-bit map and remap each stored bitfield.
            newFlags = {i: flagOrderNow.index(oldFlag) for (i, oldFlag) in enumerate(flagOrderPassed)}
            out = [flagCls(cls._remapBits(int.from_bytes(row.tobytes(), byteorder="little"), newFlags)) for row in data]

        return out


def _defineBaseParameters():
    """
    Return parameter definitions that all ArmiObjects must have to function properly.

    For now, this pretty much just includes ``flags``, since these are used throughout the
    composite model to filter which objects are considered when traversing the reactor model.

    Note also that the base ParameterCollection class also has a ``serialNum`` parameter. These
    are defined in different locations, since serialNum is a guaranteed feature of a
    ParameterCollection (for serialization to the database and history tracking), while the
    ``flags`` parameter is more a feature of the composite model.

    .. important::
        Notice that the ``flags`` parameter is not written to the database. This is for a couple
        of reasons:

        * Flags are derived from an ArmiObject's name. Since the name is stored on the DB, it is
          possible to recover the flags from that.
        * Storing flags to the DB may be complicated, since it is easier to imagine a number of
          flags that is greater than the width of natively-supported integer types, requiring
          some extra tricks to store the flags in an HDF5 file.
        * Allowing flags to be modified by plugins further complicates things, in that it is
          important to ensure that the meaning of all bits in the flag value are consistent
          between a database state and the current ARMI environment. This may require encoding
          these meanings in to the database as some form of metadata.
    """
    pDefs = parameters.ParameterDefinitionCollection()

    pDefs.add(
        parameters.Parameter(
            "flags",
            units=units.UNITLESS,
            description="The type specification of this object",
            location=parameters.ParamLocation.AVERAGE,
            # NOTE(review): the docstring above says flags are NOT written to the database, but
            # saveToDB=True here — one of the two appears stale; confirm which is current.
            saveToDB=True,
            default=Flags(0),
            setter=parameters.NoDefault,
            categories=set(),
            serializer=FlagSerializer,
        )
    )

    return pDefs


class CompositeModelType(resolveCollections.ResolveParametersMeta):
    """
    Metaclass for tracking subclasses of ArmiObject subclasses.

    It is often useful to have an easily-accessible collection of all classes that participate in
    the ARMI composite reactor model. This metaclass maintains a collection of all defined
    subclasses, called TYPES.
""" TYPES: Dict[str, Type] = dict() """ Dictionary mapping class name to class object for all subclasses. :meta hide-value: """ def __new__(cls, name, bases, attrs): newType = resolveCollections.ResolveParametersMeta.__new__(cls, name, bases, attrs) CompositeModelType.TYPES[name] = newType return newType class ArmiObject(metaclass=CompositeModelType): """ The abstract base class for all composites and leaves. This: * declares the interface for objects in the composition * implements default behavior for the interface common to all classes * Declares an interface for accessing and managing child objects * Defines an interface for accessing parents. Called "component" in gang of four, this is an ArmiObject here because the word component was already taken in ARMI. The :py:class:`armi.reactor.parameters.ResolveParametersMeta` metaclass is used to automatically create ``ParameterCollection`` subclasses for storing parameters associated with any particular subclass of ArmiObject. Defining a ``pDefs`` class attribute in the definition of a subclass of ArmiObject will lead to the creation of a new subclass of py:class:`armi.reactor.parameters.ParameterCollection`, which will contain the definitions from that class's ``pDefs`` as well as the definitions for all of its parents. A new ``paramCollectionType`` class attribute will be added to the ArmiObject subclass to reflect which type of parameter collection should be used. Warning ------- This class has far too many public methods. We are in the midst of a composite tree cleanup that will likely break these out onto a number of separate functional classes grouping things like composition, location, shape/dimensions, and various physics queries. Methods are being collected here from the various specialized subclasses (Block, Assembly) in preparation for this next step. As a result, the public API on this method should be considered unstable. .. impl:: Parameters are accessible throughout the armi tree. 
        :id: I_ARMI_PARAM1
        :implements: R_ARMI_PARAM

        An ARMI reactor model is composed of collections of ARMIObject objects. These objects are
        combined in a hierarchical manner. Each level of the composite tree is able to be assigned
        parameters which define it, such as temperature, flux, or keff values. This class defines
        an attribute of type ``ParameterCollection``, which contains all the functionality of an
        ARMI ``Parameter`` object. Because the entire model is composed of ARMIObjects at the most
        basic level, each level of the Composite tree contains this parameter attribute and can
        thus be queried.

    Attributes
    ----------
    name : str
        Object name
    parent : ArmiObject
        The object's parent in a hierarchical tree
    cached : dict
        Some cached values for performance
    p : ParameterCollection
        The state variables
    spatialGrid : grids.Grid
        The spatial grid that this object contains
    spatialLocator : grids.LocationBase
        The location of this object in its parent grid, or global space

    See Also
    --------
    armi.reactor.parameters
    """

    # Populated by the ResolveParametersMeta metaclass machinery for each subclass.
    paramCollectionType: Optional[Type[parameters.ParameterCollection]] = None
    pDefs = _defineBaseParameters()

    def __init__(self, name):
        self.name = name
        self.parent = None
        self.cached = {}
        self._backupCache = None
        self.p = self.paramCollectionType()
        # NOTE: LFPs are not serialized to the database, which could matter when loading an old DB.
        self._lumpedFissionProducts = None
        self.spatialGrid = None
        # Default to a free-floating coordinate location not attached to any grid.
        self.spatialLocator = grids.CoordinateLocation(0.0, 0.0, 0.0, None)

    def __lt__(self, other):
        """
        Implement the less-than operator.

        Implementing this on the ArmiObject allows most objects, under most circumstances to be
        sorted. This is useful from the context of the Database classes, so that they can produce
        a stable layout of the serialized composite structure.

        By default, this sorts using the spatial locator in K, J, I order, which should give a
        relatively intuitive order. It also makes sure that the objects being sorted live in the
        same grid.

        Raises
        ------
        ValueError
            If either object has no spatialLocator, or the two locators belong to different grids.
        """
        if self.spatialLocator is None or other.spatialLocator is None:
            runLog.error(f"could not compare {self} and {other}")
            raise ValueError("One or more of the compared objects have no spatialLocator")

        if self.spatialLocator.grid is not other.spatialLocator.grid:
            runLog.error(f"could not compare {self} and {other}")
            raise ValueError(
                "Composite grids must be the same to compare:\n"
                f"This grid: {self.spatialGrid}\n"
                f"Other grid: {other.spatialGrid}"
            )
        try:
            # Reverse (i, j, k) -> (k, j, i) so tuple comparison orders by K, then J, then I.
            t1 = tuple(reversed(self.spatialLocator.getCompleteIndices()))
            t2 = tuple(reversed(other.spatialLocator.getCompleteIndices()))
            return t1 < t2
        except ValueError:
            runLog.error(f"failed to compare {self.spatialLocator} and {other.spatialLocator}")
            raise

    def __getstate__(self):
        """
        Python method for reducing data before pickling.

        This removes links to parent objects, which allows one to, for example, pickle an assembly
        without pickling the entire reactor. Likewise, one could MPI_COMM.bcast an assembly
        without broadcasting the entire reactor.

        Notes
        -----
        Special treatment of ``parent`` is not enough, since the spatialGrid also contains a
        reference back to the armiObject. Consequently, the ``spatialGrid`` needs to be reassigned
        in ``__setstate__``.
        """
        state = self.__dict__.copy()
        state["parent"] = None

        if "r" in state:
            # Guard against accidentally serializing the whole reactor through an attribute.
            raise RuntimeError("An ArmiObject should never contain the entire Reactor.")

        return state

    def __setstate__(self, state):
        """
        Sets the state of this ArmiObject.

        Notes
        -----
        This ArmiObject may have lost a reference to its parent. If the parent was also pickled
        (serialized), then the parent should update the ``.parent`` attribute during its own
        ``__setstate__``. That means within the context of ``__setstate__`` one should not rely
        upon ``self.parent``.
        """
        self.__dict__.update(state)

        if self.spatialGrid is not None:
            # restore the grid's back-reference to this object (dropped in __getstate__)
            self.spatialGrid.armiObject = self

        # Spatial locators also get disassociated with their grids when detached;
        # make sure they get hooked back up
        for c in self:
            c.spatialLocator.associate(self.spatialGrid)

        # now "reattach" children
        for c in self:
            c.parent = self

    def __repr__(self):
        return f"<{self.__class__.__name__}: {self.name}>"

    def __format__(self, spec):
        return format(str(self), spec)

    def __bool__(self):
        """
        Flag that says this is non-zero in a boolean context.

        Notes
        -----
        The default behavior for ``not [obj]`` that has a ``__len__`` defined is to see if the
        length is zero. However, for these composites, we'd like Assemblies, etc. to be considered
        non-zero even if they don't have any blocks. This is important for parent resolution, etc.
        If one of these objects exists, it is non-zero, regardless of its contents.
        """
        return True

    def __add__(self, other):
        """Return a list of all children in this and another object."""
        return self.getChildren() + other.getChildren()

    @property
    def nuclideBases(self):
        """Return the reactor-specific nuclide bases if attached to a Reactor, else the global set."""
        from armi.reactor.reactors import Reactor

        r = self.getAncestor(lambda c: isinstance(c, Reactor))
        if r:
            return r.nuclideBases
        else:
            return nuclideBases.nuclideBases

    def duplicate(self):
        """
        Make a clean copy of this object.

        Warning
        -------
        Be careful with inter-object dependencies. If one object contains a reference to another
        object which contains links to the entire hierarchical tree, memory can fill up rather
        rapidly. Weak references are designed to help with this problem.
        """
        raise NotImplementedError

    def clearCache(self):
        """Clear the cache so all new values are recomputed."""
        self.cached = {}
        for child in self:
            child.clearCache()

    def _getCached(self, name):
        """
        Obtain a value from the cache.

        Cached values can be used to temporarily store frequently read but long-to-compute values.
        The practice is generally discouraged because it's challenging to make sure to properly
        invalidate the cache when the state changes.
        """
        return self.cached.get(name, None)

    def _setCache(self, name, val):
        """
        Set a value in the cache.

        See Also
        --------
        _getCached : returns a previously-cached value
        """
        self.cached[name] = val

    def copyParamsFrom(self, other):
        """
        Overwrite this object's params with other object's.

        Parameters
        ----------
        other : ArmiObject
            The object to copy params from
        """
        # Replace the whole collection so no stale params from this object linger.
        self.p = other.p.__class__()
        for p, val in other.p.items():
            self.p[p] = val

    def updateParamsFrom(self, new):
        """
        Update this object's params with a new object's.

        Unlike ``copyParamsFrom``, params not present on ``new`` are left unchanged here.

        Parameters
        ----------
        new : ArmiObject
            The object to copy params from
        """
        for paramName, val in new.p.items():
            self.p[paramName] = val

    def iterChildren(
        self,
        deep=False,
        generationNum=1,
        predicate: Optional[Callable[["ArmiObject"], bool]] = None,
    ) -> Iterator["ArmiObject"]:
        """Iterate over children of this object. Abstract; implemented by subclasses."""
        raise NotImplementedError()

    def getChildren(self, deep=False, generationNum=1, includeMaterials=False) -> list["ArmiObject"]:
        """Return the children of this object. Abstract; implemented by subclasses."""
        raise NotImplementedError()

    def iterChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=False) -> Iterator["ArmiObject"]:
        """Produce an iterator of children that have given flags."""
        return self.iterChildren(predicate=lambda o: o.hasFlags(typeSpec, exactMatch))

    def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=False) -> list["ArmiObject"]:
        """Get all children that have given flags."""
        return list(self.iterChildrenWithFlags(typeSpec, exactMatch))

    def iterChildrenOfType(self, typeName: str) -> Iterator["ArmiObject"]:
        """Iterate over children that have a specific input type name."""
        return self.iterChildren(predicate=lambda o: o.getType() == typeName)

    def getChildrenOfType(self, typeName: str) -> list["ArmiObject"]:
        """Produce a list of children that have a specific input type name."""
        return list(self.iterChildrenOfType(typeName))
    def getComponents(self, typeSpec: TypeSpec = None, exact=False):
        """
        Return all armi.reactor.component.Component within this Composite.

        Abstract; implemented by subclasses.

        Parameters
        ----------
        typeSpec : TypeSpec
            Component flags. Will restrict Components to specific ones matching the flags
            specified.
        exact : bool, optional
            Only match exact component labels (names). If True, 'coolant' will not match
            'interCoolant'. This has no impact if compLabel is None.

        Returns
        -------
        list of Component
            items matching compLabel and exact criteria
        """
        raise NotImplementedError()

    def iterComponents(self, typeSpec: TypeSpec = None, exact=False):
        """Yield components one by one in a generator. Abstract; implemented by subclasses."""
        raise NotImplementedError()

    def doChildrenHaveFlags(self, typeSpec: TypeSpec, deep=False):
        """
        Generator that yields True if the next child has given flags.

        Parameters
        ----------
        typeSpec : TypeSpec
            Requested type of the child
        deep : bool, optional
            Passed through to ``getChildren`` to consider nested children as well.
        """
        for c in self.getChildren(deep):
            if c.hasFlags(typeSpec, exact=False):
                yield True
            else:
                yield False

    def containsAtLeastOneChildWithFlags(self, typeSpec: TypeSpec):
        """
        Return True if any of the children are of a given type.

        Parameters
        ----------
        typeSpec : TypeSpec
            Requested type of the children

        See Also
        --------
        self.doChildrenHaveFlags
        self.containsOnlyChildrenWithFlags
        """
        return any(self.doChildrenHaveFlags(typeSpec))

    def containsOnlyChildrenWithFlags(self, typeSpec: TypeSpec):
        """
        Return True if all of the children are of a given type.

        Parameters
        ----------
        typeSpec : TypeSpec
            Requested type of the children

        See Also
        --------
        self.doChildrenHaveFlags
        self.containsAtLeastOneChildWithFlags
        """
        return all(self.doChildrenHaveFlags(typeSpec))

    def copyParamsToChildren(self, paramNames):
        """
        Copy param values in paramNames to all children.

        Parameters
        ----------
        paramNames : list
            List of param names to copy to children
        """
        for paramName in paramNames:
            myVal = self.p[paramName]
            for c in self:
                c.p[paramName] = myVal

    @classmethod
    def getParameterCollection(cls):
        """
        Return a new instance of the specific ParameterCollection type associated with this object.

        This has the same effect as ``obj.paramCollectionType()``. Getting a new instance through
        a class method like this is useful in situations where the ``paramCollectionType`` is not
        a top-level object and therefore cannot be trivially pickled. Since we know that by the
        time we want to make any instances of/unpickle a given ``ArmiObject``, such a class
        attribute will have been created and associated. So, we use this top-level method to dig
        dynamically down to the underlying parameter collection type.

        .. impl:: Composites (and all ARMI objects) have parameter collections.
            :id: I_ARMI_CMP_PARAMS
            :implements: R_ARMI_CMP_PARAMS

            This class method allows a user to obtain the ``paramCollection`` object, which is the
            object containing the interface for all parameters of an ARMI object.

        See Also
        --------
        :py:meth:`armi.reactor.parameters.parameterCollections.ParameterCollection.__reduce__`
        """
        return cls.paramCollectionType()

    def getParamNames(self):
        """
        Get a list of parameters keys that are available on this object.

        Will not have any corner, edge, or timenode dependence.
        """
        # tuple keys carry corner/edge/timenode dependence and are excluded here
        return sorted(k for k in self.p.keys() if not isinstance(k, tuple))

    def nameContains(self, s):
        """
        True if s is in this object's name (eg. nameContains('fuel')==True for 'testfuel'.

        Parameters
        ----------
        s : str or list of str
            Substring(s) to look for; with a list, any single match returns True.

        Notes
        -----
        Case insensitive (all gets converted to lower)
        """
        name = self.name.lower()
        if isinstance(s, list):
            return any(n.lower() in name for n in s)
        else:
            return s.lower() in name

    def getName(self):
        """Get composite name."""
        return self.name

    def setName(self, name):
        self.name = name

    def hasFlags(self, typeID: TypeSpec, exact=False):
        """
        Determine if this object is of a certain type.

        .. impl:: Composites have queryable flags.
            :id: I_ARMI_CMP_FLAG0
            :implements: R_ARMI_CMP_FLAG

            This method queries the flags (i.e. the ``typeID``) of the Composite for a given type,
            returning a boolean representing whether or not the candidate flag is present in this
            ArmiObject. Candidate flags cannot be passed as a ``string`` type and must be of a
            type ``Flag``. If no flags exist in the object then ``False`` is returned.

            If a list of flags is provided, then all input flags will be checked against the flags
            of the object. If exact is ``False``, then the object must have at least one of
            candidates exactly. If it is ``True`` then the object flags and candidates must match
            exactly.

        Parameters
        ----------
        typeID : TypeSpec
            Flags to test the object against, to see if it contains them. If a list is provided,
            each element is treated as a "candidate" set of flags. Return True if any of
            candidates match. When exact is True, the object must match one of the candidates
            exactly. If exact is False, the object must have at least the flags contained in a
            candidate for that candidate to be a match; extra flags on the object are permitted.
            None matches all objects if exact is False, or no objects if exact is True.
        exact : bool, optional
            Require the type of the object to fully match the provided typeID(s)

        Returns
        -------
        hasFlags : bool
            True if this object is in the typeID list.

        Notes
        -----
        Type comparisons use bitwise comparisons using valid flags.

        If you have an 'inner control' assembly, then this will evaluate True for the
        INNER | CONTROL flag combination. If you just want all FUEL, simply use FUEL with no
        additional qualifiers. For more complex comparisons, use bitwise operations.

        Always returns true if typeID is none and exact is False, allowing for default parameters
        to be passed in when the method does not care about the object type. If the typeID is none
        and exact is True, this will always return False.

        Examples
        --------
        If you have an object with the ``INNER``, ``DRIVER``, and ``FUEL`` flags, then

        >>> obj.getType()
        [some integer]

        >>> obj.hasFlags(Flags.FUEL)
        True

        >>> obj.hasFlags(Flags.INNER | Flags.DRIVER | Flags.FUEL)
        True

        >>> obj.hasFlags(Flags.OUTER | Flags.DRIVER | Flags.FUEL)
        False

        >>> obj.hasFlags(Flags.INNER | Flags.FUEL)
        True

        >>> obj.hasFlags(Flags.INNER | Flags.FUEL, exact=True)
        False

        >>> obj.hasFlags([Flags.INNER | Flags.DRIVER | Flags.FUEL,
        ... Flags.OUTER | Flags.DRIVER | Flags.FUEL], exact=True)
        False
        """
        if not typeID:
            return not exact
        if isinstance(typeID, str):
            raise TypeError("Must pass Flags, or an iterable of Flags; Strings are no longer supported")

        elif not isinstance(typeID, Flags):
            # list behavior gives a spec1 OR spec2 OR ... behavior.
            return any(self.hasFlags(typeIDi, exact=exact) for typeIDi in typeID)

        if not self.p.flags:
            # default still set, or null flag. Do down here so we get proper error
            # handling of invalid typeSpecs
            return False

        if exact:
            # all bits must be identical for exact match
            return self.p.flags == typeID

        # all bits that are 1s in the typeID must be present
        return self.p.flags & typeID == typeID

    def getType(self):
        """Return the object type."""
        return self.p.type

    def setType(self, typ, flags: Optional[Flags] = None):
        """
        Set the object type.

        .. impl:: Composites have modifiable flags.
            :id: I_ARMI_CMP_FLAG1
            :implements: R_ARMI_CMP_FLAG

            This method allows for the setting of flags parameter of the Composite.

        Parameters
        ----------
        typ : str
            The desired "type" for the object. Type describes the general class of the object, and
            typically corresponds to the name of the blueprint that created it.

        flags : Flags, optional
            The set of Flags to apply to the object. If these are omitted, then Flags will be
            derived from the ``typ``.

        Warning
        -------
        We are in the process of developing more robust definitions for things like "name" and
        "type". "type" will generally refer to the name of the blueprint that created a particular
        object. When present, a "name" will refer to a specific instance of an object of a
        particular "type". Think unique names for each assembly in a core, even if they are all
        created from the same blueprint and therefore have the same "type". When this work is
        complete, it will be strongly discouraged, or even disallowed to change the type of an
        object after it has been created, and ``setType()`` may be removed entirely.
        """
        self.p.flags = flags or Flags.fromStringIgnoreErrors(typ)
        self.p.type = typ

    def getVolume(self):
        """Return the total volume as the sum over all children."""
        return sum(child.getVolume() for child in self)

    def getArea(self, cold=False):
        """Return the total area as the sum over all children."""
        return sum(child.getArea(cold) for child in self)

    def _updateVolume(self):
        """Recompute and store volume."""
        children = self.getChildren()
        # Derived shapes must come last so we temporarily change the order if we
        # have one.
        from armi.reactor.components import DerivedShape

        for child in children[:]:
            if isinstance(child, DerivedShape):
                children.remove(child)
                children.append(child)

        for child in children:
            child._updateVolume()

    def getVolumeFractions(self):
        """
        Return volume fractions of each child.

        Sets volume or area of missing piece (like coolant) if it exists. Caching would be nice
        here.

        Returns
        -------
        fracs : list
            list of (component, volFrac) tuples

        See Also
        --------
        test_block.Block_TestCase.test_consistentAreaWithOverlappingComponents

        Notes
        -----
        void areas can be negative in gaps between fuel/clad/liner(s), but these negative areas
        are intended to account for overlapping positive areas to insure the total area of
        components inside the clad is accurate. See
        test_block.Block_TestCase.test_consistentAreaWithOverlappingComponents
        """
        children = self.getChildren()
        numerator = [c.getVolume() for c in children]
        denom = sum(numerator)
        if denom == 0.0:
            # no volumes available; fall back to areas for the fractions
            numerator = [c.getArea() for c in children]
            denom = sum(numerator)

        fracs = [(ci, nu / denom) for ci, nu in zip(children, numerator)]
        return fracs

    def getVolumeFraction(self):
        """Return the volume fraction that this object takes up in its parent."""
        if self.parent is not None:
            for child, frac in self.parent.getVolumeFractions():
                if child is self:
                    return frac

        raise ValueError(f"No parent is defined for {self}. Cannot compute its volume fraction.")

    def getMaxArea(self):
        """
        The maximum area of this object if it were totally full.

        Abstract; implemented by subclasses.

        See Also
        --------
        armi.reactor.blocks.HexBlock.getMaxArea
        """
        raise NotImplementedError()

    def getMass(self, nuclideNames: Union[None, str, list[str]] = None) -> float:
        """
        Determine the mass in grams of nuclide(s) and/or elements in this object.

        .. impl:: Return mass of composite.
            :id: I_ARMI_CMP_GET_MASS
            :implements: R_ARMI_CMP_GET_MASS

            This method allows for the querying of the mass of a Composite. If the
            ``nuclideNames`` argument is included, it will filter for the mass of those nuclide
            names and provide the sum of the mass of those nuclides.

        Parameters
        ----------
        nuclideNames
            The nuclide/element specifier to get the mass of in the object. If omitted, total mass
            is returned.

        Returns
        -------
        mass : float
            The mass in grams.
        """
        return sum(c.getMass(nuclideNames=nuclideNames) for c in self)

    def getMassFrac(self, nucName):
        """
        Get the mass fraction of a nuclide.

        Notes
        -----
        If you need multiple mass fractions, use ``getMassFracs``.
        """
        nuclideNames = self._getNuclidesFromSpecifier(nucName)
        massFracs = self.getMassFracs()
        return sum(massFracs.get(nucName, 0.0) for nucName in nuclideNames)

    def getMicroSuffix(self):
        raise NotImplementedError(
            f"Cannot get the suffix on {type(self)} objects. Only certain subclasses"
            " of composite such as Blocks or Components have the concept of micro suffixes."
        )

    def _getNuclidesFromSpecifier(self, nucSpec: Union[None, str, list[str]]):
        """
        Convert a nuclide specification to a list of valid nuclide/element keys.

        nucSpec : nuclide specifier
            Can be a string name of a nuclide or element, or a list of such strings.

        This might get Zr isotopes when ZR is passed in if they exist, or it will get elemental ZR
        if that exists. When expanding elements, all known nuclides are returned, not just the
        natural ones.
        """
        allNuclidesHere = self.getNuclides()
        if nucSpec is None:
            return allNuclidesHere
        elif isinstance(nucSpec, (str)):
            nuclideNames = [nucSpec]
        elif isinstance(nucSpec, list):
            nuclideNames = nucSpec
        else:
            raise TypeError(f"nucSpec={nucSpec} is an invalid specifier. It is a {type(nucSpec)}")

        # expand elementals if appropriate.
        convertedNucNames = []
        for nucName in nuclideNames:
            if nucName in allNuclidesHere:
                convertedNucNames.append(nucName)
                continue
            try:
                # Need all nuclide bases, not just natural isotopics because, e.g. PU
                # has no natural isotopics!
                nucs = [
                    nb.name
                    for nb in self.nuclideBases.elements.bySymbol[nucName].nuclides
                    if not isinstance(nb, nuclideBases.NaturalNuclideBase)
                ]
                convertedNucNames.extend(nucs)
            except KeyError:
                # not an element symbol; pass the name through unchanged
                convertedNucNames.append(nucName)

        return sorted(set(convertedNucNames))

    def getMassFracs(self):
        """
        Get mass fractions of all nuclides in object.

        Ni [1/cm3] * Ai [g/mole] ~ mass
        """
        numDensities = self.getNumberDensities()
        return densityTools.getMassFractions(numDensities)

    def setMassFrac(self, nucName, val):
        """
        Adjust the composition of this object so the mass fraction of nucName is val.

        See Also
        --------
        setMassFracs : efficiently set multiple mass fractions at the same time.
        """
        self.setMassFracs({nucName: val})

    def setMassFracs(self, massFracs):
        r"""
        Apply one or more adjusted mass fractions.

        This will adjust the total mass of the object, as the mass of everything designated will
        change, while anything else will not.

        .. math::

            m_i = \frac{M_i}{\sum_j(M_j)}

            (M_{j \ne i} + M_i) m_i = M_i

            \frac{m_i M_{j \ne i}}{1-m_i} = M_i

            \frac{m_i M_{j \ne i}}{V(1-m_i)} = M_i/V = m_i \rho

            N_i = \frac{m_i \rho N_A}{A_i}

            N_i = \frac{m_i M_{j \ne i} N_A}{V (1-m_i) {A_i}}

            \frac{M_{j \ne i}}{V} = m_{j \ne i} \rho

            m_{j \ne i} = 1 - m_i

        Notes
        -----
        You can't just change one mass fraction though, you have scale all others to fill the
        remaining frac.

        Parameters
        ----------
        massFracs : dict
            nucName : new mass fraction pairs.
        """
        rho = self.density()
        if not rho:
            raise ValueError(f"Cannot set mass fractions on {self} because the mass density is zero.")
        oldMassFracs = self.getMassFracs()
        totalFracSet = 0.0
        for nucName, massFrac in massFracs.items():
            # convert the requested mass fraction into a number density
            self.setNumberDensity(
                nucName,
                (massFrac * rho * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM / nucDir.getAtomicWeight(nucName)),
            )
            if nucName in oldMassFracs:
                del oldMassFracs[nucName]
            totalFracSet += massFrac
        totalOther = sum(oldMassFracs.values())
        if totalOther:
            # we normalize the remaining mass fractions so their concentrations relative
            # to each other stay constant.
            normalizedOtherMassFracs = {nucNameOther: val / totalOther for nucNameOther, val in oldMassFracs.items()}
            for nucNameOther, massFracOther in normalizedOtherMassFracs.items():
                self.setNumberDensity(
                    nucNameOther,
                    (
                        (1.0 - totalFracSet)
                        * massFracOther
                        * rho
                        * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
                        / nucDir.getAtomicWeight(nucNameOther)
                    ),
                )

    def adjustMassFrac(
        self,
        nuclideToAdjust=None,
        elementToAdjust=None,
        nuclideToHoldConstant=None,
        elementToHoldConstant=None,
        val=0.0,
    ):
        r"""
        Set the initial Zr mass fraction while maintaining Uranium enrichment, but general purpose.

        Parameters
        ----------
        nuclideToAdjust : str, optional
            The nuclide name to adjust
        elementToAdjust : str, optional
            The element to adjust. All isotopes in this element will adjust
        nuclideToHoldconstant : str, optional
            A nuclide to hold constant
        elementToHoldConstant : str
            Same
        val : float
            The value to set the adjust mass fraction to be.

        Notes
        -----
        If you use this for two elements one after the other, you will probably get something
        wrong. For instance, if you have U-10Zr and add Pu at 10% mass fraction, the Zr fraction
        will drop below 10% of the total. The U-Zr fractions will remain constant though. So this
        is mostly useful if you have U-10Zr and want to change it to U-5Zr.

        Theory:

        Mass fraction of each nuclide to be adjusted = Ai where A1+A2+A...+AI = A
        Mass fraction of nuclides to be held constant = Ci where sum = C
        Mass fraction of other nuclides is Oi, sum = O
        new value for A is v

        A+C+O = 1.0
        A'=v. If A>0, then A'=A*f1=v where f1 = v/A
        If A=0, then Ai' = v/len(A), distributing the value evenly among isotopes

        Now, to adjust the other nuclides, we know
        A'+C+O' = 1.0 , or v+C+O' = 1.0
        So, O'= 1.0-v-C
        We can scale each Oi evenly by multiplying by the factor f2
        Oi' = Oi * (1-C-v)/O = Oi * f2  where f2= (1-C-v)

        See Also
        --------
        setMassFrac
        getMassFrac
        """
        self.clearCache()  # don't keep densities around or anything.
        if val > 1.0 or val < 0:
            raise ValueError(f"Invalid mass fraction {val} for {nuclideToAdjust}/{elementToAdjust} in {self.getName()}")
        if not nuclideToAdjust and not elementToAdjust:
            raise TypeError("Must provide a nuclide or element to adjust to adjustMassFrac")

        # sum of other nuclide mass fractions before change is Y
        # need Yx+newZr = 1.0 where x is a scaling factor
        # so x=(1-newZr)/Y

        # determine nuclides to hold constant
        nuclides = set(self.getNuclides())
        if nuclideToHoldConstant or elementToHoldConstant:
            # note that if these arguments are false, you'll get ALL nuclides in the
            # material use material.getNuclides to get only non-zero ones. use
            # nucDir.getNuclides to get all. Intersect with current nuclides to
            # eliminate double counting of element/isotopes
            constantNuclides = set(
                nucDir.getNuclideNames(nucName=nuclideToHoldConstant, elementSymbol=elementToHoldConstant)
            ).intersection(nuclides)
            constantSum = sum(self.getMassFrac(nucName) for nucName in constantNuclides)
        else:
            constantNuclides = []
            constantSum = 0.0

        # determine which nuclides we're adjusting.
        # Rather than calling this material's getNuclides method, we call the
        # nucDirectory to do this. this way, even zeroed-out nuclides will get in the mix
        adjustNuclides = set(
            nucDir.getNuclideNames(nucName=nuclideToAdjust, elementSymbol=elementToAdjust)
        ).intersection(nuclides)
        # get original mass frac A of those to be adjusted.
        A = sum(self.getMassFrac(ni) for ni in adjustNuclides)
        factor1 = val / A if A else None

        # set the ones we're adjusting to their given value.
        numNucs = len(adjustNuclides)
        newA = 0.0
        newMassFracs = {}
        for nuc in adjustNuclides:
            if factor1 is None:
                # this is for when adjust nuclides have zero mass fractions. Like Zr.
                # In this case, if there are multiple nuclides, we will distribute them
                # evenly because we have no other indication of how to adjust them.
                newMassFrac = val / numNucs
            else:
                # this is for when the nuclides we're adjusting already exist
                # with non-zero mass fractions could be Pu vector.
                newMassFrac = self.getMassFrac(nuc) * factor1
            newA += newMassFrac
            newMassFracs[nuc] = newMassFrac
            if nuc == "ZR":
                # custom parameter only set here to determine how to behave for UZr
                # density, linear expansion. Can't let it roam with each mass frac
                # 'cause then the density roams too and there are "oscillations"
                self.zrFrac = newMassFrac

        # error checking.
        if abs(newA - val) > 1e-10:
            runLog.error(f"Adjust Mass fraction did not adjust {adjustNuclides} from {A} to {val}. It got to {newA}")
            raise RuntimeError("Failed to adjust mass fraction.")

        # determine the mass fraction of the nuclides that will be adjusted to
        # accommodate the requested change
        othersSum = 1.0 - A - constantSum
        if not othersSum:
            # no others to be modified.
            factor2 = 1.0
        else:
            # use newA rather than val
            factor2 = (1.0 - newA - constantSum) / othersSum

        # change all the other nuclides using f2 factor
        for nuc in self.getNuclides():
            if nuc not in adjustNuclides and nuc not in constantNuclides:
                newMassFracs[nuc] = self.getMassFrac(nuc) * factor2

        self.setMassFracs(newMassFracs)

    def adjustMassEnrichment(self, massFraction):
        """
        Adjust the enrichment of this object.

        If it's Uranium, enrichment means U-235 fraction. If it's Boron, enrichment means B-10
        fraction, etc.

        Parameters
        ----------
        newEnrich : float
            The new enrichment as a fraction.
        """
        raise NotImplementedError

    def getNumberDensity(self, nucName):
        """
        Return the number density of a nuclide in atoms/barn-cm.

        .. impl:: Get number density for a specific nuclide
            :id: I_ARMI_CMP_NUC0
            :implements: R_ARMI_CMP_NUC

            This method queries the number density of a specific nuclide within the Composite. It
            invokes the ``getNuclideNumberDensities`` method for just the requested nuclide.

        Notes
        -----
        This can get called very frequently and has to do volume computations so should use some
        kind of caching that is invalidated by any temperature, composition, etc. changes. Even
        with caching the volume calls are still somewhat expensive so prefer the methods in see
        also.

        See Also
        --------
        ArmiObject.getNuclideNumberDensities: More efficient for >1 specific nuc density is needed.
        ArmiObject.getNumberDensities: More efficient for when all nucs in object is needed.
        """
        return self.getNuclideNumberDensities([nucName])[0]

    def getNuclideNumberDensities(self, nucNames):
        """Return a list of number densities in atoms/barn-cm for the nuc names requested.

        .. impl:: Get number densities for specific nuclides.
            :id: I_ARMI_CMP_NUC1
            :implements: R_ARMI_CMP_NUC

            This method provides the capability to query the volume weighted number densities for
            a list of nuclides within a given Composite. It provides the result in units of
            atoms/barn-cm. The volume weighting is accomplished by multiplying the number
            densities within each child Composite by the volume of the child Composite and
            dividing by the total volume of the Composite.
        """
        volumes = np.array(
            [c.getVolume() / (c.parent.getSymmetryFactor() if c.parent else 1.0) for c in self]
        )  # c x 1
        totalVol = volumes.sum()
        if totalVol == 0.0:
            # there are no children so no volume or number density
            return [0.0] * len(nucNames)

        nucDensForEachComp = np.array([c.getNuclideNumberDensities(nucNames) for c in self])  # c x n
        return volumes.dot(nucDensForEachComp) / totalVol

    def _getNdensHelper(self):
        """
        Return a number densities dict with unexpanded lfps.

        Notes
        -----
        This is implemented more simply on the component level.
        """
        nucNames = self.getNuclides()
        return dict(zip(nucNames, self.getNuclideNumberDensities(nucNames)))

    def getNumberDensities(self, expandFissionProducts=False):
        """
        Retrieve the number densities in atoms/barn-cm of all nuclides (or those requested) in the
        object.

        .. impl:: Number density of composite is retrievable.
            :id: I_ARMI_CMP_GET_NDENS
            :implements: R_ARMI_CMP_GET_NDENS

            This method provides a way for retrieving the number densities of all nuclides within
            the Composite. It does this by leveraging the ``_getNdensHelper`` method, which
            invokes the ``getNuclideNumberDensities`` method. This method considers the nuclides
            within each child Composite of this composite (if they exist).

            If the ``expandFissionProducts`` flag is ``True``, then the lumped fission products
            are expanded to include their constituent elements via the ``_expandLFPs`` method.

        Parameters
        ----------
        expandFissionProducts : bool (optional)
            expand the fission product number densities

        Returns
        -------
        numberDensities : dict
            nucName keys, number density values (atoms/bn-cm)
        """
        numberDensities = self._getNdensHelper()
        if expandFissionProducts:
            return self._expandLFPs(numberDensities)
        return numberDensities

    def _expandLFPs(self, numberDensities):
        """
        Expand the LFPs on the numberDensities dictionary using this composite's
        lumpedFissionProductCollection.
        """
        lfpCollection = self.getLumpedFissionProductCollection()
        if lfpCollection:  # may not have lfps in non-fuel
            lfpDensities = lfpCollection.getNumberDensities(self)
            # merge the expanded LFP constituents into the existing densities
            numberDensities = {
                nucName: numberDensities.get(nucName, 0.0) + lfpDensities.get(nucName, 0.0)
                for nucName in set(numberDensities) | set(lfpDensities)
            }
            # remove LFPs from the result
            for lfpName in lfpCollection:
                numberDensities.pop(lfpName, None)
        else:
            lfpMass = sum(
                dens
                for name, dens in numberDensities.items()
                if isinstance(self.nuclideBases.byName[name], nuclideBases.LumpNuclideBase)
            )
            if lfpMass:
                raise RuntimeError(
                    f"Composite {self} is attempting to expand lumped fission products, but does not have "
                    "an lfpCollection."
                )
        return numberDensities

    def getChildrenWithNuclides(self, nucNames):
        """Return children that contain any nuclides in nucNames."""
        nucNames = set(nucNames)  # only convert to set once
        return [child for child in self if nucNames.intersection(child.getNuclides())]

    def getAncestor(self, fn):
        """
        Return the first ancestor that satisfies the supplied predicate.

        Parameters
        ----------
        fn : Function-like object
            The predicate used to test the validity of an ancestor. Should return true if the
            ancestor satisfies the caller's requirements
        """
        if fn(self):
            return self
        if self.parent is None:
            return None
        else:
            return self.parent.getAncestor(fn)

    def getAncestorAndDistance(self, fn, _distance=0) -> Optional[Tuple["ArmiObject", int]]:
        """
        Return the first ancestor that satisfies the supplied predicate, along with how many
        levels above self the ancestor lies.

        Parameters
        ----------
        fn : Function-like object
            The predicate used to test the validity of an ancestor. Should return true if the
            ancestor satisfies the caller's requirements
        """
        if fn(self):
            return self, _distance
        if self.parent is None:
            return None
        else:
            # _distance accumulates as the recursion climbs the tree
            return self.parent.getAncestorAndDistance(fn, _distance + 1)

    def getAncestorWithFlags(self, typeSpec: TypeSpec, exactMatch=False):
        """
        Return the first ancestor that matches the passed flags.

        Parameters
        ----------
        typeSpec : TypeSpec
            A collection of flags to match on candidate parents
        exactMatch : bool
            Whether the flags match should be exact

        Returns
        -------
        armi.composites.ArmiObject
            the first ancestor up the chain of parents that matches the passed flags

        See Also
        --------
        ArmiObject.hasFlags()
        """
        if self.hasFlags(typeSpec, exact=exactMatch):
            return self

        if self.parent is None:
            return None
        else:
            return self.parent.getAncestorWithFlags(typeSpec, exactMatch=exactMatch)

    def getTotalNDens(self):
        """
        Return the total number density of all atoms in this object.

        Returns
        -------
        nTot : float
            Total ndens of all nuclides in atoms/bn-cm. Not homogenized.
        """
        nFPsPerLFP = fissionProductModel.NUM_FISSION_PRODUCTS_PER_LFP
        # LFPs count as two! Big deal in non BOL cases.
        return sum(dens * (nFPsPerLFP if "LFP" in name else 1.0) for name, dens in self.getNumberDensities().items())

    def setNumberDensity(self, nucName, val):
        """
        Set the number density of this nuclide to this value.

        This distributes atom density evenly across all children that contain nucName. If the
        nuclide doesn't exist in any of the children, then that's actually an error. This would
        only happen if some unnatural nuclide like Pu239 built up in fresh UZr. That should be
        anticipated and dealt with elsewhere.
        """
        activeChildren = self.getChildrenWithNuclides({nucName})
        if not activeChildren:
            activeVolumeFrac = 1.0
            if val:
                raise ValueError(
                    f"The nuclide {nucName} does not exist in any children of {self}; "
                    f"cannot set its number density to {val}. The nuclides here are: {self.getNuclides()}"
                )
        else:
            activeVolumeFrac = sum(vf for ci, vf in self.getVolumeFractions() if ci in activeChildren)

        dehomogenizedNdens = val / activeVolumeFrac  # scale up to dehomogenize on children.
        for child in activeChildren:
            child.setNumberDensity(nucName, dehomogenizedNdens)

    def setNumberDensities(self, numberDensities):
        """
        Set one or more multiple number densities. Reset any non-listed nuclides to 0.0.

        Parameters
        ----------
        numberDensities : dict
            nucName: ndens pairs.

        Notes
        -----
        We'd like to not have to call setNumberDensity for each nuclide because we don't want to
        call ``getVolumeFractions`` for each nuclide (it's inefficient).
        """
        # zero out any current nuclide that is not in the incoming dict
        numberDensities.update({nuc: 0.0 for nuc in self.getNuclides() if nuc not in numberDensities})
        self.updateNumberDensities(numberDensities)

    def updateNumberDensities(self, numberDensities):
        """
        Set one or more multiple number densities. Leaves unlisted number densities alone.

        This changes a nuclide number density only on children that already have that nuclide,
        thereby allowing, for example, actinides to stay in the fuel component when setting
        block-level values.

        The complication is that various number densities are distributed among various
        components. This sets the number density for each nuclide evenly across all components
        that contain it.

        Parameters
        ----------
        numberDensities : dict
            nucName: ndens pairs.
        """
        children, volFracs = zip(*self.getVolumeFractions())
        childNucs = tuple(set(child.getNuclides()) for child in children)

        allDehomogenizedNDens = collections.defaultdict(dict)

        # compute potentially-different homogenization factors for each child. evenly
        # distribute entire number density over the subset of active children.
        for nuc, dens in numberDensities.items():
            # get "active" indices, i.e., indices of children containing nuc
            # NOTE: this is one of the rare instances in which (imo), using explicit
            # indexing clarifies subsequent code since it's not necessary to zip +
            # filter + extract individual components (just extract by filtered index).
            indiciesToSet = tuple(i for i, nucsInChild in enumerate(childNucs) if nuc in nucsInChild)

            if not indiciesToSet:
                if dens == 0:
                    # density is zero, skip
                    continue

                # This nuc doesn't exist in any children but is to be set.
                # Evenly distribute it everywhere.
                childrenToSet = children
                dehomogenizedNDens = dens / sum(volFracs)
            else:
                childrenToSet = tuple(children[i] for i in indiciesToSet)
                dehomogenizedNDens = dens / sum(volFracs[i] for i in indiciesToSet)

            for child in childrenToSet:
                allDehomogenizedNDens[child][nuc] = dehomogenizedNDens

        # apply the child-dependent ndens vectors to the children
        for child, ndens in allDehomogenizedNDens.items():
            child.updateNumberDensities(ndens)

    def changeNDensByFactor(self, factor):
        """Change the number density of all nuclides within the object by a multiplicative factor."""
        densitiesScaled = {nuc: val * factor for nuc, val in self.getNumberDensities().items()}
        self.setNumberDensities(densitiesScaled)

        # Update detailedNDens
        if self.p.detailedNDens is not None:
            self.p.detailedNDens *= factor

        # Update pinNDens
        if self.p.pinNDens is not None:
            self.p.pinNDens *= factor

    def clearNumberDensities(self):
        """
        Reset all the number densities to nearly zero.

        Set to almost zero, so components remember which nuclides are where.
""" ndens = {nuc: units.TRACE_NUMBER_DENSITY for nuc in self.getNuclides()} self.setNumberDensities(ndens) def density(self): """Returns the mass density of the object in g/cc.""" density = 0.0 for nuc in self.getNuclides(): density += ( self.getNumberDensity(nuc) * nucDir.getAtomicWeight(nuc) / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM ) return density def getNumberOfAtoms(self, nucName): """Return the number of atoms of nucName in this object.""" numDens = self.getNumberDensity(nucName) # atoms/bn-cm return numDens * self.getVolume() / units.CM2_PER_BARN def getLumpedFissionProductCollection(self): """ Get collection of LFP objects. Will work for global or block-level LFP models. Returns ------- lfps : LumpedFissionProduct lfpName keys , lfp object values See Also -------- armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct : LFP object """ return self._lumpedFissionProducts def setLumpedFissionProducts(self, lfpCollection): self._lumpedFissionProducts = lfpCollection def setChildrenLumpedFissionProducts(self, lfpCollection): for c in self: c.setLumpedFissionProducts(lfpCollection) def getFissileMassEnrich(self): """Returns the fissile mass enrichment.""" hm = self.getHMMass() if hm > 0: return self.getFissileMass() / hm else: return 0.0 def getUraniumNumEnrich(self): """Returns fissile uranium number fraction.""" uraniumNucs = self._getNuclidesFromSpecifier("U") totalU = sum(self.getNuclideNumberDensities(uraniumNucs)) if totalU < 1e-10: return 0.0 fissileU = sum(self.getNuclideNumberDensities(["U233", "U235"])) return fissileU / totalU def calcTotalParam( self, param, objs=None, volumeIntegrated=False, addSymmetricPositions=False, typeSpec: TypeSpec = None, generationNum=1, calcBasedOnFullObj=False, ): """ Sums up a parameter throughout the object's children or list of objects. Parameters ---------- param : str Name of the block parameter to sum objs : iterable, optional A list of objects to sum over. 
If none, all children in object will be used volumeIntegrated : bool, optional Integrate over volume addSymmetricPositions : bool, optional If True, will multiply by the symmetry factor of the core (3 for 1/3 models, 1 for full core models) typeSpec : TypeSpec object types to restrict to generationNum : int, optional Which generation to consider. 1 means direct children, 2 means children of children. Default: Just return direct children. calcBasedOnFullObj : bool, optional Some assemblies or blocks, such as the center assembly in a third core model, are not modeled as full assemblies or blocks. In the third core model objects at these positions are modeled as having 1/3 the volume and thus 1/3 the power. Setting this argument to True will apply the full value of the parameter as if it was a full block or assembly. """ tot = 0.0 if objs is None: objs = self.getChildren(generationNum=generationNum) if addSymmetricPositions: if calcBasedOnFullObj: raise ValueError( "AddSymmetricPositions is Incompatible with calcBasedOnFullObj. Will result in double counting." ) try: coreMult = self.powerMultiplier except AttributeError: coreMult = self.parent.powerMultiplier if not coreMult: raise ValueError(f"powerMultiplier is equal to {coreMult}") else: coreMult = 1.0 for a in objs: if not a.hasFlags(typeSpec): continue mult = a.getVolume() if volumeIntegrated else 1.0 if calcBasedOnFullObj: mult *= a.getSymmetryFactor() tot += a.p[param] * mult return tot * coreMult def calcAvgParam( self, param, typeSpec: TypeSpec = None, weightingParam=None, volumeAveraged=True, absolute=True, generationNum=1, ): r""" Calculate the child-wide average of a parameter. Parameters ---------- param : str The ARMI block parameter that you want the average from typeSpec : TypeSpec The child types that should be included in the calculation. Restrict average to a certain child type with this parameter. 
weightingParam : None or str, optional An optional block param that the average will be weighted against volumeAveraged : bool, optional volume (or height, or area) average this param absolute : bool, optional Returns the average of the absolute value of param generationNum : int, optional Which generation to average over (1 for children, 2 for grandchildren) The weighted sum is: .. math:: \left<\text{x}\right> = \frac{\sum_{i} x_i w_i}{\sum_i w_i} where :math:`i` is each child, :math:`x_i` is the param value of the i-th child, and :math:`w_i` is the weighting param value of the i-th child. Warning ------- If a param is unset/zero on any of the children, this will be included in the average and may significantly perturb results. Returns ------- float The average parameter value. """ total = 0.0 weightSum = 0.0 for child in self.getChildren(generationNum=generationNum): if child.hasFlags(typeSpec): if weightingParam: weight = child.p[weightingParam] if weight < 0: # Just for conservatism, do not allow negative weights. raise ValueError(f"Weighting value ({weightingParam},{weight}) cannot be negative.") else: weight = 1.0 if volumeAveraged: weight *= child.getVolume() weightSum += weight if absolute: total += abs(child.p[param]) * weight else: total += child.p[param] * weight if not weightSum: raise ValueError( f"Cannot calculate {weightingParam}-weighted average of {param} in {self}. " f"Weights sum to zero. typeSpec is {typeSpec}" ) return total / weightSum def getMaxParam( self, param, typeSpec: TypeSpec = None, absolute=True, generationNum=1, returnObj=False, ): """ Find the maximum value for the parameter in this container. Parameters ---------- param : str block parameter that will be sought. typeSpec : TypeSpec restricts the search to cover a variety of block types. absolute : bool looks for the largest magnitude value, regardless of sign, default: true returnObj : bool, optional If true, returns the child object as well as the value. 
Returns ------- maxVal : float The maximum value of the parameter asked for obj : child object The object that has the max (only returned if ``returnObj==True``) """ compartor = lambda x, y: x > y return self._minMaxHelper( param, typeSpec, absolute, generationNum, returnObj, -float("inf"), compartor, ) def getMinParam( self, param, typeSpec: TypeSpec = None, absolute=True, generationNum=1, returnObj=False, ): """ Find the minimum value for the parameter in this container. See Also -------- getMaxParam : details """ compartor = lambda x, y: x < y return self._minMaxHelper(param, typeSpec, absolute, generationNum, returnObj, float("inf"), compartor) def _minMaxHelper( self, param, typeSpec: TypeSpec, absolute, generationNum, returnObj, startingNum, compartor, ): """Helper for getMinParam and getMaxParam.""" maxP = (startingNum, None) realVal = 0.0 objs = self.getChildren(generationNum=generationNum) for b in objs: if b.hasFlags(typeSpec): try: val = b.p[param] except parameters.UnknownParameterError: # No worries; not all Composite types are guaranteed to have the # relevant parameter. It might be a good idea to more strongly # type-check this, perhaps by passing the paramDef, # rather than its name? continue if val is None: # Neither bigger or smaller than anything (also illegal in Python3) continue if absolute: absVal = abs(val) else: absVal = val if compartor(absVal, maxP[0]): maxP = (absVal, b) realVal = val if returnObj: return realVal, maxP[1] else: return realVal def getChildParamValues(self, param): """Get the child parameter values in a numpy array.""" return np.array([child.p[param] for child in self]) def isFuel(self): """True if this is a fuel block.""" return self.hasFlags(Flags.FUEL) def containsHeavyMetal(self): """True if this has HM.""" return any(nucDir.isHeavyMetal(nucName) for nucName in self.getNuclides()) def getNuclides(self): """ Determine which nuclides are present in this armi object. 
Returns ------- list List of nuclide names that exist in this """ nucs = set() for child in self: nucs.update(child.getNuclides()) return nucs def getFissileMass(self): """Returns fissile mass in grams.""" return self.getMass(nuclideBases.NuclideBase.fissile) def getHMMass(self): """Returns heavy metal mass in grams.""" nucs = [] for nucName in self.getNuclides(): if nucDir.isHeavyMetal(nucName): nucs.append(nucName) mass = self.getMass(nucs) return mass def getHMMoles(self): """ Get the number of moles of heavy metal in this object. Notes ----- If an object is on a symmetry line, the volume reported by getVolume is reduced to reflect that the block is not wholly within the reactor. This reduction in volume reduces the reported HM moles. """ return self.getHMDens() / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * self.getVolume() def getHMDens(self): """ Compute the total heavy metal density of this object. Returns ------- hmDens : float The total heavy metal number (atom) density in atoms/bn-cm. 
""" hmNuclides = [nuclide for nuclide in self.getNuclides() if nucDir.isHeavyMetal(nuclide)] hmDens = np.sum(self.getNuclideNumberDensities(hmNuclides)) return hmDens def getFPMass(self): """Returns mass of fission products in this block in grams.""" nucs = [] for nucName in self.getNuclides(): if "LFP" in nucName: nucs.append(nucName) mass = self.getMass(nucs) return mass def getFuelMass(self): """Returns mass of fuel in grams.""" return sum((c.getFuelMass() for c in self)) def constituentReport(self): """A print out of some pertinent constituent information.""" from armi.utils import iterables elementz = self.nuclideBases.elements rows = [["Constituent", "HMFrac", "FuelFrac"]] columns = [-1, self.getHMMass(), self.getFuelMass()] for base_ele in ["U", "PU"]: total = sum([self.getMass(nuclide.name) for nuclide in elementz.bySymbol[base_ele]]) rows.append([base_ele, total, total]) fp_total = self.getFPMass() rows.append(["FP", fp_total, fp_total]) ma_nuclides = iterables.flatten( [ele.nuclides for ele in [elementz.byZ[key] for key in elementz.byZ.keys() if key > 94]] ) ma_total = sum([self.getMass(nuclide.name) for nuclide in ma_nuclides]) rows.append(["MA", ma_total, ma_total]) for i, row in enumerate(rows): for j, entry in enumerate(row): try: percent = entry / columns[j] * 100.0 rows[i][j] = percent or "-" except ZeroDivisionError: rows[i][j] = "NaN" except TypeError: pass # trying to divide the string name return "\n".join(["{:<14}{:<10}{:<10}".format(*row) for row in rows]) def getAtomicWeight(self): r""" Calculate the atomic weight of this object in g/mole of atoms. .. warning:: This is not the molecular weight, which is grams per mole of molecules (grams/gram-molecule). That requires knowledge of the chemical formula. Don't be surprised when you run this on UO2 and find it to be 90; there are a lot of Oxygen atoms in UO2. .. 
math:: A = \frac{\sum_i N_i A_i }{\sum_i N_i} """ numerator = 0.0 denominator = 0.0 numDensities = self.getNumberDensities() for nucName, nDen in numDensities.items(): atomicWeight = self.nuclideBases.byName[nucName].weight numerator += atomicWeight * nDen denominator += nDen return numerator / denominator def getMasses(self): """ Return a dictionary of masses indexed by their nuclide names. Notes ----- Implemented to get number densities and then convert to mass because getMass is too slow on a large tree. """ numDensities = self.getNumberDensities() vol = self.getVolume() return {nucName: densityTools.getMassInGrams(nucName, vol, ndens) for nucName, ndens in numDensities.items()} def getIntegratedMgFlux(self, adjoint=False, gamma=False): raise NotImplementedError def getMgFlux(self, adjoint=False, average=False, gamma=False): """ Return the multigroup neutron flux in [n/cm^2/s]. The first entry is the first energy group (fastest neutrons). Each additional group is the next energy group, as set in the ISOTXS library. On blocks, it is stored integrated over volume on <block>.p.mgFlux Parameters ---------- adjoint : bool, optional Return adjoint flux instead of real average : bool, optional If true, will return average flux between latest and previous. Doesn't work for pin detailed yet gamma : bool, optional Whether to return the neutron flux or the gamma flux. Returns ------- flux : np.ndarray multigroup neutron flux in [n/cm^2/s] """ if average: raise NotImplementedError( f"{self.__class__} class has no method for producing average MG flux -- tryusing blocks" ) volume = self.getVolume() return self.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma) / volume def removeMass(self, nucName, mass): self.addMass(nucName, -mass) def addMass(self, nucName, mass): """Add mass to a particular nuclide. Parameters ---------- nucName : str nuclide name e.g. 
'U235' mass : float mass in grams of nuclide to be added to this armi Object """ volume = self.getVolume() addedNumberDensity = densityTools.calculateNumberDensity(nucName, mass, volume) self.setNumberDensity(nucName, self.getNumberDensity(nucName) + addedNumberDensity) def addMasses(self, masses): """ Adds a vector of masses. Parameters ---------- masses : dict a dictionary of masses (g) indexed by nucNames (string) """ for nucName, mass in masses.items(): if mass: self.addMass(nucName, mass) def setMass(self, nucName, mass): """ Set the mass in an object by adjusting the ndens of the nuclides. Parameters ---------- nucName : str Nuclide name to set mass of mass : float Mass in grams to set. """ d = calculateNumberDensity(nucName, mass, self.getVolume()) self.setNumberDensity(nucName, d) def setMasses(self, masses): """ Set a vector of masses. Parameters ---------- masses : dict a dictionary of masses (g) indexed by nucNames (string) """ self.clearNumberDensities() for nucName, mass in masses.items(): self.setMass(nucName, mass) def getSymmetryFactor(self): """ Return a scaling factor due to symmetry on the area of the object or its children. See Also -------- armi.reactor.blocks.HexBlock.getSymmetryFactor : concrete implementation """ return 1.0 def getBoundingIndices(self): """ Find the 3-D index bounds (min, max) of all children in the spatial grid of this object. Returns ------- bounds : tuple ((minI, maxI), (minJ, maxJ), (minK, maxK)) """ minI = minJ = minK = float("inf") maxI = maxJ = maxK = -float("inf") for obj in self: i, j, k = obj.spatialLocator.getCompleteIndices() if i >= maxI: maxI = i if i <= minI: minI = i if j >= maxJ: maxJ = j if j <= minJ: minJ = j if k >= maxK: maxK = k if k <= minK: minK = k return ((minI, maxI), (minJ, maxJ), (minK, maxK)) def getComponentNames(self): r""" Get all unique component names of this Composite. Returns ------- set or str A set of all unique component names found in this Composite. 
""" return set(c.getName() for c in self.iterComponents()) def getComponentsOfShape(self, shapeClass): """ Return list of components in this block of a particular shape. Parameters ---------- shapeClass : Component The class of component, e.g. Circle, Helix, Hexagon, etc. Returns ------- param : list List of components in this block that are of the given shape. """ return [c for c in self.iterComponents() if isinstance(c, shapeClass)] def getComponentsOfMaterial(self, material=None, materialName=None): """ Return list of components in this block that are made of a particular material. Only one of the selectors may be used Parameters ---------- material : armi.materials.material.Material, optional The material to match materialName : str, optional The material name to match. Returns ------- componentsWithThisMat : list """ if materialName is None: materialName = material.getName() else: assert material is None, "Cannot call with more than one selector. Choose one or the other." componentsWithThisMat = [] for c in self.iterComponents(): if c.getProperties().getName() == materialName: componentsWithThisMat.append(c) return componentsWithThisMat def hasComponents(self, typeSpec: Union[TypeSpec, List[TypeSpec]], exact=False): """ Return true if components matching all TypeSpec exist in this object. Parameters ---------- typeSpec : Flags or iterable of Flags Component flags to check for """ # Wrap the typeSpec in a tuple if we got a scalar try: typeSpec = iter(typeSpec) except TypeError: typeSpec = (typeSpec,) return all(self.getComponents(t, exact) for t in typeSpec) def getComponentByName(self, name: str) -> "Component": """ Gets a particular component from this object, based on its name. Parameters ---------- name The blueprint name of the component to return Returns ------- Component, c, whose c.name matches name. 
""" components = [c for c in self.iterComponents() if c.name == name] nComp = len(components) if nComp == 0: return None elif nComp > 1: raise ValueError(f"More than one component named '{name}' in {self}") else: return components[0] def getComponent(self, typeSpec: TypeSpec, exact: bool = False, quiet: bool = True) -> Optional["Component"]: """ Get a particular component from this object. Be careful with multiple similar names in one object. Parameters ---------- typeSpec : flags.Flags or list of Flags The type specification of the component to return exact : boolean, optional Demand that the component flags be exactly equal to the typespec. Default: False quiet : boolean, optional Log if the component is not found. Default: True Returns ------- Component : The component that matches the criteria or None Raises ------ ValueError: more than one Component matches the typeSpec """ results = self.getComponents(typeSpec, exact=exact) if len(results) == 1: return results[0] elif not results: if not quiet: runLog.debug( f"No component matched {typeSpec} in {self}. Returning None", single=True, label=f"None component returned instead of {typeSpec}", ) return None else: raise ValueError(f"Multiple components match in {self} match typeSpec {typeSpec}: {results}") def getNumComponents(self, typeSpec: TypeSpec, exact=False): """ Get the number of components that have these flags, taking into account multiplicity. Useful for getting nPins even when there are pin detailed cases. Parameters ---------- typeSpec : Flags Expected flags of the component to get. e.g. Flags.FUEL Returns ------- total : int the number of components of this type in this object, including multiplicity. 
""" total = 0 for c in self.iterComponents(typeSpec, exact): total += int(c.getDimension("mult")) return total def setComponentDimensionsReport(self): """Makes a summary of the dimensions of the components in this object.""" reportGroups = [] for c in self.iterComponents(): reportGroups.append(c.setDimensionReport()) return reportGroups def expandAllElementalsToIsotopics(self): reactorNucs = self.getNuclides() for elemental in self.nuclideBases.where( lambda nb: isinstance(nb, nuclideBases.NaturalNuclideBase) and nb.name in reactorNucs ): self.expandElementalToIsotopics(elemental) def expandElementalToIsotopics(self, elementalNuclide): """ Expands the density of a specific elemental nuclides to its natural isotopics. Parameters ---------- elementalNuclide : :class:`armi.nucDirectory.nuclideBases.NaturalNuclide` natural nuclide to replace. """ natName = elementalNuclide.name for component in self.iterComponents(): elementalDensity = component.getNumberDensity(natName) if elementalDensity == 0.0: continue keepIndex = np.where(component.p.nuclides != natName.encode())[0] newNuclides = [nuc.decode() for nuc in component.p.nuclides[keepIndex]] newNDens = component.p.numberDensities[keepIndex] component.updateNumberDensities(dict(zip(newNuclides, newNDens)), wipe=True) # add in isotopics for natNuc in elementalNuclide.getNaturalIsotopics(): component.setNumberDensity(natNuc.name, elementalDensity * natNuc.abundance) def getAverageTempInC(self, typeSpec: TypeSpec = None, exact=False): """Return the average temperature of the ArmiObject in C by averaging all components.""" tempNumerator = 0.0 totalVol = 0.0 for component in self.iterComponents(typeSpec, exact): vol = component.getVolume() tempNumerator += component.temperatureInC * vol totalVol += vol return tempNumerator / totalVol def resolveLinkedDims(self, components): """Resolve link strings to links on all child components.""" for component in self.iterComponents(): component.resolveLinkedDims(components) def 
getDominantMaterial(self, typeSpec: TypeSpec = None, exact=False): """ Return the first sample of the most dominant material (by volume) in this object. Parameters ---------- typeSpec : Flags or iterable of Flags, optional The types of components to consider (e.g. ``[Flags.FUEL, Flags.CONTROL]``) exact : bool, optional Whether or not the TypeSpec is exact Returns ------- mat : armi.materials.material.Material the first instance of the most dominant material (by volume) in this object. See Also -------- getComponentsOfMaterial Gets components that are made of a particular material gatherMaterialsByVolume Classifies all materials by volume """ return getDominantMaterial([self], typeSpec, exact) class Composite(ArmiObject): """ An ArmiObject that has children. This is a fundamental ARMI state object that generally represents some piece of the nuclear reactor that is made up of other smaller pieces. This object can cache information about its children to help performance. **Details about spatial representation** Spatial representation of a ``Composite`` is handled through a combination of the ``spatialLocator`` and ``spatialGrid`` parameters. The ``spatialLocator`` is a numpy triple representing either: 1. Indices in the parent's ``spatialGrid`` (for lattices, etc.), used when the dtype is int. 2. Coordinates in the parent's universe in cm, used when the dtype is float. The top parent of any composite must have a coordinate-based ``spatialLocator``. For example, a Reactor an a Pump should both have coordinates based on how far apart they are. The traversal of indices and grids is recursive. The Reactor/Core/Assembly/Block model is handled by putting a 2-D grid (either Theta-R, Hex, or Cartesian) on the Core and individual 1-D Z-meshes on the assemblies. Then, Assemblies have 2-D spatialLocators (i,j,0) and Blocks have 1-D spatiaLocators (0,0,k). These get added to form the global indices. 
This way, if an assembly is moved, all the blocks immediately and naturally move with it. Individual children may have coordinate-based spatialLocators mixed with siblings in a grid. This allows mixing grid-representation with explicit representation, often useful in advanced assemblies and thermal reactors. The traversal of indices and grids is recursive. The Reactor/Core/Assembly/Block model is handled by putting a 2-D grid (either Theta-R, Hex, or Cartesian) on the Core and individual 1-D Z-meshes on the assemblies. Then, Assemblies have 2-D spatialLocators (i,j,0) and Blocks have 1-D spatiaLocators (0,0,k). These get added to form the global indices. This way, if an assembly is moved, all the blocks immediately and naturally move with it. Individual children may have coordinate-based spatialLocators mixed with siblings in a grid. This allows mixing grid-representation with explicit representation, often useful in advanced assemblies and thermal reactors. .. impl:: Composites are a physical part of the reactor in a hierarchical data model. :id: I_ARMI_CMP0 :implements: R_ARMI_CMP An ARMI reactor model is composed of collections of ARMIObject objects. This class is a child-class of the ARMIObject class and provides a structure allowing a reactor model to be composed of Composites. This class provides various methods to query and modify the hierarchical ARMI reactor model, including but not limited to, iterating, sorting, and adding or removing child Composites. """ _children: list["Composite"] def __init__(self, name): ArmiObject.__init__(self, name) self.childrenByLocator = {} self._children = [] def __getitem__(self, index): return self._children[index] def __setitem__(self, index, obj): raise NotImplementedError("Unsafe to insert elements directly") def __iter__(self): return iter(self._children) def __len__(self): return len(self._children) def __contains__(self, item): """ Membership check. 
This does not use quality checks for membership checking because equality operations can be fairly heavy. Rather, this only checks direct identity matches. """ return id(item) in set(id(c) for c in self._children) def sort(self): """Sort the children of this object.""" # sort the top-level children of this Composite self._children.sort() # recursively sort the children below it. for c in self._children: if issubclass(c.__class__, Composite): c.sort() def index(self, obj): """Obtain the list index of a particular child.""" return self._children.index(obj) def append(self, obj): """Append a child to this object.""" self._children.append(obj) def extend(self, seq): """Add a list of children to this object.""" for item in seq: self.add(item) def add(self, obj): """Add one new child.""" if obj in self: raise RuntimeError(f"Cannot add {obj} because it has already been added to {self}.") obj.parent = self self._children.append(obj) def remove(self, obj): """Remove a particular child.""" obj.parent = None obj.spatialLocator = obj.spatialLocator.detachedCopy() self._children.remove(obj) def moveTo(self, locator): """Move to specific location in parent. Often in a grid.""" if locator.grid.armiObject is not self.parent: raise ValueError( f"Cannot move {self} to a location in {locator.grid.armiObject}" ", which is not its parent ({self.parent})." 
        )
        self.spatialLocator = locator

    def insert(self, index, obj):
        """Insert an object into the list of children at a particular index."""
        if obj in self._children:
            raise RuntimeError(f"Cannot insert {obj} because it has already been added to {self}.")
        obj.parent = self
        self._children.insert(index, obj)

    def removeAll(self):
        """Remove all children."""
        # Iterate over a copy: ``remove`` mutates the underlying child list.
        for c in self.getChildren()[:]:
            self.remove(c)

    def setChildren(self, items):
        """Clear this container and fills it with new children."""
        self.removeAll()
        for c in items:
            self.add(c)

    def iterChildren(
        self,
        deep=False,
        generationNum=1,
        predicate: Optional[Callable[["Composite"], bool]] = None,
    ) -> Iterator["Composite"]:
        """Iterate over children objects of this composite.

        Parameters
        ----------
        deep : bool, optional
            If true, traverse the entire composite tree. Otherwise, go as far as ``generationNum``.
        generationNum: int, optional
            Produce composites at this depth. A depth of ``1`` includes children of ``self``,
            ``2`` is children of children, and so on.
        predicate: f(Composite) -> bool, optional
            Function to check on a composite before producing it. All items in the iteration
            will pass this check.

        Returns
        -------
        iterator of Composite

        See Also
        --------
        :meth:`getChildren` produces a list for situations where you need to perform multiple
        iterations or do list operations (append, indexing, sorting, containment, etc.)

        Composites are naturally iterable. The following are identical::

            >>> for child in c.getChildren():
            ...     pass

            >>> for child in c.iterChildren():
            ...     pass

            >>> for child in c:
            ...     pass

        If you do not need any depth-traversal, natural iteration should be sufficient.

        The :func:`filter` command may be sufficient if you do not wish to pass a predicate.
        The following are identical::

            >>> checker = lambda c: len(c.name) % 3

            >>> for child in c.getChildren(predicate=checker):
            ...     pass

            >>> for child in c.iterChildren(predicate=checker):
            ...     pass

            >>> for child in filter(checker, c):
            ...     pass

        If you're going to be doing traversal beyond the first generation, this method will
        help you.
        """
        # ``deep`` means "all generations", so combining it with an explicit generation is
        # ambiguous and rejected outright.
        if deep and generationNum > 1:
            raise RuntimeError("Cannot get children with a generation number set and the deep flag set")
        if predicate is None:
            checker = lambda _: True
        else:
            checker = predicate
        yield from self._iterChildren(deep, generationNum, checker)

    def _iterChildren(
        self, deep: bool, generationNum: int, checker: Callable[["Composite"], bool]
    ) -> Iterator["Composite"]:
        # Produce this generation's (filtered) children first, then recurse into deeper
        # generations when requested. Note the filter applies at every level, but failing the
        # filter does not prune a child's own descendants.
        if deep or generationNum == 1:
            yield from filter(checker, self)
        if deep or generationNum > 1:
            for c in self:
                yield from c._iterChildren(deep, generationNum - 1, checker)

    def iterChildrenWithMaterials(self, *args, **kwargs) -> Iterator:
        """Produce an iterator that also includes any materials found on descendants.

        Arguments are forwarded to :meth:`iterChildren` and control the depth of traversal and
        filtering of objects.

        This is useful for sending state across MPI tasks where you need a more full
        representation of the composite tree. Which includes the materials attached to
        components.
        """
        children = self.iterChildren(*args, **kwargs)
        # Each entry is either (c, ) or (c, c.material) if the child has a material attribute
        stitched = map(
            lambda c: ((c,) if getattr(c, "material", None) is None else (c, c.material)),
            children,
        )
        # Iterator that iterates over each "sub" iterator. If we have ((c0, ), (c1, m1)), this
        # produces a single iterator of (c0, c1, m1)
        return itertools.chain.from_iterable(stitched)

    def getChildren(
        self,
        deep=False,
        generationNum=1,
        includeMaterials=False,
        predicate: Optional[Callable[["Composite"], bool]] = None,
    ) -> list["Composite"]:
        """
        Return the children objects of this composite.

        .. impl:: Composites have children in the hierarchical data model.
            :id: I_ARMI_CMP1
            :implements: R_ARMI_CMP

            This method retrieves all children within a given Composite object. Children of any
            generation can be retrieved. This is achieved by visiting all children and calling
            this method recursively for each generation requested.

            If the method is called with ``includeMaterials``, it will additionally include
            information about the material for each child. If a function is supplied as the
            ``predicate`` argument, then this method will be used to evaluate all children as a
            filter to include or not. For example, if the caller of this method only desires
            children with a certain flag, or children which only contain a certain material,
            then the ``predicate`` function can be used to perform this filtering.

        Parameters
        ----------
        deep : boolean, optional
            Return all children of all levels.
        generationNum : int, optional
            Which generation to return. 1 means direct children, 2 means children of children.
            Setting this parameter will only return children of this generation, not their
            parents. Default: Just return direct children.
        includeMaterials : bool, optional
            Include the material properties
        predicate : callable, optional
            An optional unary predicate to use for filtering results. This can be used to
            request children of specific types, or with desired attributes. Not all ArmiObjects
            have the same methods and members, so care should be taken to make sure that the
            predicate executes gracefully in all cases (e.g., use
            ``getattr(obj, "attribute", None)`` to access instance attributes). Failure to meet
            the predicate only affects the object in question; children will still be
            considered.

        See Also
        --------
        :meth:`iterChildren` if you do not need to produce a full list, e.g., just iterating
        over objects.

        Examples
        --------
        >>> obj.getChildren()
        [child1, child2, child3]

        >>> obj.getChildren(generationNum=2)
        [grandchild1, grandchild2, grandchild3]

        >>> obj.getChildren(deep=True)
        [child1, child2, child3, grandchild1, grandchild2, grandchild3]

        # Assuming that grandchild1 and grandchild3 are Component objects
        >>> obj.getChildren(deep=True, predicate=lambda o: isinstance(o, Component))
        [grandchild1, grandchild3]
        """
        if not includeMaterials:
            items = self.iterChildren(deep=deep, generationNum=generationNum, predicate=predicate)
        else:
            items = self.iterChildrenWithMaterials(deep=deep, generationNum=generationNum, predicate=predicate)
        return list(items)

    def getComponents(self, typeSpec: TypeSpec = None, exact=False):
        """
        Return a list of Component objects within this Composite.

        Parameters
        ----------
        typeSpec : TypeSpec
            Component flags. Will restrict Components to specific ones matching the flags
            specified.
        exact : bool, optional
            Only match exact component labels (names). If True, 'coolant' will not match
            'interCoolant'. This has no impact if typeSpec is None.

        Returns
        -------
        list of Component
            items matching typeSpec and exact criteria
        """
        return list(self.iterComponents(typeSpec, exact))
Returns ------- Component The first item matching typeSpec and exact criteria """ try: return next(self.iterComponents(typeSpec, exact)) except StopIteration: raise ValueError(f"No component matches {typeSpec} {exact}") def iterComponents(self, typeSpec: TypeSpec = None, exact: bool = False) -> Iterator["Component"]: """ Return an iterator of armi.reactor.component.Component objects within this Composite. Parameters ---------- typeSpec : TypeSpec Component flags. Will restrict Components to specific ones matching the flags specified. exact : bool, optional Only match exact component labels (names). If True, 'coolant' will not match 'interCoolant'. This has no impact if typeSpec is None. Returns ------- iterator of Component items matching typeSpec and exact criteria """ return (c for child in self for c in child.iterComponents(typeSpec, exact)) def syncMpiState(self): """ Synchronize all parameters of this object and all children to all worker nodes over the network using MPI. In parallelized runs, if each process has its own copy of the entire reactor hierarchy, this method synchronizes the state of all parameters on all objects. .. impl:: Composites can be synchronized across MPI threads. :id: I_ARMI_CMP_MPI :implements: R_ARMI_CMP_MPI Parameters need to be handled properly during parallel code execution. This method synchronizes all parameters of the composite object across all processes by cycling through all the children of the Composite and ensuring that their parameters are properly synchronized. If it fails to synchronize, an error message is displayed which alerts the user to which Composite has inconsistent data across the processes. Returns ------- int number of parameters synchronized over all components """ if context.MPI_SIZE == 1: return 0 startTime = timeit.default_timer() # sync parameters... 
genItems = itertools.chain( [self], self.iterChildrenWithMaterials(deep=True), ) allComps = [c for c in genItems if hasattr(c, "p")] sendBuf = [c.p.getSyncData() for c in allComps] runLog.debug(f"syncMpiState has {len(allComps)} comps") try: context.MPI_COMM.barrier() # sync up allGatherTime = -timeit.default_timer() allSyncData = context.MPI_COMM.allgather(sendBuf) allGatherTime += timeit.default_timer() except: msg = ["Failure while trying to allgather."] for ci, compData in enumerate(sendBuf): if compData is not None: msg += [f"sendBuf[{ci}]: {compData}"] runLog.error("\n".join(msg)) raise # key is (comp, paramName) value is conflicting nodes errors = collections.defaultdict(list) syncCount = 0 compsPerNode = {len(nodeSyncData) for nodeSyncData in allSyncData} if len(compsPerNode) != 1: raise ValueError(f"The workers have different reactor sizes! comp lengths: {compsPerNode}") for ci, comp in enumerate(allComps): if not hasattr(comp, "_syncParameters"): # materials don't have Parameters to sync continue data = (nodeSyncData[ci] for nodeSyncData in allSyncData) syncCount += comp._syncParameters(data, errors) if errors: errorData = sorted( (str(comp), comp.__class__.__name__, str(comp.parent), paramName, nodes) for (comp, paramName), nodes in errors.items() ) message = "Synchronization failed due to overlapping data. Only the first duplicates are listed\n{}".format( tabulate.tabulate( errorData, headers=[ "Composite", "Composite Type", "Composite Parent", "ParameterName", "NodeRanks", ], ) ) raise ValueError(message) self._markSynchronized() runLog.extra( f"Synchronized reactor over MPI in {timeit.default_timer() - startTime:.4f} seconds" f", {allGatherTime:.4f} seconds in MPI allgather. 
count:{syncCount}" ) return syncCount def _syncParameters(self, allSyncData, errors): """Ensure no overlap with syncedKeys, use errors to report overlapping data.""" syncedKeys = set() for nodeRank, nodeSyncData in enumerate(allSyncData): if nodeSyncData is None: continue for key, val in nodeSyncData.items(): if key in syncedKeys: # Edge Case: a Composite object is flagged as out of sync, and this parameter # was also globally modified and readjusted to the original value. curVal = self.p[key] if isinstance(val, np.ndarray) or isinstance(curVal, np.ndarray): if (val != curVal).any(): errors[self, key].append(nodeRank) elif curVal != val: errors[self, key].append(nodeRank) runLog.error(f"in {self}, {key} differ ({curVal} != {val})") continue syncedKeys.add(key) self.p[key] = val self.clearCache() return len(syncedKeys) def _markSynchronized(self): """ Mark the composite and child parameters as synchronized across MPI. We clear SINCE_LAST_DISTRIBUTE_STATE so that anything after this point will set the SINCE_LAST_DISTRIBUTE_STATE flag, indicating it has been modified SINCE_LAST_DISTRIBUTE_STATE. """ paramDefs = set() items = itertools.chain( [self], self.iterChildrenWithMaterials(deep=True), ) for child in items: # Materials don't have a "p" / Parameter attribute to sync if hasattr(child, "p"): # below reads as: assigned & everything_but(SINCE_LAST_DISTRIBUTE_STATE) child.p.assigned &= ~parameters.SINCE_LAST_DISTRIBUTE_STATE paramDefs.add(child.p.paramDefs) for paramDef in paramDefs: paramDef.resetAssignmentFlag(parameters.SINCE_LAST_DISTRIBUTE_STATE) def retainState(self, paramsToApply=None): """ Restores a state before and after some operation. Parameters ---------- paramsToApply : iterable Parameters that should be applied to the state after existing the state retainer. All others will be reverted to their values upon entering. Notes ----- This should be used in a `with` statement. 
    def retainState(self, paramsToApply=None):
        """
        Restores a state before and after some operation.

        Parameters
        ----------
        paramsToApply : iterable
            Parameters that should be applied to the state after exiting the state retainer.
            All others will be reverted to their values upon entering.

        Notes
        -----
        This should be used in a `with` statement.
        """
        return StateRetainer(self, paramsToApply)

    def backUp(self):
        """
        Create and store a backup of the state.

        This needed to be overridden due to linked components which actually have a parameter
        value of another ARMI component.
        """
        self._backupCache = (self.cached, self._backupCache)
        self.cached = {}  # don't .clear(), using reference above!
        self.p.backUp()
        if self.spatialGrid:
            self.spatialGrid.backUp()

    def restoreBackup(self, paramsToApply):
        """
        Restore the parameters from previously created backup.

        Parameters
        ----------
        paramsToApply : list of ParameterDefinitions
            restores the state of all parameters not in `paramsToApply`
        """
        self.p.restoreBackup(paramsToApply)
        self.cached, self._backupCache = self._backupCache
        if self.spatialGrid:
            self.spatialGrid.restoreBackup()

    def getLumpedFissionProductsIfNecessary(self, nuclides=None):
        """Return Lumped Fission Product objects that belong to this object or any of its children."""
        if self.requiresLumpedFissionProducts(nuclides=nuclides):
            lfps = self.getLumpedFissionProductCollection()
            if lfps is None:
                # NOTE(review): this returns after consulting only the FIRST child -- presumably
                # any child's collection is representative of the whole. Confirm before relying
                # on it for heterogeneous children.
                for c in self:
                    return c.getLumpedFissionProductsIfNecessary(nuclides=nuclides)
            else:
                return lfps
        # There are no lumped fission products in the batch so if you use a
        # dictionary no one will know the difference
        return {}

    def getLumpedFissionProductCollection(self):
        """
        Get collection of LFP objects.

        Will work for global or block-level LFP models.

        Returns
        -------
        lfps : object
            lfpName keys, lfp object values

        See Also
        --------
        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct
        """
        lfps = ArmiObject.getLumpedFissionProductCollection(self)
        if lfps is None:
            # Fall back to the first child that has a collection of its own.
            for c in self:
                lfps = c.getLumpedFissionProductCollection()
                if lfps is not None:
                    break

        return lfps

    def requiresLumpedFissionProducts(self, nuclides=None):
        """True if any of the nuclides in this object are Lumped nuclides."""
        if nuclides is None:
            nuclides = self.getNuclides()

        # ruff: noqa: SIM110
        for nucName in nuclides:
            if isinstance(self.nuclideBases.byName[nucName], nuclideBases.LumpNuclideBase):
                return True

        return False

    def getIntegratedMgFlux(self, adjoint=False, gamma=False):
        """
        Returns the multigroup neutron tracklength in [n-cm/s].

        The first entry is the first energy group (fastest neutrons). Each additional group is
        the next energy group, as set in the ISOTXS library.

        Parameters
        ----------
        adjoint : bool, optional
            Return adjoint flux instead of real
        gamma : bool, optional
            Whether to return the neutron flux or the gamma flux.

        Returns
        -------
        integratedFlux : np.ndarray
            multigroup neutron tracklength in [n-cm/s]
        """
        # Start with a length-1 zero array; numpy broadcasting grows it to the group structure
        # of the first child that reports a flux.
        integratedMgFlux = np.zeros(1)
        for c in self:
            mgFlux = c.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma)
            if mgFlux is not None:
                integratedMgFlux = integratedMgFlux + mgFlux
        return integratedMgFlux
    def _getReactionRates(self, nucName, nDensity=None):
        """
        Wrapper around logic to get reaction rates for a certain nuclide, to handle any errors.

        Parameters
        ----------
        nucName : str
            nuclide name -- e.g. 'U235'
        nDensity : float
            number density

        Returns
        -------
        rxnRates : dict
            dictionary of reaction rates (rxn/s) for nG, nF, n2n, nA and nP

        Notes
        -----
        If you set nDensity to 1/CM2_PER_BARN this makes 1 group cross section generation
        easier. This method is not designed to work on ``Assembly``, ``Core``, or anything
        higher on the hierarchy than ``Block``.
        """
        # Local imports avoid a circular dependency between composites and reactors/blocks.
        from armi.reactor.blocks import Block
        from armi.reactor.reactors import Core

        if nDensity is None:
            nDensity = self.getNumberDensity(nucName)
        try:
            return self._getReactionRateDict(
                nucName,
                self.getAncestor(lambda c: isinstance(c, Core)).lib,
                self.getAncestor(lambda x: isinstance(x, Block)).getMicroSuffix(),
                self.getIntegratedMgFlux(),
                nDensity,
            )
        except AttributeError:
            # getAncestor returned None (no Core/Block above us), so .lib/.getMicroSuffix failed
            runLog.warning(
                f"Object {self} does not belong to a core and so has no reaction rates.",
                single=True,
            )
            return {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0}
        except KeyError:
            runLog.warning(
                f"Attempting to get a reaction rate on an isotope not in the lib {nucName}.",
                single=True,
            )
            return {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0}

    def _getReactionRateDict(self, nucName, lib, xsSuffix, mgFlux, nDens):
        """
        Helper to get the reaction rates of a certain nuclide on one ArmiObject.

        Parameters
        ----------
        nucName : str
            nuclide name -- e.g. 'U235', 'PU239', etc. Not to be confused with the nuclide
            _label_, see the nucDirectory module for a description of the difference.
        lib : isotxs
            cross section library
        xsSuffix : str
            cross section suffix, consisting of the type followed by the burnup group, e.g.
            'AB' for the second burnup group of type A
        mgFlux : np.ndarray
            integrated mgFlux (n-cm/s)
        nDens : float
            number density (atom/bn-cm)

        Returns
        -------
        rxnRates - dict
            dictionary of reaction rates (rxn/s) for nG, nF, n2n, nA and nP

        Notes
        -----
        Assume there is no n3n cross section in ISOTXS
        """
        nucLabel = self.nuclideBases.byName[nucName].label
        key = f"{nucLabel}{xsSuffix}"
        libNuc = lib[key]
        # n3n is initialized to 0 because ISOTXS carries no n3n cross section (see Notes).
        rxnRates = {"n3n": 0}
        for rxName, mgXSs in [
            ("nG", libNuc.micros.nGamma),
            ("nF", libNuc.micros.fission),
            ("n2n", libNuc.micros.n2n),
            ("nA", libNuc.micros.nalph),
            ("nP", libNuc.micros.np),
        ]:
            # rate = N * sum_g(sigma_g * phi_g); mgFlux is already volume-integrated
            rxnRates[rxName] = nDens * sum(mgXSs * mgFlux)

        return rxnRates

    def getReactionRates(self, nucName, nDensity=None):
        """
        Get the reaction rates of a certain nuclide on this ArmiObject.

        Parameters
        ----------
        nucName : str
            nuclide name -- e.g. 'U235'
        nDensity : float
            number Density

        Returns
        -------
        rxnRates : dict
            reaction rates (1/s) for nG, nF, n2n, nA and nP

        Notes
        -----
        This is volume integrated NOT (1/cm3-s).

        If you set nDensity to 1 this makes 1-group cross section generation easier.
        """
        from armi.reactor.components import Component

        # find child objects
        objects = self.getChildren(deep=True, predicate=lambda x: isinstance(x, Component))
        if not len(objects):
            # This object has no Component descendants; treat it as the leaf itself.
            objects = [self]

        # The reaction rates for this object is the sum of its children
        rxnRates = {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0, "n3n": 0}
        for armiObject in objects:
            for rxName, val in armiObject._getReactionRates(nucName, nDensity).items():
                rxnRates[rxName] += val

        return rxnRates

    def printContents(self, includeNuclides=True):
        """Display information about all the comprising children in this object."""
        runLog.important(self)
        for c in self:
            c.printContents(includeNuclides=includeNuclides)

    def _genChildByLocationLookupTable(self):
        """Update the childByLocation lookup table."""
        runLog.extra("Generating location-to-child lookup table.")
        self.childrenByLocator = {}
        for child in self:
            self.childrenByLocator[child.spatialLocator] = child

    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
        """
        Get sum circle bound.

        Used to roughly approximate relative size vs. other objects
        """
        getter = operator.methodcaller("getBoundingCircleOuterDiameter", Tc, cold)
        return sum(map(getter, self))

    def getPuMoles(self):
        """Returns total number of moles of Pu isotopes."""
        # Z=94 is plutonium; gather every Pu isotope known to the nuclide directory.
        nucNames = [nuc.name for nuc in self.nuclideBases.elements.byZ[94].nuclides]
        puN = np.sum(self.getNuclideNumberDensities(nucNames))

        return puN / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * self.getVolume()
class StateRetainer:
    """
    Retains state during some operations.

    This can be used to temporarily cache state, perform an operation, extract some info, and
    then revert back to the original state.

    * A state retainer is faster than restoring state from a database as it reduces the number
      of IO reads; however, it does use more memory.
    * This can be used on any object within the composite pattern via with
      ``[rabc].retainState([list], [of], [parameters], [to], [retain]):``. Use on an object up
      in the hierarchy applies to all objects below as well.
    * This is intended to work across MPI, so that if you were to broadcast the reactor the
      state would be correct; however the exact implication on ``parameters`` may be unclear.
    """

    def __init__(self, composite: Composite, paramsToApply=None):
        """
        Create an instance of a StateRetainer.

        Parameters
        ----------
        composite: Composite
            composite object to retain state (recursively)
        paramsToApply: iterable of parameters.Parameter
            Iterable of parameters.Parameter to retain updated values after `__exit__`. All
            other parameters are reverted to the original state, i.e. retained at the original
            value.
        """
        self.composite = composite
        self.paramsToApply = set(paramsToApply or [])

    def __enter__(self):
        self._enterExitHelper(lambda obj: obj.backUp())
        return self

    def __exit__(self, *args):
        self._enterExitHelper(lambda obj: obj.restoreBackup(self.paramsToApply))

    def _enterExitHelper(self, func):
        """Helper method for ``__enter__`` and ``__exit__``.

        ``func`` is a lambda to either ``backUp()`` or ``restoreBackup()``.
        """
        paramDefs = set()
        items = itertools.chain(
            (self.composite,),
            self.composite.iterChildrenWithMaterials(deep=True),
        )
        for child in items:
            if hasattr(child, "p"):  # materials don't have Parameters
                paramDefs.update(child.p.paramDefs)
            func(child)
        for paramDef in paramDefs:
            func(paramDef)


def gatherMaterialsByVolume(objects: List[ArmiObject], typeSpec: TypeSpec = None, exact=False):
    """
    Compute the total volume of each material in a set of objects and give samples.

    Parameters
    ----------
    objects : list of ArmiObject
        Objects to look within. This argument allows clients to search though some subset of
        the three (e.g. when you're looking for all CLADDING components within FUEL blocks)
    typeSpec : TypeSpec
        Flags for the components to look at
    exact : bool
        Whether or not the TypeSpec is exact

    Returns
    -------
    volumes : dict
        material name keys, total volume (summed over all matching components) values
    samples : dict
        material name keys, one sample Material instance per name

    Notes
    -----
    This helper method is outside the main ArmiObject tree for the special clients that need to
    filter both by container type (e.g. Block type) with one set of flags, and Components with
    another set of flags.

    .. warning:: This is a **composition** related helper method that will likely be filed into
        classes/modules that deal specifically with the composition of things in the data
        model. Thus clients that use it from here should expect to need updates soon.
    """
    volumes = {}
    samples = {}
    for obj in objects:
        for c in obj.iterComponents(typeSpec, exact):
            vol = c.getVolume()
            matName = c.material.getName()
            volumes[matName] = volumes.get(matName, 0.0) + vol
            if matName not in samples:
                # Keep the first component's material as the representative sample.
                samples[matName] = c.material

    return volumes, samples


def getDominantMaterial(objects: List[ArmiObject], typeSpec: TypeSpec = None, exact=False):
    """
    Return the first sample of the most dominant material (by volume) in a set of objects.

    Warning
    -------
    This is a **composition** related helper method that will likely be filed into
    classes/modules that deal specifically with the composition of things in the data model.
    Thus clients that use it from here should expect to need updates soon.
    """
    volumes, samples = gatherMaterialsByVolume(objects, typeSpec, exact)

    if volumes:
        # Find the material name with the maximum total volume. ``max`` is O(n) and clearer
        # than sorting the whole dict just to take the last element.
        # NOTE: on an exact volume tie this picks the first material encountered.
        maxMatName = max(volumes.items(), key=lambda item: item[1])[0]
        # return this material. Note that if this material has properties like Zr-frac,
        # enrichment, etc. then this will just return one in the batch, not an average.
        return samples[maxMatName]

    return None
return samples[maxMatName] return None ================================================ FILE: armi/reactor/converters/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Contains code that can convert reactor models from one geometry to another. Conversions between geometries are often needed in advance of a certain type of physics calculation that cannot be done on the full 3-D detailed geometry. For example, sometimes an analyst wants to convert a reactor from 3-D to R-Z in advance of a very fast running neutronics solution. Converting from one geometry to another while properly conserving mass or some other parameter manually is tedious and error prone. So it's well-suited for automation with ARMI. This subpackage contains code that does a certain subset of conversions along those lines. .. warning:: Geometry conversions are relatively design-specific, so the converters in this subpackage are relatively limited in scope as to what they can convert, largely targeting hexagonal pin-type assemblies. If your geometry is different from this, this code is best considered as examples and starting points, as you will likely need to write your own converters in your own plugin. Of course, if your converter is sufficiently generic, we welcome it here. In other words, some of these converters may at some point migrate to a more design-specific plugin. 
See Also -------- armi.cases.inputModifiers Modify input files and re-write them. """ ================================================ FILE: armi/reactor/converters/axialExpansionChanger/__init__.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Enable component-wise axial expansion for assemblies and/or a reactor.""" # ruff: noqa: F401 from armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import ( AssemblyAxialLinkage, ) from armi.reactor.converters.axialExpansionChanger.axialExpansionChanger import ( AxialExpansionChanger, makeAssemsAbleToSnapToUniformMesh, ) from armi.reactor.converters.axialExpansionChanger.expansionData import ( ExpansionData, getSolidComponents, iterSolidComponents, ) ================================================ FILE: armi/reactor/converters/axialExpansionChanger/assemblyAxialLinkage.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import functools
import itertools
import typing
from textwrap import dedent

from armi import runLog
from armi.reactor.blocks import Block
from armi.reactor.components import Component, UnshapedComponent
from armi.reactor.converters.axialExpansionChanger.expansionData import (
    iterSolidComponents,
)
from armi.reactor.grids import MultiIndexLocation

if typing.TYPE_CHECKING:
    from armi.reactor.assemblies import Assembly


def areAxiallyLinked(componentA: Component, componentB: Component) -> bool:
    """Determine axial component linkage for two components.

    Parameters
    ----------
    componentA : :py:class:`Component <armi.reactor.components.component.Component>`
        component of interest
    componentB : :py:class:`Component <armi.reactor.components.component.Component>`
        component to compare and see if is linked to componentA

    Notes
    -----
    If componentA and componentB are both solids and the same type, geometric overlap can be
    checked via getCircleInnerDiameter and getBoundingCircleOuterDiameter. Four different cases
    are accounted for. If they do not meet these initial criteria, linkage is assumed to be
    False.

    Case #1: Unshaped Components. There is no way to determine overlap so they're assumed to be
    not linked.

    Case #2: Blocks with specified grids. If componentA and componentB have identical grid
    indices (cannot be a partial case, ALL of the indices must be contained by one or the
    other), then overlap can be checked.

    Case #3: If Component position is not specified via a grid, the multiplicity is checked.
    If consistent, they are assumed to be in the same positions and their overlap is checked.

    Case #4: Components are either not both solids, are not the same type, or Cases 1-3 are
    not True.

    Returns
    -------
    linked : bool
        whether componentA and componentB are axially linked to one another
    """
    ## Cases 4
    linked = False
    if isinstance(componentA, type(componentB)) and (
        componentA.containsSolidMaterial() and componentB.containsSolidMaterial()
    ):
        if isinstance(componentA, UnshapedComponent):
            ## Case 1
            runLog.warning(
                f"Components {componentA} and {componentB} are UnshapedComponents "
                "and do not have 'getCircleInnerDiameter' or getBoundingCircleOuterDiameter methods; "
                "nor is it physical to do so. Instead of crashing and raising an error, "
                "they are going to be assumed to not be linked.",
                single=True,
            )
        elif isinstance(componentA.spatialLocator, MultiIndexLocation) and isinstance(
            componentB.spatialLocator, MultiIndexLocation
        ):
            ## Case 2
            # Grid indices are compared as sets: linkage requires the exact same positions,
            # not a partial overlap.
            fromA = set(tuple(index) for index in componentA.spatialLocator.indices)
            fromB = set(tuple(index) for index in componentB.spatialLocator.indices)
            if fromA == fromB:
                linked = _checkOverlap(componentA, componentB)
        elif componentA.getDimension("mult") == componentB.getDimension("mult"):
            ## Case 3
            linked = _checkOverlap(componentA, componentB)

    return linked


def _checkOverlap(componentA: Component, componentB: Component) -> bool:
    """Check two components for geometric overlap by seeing if one can fit within the other.

    Notes
    -----
    When component dimensions are retrieved, cold=True to ensure that dimensions are evaluated
    at cold/input temperatures. At temperature, solid-solid interfaces in ARMI may produce
    slight overlaps due to thermal expansion. Handling these potential overlaps is out of
    scope.
    """
    idA = componentA.getCircleInnerDiameter(cold=True)
    odA = componentA.getBoundingCircleOuterDiameter(cold=True)
    idB = componentB.getCircleInnerDiameter(cold=True)
    odB = componentB.getBoundingCircleOuterDiameter(cold=True)
    # Overlap exists iff the larger of the two inner bores fits inside the smaller of the two
    # outer envelopes.
    biggerID = max(idA, idB)
    smallerOD = min(odA, odB)
    return biggerID < smallerOD


# Make a generic type so we can "template" the axial link class based on what could be
# above/below a thing
Comp = typing.TypeVar("Comp", Block, Component)


@dataclasses.dataclass
class AxialLink(typing.Generic[Comp]):
    """Small class for named references to objects above and below a specific object.

    Axial expansion in ARMI works by identifying what objects occupy the same axial space. For
    components in blocks, identify which above and below axially align. This is used to
    determine what, if any, mass needs to be re-assigned across blocks during expansion. For
    blocks, the linking determines what blocks need to move as a result of a specific block's
    axial expansion.

    Attributes
    ----------
    lower : Composite or None
        Object below, if any.
    upper : Composite or None
        Object above, if any.

    Notes
    -----
    This class is "templated" by the type of composite that could be assigned and fetched. A
    block-to-block linkage could be type-hinted via ``AxialLink[Block]`` or
    ``AxialLink[Component]`` for component-to-component link.

    See Also
    --------
    * :attr:`AxialAssemblyLinkage.linkedBlocks`
    * :attr:`AxialAssemblyLinkage.linkedComponents`
    """

    lower: typing.Optional[Comp] = dataclasses.field(default=None)
    upper: typing.Optional[Comp] = dataclasses.field(default=None)
class AssemblyAxialLinkage:
    """Determines and stores the block- and component-wise axial linkage for an assembly.

    Parameters
    ----------
    assem : armi.reactor.assemblies.Assembly
        Assembly to be linked

    Attributes
    ----------
    a : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`
        reference to original assembly; is directly modified/changed during expansion.
    linkedBlocks : dict
        Keys are blocks in the assembly. Their values are :class:`AxialLink` with ``upper``
        and ``lower`` attributes for the blocks potentially above and below this block.
    linkedComponents : dict
        Keys are solid components in the assembly. Their values are :class:`AxialLink` with
        ``upper`` and ``lower`` attributes for the solid components potentially above and
        below this block.
    """

    linkedBlocks: dict[Block, AxialLink[Block]]
    linkedComponents: dict[Component, AxialLink[Component]]

    def __init__(self, assem: "Assembly"):
        self.a = assem
        self.linkedBlocks = self.getLinkedBlocks(assem)
        self.linkedComponents = {}
        self._determineAxialLinkage()

    @classmethod
    def getLinkedBlocks(
        cls,
        blocks: typing.Sequence[Block],
    ) -> dict[Block, AxialLink[Block]]:
        """Produce a mapping showing how blocks are linked.

        Parameters
        ----------
        blocks : sequence of armi.reactor.blocks.Block
            Ordered sequence of blocks from bottom to top. Could just as easily be an
            :class:`armi.reactor.assemblies.Assembly`.

        Returns
        -------
        dict[Block, AxialLink[Block]]
            Dictionary where keys are individual blocks and their corresponding values point
            to blocks above and below.

        Raises
        ------
        ValueError
            If ``blocks`` is empty.
        """
        nBlocks = len(blocks)
        if nBlocks:
            return cls._getLinkedBlocks(blocks, nBlocks)
        raise ValueError("No blocks passed. Cannot determine links")

    @staticmethod
    def _getLinkedBlocks(blocks: typing.Sequence[Block], nBlocks: int) -> dict[Block, AxialLink[Block]]:
        # Use islice to avoid making intermediate lists of subsequences of blocks.
        # ``lower`` is the sequence shifted down (None first); ``upper`` shifted up (None last)
        lower = itertools.chain((None,), itertools.islice(blocks, 0, nBlocks - 1))
        upper = itertools.chain(itertools.islice(blocks, 1, None), (None,))
        links = {}
        for low, mid, high in zip(lower, blocks, upper):
            links[mid] = AxialLink(lower=low, upper=high)
        return links

    def _determineAxialLinkage(self):
        """Gets the block and component based linkage."""
        for b in self.a:
            for c in iterSolidComponents(b):
                self._getLinkedComponents(b, c)

    def _findComponentLinkedTo(self, c: Component, otherBlock: typing.Optional[Block]) -> typing.Optional[Component]:
        # No neighboring block (top or bottom of the assembly) means nothing to link against.
        if otherBlock is None:
            return None
        candidate = None
        # Iterate over all solid components in the other block that are linked to this one
        areLinked = functools.partial(self.areAxiallyLinked, c)
        for otherComp in filter(areLinked, iterSolidComponents(otherBlock)):
            if candidate is None:
                candidate = otherComp
            else:
                # More than one axially-linked candidate is ambiguous and indicates a
                # blueprint error; fail loudly.
                errMsg = f"""
                Multiple component axial linkages have been found for the following component!

                Component {c}
                -> Block {c.parent}
                -> Assembly {c.parent.parent}

                This is indicative of an error in the blueprints!

                Candidate components in {otherBlock}:
                {candidate}
                {otherComp}
                """
                runLog.error(msg=dedent(errMsg))
                raise RuntimeError(dedent(errMsg))
        return candidate

    def _getLinkedComponents(self, b: Block, c: Component):
        """Retrieve the axial linkage for component c.

        Parameters
        ----------
        b : :py:class:`Block <armi.reactor.blocks.Block>`
            key to access blocks containing linked components
        c : :py:class:`Component <armi.reactor.components.component.Component>`
            component to determine axial linkage for

        Raises
        ------
        RuntimeError
            multiple candidate components are found to be axially linked to a component
        """
        linkedBlocks = self.linkedBlocks[b]
        lowerC = self._findComponentLinkedTo(c, linkedBlocks.lower)
        upperC = self._findComponentLinkedTo(c, linkedBlocks.upper)
        lstLinkedC = AxialLink(lowerC, upperC)
        self.linkedComponents[c] = lstLinkedC

        if self.linkedBlocks[b].lower is None and lstLinkedC.lower is None:
            runLog.debug(
                f"Assembly {self.a}, Block {b}, Component {c} has nothing linked below it!",
                single=True,
            )
        if self.linkedBlocks[b].upper is None and lstLinkedC.upper is None:
            runLog.debug(
                f"Assembly {self.a}, Block {b}, Component {c} has nothing linked above it!",
                single=True,
            )

    @staticmethod
    def areAxiallyLinked(componentA: Component, componentB: Component) -> bool:
        """Check if two components are axially linked.

        Parameters
        ----------
        componentA : :py:class:`Component <armi.reactor.components.component.Component>`
            component of interest
        componentB : :py:class:`Component <armi.reactor.components.component.Component>`
            component to compare and see if is linked to componentA

        Returns
        -------
        bool
            Status of linkage check

        See Also
        --------
        :func:`areAxiallyLinked` for more details, including the criteria for considering
        components linked. This method is provided to allow subclasses the ability to override
        the linkage check.
        """
        return areAxiallyLinked(componentA, componentB)


================================================ FILE: armi/reactor/converters/axialExpansionChanger/axialExpansionChanger.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Enable component-wise axial expansion for assemblies and/or a reactor.""" import typing from textwrap import dedent from numpy import array from armi import runLog from armi.materials.material import Fluid from armi.reactor.assemblies import Assembly from armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import ( AssemblyAxialLinkage, ) from armi.reactor.converters.axialExpansionChanger.expansionData import ( ExpansionData, iterSolidComponents, ) from armi.reactor.converters.axialExpansionChanger.redistributeMass import RedistributeMass from armi.reactor.flags import Flags from armi.utils.customExceptions import InputError if typing.TYPE_CHECKING: from armi.reactor.blocks import Block from armi.reactor.components.component import Component def getDefaultReferenceAssem(assems): """Return a default reference assembly.""" # if assemblies are defined in blueprints, handle meshing # assume finest mesh is reference assemsByNumBlocks = sorted( assems, key=lambda a: len(a), reverse=True, ) return assemsByNumBlocks[0] if assemsByNumBlocks else None def makeAssemsAbleToSnapToUniformMesh(assems, nonUniformAssemFlags, referenceAssembly=None): """Make this set of assemblies aware of the reference mesh so they can stay uniform as they axially expand.""" if not referenceAssembly: referenceAssembly = getDefaultReferenceAssem(assems) # make the snap lists so assems know how to expand nonUniformAssems = [Flags.fromStringIgnoreErrors(t) for t in nonUniformAssemFlags] for a in assems: if any(a.hasFlags(f) for f in nonUniformAssems): continue 
        a.makeAxialSnapList(referenceAssembly)


class AxialExpansionChanger:
    """
    Axially expand or contract assemblies or an entire core.

    Attributes
    ----------
    linked: :py:class:`AssemblyAxialLinkage`
        establishes object containing axial linkage information
    expansionData: :py:class:`ExpansionData <armi.reactor.converters.axialExpansionChanger.expansionData.ExpansionData>`
        establishes object to store and access relevant expansion data

    Notes
    -----
    - Is designed to work with general, vertically oriented, pin-type assembly designs. It is not set up to account
      for any other assembly type.
    - Useful for fuel performance, thermal expansion, reactivity coefficients, etc.
    - The axial expansion changer does not consider the expansion or contraction of fluids and therefore their
      conservation is not guaranteed. The conservation of fluid mass is expected only if each component type on a
      block has 1) uniform expansion rates and 2) axially isothermal fluid temperatures.
    """

    linked: typing.Optional[AssemblyAxialLinkage]
    expansionData: typing.Optional[ExpansionData]
    topMostBlock: typing.Optional["Block"]

    # 3cm is a presumptive lower threshold for DIF3D
    DIF3D_MIN_BLOCK_HEIGHT: float = 3.0

    # when checking the difference between the component and block heights, 1e-12 cm is used as a threshold to account
    # for meaningful differences. This threshold filters out negligible differences arising from numerical precision
    # that otherwise have a negligible impact on the assembly post-axial expansion. Anything larger than this value is
    # presumed to be valid of a warning that may warrant further investigation.
    COMP_BLOCK_HEIGHT_DIFF_THRESHOLD: float = 1e-12

    # Establish the class used to redistribute mass between components.
    MASS_REDISTRIBUTOR = RedistributeMass

    def __init__(self, detailedAxialExpansion: bool = False):
        """
        Build an axial expansion converter.

        Parameters
        ----------
        detailedAxialExpansion : bool, optional
            A boolean to indicate whether or not detailedAxialExpansion is to be utilized.
        """
        self._detailedAxialExpansion = detailedAxialExpansion
        self.linked = None
        self.expansionData = None
        self.topMostBlock = None

    @classmethod
    def expandColdDimsToHot(
        cls,
        assems: list,
        isDetailedAxialExpansion: bool,
        referenceAssembly=None,
    ):
        """Expand BOL assemblies, resolve disjoint axial mesh (if needed), and update block BOL heights.

        .. impl:: Perform expansion during core construction based on block heights at a specified temperature.
            :id: I_ARMI_INP_COLD_HEIGHT
            :implements: R_ARMI_INP_COLD_HEIGHT

            This method is designed to be used during core construction to axially thermally expand the
            assemblies to their "hot" temperatures (as determined by ``Thot`` values in blueprints).
            First, The Assembly is prepared for axial expansion via ``setAssembly``. In
            ``applyColdHeightMassIncrease``, the number densities on each Component is adjusted to
            reflect that Assembly inputs are at cold (i.e., ``Tinput``) temperatures. To expand to
            the requested hot temperatures, thermal expansion factors are then computed in
            ``computeThermalExpansionFactors``. Finally, the Assembly is axially thermally expanded in
            ``axiallyExpandAssembly``.

            If the setting ``detailedAxialExpansion`` is ``False``, then each Assembly gets its Block mesh
            set to match that of the "reference" Assembly (see ``getDefaultReferenceAssem`` and
            ``setBlockMesh``).

            Once the Assemblies are axially expanded, the Block BOL heights are updated. To account for the
            change in Block volume from axial expansion, ``completeInitialLoading`` is called to update any
            volume-dependent Block information.

        Parameters
        ----------
        assems: list[:py:class:`Assembly <armi.reactor.assemblies.Assembly>`]
            list of assemblies to be thermally expanded
        isDetailedAxialExpansion: bool
            If False, assemblies will be forced to conform to the reference mesh after expansion
        referenceAssembly: :py:class:`Assembly <armi.reactor.assemblies.Assembly>`, optional
            Assembly whose mesh other meshes will conform to if isDetailedAxialExpansion is False.
            If not provided, will assume the finest mesh assembly which is typically fuel.

        Notes
        -----
        Calling this method will result in an increase in mass via applyColdHeightMassIncrease!

        See Also
        --------
        :py:meth:`applyColdHeightMassIncrease`
        """
        assems = list(assems)
        if not referenceAssembly:
            referenceAssembly = getDefaultReferenceAssem(assems)
        axialExpChanger = cls(isDetailedAxialExpansion)
        for a in assems:
            axialExpChanger.setAssembly(a, expandFromTinputToThot=True)
            axialExpChanger.applyColdHeightMassIncrease()
            axialExpChanger.expansionData.computeThermalExpansionFactors()
            axialExpChanger.axiallyExpandAssembly(recalculateBurnup=False)
        if not isDetailedAxialExpansion:
            for a in assems:
                a.setBlockMesh(referenceAssembly.getAxialMesh())
        # update block BOL heights to reflect hot heights
        for a in assems:
            for b in a:
                b.p.heightBOL = b.getHeight()
                b.completeInitialLoading()
                axialExpChanger.recalculateBurnup(b)

    def performPrescribedAxialExpansion(self, a: Assembly, components: list, percents: list, setFuel=True):
        """Perform axial expansion/contraction of an assembly given prescribed expansion percentages.

        .. impl:: Perform expansion/contraction, given a list of components and expansion coefficients.
            :id: I_ARMI_AXIAL_EXP_PRESC
            :implements: R_ARMI_AXIAL_EXP_PRESC

            This method performs component-wise axial expansion for an Assembly given expansion coefficients
            and a corresponding list of Components. In ``setAssembly``, the Assembly is prepared for axial
            expansion by determining Component-wise axial linkage and checking to see if a dummy Block is
            in place (necessary for ensuring conservation properties). The provided expansion factors are
            then assigned to their corresponding Components in ``setExpansionFactors``. Finally, the axial
            expansion is performed in ``axiallyExpandAssembly``

        Parameters
        ----------
        a : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`
            ARMI assembly to be changed
        components : list[:py:class:`Component <armi.reactor.components.component.Component>`]
            list of Components to be expanded
        percents : list[float]
            list of expansion percentages for each component listed in components
        setFuel : boolean, optional
            Boolean to determine whether or not fuel blocks should have their target components set
            This is useful when target components within a fuel block need to be determined on-the-fly.

        Notes
        -----
        - percents may be positive (expansion) or negative (contraction)
        """
        self.setAssembly(a, setFuel)
        self.expansionData.setExpansionFactors(components, percents)
        self.axiallyExpandAssembly()

    def performThermalAxialExpansion(
        self,
        a: Assembly,
        tempGrid: list,
        tempField: list,
        setFuel: bool = True,
        expandFromTinputToThot: bool = False,
    ):
        """Perform thermal expansion/contraction for an assembly given an axial temperature grid and field.

        .. impl:: Perform thermal expansion/contraction, given an axial temperature distribution over an assembly.
            :id: I_ARMI_AXIAL_EXP_THERM
            :implements: R_ARMI_AXIAL_EXP_THERM

            This method performs component-wise thermal expansion for an assembly given a discrete
            temperature distribution over the axial length of the Assembly. In ``setAssembly``, the
            Assembly is prepared for axial expansion by determining Component-wise axial linkage and
            checking to see if a dummy Block is in place (necessary for ensuring conservation properties).
            The discrete temperature distribution is then leveraged to update Component temperatures and
            compute thermal expansion factors (via ``updateComponentTempsBy1DTempField`` and
            ``computeThermalExpansionFactors``, respectively). Finally, the axial expansion is performed
            in ``axiallyExpandAssembly``.
        Parameters
        ----------
        a : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`
            ARMI assembly to be changed
        tempGrid : float, list
            Axial temperature grid (in cm) (i.e., physical locations where temp is stored)
        tempField : float, list
            Temperature values (in C) along grid
        setFuel : boolean, optional
            Boolean to determine whether or not fuel blocks should have their target components set
            This is useful when target components within a fuel block need to be determined on-the-fly.
        expandFromTinputToThot: bool
            determines if thermal expansion factors should be calculated from c.inputTemperatureInC
            to c.temperatureInC (True) or some other reference temperature and c.temperatureInC (False)
        """
        self.setAssembly(a, setFuel, expandFromTinputToThot)
        self.expansionData.updateComponentTempsBy1DTempField(tempGrid, tempField)
        self.expansionData.computeThermalExpansionFactors()
        self.axiallyExpandAssembly()

    def reset(self):
        """Clear the linkage and expansion data so a new assembly can be set."""
        self.linked = None
        self.expansionData = None

    def setAssembly(self, a: Assembly, setFuel=True, expandFromTinputToThot=False):
        """Set the armi assembly to be changed and init expansion data class for assembly.

        Parameters
        ----------
        a : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`
            ARMI assembly to be changed
        setFuel : boolean, optional
            Boolean to determine whether or not fuel blocks should have their target components set
            This is useful when target components within a fuel block need to be determined on-the-fly.
        expandFromTinputToThot: bool
            determines if thermal expansion factors should be calculated from c.inputTemperatureInC
            to c.temperatureInC (True) or some other reference temperature and c.temperatureInC (False)

        Notes
        -----
        When considering thermal expansion, if there is an axial temperature distribution on the assembly, the axial
        expansion methodology will NOT perfectly preserve mass. The magnitude of the gradient of the temperature
        distribution is the primary factor in determining the cumulative loss of mass conservation.
        """
        self.linked = AssemblyAxialLinkage(a)
        self.expansionData = ExpansionData(a, setFuel=setFuel, expandFromTinputToThot=expandFromTinputToThot)
        self._checkAssemblyConstructionIsValid()

    def _checkAssemblyConstructionIsValid(self):
        """Validate the assembly set in :meth:`setAssembly` before any expansion is attempted."""
        self._isTopDummyBlockPresent()
        self._checkForBlocksWithoutSolids()

    def _checkForBlocksWithoutSolids(self):
        """
        Makes sure that there aren't any blocks (other than the top-most dummy block) that consist entirely of
        fluid components.

        The expansion changer doesn't know what to do with such assemblies.

        Raises
        ------
        InputError
            if any non-top block contains only fluid components.
        """
        # skip top most dummy block since that is, by design, all fluid
        for b in self.linked.a[:-1]:
            if all(isinstance(c.material, Fluid) for c in b.iterComponents()):
                raise InputError(
                    f"Assembly {self.linked.a} is constructed improperly for use with the axial expansion changer "
                    f"as block, {b}, consists of exclusively fluid component(s). If this is not a mistake, consider "
                    "using the 'assemFlagsToSkipAxialExpansion' case setting to bypass performing axial expansion "
                    "on this assembly."
                )

    def applyColdHeightMassIncrease(self):
        """
        Increase component mass because they are declared at cold dims.

        Notes
        -----
        A cold 1 cm tall component will have more mass that a component with the same mass/length as a component
        with a hot height of 1 cm. This should be called when the setting `inputHeightsConsideredHot` is used.
        This adjusts the expansion factor applied during applyMaterialMassFracsToNumberDensities.
        """
        for c in self.linked.a.iterComponents():
            axialExpansionFactor = 1.0 + c.material.linearExpansionFactor(c.temperatureInC, c.inputTemperatureInC)
            c.changeNDensByFactor(axialExpansionFactor)

    def _isTopDummyBlockPresent(self):
        """Determines if top most block of assembly is a dummy block.

        Notes
        -----
        - If true, then axial expansion will be physical for all blocks.
        - If false, the top most block in the assembly is artificially chopped to preserve the assembly height. A
          runLog.Warning also issued.
        """
        self.topMostBlock = self.linked.a[-1]
        if not self.topMostBlock.hasFlags(Flags.DUMMY):
            runLog.warning(
                f"No dummy block present at the top of {self.linked.a}! Top most block will be artificially chopped to "
                "preserve assembly height"
            )
            if self._detailedAxialExpansion:
                msg = "Cannot run detailedAxialExpansion without a dummy block at the top of the assembly!"
                runLog.error(msg)
                raise RuntimeError(msg)

    def axiallyExpandAssembly(self, recalculateBurnup: bool = True):
        """Utilizes assembly linkage to do axial expansion.

        .. impl:: Preserve the total height of an ARMI assembly, during expansion.
            :id: I_ARMI_ASSEM_HEIGHT_PRES
            :implements: R_ARMI_ASSEM_HEIGHT_PRES

            The total height of an Assembly is preserved by not changing the ``ztop`` position of the
            top-most Block in an Assembly. The ``zbottom`` of the top-most Block is adjusted to match
            the Block immediately below it. The ``height`` of the top-most Block is then updated to
            reflect any expansion/contraction.

        Parameters
        ----------
        recalculateBurnup
            Optional parameter to skip the recalculate burnup step.
        """
        mesh = [0.0]
        runLog.debug(
            "Printing component expansion information (growth percentage and 'target component') for each block in "
            f"assembly {self.linked.a}."
        )
        # expand all of the components
        for b in self.linked.a:
            for c in iterSolidComponents(b):
                growFrac = self.expansionData.getExpansionFactor(c)
                # component ndens and component heights are scaled to their respective growth factor
                c.changeNDensByFactor(1.0 / growFrac)
                c.zbottom = b.p.zbottom
                c.height = growFrac * b.getHeight()
                c.ztop = c.zbottom + c.height

        # align blocks on target components
        for ib, b in enumerate(self.linked.a):
            if b is not self.topMostBlock:
                targetComp = self.expansionData.getTargetComponent(b)
                # redefine block bounds based on target component
                b.p.zbottom = targetComp.zbottom
                b.p.ztop = targetComp.ztop
                b.p.height = b.p.ztop - b.p.zbottom
                b.clearCache()
                b.p.z = b.p.zbottom + b.getHeight() / 2.0

                cLinkedAbove = self.linked.linkedComponents[targetComp].upper
                if cLinkedAbove is not None:
                    if self.expansionData.isTargetComponent(cLinkedAbove):
                        # the linked component in the block above is the target component for that block. e.g., fuel to
                        # fuel. Shift this linked target component up (expansion) or down (contraction) without changing
                        # its height. In this case, component mass is conserved for both target components.
                        cLinkedAbove.zbottom = targetComp.ztop
                        cLinkedAbove.ztop = cLinkedAbove.height + cLinkedAbove.zbottom
                    else:
                        # the current target component type continues in the block above, but the target component in
                        # the block above is different. e.g., the transition from stationary duct to control material in
                        # a typical pin-based reactor control assembly design. Shift the target component in the block
                        # above up (expansion) or down (contraction) without changing its height. In this case,
                        # component mass is conserved for both target components.
                        for c in iterSolidComponents(self.linked.linkedBlocks[b].upper):
                            c.zbottom = targetComp.ztop
                            c.ztop = c.height + c.zbottom
                else:
                    bAbove = self.linked.linkedBlocks[b].upper
                    if bAbove is self.topMostBlock:
                        if not bAbove.hasFlags(Flags.DUMMY):
                            for c in iterSolidComponents(bAbove):
                                c.zbottom = b.p.ztop
                                c.ztop = c.zbottom + c.height
                    else:
                        targetCompAbove = self.expansionData.getTargetComponent(bAbove)
                        # shift the bounds of the target component in the block above to align with the bounds of the
                        # current block.
                        targetCompAbove.zbottom = b.p.ztop
                        targetCompAbove.ztop = targetCompAbove.zbottom + targetCompAbove.height

                # deal with non-target components
                for c in filter(lambda c: c is not targetComp, iterSolidComponents(b)):
                    if self.linked.linkedComponents[c].lower is None:
                        # this component is not axially linked to anything below and needs to shift with its
                        # respective parent block.
                        c.zbottom = b.p.zbottom
                        c.ztop = c.zbottom + c.height
                    cAbove = self.linked.linkedComponents[c].upper
                    if cAbove is not None:
                        # align components
                        cAbove.zbottom = c.ztop
                        cAbove.ztop = cAbove.zbottom + cAbove.height
                        # redistribute mass
                        deltaZTop = b.p.ztop - c.ztop
                        self._checkComponentHeight(c)
                        if deltaZTop > 0.0:
                            self.MASS_REDISTRIBUTOR(
                                fromComp=cAbove, toComp=c, assemName=repr(self.linked.a), deltaZTop=deltaZTop
                            )
                        elif deltaZTop < 0.0:
                            self.MASS_REDISTRIBUTOR(
                                fromComp=c, toComp=cAbove, assemName=repr(self.linked.a), deltaZTop=deltaZTop
                            )
                        # realign components based on deltaZTop
                        self._shiftLinkedCompsForDelta(c, cAbove, deltaZTop)
            else:
                # top-most block: keep its ztop fixed and stretch/shrink it down to the block below
                b.p.zbottom = self.linked.linkedBlocks[b].lower.p.ztop
                b.p.height = b.p.ztop - b.p.zbottom
                b.p.z = b.p.zbottom + b.getHeight() / 2.0
                b.clearCache()
                # If the self.topMostBlock is a dummy block, the following is meaningless as there are no solid
                # components. However, if it is not a dummy block, we need to adjust the solid components within it in
                # order to keep their elevation information consistent with the block.
                for c in iterSolidComponents(b):
                    c.zbottom = b.p.zbottom
                    c.ztop = b.p.ztop
                    c.height = c.ztop - c.zbottom

            self._checkBlockHeight(b)
            self._recomputeBlockMassParams(b)
            # redo mesh -- functionality based on assembly.calculateZCoords()
            mesh.append(b.p.ztop)
            b.spatialLocator = self.linked.a.spatialGrid[0, 0, ib]

        bounds = list(self.linked.a.spatialGrid._bounds)
        bounds[2] = array(mesh)
        self.linked.a.spatialGrid._bounds = tuple(bounds)
        if recalculateBurnup:
            for b in self.linked.a.iterBlocks(Flags.FUEL):
                self.recalculateBurnup(b)

    def _recomputeBlockMassParams(self, b: "Block"):
        """
        After component initial mass parameters have been adjusted for expansion, recompute block parameters that
        are derived from children.
        """
        paramsToMove = (
            "massHmBOL",
            "molesHmBOL",
        )
        for paramName in paramsToMove:
            b.p[paramName] = (
                sum(c.p[paramName] for c in b.iterComponents() if c.p[paramName] is not None) / b.getSymmetryFactor()
            )

    def recalculateBurnup(self, b: "Block"):
        """Post axial-expansion, heavy metal may have moved between blocks; recalculate burnup.

        Notes
        -----
        Since burnup can be calculated differently, this is meant to be populated in a downstream application
        subclass.
        """
        pass

    def _shiftLinkedCompsForDelta(self, c: "Component", cAbove: "Component", deltaZTop: float):
        """Grow/shrink ``c`` by ``deltaZTop`` and shift ``cAbove`` by the same amount to keep them contiguous."""
        # shift the height and ztop of c downwards (-deltaZTop) or upwards (+deltaZTop)
        c.height += deltaZTop
        c.ztop += deltaZTop
        # the height of cAbove grows and zbottom moves downwards (-deltaZTop) or shrinks and moves upward (+deltaZTop)
        cAbove.height -= deltaZTop
        cAbove.zbottom += deltaZTop

    def manageCoreMesh(self, r):
        """Manage core mesh post assembly-level expansion.

        Parameters
        ----------
        r : :py:class:`Reactor <armi.reactor.reactors.Reactor>`
            ARMI reactor to have mesh modified

        Notes
        -----
        - if no detailedAxialExpansion, then do "cheap" approach to uniformMesh converter.
        - update average core mesh values with call to r.core.updateAxialMesh()
        - oldMesh will be None during initial core construction at processLoading as it has not yet been set.
        """
        if not self._detailedAxialExpansion:
            # loop through again now that the reference is adjusted and adjust the non-fuel assemblies.
            for a in r.core.getAssemblies():
                a.setBlockMesh(r.core.refAssem.getAxialMesh(), conserveMassFlag="auto")

        oldMesh = r.core.p.axialMesh
        r.core.updateAxialMesh()
        if oldMesh:
            runLog.extra("Updated r.core.p.axialMesh (old, new)")
            for old, new in zip(oldMesh, r.core.p.axialMesh):
                runLog.extra(f"{old:.6e}\t{new:.6e}")

    def _checkComponentHeight(self, c):
        """Raise if the component's ``ztop`` is below its ``zbottom`` (a negative, unphysical height)."""
        if c.zbottom > c.ztop:
            msg = f"""
            {c} has a negative height. This is unphysical.
                Assembly: {self.linked.a}
                Block: {c.parent}
                Component: {c}
                Component Height = {c.ztop} - {c.zbottom} = {c.height}.
            """
            raise ArithmeticError(dedent(msg))

    def _checkBlockHeight(self, b):
        """Do some basic block height validation."""
        if b.getHeight() < self.DIF3D_MIN_BLOCK_HEIGHT:
            runLog.debug(f"Block {b.name} ({str(b.p.flags)}) has a height less than 3.0 cm. ({b.getHeight():.12e})")

        if b.getHeight() < 0.0:
            raise ArithmeticError(f"Block {b.name} ({str(b.p.flags)}) has a negative height. ({b.getHeight():.12e})")

        for c in iterSolidComponents(b):
            if c.height - b.getHeight() > self.COMP_BLOCK_HEIGHT_DIFF_THRESHOLD:
                diff = c.height - b.getHeight()
                expectedChange = "increase" if diff < 0.0 else "decrease"
                # only warn for neutronically important components
                if c.hasFlags(Flags.FUEL) or c.hasFlags(Flags.CONTROL):
                    msg = f"""
                    The height of {c} has gone out of sync with its parent block!
                        Assembly: {self.linked.a}
                        Block: {b}
                        Component: {c}
                        Block Height = {b.getHeight()}
                        Component Height = {c.height}
                    The difference in height is {diff} cm. This difference will result in an artificial
                    {expectedChange} in the mass of {c}. This is indicative that there are multiple axial component
                    terminations in {b}. Per the ARMI User Manual, to preserve mass there can only be one axial
                    component termination per block.
                    """
                    runLog.warning(dedent(msg), label="Component height different.")

        if self.linked.linkedBlocks[b].lower:
            lowerBlock = self.linked.linkedBlocks[b].lower
            if lowerBlock.p.ztop != b.p.zbottom:
                runLog.warning(
                    "Block heights have gone out of sync!\n"
                    f"\t{lowerBlock.getType()}: {lowerBlock.p.ztop}\n"
                    f"\t{b.getType()}: {b.p.zbottom}",
                    single=True,
                )


================================================
FILE: armi/reactor/converters/axialExpansionChanger/expansionData.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data container for axial expansion."""

from statistics import mean
from textwrap import dedent
from typing import TYPE_CHECKING, Iterable, Optional, Union

from armi.materials import material
from armi.reactor.flags import Flags

TARGET_FLAGS_IN_PREFERRED_ORDER = [
    Flags.FUEL,
    Flags.CONTROL,
    Flags.POISON,
    Flags.SHIELD,
    Flags.SLUG,
]

if TYPE_CHECKING:
    from armi.reactor.assemblies import Assembly
    from armi.reactor.blocks import Block
    from armi.reactor.components import Component


def iterSolidComponents(b: "Block") -> Iterable["Component"]:
    """Iterate over all solid components in the block."""
    return filter(lambda c: not isinstance(c.material, material.Fluid), b)


def getSolidComponents(b: "Block") -> list["Component"]:
    """
    Return list of components in the block that have solid material.

    Notes
    -----
    Axial expansion only needs to be applied to solid materials.
        see self._isFuelLocked
    expandFromTinputToThot: bool
        Determines if thermal expansion factors should be calculated from

        - ``c.inputTemperatureInC`` to ``c.temperatureInC`` when ``True``, or
        - some other reference temperature and ``c.temperatureInC`` when ``False``
    """

    # map of component -> L1/L0 expansion factor
    _expansionFactors: dict["Component", float]
    # map of component -> temperature (C) captured before a new temperature was applied
    componentReferenceTemperature: dict["Component", float]

    def __init__(self, a: "Assembly", setFuel: bool, expandFromTinputToThot: bool):
        self._a = a
        self.componentReferenceTemperature = {}
        self._expansionFactors = {}
        self._componentDeterminesBlockHeight = {}
        self._setAllTargetComponents(setFuel)
        self.expandFromTinputToThot = expandFromTinputToThot

    def setExpansionFactors(self, components: list["Component"], expFrac: list[float]):
        """Sets user defined expansion fractions.

        Parameters
        ----------
        components : List[:py:class:`Component <armi.reactor.components.component.Component>`]
            list of Components to have their heights changed
        expFrac : List[float]
            list of L1/L0 height changes that are to be applied to components

        Raises
        ------
        RuntimeError
            If components and expFrac are different lengths
        """
        if len(components) != len(expFrac):
            raise RuntimeError(
                "Number of components and expansion fractions must be the same!\n"
                f" len(components) = {len(components)}\n"
                f" len(expFrac) = {len(expFrac)}"
            )
        for exp in expFrac:
            if exp <= 0.0:
                raise RuntimeError(
                    f"Expansion factor {exp}, L1/L0, is not physical. Expansion fractions should be greater than 0.0."
                )
        for c, p in zip(components, expFrac):
            self._expansionFactors[c] = p

    def updateComponentTempsBy1DTempField(self, tempGrid, tempField):
        """Assign a block-average axial temperature to components.

        Parameters
        ----------
        tempGrid : numpy array
            1D axial temperature grid (i.e., physical locations where temp is stored)
        tempField : numpy array
            temperature values along grid

        Notes
        -----
        - given a 1D axial temperature grid and distribution, searches for temperatures that fall
          within the bounds of a block, and averages them
        - this average temperature is then passed to self.updateComponentTemp()

        Raises
        ------
        ValueError
            if no temperature points found within a block
        RuntimeError
            if tempGrid and tempField are different lengths
        """
        if len(tempGrid) != len(tempField):
            raise RuntimeError("tempGrid and tempField must have the same length.")

        self.componentReferenceTemperature = {}  # reset, just to be safe
        for b in self._a:
            tmpMapping = []
            for idz, z in enumerate(tempGrid):
                if b.p.zbottom <= z <= b.p.ztop:
                    tmpMapping.append(tempField[idz])
                # grid points are assumed sorted; stop once past the block top -- TODO confirm assumption
                if z > b.p.ztop:
                    break
            if len(tmpMapping) == 0:
                raise ValueError(
                    f"{b} has no temperature points within it!\n"
                    "Likely need to increase the refinement of the temperature grid."
                )
            blockAveTemp = mean(tmpMapping)
            for c in b:
                self.updateComponentTemp(c, blockAveTemp)

    def updateComponentTemp(self, c: "Component", temp: float):
        """Update component temperatures with a provided temperature.

        Parameters
        ----------
        c : :py:class:`Component <armi.reactor.components.component.Component>`
            component to which the temperature, temp, is to be applied
        temp : float
            new component temperature in C

        Notes
        -----
        - "reference" height and temperature are the current states; i.e. before 1) the new temperature,
          temp, is applied to the component, and 2) the component is axially expanded
        """
        self.componentReferenceTemperature[c] = c.temperatureInC
        c.setTemperature(temp)

    def computeThermalExpansionFactors(self):
        """Computes expansion factors for all components via thermal expansion."""
        for b in self._a:
            self._setComponentThermalExpansionFactors(b)

    def _setComponentThermalExpansionFactors(self, b: "Block"):
        """For each component in the block, set the thermal expansion factors."""
        for c in iterSolidComponents(b):
            self._perComponentThermalExpansionFactors(c)

    def _perComponentThermalExpansionFactors(self, c: "Component"):
        """Set the thermal expansion factors for a single component."""
        if self.expandFromTinputToThot:
            # get thermal expansion factor between c.inputTemperatureInC & c.temperatureInC
            self._expansionFactors[c] = c.getThermalExpansionFactor()
        elif c in self.componentReferenceTemperature:
            growFrac = c.getThermalExpansionFactor(T0=self.componentReferenceTemperature[c])
            self._expansionFactors[c] = growFrac
        else:
            # We want expansion factors relative to componentReferenceTemperature not
            # Tinput. But for this component there isn't a componentReferenceTemperature, so
            # we'll assume that the expansion factor is 1.0.
            self._expansionFactors[c] = 1.0

    def getExpansionFactor(self, c: "Component"):
        """Retrieves expansion factor for c (defaults to 1.0 when none has been set).

        Parameters
        ----------
        c : :py:class:`Component <armi.reactor.components.component.Component>`
            Component to retrieve expansion factor for
        """
        value = self._expansionFactors.get(c, 1.0)
        return value

    def _setAllTargetComponents(self, setFuel: bool):
        """Sets axial expansion target component on each block in the expanded assembly.

        Parameters
        ----------
        setFuel
            boolean to determine if fuel block should have its target component set. Useful for when
            target components should be determined on the fly.
        """
        for b in self._a:
            self.setTargetComponent(b, setFuel)

    def setTargetComponent(self, b: "Block", setFuel: bool):
        """Set the axial expansion target component on a specific Block.

        Parameters
        ----------
        b
            ARMI Block which is to have its axial expansion target component set.
        setFuel
            boolean to determine if fuel block should have its target component set. Useful for when
            target components should be determined on the fly.
        """
        if b.p.axialExpTargetComponent:
            # a target was already recorded on the block parameter; reuse it
            target = b.getComponentByName(b.p.axialExpTargetComponent)
            self._setExpansionTarget(b, target)
        elif b.hasFlags(Flags.PLENUM) or b.hasFlags(Flags.ACLP):
            self.determineTargetComponent(b, Flags.CLAD)
        elif b.hasFlags(Flags.DUMMY):
            # Dummy blocks are intended to contain only fluid and do not need a target component
            pass
        elif setFuel and b.hasFlags(Flags.FUEL):
            self._isFuelLocked(b)
        else:
            self.determineTargetComponent(b)

    def determineTargetComponent(self, b: "Block", flagOfInterest: Optional[Flags] = None) -> "Component":
        """Determines the component who's expansion will determine block height.

        This information is also stored on the block at ``Block.p.axialExpTargetComponent`` for faster
        retrieval later.

        Parameters
        ----------
        b : :py:class:`Block <armi.reactor.blocks.Block>`
            block to specify target component for
        flagOfInterest : :py:class:`Flags <armi.reactor.flags.Flags>`
            the flag of interest to identify the target component

        Returns
        -------
        Component
            Component identified as target component, if found.

        Notes
        -----
        - if flagOfInterest is None, finds the component within b that contains flags that
          are defined in a preferred order of flags, or barring that, in b.p.flags
        - if flagOfInterest is not None, finds the component that contains the flagOfInterest.

        Raises
        ------
        RuntimeError
            no target component found
        RuntimeError
            multiple target components found
        """
        if flagOfInterest is None:
            # Follow expansion of most neutronically important component, fuel then control/poison
            for targetFlag in TARGET_FLAGS_IN_PREFERRED_ORDER:
                candidates = b.getChildrenWithFlags(targetFlag)
                if candidates:
                    break
            # some blocks/components are not included in the above list but should still be found
            if not candidates:
                candidates = [c for c in b.getChildren() if c.p.flags in b.p.flags]
        else:
            candidates = b.getChildrenWithFlags(flagOfInterest)
            if len(candidates) == 0:
                # if only 1 solid, be smart enought to snag it
                solidMaterials = getSolidComponents(b)
                if len(solidMaterials) == 1:
                    candidates = solidMaterials
        if len(candidates) == 0:
            raise RuntimeError(f"No target component found!\n Block {b}")
        if len(candidates) > 1:
            msg = f"""
            Cannot have more than one component within a block that has the target flag!
                Block {b}
                flagOfInterest {flagOfInterest}
                Components {candidates}
            """
            raise RuntimeError(dedent(msg))
        target = candidates[0]
        self._setExpansionTarget(b, target)
        return target

    def _setExpansionTarget(self, b: "Block", target: "Component"):
        """Mark ``target`` as the height-determining component and record its name on the block parameter."""
        self._componentDeterminesBlockHeight[target] = True
        b.p.axialExpTargetComponent = target.name

    def _isFuelLocked(self, b: "Block"):
        """Physical/realistic implementation reserved for ARMI plugin.

        Parameters
        ----------
        b : :py:class:`Block <armi.reactor.blocks.Block>`
            block to specify target component for

        Raises
        ------
        RuntimeError
            multiple fuel components found within b

        Notes
        -----
        - This serves as an example to check for fuel/clad locking/interaction found in SFRs.
        - A more realistic/physical implementation is reserved for ARMI plugin(s).
        """
        c = b.getComponent(Flags.FUEL)
        if c is None:
            raise RuntimeError(f"No fuel component within {b}!")
        self._setExpansionTarget(b, c)

    def isTargetComponent(self, c: Union["Component", None]) -> bool:
        """Returns bool if c is a target component.

        Parameters
        ----------
        c : :py:class:`Component <armi.reactor.components.component.Component>`
            Component to check target component status
        """
        return bool(c in self._componentDeterminesBlockHeight)

    def getTargetComponent(self, b: "Block"):
        """Returns the target component for a block.

        Parameters
        ----------
        b
            the block to query for the target component

        Raises
        ------
        RuntimeError
            if no solid component in ``b`` has been marked as the target.
        """
        c = filter(self.isTargetComponent, iterSolidComponents(b))
        try:
            return next(c)
        except StopIteration:
            raise RuntimeError(f"No target component found for {b} in {b.parent}!")


================================================
FILE: armi/reactor/converters/axialExpansionChanger/redistributeMass.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import typing
from math import isclose
from textwrap import dedent

from scipy.optimize import brentq

from armi import runLog
from armi.reactor.flags import Flags
from armi.utils import densityTools

if typing.TYPE_CHECKING:
    from armi.reactor.components.component import Component


class RedistributeMass:
    """Given ``deltaZTop``, add mass from ``fromComp`` and give it to ``toComp``.

    Parameters
    ----------
    fromComp
        Component which is going to give mass to toComp
    toComp
        Component that is recieving mass from fromComp
    deltaZTop
        The length, in cm, of fromComp being given to toComp
    initOnly
        Optional parameter to only initialize the class and not perform the redistribution.
        If True, the redistribution can be executed by calling :py:meth:`performRedistribution`.
    """

    def __init__(
        self, fromComp: "Component", toComp: "Component", deltaZTop: float, assemName: str, initOnly: bool = False
    ):
        self.fromComp = fromComp
        self.toComp = toComp
        self.assemblyName: str = assemName
        self.deltaZTop = deltaZTop
        # Running totals (grams) of the mass in play on each side; filled by setNewToCompNDens.
        self.massFrom: float = 0.0
        self.massTo: float = 0.0
        if not initOnly:
            self.performRedistribution()

    def performRedistribution(self):
        """Perform the mass redistribution between two compatible components."""
        if self.compatabilityCheck():
            self.setNewToCompNDens()
            self.setNewToCompTemperature()
            # BOL heavy-metal bookkeeping params only move if both components track them
            if self.fromComp.p.molesHmBOL is not None and self.toComp.p.molesHmBOL is not None:
                self._adjustMassParams()

    @property
    def fromCompVolume(self):
        # volume of the slice of fromComp being handed over
        return self.fromComp.getArea() * abs(self.deltaZTop)

    @property
    def toCompVolume(self):
        # current (pre-redistribution) volume of toComp
        return self.toComp.getArea() * self.toComp.height

    @property
    def newVolume(self):
        """Compute and return the new post-redistribution volume of toComp."""
        return self.toCompVolume + self.fromCompVolume

    def compatabilityCheck(self) -> bool:
        """Ensure fromComp and toComp are the same material.

        Notes
        -----
        If the linked components are not the same material, we cannot transfer mass between
        materials because then the resulting material has unknown properties.

        Returns
        -------
        False if incompatible; true otherwise.
        """
        if type(self.fromComp.material) is not type(self.toComp.material):
            msg = f"""
            Cannot redistribute mass between components that are different materials! Trying to
            redistribute mass between the following components in {self.assemblyName}:
                from --> {self.fromComp.parent} : {self.fromComp} : {type(self.fromComp.material)}
                to --> {self.toComp.parent} : {self.toComp} : {type(self.toComp.material)}

            Instead, mass will be removed from ({self.fromComp} | {type(self.fromComp.material)})
            and ({self.toComp} | {type(self.toComp.material)} will be artificially expanded.
            The consequence is that mass conservation is no longer guaranteed for the
            {self.toComp.getType()} component type on this assembly!
            """
            runLog.warning(dedent(msg), label="Cannot redistribute mass between different materials.", single=True)
            return False
        return True

    def setNewToCompNDens(self):
        """Calculate the post-redistribution number densities for toComp and determine how much
        mass is in play for fromComp and toComp.

        Notes
        -----
        Only the mass of ``toComp`` is changed in this method. The mass of ``fromComp`` is changed
        separately by changing the height of ``fromComp`` -- the number densities of ``fromComp``
        are not modified.

        When redistributing mass, if ``fromComp`` and ``toComp`` are different temperatures, the
        temperature of ``toComp`` will change. See :py:meth:`setNewToCompTemperature`.
        """
        # calculate the mass of each nuclide and then the ndens for the new mass
        newNDens: dict[str, float] = {}
        nucs = self._getAllNucs(self.toComp.getNuclides(), self.fromComp.getNuclides())
        for nuc in nucs:
            massByNucFrom = densityTools.getMassInGrams(nuc, self.fromCompVolume, self.fromComp.getNumberDensity(nuc))
            massByNucTo = densityTools.getMassInGrams(nuc, self.toCompVolume, self.toComp.getNumberDensity(nuc))
            newNDens[nuc] = densityTools.calculateNumberDensity(nuc, massByNucFrom + massByNucTo, self.newVolume)
            self.massFrom += massByNucFrom
            self.massTo += massByNucTo

        # Set newNDens on toComp
        self.toComp.setNumberDensities(newNDens)

    def setNewToCompTemperature(self):
        r"""Calculate and set the post-redistribution temperature of toComp.

        Notes
        -----
        Calculating this new temperature is non trivial due to thermal expansion. The following
        defines what the area of ``toComp`` is post-redistribution,

        .. math::
            A_1(\hat{T}) \left( H_1 + \delta \right) &= A_1(T_1) H_1 + A_2(T_2)\delta,\\
            A_1(\hat{T}) &= \frac{A_1(T_1) H_1 + A_2(T_2)\delta}{H_1 + \delta}.

        Where, :math:`A_1, T_1, H_1`, are the area, temperature, and height of ``toComp``,
        :math:`A_2, T_2`, are the area and temperature of ``fromComp``, :math:`\delta` is the
        parameter ``deltaZTop``, and :math:`\hat{T}` is the new temperature of ``toComp``
        post-redistribution. :func:`scipy.optimize.brentq` is used to find the root of the above
        equation, indicating the value for :math:`\hat{T}` that finds the desired area,
        post-redistribution of mass.
        """
        if isclose(self.fromComp.temperatureInC, self.toComp.temperatureInC, rel_tol=1e-09):
            # per isclose documentation, rel_tol of 1e-09 is roughly equivalent to ensuring the
            # temps are the same to roughly 9 digits.
            newToCompTemp = self.toComp.temperatureInC
        else:
            targetArea = self.newVolume / (self.toComp.height + abs(self.deltaZTop))
            try:
                # root-find for the temperature whose area matches the target, bracketed by the
                # two component temperatures
                newToCompTemp = brentq(
                    f=lambda T: self.toComp.getArea(Tc=T) - targetArea,
                    a=self.fromComp.temperatureInC,
                    b=self.toComp.temperatureInC,
                )
            except ValueError:
                # brentq raises ValueError when f(a) and f(b) do not bracket a sign change;
                # fall back to a mass-weighted average temperature.
                totalMass = self.massFrom + self.massTo
                newToCompTemp = (
                    self.massFrom / totalMass * self.fromComp.temperatureInC
                    + self.massTo / totalMass * self.toComp.temperatureInC
                )
                # only warn for neutronically important component types
                if (self.toComp.hasFlags(Flags.FUEL) or self.toComp.hasFlags(Flags.CONTROL)) or (
                    self.fromComp.hasFlags(Flags.FUEL) or self.fromComp.hasFlags(Flags.CONTROL)
                ):
                    msg = f"""
                    Temperature search algorithm in axial expansion has failed in {self.assemblyName}
                    Trying to search for new temp between
                    from --> {self.fromComp.parent} : {self.fromComp} : {type(self.fromComp.material)} at {self.fromComp.temperatureInC} C
                    to --> {self.toComp.parent} : {self.toComp} : {type(self.toComp.material)} at {self.toComp.temperatureInC} C

                    f({self.fromComp.temperatureInC}) = {self.toComp.getArea(Tc=self.fromComp.temperatureInC) - targetArea}
                    f({self.toComp.temperatureInC}) = {self.toComp.getArea(Tc=self.toComp.temperatureInC) - targetArea}

                    Instead, a mass weighted average temperature of {newToCompTemp} will be used.
                    The consequence is that mass conservation is no longer guaranteed for this
                    component type on this assembly!
                    """  # noqa: E501
                    runLog.warning(dedent(msg), label="Temp Search Failure")
            except Exception as ee:
                # NOTE(review): this clause re-raises unchanged and has no effect; it could be
                # removed without changing behavior.
                raise ee

        # Do not use component.setTemperature as this mucks with the number densities we just calculated.
        self.toComp.temperatureInC = newToCompTemp
        self.toComp.clearCache()

    @staticmethod
    def _sortKey(item):
        """Break isotope string down by element, atomic weight, and metastable state for sorting.

        Raises a RuntimeError if the string does not match the expected pattern.
        """
        pattern = re.compile(
            r"""
            ([a-zA-Z]{1,2}) # Element
            (\d{1,3})? # atomic weight (optional, e.g., "C")
            ([a-zA-Z])? # metastable state (optional, e.g., Am242M or Am242)
            """,
            re.VERBOSE,
        )
        match = re.search(pattern, item)
        if match:
            # Convert numeric parts to int for correct numerical sorting
            element = match.group(1)
            atomicWeight = int(match.group(2)) if match.group(2) else 0
            metastable = 1 if match.group(3) else 0
            return (atomicWeight, element, metastable)
        raise RuntimeError(f"Unknown isotope! - {item}")

    def _getAllNucs(self, nucsA: list[str], nucsB: list[str]) -> list[str]:
        """Return a list that contains all of the nuclides in nucsA and nucsB.

        Notes
        -----
        The returned list is sorted by :py:meth:`_sortKey`. Isotopes are sorted based on
        1) atomic weight, 2) element, and 3) metastable state.
        """
        nucsToAdd = set(nucsA).union(set(nucsB))
        return sorted(nucsToAdd, key=self._sortKey)

    def _adjustMassParams(self):
        """Adjust massHmBOL and molesHmBOL on fromComp and toComp."""
        paramsToMove = (
            "massHmBOL",
            "molesHmBOL",
        )
        # fraction of fromComp's height (and thus of its BOL heavy-metal inventory) being moved
        removalFrac = abs(self.deltaZTop) / self.fromComp.height
        for paramName in paramsToMove:
            if self.fromComp.p[paramName] is not None:
                amountMoved = removalFrac * self.fromComp.p[paramName]
                self.toComp.p[paramName] = self.toComp.p[paramName] + amountMoved
                self.fromComp.p[paramName] = self.fromComp.p[paramName] - amountMoved



================================================
FILE: armi/reactor/converters/blockConverters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Convert block geometry from one to another, etc."""

import copy
import math

from armi import runLog
from armi.reactor import blocks, components, grids
from armi.reactor.flags import Flags
from armi.utils.plotting import plotConvertedBlock

SIN60 = math.sin(math.radians(60.0))


class BlockConverter:
    """Converts a block."""

    def __init__(self, sourceBlock):
        """
        Parameters
        ----------
        sourceBlock : :py:class:`armi.reactor.blocks.Block`
            An ARMI Block object to convert.
        """
        self._sourceBlock = sourceBlock
        self.convertedBlock = None  # the new block that is created.
def dissolveComponentIntoComponent(self, soluteName, solventName, minID=0.0): """ Make a new block that homogenized one component into another while conserving number of atoms. Parameters ---------- soluteName : str The name of the solute component in _sourceBlock solventName : str The name of the solvent component in _sourceBlock minID : float The minimum hot temperature diameter allowed for the solvent. This is useful for forcing components to not overlap. Warning ------- Nuclides merged into another component will be the temperature of the new component as temperature is stored on the component level. In the solute and solvent are the same temperature this is not an issue. Converted blocks that have dissolved components should avoid having their temperatures changed. This is because the component being merged into retains its old thermal expansion properties and may not be consistent with how the components would behave independently. For this reason it is recommended that these blocks be made right before the physics calculation of interest and be immediately discarded. Attaching them to the reactor is not recommended. 
""" runLog.extra( "Homogenizing the {} component into the {} component in block {}".format( soluteName, solventName, self._sourceBlock.getType() ), single=True, ) # break up dimension links since we will be messing with this block's components newBlock = copy.deepcopy(self._sourceBlock) # cannot pass components directly since the new block will have new components solute = newBlock.getComponentByName(soluteName) solvent = newBlock.getComponentByName(solventName) self._checkInputs(soluteName, solventName, solute, solvent) soluteLinks = solute.getLinkedComponents() # the area about to be added by the dimension change can be different than the simple area of the # merged component due to void gaps between components oldArea = solvent.getArea() runLog.debug("removing {}".format(solute)) # skip recomputation of area fractions because the blocks still have 0 height at this stage and derived # shape volume computations will fail soluteArea = solute.getArea() solute.mergeNuclidesInto(solvent) newBlock.remove(solute, recomputeAreaFractions=False) self._sourceBlock = newBlock # adjust new shape area. if solvent.__class__ is components.DerivedShape: pass # If it's coolant, the auto-fill area system gets it. 
coolant has no links else: soluteID, soluteOD = ( solute.getDimension("id", cold=False), solute.getDimension("od", cold=False), ) if soluteArea >= 0.0: if solvent.getDimension("id", cold=False) > soluteID: runLog.debug(f"Decreasing ID of {solvent} to accommodate {solute}.") solvent.setDimension("id", soluteID, cold=False) if solvent.getDimension("od", cold=False) < soluteOD: runLog.debug(f"Increasing OD of {solvent} to accommodate {solute}.") solvent.setDimension("od", soluteOD, cold=False) if solvent.getDimension("id", cold=False) < minID: runLog.debug(f"Updating the ID of {solvent} the the specified min ID: {minID}.") solvent.setDimension("id", minID, cold=False) else: # can only merge a negative-area component if one of the dimensions is linked matchedDimension = False if solvent.getDimension("id", cold=False) == soluteOD: runLog.debug(f"Increasing ID of {solvent} to accommodate {solute}.") solvent.setDimension("id", soluteID, cold=False) matchedDimension = True if solvent.getDimension("od", cold=False) == soluteID: runLog.debug(f"Decreasing OD of {solvent} to accommodate {solute}.") solvent.setDimension("od", soluteOD, cold=False) matchedDimension = True if not matchedDimension: errorMsg = ( "Cannot merge negative-area component {solute} into {solvent} without the two being linked." 
) runLog.error(errorMsg) raise ValueError(errorMsg) if soluteLinks: self.restablishLinks(solute, solvent, soluteLinks) self._verifyExpansion(solute, solvent) solvent.changeNDensByFactor(oldArea / solvent.getArea()) def _checkInputs(self, soluteName, solventName, solute, solvent): if solute is None or solvent is None: raise ValueError( "Block {} must have a {} component and a {} component to homogenize.".format( self._sourceBlock, soluteName, solventName ) ) if not ( isinstance(solvent, components.DerivedShape) or all(isinstance(c, components.Circle) for c in (solute, solvent)) ): raise ValueError( "Components are not of compatible shape to be merged solute: {}, solvent: {}".format(solute, solvent) ) if solute.getArea() < 0: # allow negative-area gap if not solute.hasFlags(Flags.GAP): raise ValueError( "Cannot merge solute with negative area into a solvent. {} area: {}".format( solute, solute.getArea() ) ) if solvent.getArea() <= 0: raise ValueError( "Cannot merge into a solvent with negative or 0 area. {} area: {}".format(solvent, solvent.getArea()) ) def restablishLinks(self, solute, solvent, soluteLinks): runLog.extra( "Solute is linked to component(s) {} and these links will be reestablished.".format(soluteLinks), single=True, ) for linkedC in soluteLinks: if linkedC in solvent.getLinkedComponents(): if not linkedC.containsVoidMaterial(): raise ValueError( "Non-Void component {} was linked to solute and solvent {} in converted block {}. 
" "Please dissolve this separately.".format(linkedC, solvent, self._sourceBlock) ) runLog.extra( "Removing void component {} in converted block {}.".format(linkedC, self._sourceBlock.getType()), single=True, ) self._sourceBlock.remove(linkedC) else: dims = linkedC.getDimensionNamesLinkedTo(solute) runLog.extra( "Linking component {} in converted block {} to solvent {}.".format( linkedC, self._sourceBlock.getType(), solvent ), single=True, ) for dimToChange, dimOfOther in dims: linkedC.setLink(dimToChange, solvent, dimOfOther) def _verifyExpansion(self, solute, solvent): validComponents = (c for c in self._sourceBlock if not isinstance(c, components.DerivedShape)) for c in sorted(validComponents): if not isinstance(c, components.Circle) or c is solvent or c.containsVoidMaterial(): continue if c.isEncapsulatedBy(solvent): raise ValueError( "There is a non void component {} in the location where component {} was expanded " "to absorb component solute {}. solvent dims {}, {} comp dims {} {}.".format( c, solvent, solute, solvent.p.id, solvent.p.od, c.p.id, c.p.od ) ) if c.getArea() < 0.0: runLog.warning( "Component {} still has negative area after {} was dissolved into {}".format(c, solute, solvent), single=True, ) def convert(self): raise NotImplementedError class ComponentMerger(BlockConverter): """For a provided block, merged the solute component into the solvent component. .. impl:: Homogenize one component into another. :id: I_ARMI_BLOCKCONV0 :implements: R_ARMI_BLOCKCONV This subclass of ``BlockConverter`` is meant as a one-time-use tool, to convert a ``Block`` into one ``Component``. A ``Block`` is a ``Composite`` that may probably has multiple ``Components`` somewhere in it. This means averaging the material properties in the original ``Block``, and ensuring that the final ``Component`` has the same shape and volume as the original ``Block``. 
        This subclass essentially just uses the base class method
        ``dissolveComponentIntoComponent()`` given prescribed solute and solvent materials, to
        define the merger.

    Notes
    -----
    It is the job of the developer to determine if merging a Block into one Component will yield
    valid or sane results.
    """

    def __init__(self, sourceBlock, soluteName, solventName):
        """
        Parameters
        ----------
        sourceBlock : :py:class:`armi.reactor.blocks.Block`
            An ARMI Block object to convert.
        soluteName : str
            The name of the solute component in _sourceBlock
        solventName : str
            The name of the solvent component in _sourceBlock
        """
        BlockConverter.__init__(self, sourceBlock)
        self.soluteName = soluteName
        self.solventName = solventName

    def convert(self):
        """Return a block with the solute merged into the solvent."""
        self.dissolveComponentIntoComponent(self.soluteName, self.solventName)
        return self._sourceBlock


class MultipleComponentMerger(BlockConverter):
    """
    Dissolves multiple components and checks validity at end.

    Doesn't run _verifyExpansion until the end so that the order the components are dissolved in
    does not cause a failure. For example if two liners are dissolved into the clad and the
    farthest liner was dissolved first, this would normally cause a ValueError in _verifyExpansion
    since the clad would be completely expanded over a non void component.

    .. impl:: Homogenize multiple components into one.
        :id: I_ARMI_BLOCKCONV1
        :implements: R_ARMI_BLOCKCONV

        This subclass of ``BlockConverter`` is meant as a one-time-use tool, to convert multiple
        ``Components`` into one. This means averaging the material properties in the original
        ``Components``, and ensuring that the final ``Component`` has the same shape and volume as
        all of the originals.

        This subclass essentially just uses the base class method
        ``dissolveComponentIntoComponent()`` given prescribed solute and solvent materials, to
        define the merger. Though care is taken here to ensure the merger isn't verified until it
        is completely finished.
    """

    def __init__(self, sourceBlock, soluteNames, solventName, specifiedMinID=0.0):
        """Standard constructor method.

        Parameters
        ----------
        sourceBlock : :py:class:`armi.reactor.blocks.Block`
            An ARMI Block object to convert.
        soluteNames : list
            List of str names of the solute components in _sourceBlock
        solventName : str
            The name of the solvent component in _sourceBlock
        specifiedMinID : float
            The minimum hot temperature diameter allowed for the solvent. This is useful for
            forcing components to not overlap.
        """
        BlockConverter.__init__(self, sourceBlock)
        self.soluteNames = soluteNames
        self.solventName = solventName
        self.specifiedMinID = specifiedMinID

    def _verifyExpansion(self, solute, solvent):
        """Wait until all components are dissolved to check this."""
        pass

    def convert(self):
        """Return a block with the solute merged into the solvent."""
        for soluteName in self.soluteNames:
            self.dissolveComponentIntoComponent(soluteName, self.solventName, minID=self.specifiedMinID)
        solvent = self._sourceBlock.getComponentByName(self.solventName)
        if solvent.__class__ is not components.DerivedShape:
            # explicitly call the base class check that was skipped during each dissolve
            BlockConverter._verifyExpansion(self, self.soluteNames, solvent)
        return self._sourceBlock


class MixedPinComponentMerger(MultipleComponentMerger):
    def __init__(self, sourceBlock, soluteNames, solventName, pin, specifiedMinID=0.0):
        """
        This BlockConverter handles mixed blocks with multiple pin types.

        A pin is a list of circular components that share a common spatial locator and thus make
        up a "pin", which is a physical construct but not a formal ARMI construct. This class can
        merge multiple components at a time within a single pin. To perform conversions on
        multiple pins within a mixed block, a new instance of this class must be constructed for
        each pin, and then the :py:meth:`convert` method must be called in a waterfall fashion --
        that is, the block returned from :py:meth:`convert` should be passed into the constructor
        of the next instance to perform a chain of component merges.

        .. impl:: Homogenize multiple components into one in a single pin within a mixed pin assembly.
            :id: I_ARMI_BLOCKCONV2
            :implements: R_ARMI_BLOCKCONV

        Parameters
        ----------
        sourceBlock : :py:class:`armi.reactor.blocks.Block`
            An ARMI Block object to convert.
        soluteNames : list
            List of str names of the solute components in _sourceBlock
        solventName : str
            The name of the solvent component in _sourceBlock
        pin : List[Component]
            List of the components that make up the pin being converted.
        specifiedMinID : float
            The minimum hot temperature diameter allowed for the solvent. This is useful for
            forcing components to not overlap.
        """
        super().__init__(sourceBlock, soluteNames, solventName, specifiedMinID=specifiedMinID)
        self.pin = pin

    def convert(self):
        """
        Return a block with the solute merged into the solvent.

        Run _verifyPinExpansion so that verification is limited to a single pin.
        """
        for soluteName in self.soluteNames:
            self.dissolveComponentIntoComponent(soluteName, self.solventName, minID=self.specifiedMinID)
        solvent = self._sourceBlock.getComponentByName(self.solventName)
        if solvent.__class__ is not components.DerivedShape:
            self._verifyPinExpansion(self.soluteNames, solvent)
        return self._sourceBlock

    def _verifyPinExpansion(self, solute, solvent):
        """Verify the conversion of a single pin construct."""
        validComponents = (c for c in self.pin if not isinstance(c, components.DerivedShape))
        for c in sorted(validComponents):
            if c not in self._sourceBlock:
                # c was merged
                continue
            if not isinstance(c, components.Circle) or c is solvent or c.containsVoidMaterial():
                continue
            if c.isEncapsulatedBy(solvent):
                raise ValueError(
                    "There is a non void component {} in the location where component {} was expanded "
                    "to absorb component solute {}. solvent dims {}, {} comp dims {} {}.".format(
                        c, solvent, solute, solvent.p.id, solvent.p.od, c.p.id, c.p.od
                    )
                )
            if c.getArea() < 0.0:
                runLog.warning(
                    "Component {} still has negative area after {} was dissolved into {}".format(c, solute, solvent),
                    single=True,
                )


class BlockAvgToCylConverter(BlockConverter):
    """
    Convert a block and driver block into a block made of a concentric circles using block
    (homogenized) composition.

    Notes
    -----
    This converter is intended for use in building 1-dimensional models of a set of block.
    numInternalRings controls the number of rings to use for the source block, while the
    numExternalRings controls the number of rings for the driver fuel block. The number of blocks
    to in each ring grows by 6 for each ring in hex geometry and 8 for each ring in Cartesian.

    This converter is opinionated in that it uses a spatial grid to determine how many blocks to
    add based on the type of the ``sourceBlock``. For example, if the ``sourceBlock`` is a HexBlock
    then a HexGrid will be used. If the ``sourceBlock`` is a CartesianBlock then a CartesianGrid
    without an offset will be used.
See Also -------- HexComponentsToCylConverter: This converter is more useful if the pin lattice is in a hex lattice. """ def __init__( self, sourceBlock, driverFuelBlock=None, numInternalRings=1, numExternalRings=None, ): BlockConverter.__init__(self, sourceBlock) self._driverFuelBlock = driverFuelBlock self._numExternalRings = numExternalRings self.convertedBlock = blocks.ThRZBlock(name=sourceBlock.name + "-cyl", height=sourceBlock.getHeight()) self.convertedBlock.setLumpedFissionProducts(sourceBlock.getLumpedFissionProductCollection()) self._numInternalRings = numInternalRings def convert(self): """Return a block converted into cylindrical geometry, possibly with other block types surrounding it.""" self._addBlockRings(self._sourceBlock, self._sourceBlock.getType(), self._numInternalRings, 1) self._addDriverFuelRings() return self.convertedBlock def _addBlockRings(self, blockToAdd, blockName, numRingsToAdd, firstRing, mainComponent=None): """Add a homogeneous block ring to the converted block.""" runLog.info("Converting representative block {} to its equivalent cylindrical model".format(self._sourceBlock)) innerDiam = self.convertedBlock[-1].getDimension("od") if len(self.convertedBlock) else 0.0 if mainComponent is not None: newCompProps = mainComponent.material tempInput = tempHot = mainComponent.temperatureInC else: # no component specified so just use block vals newCompProps = "Custom" # this component shouldn't change temperature anyway tempInput = tempHot = blockToAdd.getAverageTempInC() if isinstance(blockToAdd, blocks.HexBlock): grid = grids.HexGrid.fromPitch(1.0) elif isinstance(blockToAdd, blocks.CartesianBlock): grid = grids.CartesianGrid.fromRectangle(1.0, 1.0) else: raise ValueError(f"The `sourceBlock` of type {type(blockToAdd)} is not supported in {self}.") for ringNum in range(firstRing, firstRing + numRingsToAdd): numFuelBlocksInRing = grid.getPositionsInRing(ringNum) assert numFuelBlocksInRing is not None fuelBlockTotalArea = numFuelBlocksInRing 
* blockToAdd.getArea() driverOuterDiam = getOuterDiamFromIDAndArea(innerDiam, fuelBlockTotalArea) driverRing = components.Circle( blockName, newCompProps, tempInput, tempHot, od=driverOuterDiam, id=innerDiam, mult=1, ) driverRing.setNumberDensities(blockToAdd.getNumberDensities()) # no flag set here since its block level, and its a block, not component... self.convertedBlock.add(driverRing) innerDiam = driverOuterDiam def _addDriverFuelRings(self): """ Add driver fuel blocks as the outer-most surrounding ring. Notes ----- This is intended to be used to drive non-fuel compositions, DU, etc. """ if self._driverFuelBlock is None: return if not self._driverFuelBlock.isFuel(): raise ValueError("Driver block {} must be fuel".format(self._driverFuelBlock)) if self._numExternalRings < 0: raise ValueError( "Number of fuel rings is set to {}, but must be a positive integer.".format(self._numExternalRings) ) blockName = self._driverFuelBlock.getType() + " driver" fuel = self._driverFuelBlock.getChildrenWithFlags(Flags.FUEL)[0] # used for mat properties and temperature self._addBlockRings( self._driverFuelBlock, blockName, self._numExternalRings, self._numInternalRings + 1, mainComponent=fuel, ) def plotConvertedBlock(self, fName=None): """A pass-through to preserve the API. Render an image of the converted block.""" return plotConvertedBlock(self._sourceBlock, self.convertedBlock, fName) class HexComponentsToCylConverter(BlockAvgToCylConverter): """ Converts a hexagon full of pins into a circle full of concentric circles. Notes ----- This is intended to capture heterogeneous effects while generating cross sections in MCC3. The resulting 1D cylindrical block will not be used in subsequent core calculations. Repeated pins/coolant rings will be built, followed by the non-pins like duct/intercoolant pinComponentsRing1 | coolant | pinComponentsRing2 | coolant | ... | nonpins ... 
    The ``ductHeterogeneous`` option allows the user to treat everything inside the duct
    as a single homogenized composition. This could significantly reduce the memory and
    runtime required for the lattice physics solver, and also provide an alternative
    approximation for the spatial self-shielding effect on microscopic cross sections.

    This converter expects the ``sourceBlock`` and ``driverFuelBlock`` to defined and
    for the ``sourceBlock`` to have a spatial grid defined.

    Additionally, both the ``sourceBlock`` and ``driverFuelBlock`` must be instances of
    HexBlocks.
    """

    # Any component carrying one of these flags is treated as part of a pin
    # (and therefore replicated ring-by-ring); everything else is a non-pin.
    PIN_COMPONENT_FLAGS = (
        Flags.FUEL,
        Flags.ANNULAR | Flags.VOID,
        Flags.GAP,
        Flags.BOND,
        Flags.LINER,
        Flags.CLAD,
        Flags.WIRE,
        Flags.CONTROL,
        Flags.REFLECTOR,
        Flags.SHIELD,
        Flags.SLUG,
        Flags.PIN,
        Flags.POISON,
    )

    def __init__(
        self,
        sourceBlock,
        driverFuelBlock=None,
        numExternalRings=None,
        mergeIntoClad=None,
        mergeIntoFuel=None,
        ductHeterogeneous=False,
    ):
        BlockAvgToCylConverter.__init__(
            self,
            sourceBlock,
            driverFuelBlock=driverFuelBlock,
            numExternalRings=numExternalRings,
        )
        if not isinstance(sourceBlock, blocks.HexBlock):
            raise TypeError(
                "Block {} is not hexagonal and cannot be converted to an equivalent cylinder".format(sourceBlock)
            )

        if sourceBlock.spatialGrid is None:
            raise ValueError(
                f"{sourceBlock} has no spatial grid attribute, therefore "
                f"the block conversion with {self.__class__.__name__} cannot proceed."
            )

        if driverFuelBlock is not None:
            if not isinstance(driverFuelBlock, blocks.HexBlock):
                raise TypeError(
                    "Block {} is not hexagonal and cannot be converted to an equivalent cylinder".format(
                        driverFuelBlock
                    )
                )
        self.pinPitch = sourceBlock.getPinPitch()
        self.mergeIntoClad = mergeIntoClad or []
        self.mergeIntoFuel = mergeIntoFuel or []
        self.ductHeterogeneous = ductHeterogeneous
        # The coolant component is handled specially: wire is dissolved into it and it is
        # re-emitted as inter-ring annuli between pin rings.
        self.interRingComponent = sourceBlock.getComponent(Flags.COOLANT, exact=True)
        if not self.interRingComponent:
            raise ValueError(f"Block {sourceBlock} cannot be converted to rings without a `coolant` component")
        self._remainingCoolantFillArea = self.interRingComponent.getArea()

    def convert(self):
        """Perform the conversion.

        .. impl:: Convert hex blocks to cylindrical blocks.
            :id: I_ARMI_BLOCKCONV_HEX_TO_CYL
            :implements: R_ARMI_BLOCKCONV_HEX_TO_CYL

            This method converts a ``HexBlock`` to a cylindrical ``Block``. Obviously, this
            is not a physically meaningful transition; it is a helpful approximation tool
            for analysts. This is a subclass of ``BlockAvgToCylConverter`` which is a
            subclass of ``BlockConverter``. This converter expects the ``sourceBlock`` and
            ``driverFuelBlock`` to defined and for the ``sourceBlock`` to have a spatial
            grid defined. Additionally, both the ``sourceBlock`` and ``driverFuelBlock``
            must be instances of ``HexBlocks``.
        """
        runLog.info("Converting representative block {} to its equivalent cylindrical model".format(self._sourceBlock))
        self._dissolveComponents()
        numRings = self._sourceBlock.spatialGrid.getMinimumRings(self._sourceBlock.getNumPins())
        pinComponents, nonPins = self._classifyComponents()
        if self.ductHeterogeneous:
            # everything inside the duct becomes one homogenized circle
            self._buildInsideDuct()
        else:
            self._buildFirstRing(pinComponents)
            for ring in range(2, numRings + 1):
                self._buildNthRing(pinComponents, ring)
        # non-pin components (duct, intercoolant, ...) are appended in both branches
        self._buildNonPinRings(nonPins)
        self._addDriverFuelRings()

        # sanity check: a ring with od < id would have negative area
        for comp in self.convertedBlock.getComponents():
            assert comp.getArea() >= 0.0, (
                f"{comp} in {self.convertedBlock} has a negative area of {comp.getArea()}. "
                "Negative areas are not supported."
            )

        return self.convertedBlock

    def _dissolveComponents(self):
        # always merge wire into coolant.
        self.dissolveComponentIntoComponent("wire", "coolant")
        # update coolant area to fill in wire area that was left behind.
        self.interRingComponent = self._sourceBlock.getComponent(Flags.COOLANT, exact=True)
        self._remainingCoolantFillArea = self.interRingComponent.getArea()
        # do user-input merges into cladding
        for componentName in self.mergeIntoClad:
            self.dissolveComponentIntoComponent(componentName, "clad")
        # do user-input merges into fuel
        for componentName in self.mergeIntoFuel:
            self.dissolveComponentIntoComponent(componentName, "fuel")

    def _classifyComponents(self):
        """
        Figure out which components are in each pin ring and which are not.

        Notes
        -----
        Assumption is that anything with multiplicity equal to numPins is a pin (clad, wire,
        bond, etc.) Non-pins will include things like coolant, duct, interduct, etc.
        This skips components that have a negative area, which can exist if a user implements
        a linked component containing void or non-solid materials (e.g., gaps)
        """
        pinComponents, nonPins = [], []

        for c in self._sourceBlock:
            # If the area of the component is negative than this component should be skipped
            # altogether. If not skipped, the conversion process still works, but this would
            # result in one or more rings having an outer diameter than is smaller than the
            # inner diameter.
            if c.getArea() < 0.0:
                continue
            if any(c.hasFlags(f) for f in self.PIN_COMPONENT_FLAGS):
                pinComponents.append(c)
            elif c.name != "coolant":  # coolant is addressed in self.interRingComponent
                nonPins.append(c)

        return list(sorted(pinComponents)), nonPins

    def _buildInsideDuct(self):
        """Build a homogenized material of the components inside the duct."""
        blockType = self._sourceBlock.getType()
        blockName = f"Homogenized {blockType}"
        newBlock, mixtureFlags = stripComponents(self._sourceBlock, Flags.DUCT)
        outerDiam = getOuterDiamFromIDAndArea(0.0, newBlock.getArea())
        circle = components.Circle(
            blockName,
            "_Mixture",
            newBlock.getAverageTempInC(),
            newBlock.getAverageTempInC(),
            id=0.0,
            od=outerDiam,
            mult=1,
        )
        circle.setNumberDensities(newBlock.getNumberDensities())
        circle.p.flags = mixtureFlags
        self.convertedBlock.add(circle)

    def _buildFirstRing(self, pinComponents):
        """Add first ring of components to new block."""
        for oldC in pinComponents:
            c = copy.deepcopy(oldC)
            c.setName(c.name + " 1")
            c.setDimension("mult", 1.0)  # first ring will have dims of 1 pin
            c.p.flags = oldC.p.flags
            self.convertedBlock.add(c)

    def _buildNthRing(self, pinComponents, ringNum):
        """
        Build nth ring of pins and add them to block.

        Each n-th ring is preceded with a circle of coolant between the previous ring and
        this one. Since we blended the wire and coolant, the area of this area is supposed
        to include the wire area.

        This will be a fuel (or control) meat surrounded on both sides by clad, bond,
        liner, etc. layers.
        """
        numPinsInRing = self._sourceBlock.spatialGrid.getPositionsInRing(ringNum)
        pinRadii = [c.getDimension("od") / 2.0 for c in pinComponents]
        bigRingRadii = radiiFromRingOfRods(self.pinPitch * (ringNum - 1), numPinsInRing, pinRadii)
        nameSuffix = " {}".format(ringNum)

        coolantOD = bigRingRadii[0] * 2.0
        self._addCoolantRing(coolantOD, nameSuffix)
        innerDiameter = coolantOD

        # mirror the pin layer stack inward then outward about the pin centerline
        compsToTransformIntoRings = pinComponents[::-1] + pinComponents[1:]
        for i, (bcs, bigRingRadius) in enumerate(zip(compsToTransformIntoRings, bigRingRadii[1:])):
            outerDiameter = bigRingRadius * 2.0
            name = bcs.name + nameSuffix + str(i)
            bigComponent = self._addSolidMaterialRing(bcs, innerDiameter, outerDiameter, name)
            self.convertedBlock.add(bigComponent)
            innerDiameter = outerDiameter

    def _buildNonPinRings(self, nonPins):
        """
        Throw each non-pin component on as an individual outer circle.

        Also needs to add final coolant layer between the outer pins and the non-pins.
        Will crash if there are things that are not circles or hexes.
        """
        if not self.ductHeterogeneous:
            # fill in the last ring of coolant using the rest
            coolInnerDiam = self.convertedBlock[-1].getDimension("od")
            coolantOD = getOuterDiamFromIDAndArea(coolInnerDiam, self._remainingCoolantFillArea)
            self._addCoolantRing(coolantOD, " outer")
            innerDiameter = coolantOD
        else:
            innerDiameter = self.convertedBlock[-1].getDimension("od")

        for i, hexagon in enumerate(sorted(nonPins)):
            outerDiam = getOuterDiamFromIDAndArea(innerDiameter, hexagon.getArea())  # conserve area of hex.
            name = hexagon.name + " {}".format(i)
            circularHexagon = self._addSolidMaterialRing(hexagon, innerDiameter, outerDiam, name)
            self.convertedBlock.add(circularHexagon)
            innerDiameter = outerDiam

    @staticmethod
    def _addSolidMaterialRing(baseComponent, innerDiameter, outDiameter, name):
        # Create an annulus carrying the base component's material, temperatures,
        # number densities, and flags.
        circle = components.Circle(
            name,
            baseComponent.material,
            baseComponent.temperatureInC,
            baseComponent.temperatureInC,
            id=innerDiameter,
            od=outDiameter,
            mult=1,
        )
        circle.setNumberDensities(baseComponent.getNumberDensities())
        circle.p.flags = baseComponent.p.flags
        return circle

    def _addCoolantRing(self, coolantOD, nameSuffix):
        # Coolant annulus between the previously-added ring and coolantOD; its area is
        # deducted from the remaining coolant budget so the final "outer" ring conserves area.
        innerDiam = self.convertedBlock[-1].getDimension("od")
        irc = self.interRingComponent
        interRing = components.Circle(
            irc.name + nameSuffix,
            irc.material,
            irc.temperatureInC,
            irc.temperatureInC,
            od=coolantOD,
            id=innerDiam,
            mult=1,
        )
        interRing.setNumberDensities(irc.getNumberDensities())
        interRing.p.flags = irc.p.flags
        self.convertedBlock.add(interRing)
        self._remainingCoolantFillArea -= interRing.getArea()


def getOuterDiamFromIDAndArea(ID, area):
    """Return the outer diameter of an annulus with given inner diameter (ID) and area."""
    return math.sqrt(ID**2.0 + 4.0 * area / math.pi)  # from A = pi *(d ** 2)/4.0


def radiiFromHexPitches(pitches):
    """Return list of radii for equivalent-area circles from list of from hexagon flat-to-flat pitches."""
    return [x * math.sqrt(SIN60 / math.pi) for x in pitches]


def radiiFromHexSides(sideLengths):
    """Return list of radii for equivalent-area circles from list of from hexagon side lengths."""
    return [x * math.sqrt(3.0 * SIN60 / math.pi) for x in sideLengths]


def radiiFromRingOfRods(distToRodCenter, numRods, rodRadii, layout="hexagon"):
    r"""
    Return list of radii from ring of rods.

    Parameters
    ----------
    distToRodCenter : float
        Distance from center of assembly to center of pin.
    numRods : int
        Number of rods in the ring of rods
    rodRadii : list
        Radii from smallest to largest. Outer radius becomes inner radius of next component.

    Returns
    -------
    radiiList : list
        List of radii from inner to outer. Components are added on both sides.

    Notes
    -----
    There are two assumptions when making circles:

    #. The rings are concentric about the ``radToRodCenter``.
    #. The ring area of the fuel rods are distributed to the inside and outside rings with
       the same thickness. ``thicknessOnEachSide`` (:math:`t`) is calculated as follows:

       .. math::
           :nowrap:

           \begin{aligned}
           r_1 &\equiv \text{inner rad that thickness is added to on inside} \\
           r_2 &\equiv \text{outer rad that thickness is added to on outside} \\
           \texttt{radToRodCenter} &= \frac{r_1 + r_2}{2} \text{(due to being concentric)} \\
           \text{Total Area} &= \text{Area of annulus 1} + \text{Area of annulus 2} \\
           \text{Area of annulus 1} &= \pi r_1^2 - \pi (r_1 - t)^2 \\
           \text{Area of annulus 2} &= \pi (r_2 + t)^2 - \pi r_2^2 \\
           t &= \frac{\text{Total Area}}{4\pi\times\texttt{radToRodCenter}}
           \end{aligned}
    """
    if layout == "polygon":
        alpha = 2.0 * math.pi / float(numRods)
        radToRodCenter = distToRodCenter * math.sqrt(math.sin(alpha) / alpha)
    elif layout == "hexagon":
        if numRods % 6:
            raise ValueError("numRods ({}) must be a multiple of 6.".format(numRods))
        sideLengthOfBigHex = distToRodCenter  # for equilateral triangle
        radToRodCenter = radiiFromHexSides([sideLengthOfBigHex])[0]
    else:
        raise ValueError("Invalid layout {}".format(layout))

    radiiFromRodCenter = []
    rLast = bigRLast = 0.0
    for rodRadius in rodRadii:
        area = math.pi * (rodRadius**2.0 - rLast**2.0) * float(numRods)
        thicknessOnEachSide = area / (4 * math.pi * radToRodCenter)
        distFromCenterComp = bigRLast + thicknessOnEachSide
        radiiFromRodCenter.append(radToRodCenter + distFromCenterComp)
        radiiFromRodCenter.append(radToRodCenter - distFromCenterComp)  # build thickness on both sides
        rLast, bigRLast = rodRadius, distFromCenterComp

    return sorted(radiiFromRodCenter)


def stripComponents(block, compFlags):
    """
    Remove all components from a block outside of the first component that matches compFlags.

    Parameters
    ----------
    block : armi.reactor.blocks.Block
        Source block from which to produce a modified copy
    compFlags : armi.reactor.flags.Flags
        Component flags to indicate which components to strip from the block. All
        components outside of the first one that matches compFlags are stripped.

    Returns
    -------
    newBlock : armi.reactor.blocks.Block
        Copy of source block with specified components stripped off.
    mixtureFlags : TypeSpec
        Combination of all component flags within newBlock.

    Notes
    -----
    This is often used for creating a partially heterogeneous representation of a block.
    For example, one might want to treat everything inside of a specific component (such as
    the duct) as homogenized, while keeping a heterogeneous representation of the remaining
    components.
    """
    newBlock = copy.deepcopy(block)
    avgBlockTemp = block.getAverageTempInC()
    mixtureFlags = newBlock.getComponent(Flags.COOLANT).p.flags
    # NOTE(review): if no component matches compFlags this `next()` raises StopIteration
    # (and `compIP` below would be unbound) -- callers appear to always pass Flags.DUCT;
    # verify before reusing with other flags.
    innerMostComp = next(i for i, c in enumerate(sorted(newBlock.getComponents())) if c.hasFlags(compFlags))
    outsideComp = True
    indexedComponents = [(i, c) for i, c in enumerate(sorted(newBlock.getComponents()))]
    # walk from the outermost component inward, removing until the match is passed
    for i, c in sorted(indexedComponents, reverse=True):
        if outsideComp:
            if i == innerMostComp:
                compIP = c.getDimension("ip")
                outsideComp = False
            newBlock.remove(c, recomputeAreaFractions=False)
        else:
            mixtureFlags = mixtureFlags | c.p.flags

    # add pitch defining component with no area
    newBlock.add(
        components.Hexagon(
            "pitchComponent",
            "Void",
            avgBlockTemp,
            avgBlockTemp,
            ip=compIP,
            op=compIP,
        )
    )
    return newBlock, mixtureFlags


================================================
FILE: armi/reactor/converters/geometryConverters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Change a reactor from one geometry to another.

Examples may include going from Hex to R-Z or from Third-core to full core.

This module contains **converters** (which create new reactor objects with different
geometry), and **changers** (which modify a given reactor in place).

Generally, mass is conserved in geometry conversions.

Warning
-------
These are mostly designed for hex geometry.
"""

import collections
import copy
import math
import operator
from typing import TYPE_CHECKING, Union

import numpy as np

from armi import materials, runLog
from armi.physics.neutronics.fissionProductModel import lumpedFissionProduct
from armi.reactor import (
    assemblies,
    blocks,
    components,
    geometry,
    grids,
    parameters,
    reactors,
)
from armi.reactor.converters import blockConverters, meshConverters
from armi.reactor.flags import Flags
from armi.reactor.parameters import (
    NEVER,
    SINCE_LAST_GEOMETRY_TRANSFORMATION,
    Category,
    ParamLocation,
)
from armi.utils import hexagon, plotting, units

if TYPE_CHECKING:
    from armi.reactor import Core
    from armi.reactor.assemblies import Assembly
    from armi.reactor.blocks import Block

BLOCK_AXIAL_MESH_SPACING = 20  # Block axial mesh spacing set for nodal diffusion calculation (cm)
STR_SPACE = " "


class GeometryChanger:
    """Geometry changer class that updates the geometry (number of assems or blocks per assem) of a given reactor."""

    def __init__(self, cs=None):
        # assemblies created by this changer; tracked so reset() can clean them up
        self._newAssembliesAdded = []
        self._sourceReactor = None
        self._cs = cs

    def __repr__(self):
        return "<{}>".format(self.__class__.__name__)

    def convert(self, r):
        """
        Run the conversion.

        Parameters
        ----------
        r : Reactor object
            The reactor to convert.
        """
        raise NotImplementedError

    def reset(self):
        """
        When called, the reactor core model is reset to its original configuration, or
        parameter data from the converted reactor core model is transformed back to the
        origin reactor state, thus cleaning up the converted reactor core model.

        Notes
        -----
        This should be implemented on each of the geometry converters.
        """
        runLog.info(f"Resetting the state of the converted reactor core model in {self}")
        self._newAssembliesAdded = []


class GeometryConverter(GeometryChanger):
    """
    Base class for GeometryConverter which makes a new converted reactor.

    Examples
    --------
    To convert a hex case to a R-Z case, do this:

    >>> from armi.reactorConverters import HexToRZConverter
    >>> HexToRZConverter(useMostCommonXsId=False, expandReactor=False)
    >>> geomConv.convert(r)
    >>> newR = geomConv.convReactor
    >>> dif3d = dif3dInterface.Dif3dInterface("dif3dRZ", newR)
    >>> dif3d.o = self.o
    >>> dif3d.writeInput("rzGeom_actual.inp")
    """

    def __init__(self, cs=None):
        GeometryChanger.__init__(self, cs=cs)
        self.convReactor = None  # the new reactor produced by convert()


class FuelAssemNumModifier(GeometryChanger):
    """
    Modify the number of fuel assemblies in the reactor.

    Notes
    -----
    - The number of fuel assemblies should ALWAYS be set for the third-core regardless of
      the reactor geometry model.
    - The modification is only valid for third-core and full-core geometry models.
    """

    def __init__(self, cs):
        GeometryChanger.__init__(self, cs)
        self.numFuelAssems = None  # in full core.
        self.fuelType = "feed fuel"  # assembly type used for newly-created fuel
        self.overwriteList = [Flags.REFLECTOR, Flags.SHIELD]  # assem types fuel may replace
        self.ringsToAdd = []  # assembly types to add as outer rings after trimming
        self.modifyReactorPower = False

    def convert(self, r):
        """
        Set the number of fuel assemblies in the reactor.

        Notes
        -----
        - While adding fuel, does not modify existing fuel/control positions, but does
          overwrite assemblies in the overwriteList (e.g. reflectors, shields)
        - Once specified amount of fuel is in place, removes all assemblies past the outer
          fuel boundary
        - To re-add reflector/shield assemblies around the new core, use the ringsToAdd
          attribute
        """
        self._sourceReactor = r
        if self._sourceReactor.core.powerMultiplier != 1 and self._sourceReactor.core.powerMultiplier != 3:
            raise ValueError(
                "Invalid reactor geometry {} in {}. Reactor must be full or third core to modify the "
                "number of assemblies.".format(r.core.powerMultiplier, self)
            )

        # Set the number of fueled and non-fueled positions within the core (Full core or third-core)
        coreGeom = "full-core" if self._sourceReactor.core.powerMultiplier == 1 else "third-core"
        runLog.info("Modifying {} geometry to have {} fuel assemblies.".format(coreGeom, self.numFuelAssems))
        nonFuelAssems = (
            sum(not assem.hasFlags(Flags.FUEL) for assem in self._sourceReactor.core)
            * self._sourceReactor.core.powerMultiplier
        )
        self.numFuelAssems *= self._sourceReactor.core.powerMultiplier
        totalCoreAssems = nonFuelAssems + self.numFuelAssems

        # Adjust the total power of the reactor by keeping power per assembly constant
        if self.modifyReactorPower:
            self._sourceReactor.core.p.power *= float(self.numFuelAssems) / (
                len(self._sourceReactor.core.getAssemblies(Flags.FUEL)) * self._sourceReactor.core.powerMultiplier
            )

        # Get the sorted assembly locations in the core (Full core or third core)
        assemOrderList = r.core.spatialGrid.generateSortedHexLocationList(totalCoreAssems)
        if self._sourceReactor.core.powerMultiplier == 3:
            assemOrderList = [loc for loc in assemOrderList if r.core.spatialGrid.isInFirstThird(loc)]

        # Add fuel assemblies to the core
        addingFuelIsComplete = False
        numFuelAssemsAdded = 0
        for loc in assemOrderList:
            assem = self._sourceReactor.core.childrenByLocator.get(loc)
            if numFuelAssemsAdded < self.numFuelAssems:
                if assem is None:
                    raise KeyError("Cannot find expected fuel assem in {}".format(loc))
                # Add new fuel assembly to the core
                if assem.hasFlags(self.overwriteList):
                    fuelAssem = self._sourceReactor.core.createAssemblyOfType(assemType=self.fuelType, cs=self._cs)
                    # Remove existing assembly in the core location before adding new assembly
                    # NOTE(review): this inner hasFlags check is redundant -- the same
                    # condition was just tested on the enclosing branch.
                    if assem.hasFlags(self.overwriteList):
                        self._sourceReactor.core.removeAssembly(assem, discharge=False)
                    self._sourceReactor.core.add(fuelAssem, loc)
                    numFuelAssemsAdded += self._sourceReactor.core.powerMultiplier
                else:
                    # Keep the existing assembly in the core
                    if assem.hasFlags(Flags.FUEL):
                        # Count the assembly in the location if it is fuel
                        numFuelAssemsAdded += self._sourceReactor.core.powerMultiplier
                    else:
                        pass
            # Flag the completion of adding fuel assemblies (see note 1)
            elif numFuelAssemsAdded == self.numFuelAssems:
                addingFuelIsComplete = True

            # Remove the remaining assemblies in the the assembly list once all the fuel has been added
            if addingFuelIsComplete and assem is not None:
                self._sourceReactor.core.removeAssembly(assem, discharge=False)

        # Remove all other assemblies from the core
        for assem in self._sourceReactor.core.getAssemblies():
            if assem.spatialLocator not in assemOrderList:  # check if assembly is on the list
                r.core.removeAssembly(assem, discharge=False)  # get rid of the old assembly

        # Add the remaining rings of assemblies to the core
        for assemType in self.ringsToAdd:
            self.addRing(assemType=assemType)

        # Complete the reactor loading
        self._sourceReactor.core.processLoading(self._cs)
        self._sourceReactor.core.numRings = self._sourceReactor.core.getNumRings()
        self._sourceReactor.core.regenAssemblyLists()
        self._sourceReactor.core.circularRingList = None  # need to reset this (possibly other stuff too)

    def addRing(self, assemType="big shield"):
        """
        Add a ring of fuel assemblies around the outside of an existing core.

        Works by first finding the assembly furthest from the center, then filling in all
        assemblies that are within one pitch further with the specified assembly type

        Parameters
        ----------
        assemType : str
            Assembly type that will be added to the outside of the core
        """
        r = self._sourceReactor

        # first look through the core and finds the one farthest from the center
        maxDist = 0.0
        for assem in r.core.getAssemblies():
            dist = np.linalg.norm(assem.spatialLocator.getGlobalCoordinates())  # get distance from origin
            dist = round(dist, 6)  # round dist to 6 places to avoid differences due to floating point math
            maxDist = max(maxDist, dist)

        # add one hex pitch to the maximum distance to get the bounding distance for the new ring
        hexPitch = r.core.spatialGrid.pitch
        newRingDist = maxDist + hexPitch
        maxArea = math.pi * (newRingDist + hexPitch) ** 2.0  # area that is guaranteed to bound the new core
        # NOTE(review): this is a float; presumably generateSortedHexLocationList accepts
        # a non-integer upper bound -- confirm against the grid API.
        maxAssemsFull = maxArea / hexagon.area(hexPitch)  # divide by hex area to get number of hexes in a full core

        # generate ordered list of assembly locations
        assemOrderList = r.core.spatialGrid.generateSortedHexLocationList(maxAssemsFull)
        if r.core.powerMultiplier == 3:
            assemOrderList = [loc for loc in assemOrderList if self._sourceReactor.core.spatialGrid.isInFirstThird(loc)]
        elif r.core.powerMultiplier != 1:
            raise RuntimeError("{} only works on full or 1/3 symmetry.".format(self))

        # add new assemblies to core within one ring
        for locator in assemOrderList:
            assem = r.core.childrenByLocator.get(locator)
            # check on assemblies, moving radially outward
            dist = np.linalg.norm(locator.getGlobalCoordinates())
            dist = round(dist, 6)
            if dist <= newRingDist:  # check distance
                if assem is None:
                    # no assembly in that position, add assembly
                    newAssem = r.core.createAssemblyOfType(assemType=assemType, cs=self._cs)
                    r.core.add(newAssem, locator)  # put new assembly in reactor!
                else:
                    # all other types of assemblies (fuel, control, etc) leave as is
                    pass
            else:
                pass

    def reset(self):
        """Resetting the reactor core model state after adding fuel assemblies is not currently supported."""
        raise NotImplementedError


class HexToRZThetaConverter(GeometryConverter):
    """
    Convert hex-based cases to an equivalent R-Z-Theta full core geometry.

    Parameters
    ----------
    converterSettings: dict
        Settings that specify how the mesh of the RZTheta reactor should be generated.
        Controls the number of theta regions, how to group regions, etc.

        uniformThetaMesh
            bool flag that determines if the theta mesh should be uniform or not
        thetaBins
            Number of theta bins to create
        radialConversionType
            * ``Ring Compositions`` -- to convert by composition
        axialConversionType
            * ``Axial Coordinates`` -- use
              :py:class:`armi.reactor.converters.meshConverters._RZThetaReactorMeshConverterByAxialCoordinates`
            * ``Axial Bins`` -- use
              :py:class:`armi.reactor.converters.meshConverters._RZThetaReactorMeshConverterByAxialBins`
        homogenizeAxiallyByFlags
            Boolean that if set to True will ignore the `axialConversionType` input and
            determine a mesh based on the material boundaries for each RZ region axially.

    expandReactor : bool
        If True, the HEX-Z reactor will be expanded to full core geometry prior to
        converting to the RZT reactor. Either way the converted RZTheta core will be full
        core.

    strictHomogenization : bool
        If True, the converter will restrict HEX-Z blocks with dissimilar XS types from
        being homogenized into an RZT block.
""" _GEOMETRY_TYPE = geometry.GeomType.RZT _SYMMETRY_TYPE = geometry.SymmetryType( domainType=geometry.DomainType.FULL_CORE, boundaryType=geometry.BoundaryType.NO_SYMMETRY, ) _BLOCK_MIXTURE_TYPE_MAP = { "mixture control": ["control"], "mixture fuel": ["fuel"], "mixture radial shield": ["radial shield"], "mixture axial shield": ["shield"], "mixture structure": [ "grid plate", "reflector", "inlet nozzle", "handling socket", ], "mixture duct": ["duct"], "mixture plenum": ["plenum"], } _BLOCK_MIXTURE_TYPE_EXCLUSIONS = ["control", "fuel", "radial shield"] _MESH_BY_RING_COMP = "Ring Compositions" _MESH_BY_AXIAL_COORDS = "Axial Coordinates" _MESH_BY_AXIAL_BINS = "Axial Bins" def __init__(self, cs, converterSettings, expandReactor=False, strictHomogenization=False): GeometryConverter.__init__(self, cs) self.converterSettings = converterSettings self.meshConverter = None self._expandSourceReactor = expandReactor self._strictHomogenization = strictHomogenization self._radialMeshConversionType = None self._axialMeshConversionType = None self._previousRadialZoneAssemTypes = None self._currentRadialZoneType = None self._assemsInRadialZone = collections.defaultdict(list) self._newBlockNum = 0 self.blockMap = collections.defaultdict(list) self.blockVolFracs = collections.defaultdict(dict) self._homogenizeAxiallyByFlags = False def _generateConvertedReactorMesh(self): """Convert the source reactor using the converterSettings.""" runLog.info("Generating mesh coordinates for the reactor conversion") self._radialMeshConversionType = self.converterSettings["radialConversionType"] self._axialMeshConversionType = self.converterSettings["axialConversionType"] self._homogenizeAxiallyByFlags = self.converterSettings.get("homogenizeAxiallyByFlags", False) converter = None if self._radialMeshConversionType == self._MESH_BY_RING_COMP: if self._homogenizeAxiallyByFlags: converter = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialFlags( self.converterSettings ) elif 
self._axialMeshConversionType == self._MESH_BY_AXIAL_COORDS: converter = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialCoordinates( self.converterSettings ) elif self._axialMeshConversionType == self._MESH_BY_AXIAL_BINS: converter = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialBins(self.converterSettings) if converter is None: raise ValueError( "No mesh converter exists for `radialConversionType` and `axialConversionType` settings " "of {} and {}".format(self._radialMeshConversionType, self._axialMeshConversionType) ) self.meshConverter = converter return self.meshConverter.generateMesh(self._sourceReactor) def convert(self, r): """ Run the conversion to 3 dimensional R-Z-Theta. .. impl:: Tool to convert a hex core to an RZTheta core. :id: I_ARMI_CONV_3DHEX_TO_2DRZ :implements: R_ARMI_CONV_3DHEX_TO_2DRZ This method converts the hex-z mesh to r-theta-z mesh. It first verifies that the geometry type of the input reactor ``r`` has the expected HEX geometry. Upon conversion, it determines the inner and outer diameters of each ring in the r-theta-z mesh and calls ``_createRadialThetaZone`` to create a radial theta zone with a homogenized mixture. The axial dimension of the r-theta-z mesh is then updated by ``updateAxialMesh``. Attributes ---------- r : Reactor object The reactor to convert. Notes ----- The linked requirement technically points to a child class of this class, HexToRZConverter. However, this is the method where the conversion actually happens and thus the implementation tag is noted here. As a part of the RZT mesh converters it is possible to obtain a radial mesh that has repeated ring numbers. 
For instance, if there are fuel assemblies and control assemblies within the same radial hex ring then it's possible that a radial mesh output from the byRingComposition mesh converter method will look something like: self.meshConverter.radialMesh = [2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 9, 10] In this instance the hex ring will remain the same for multiple iterations over radial direction when homogenizing the hex core into the RZT geometry. In this case, the converter needs to keep track of the compositions within this ring so that it can separate this repeated ring into multiple RZT rings. Each of the RZT rings should have a single composition (fuel1, fuel2, control, etc.) See Also -------- armi.reactor.converters.meshConverters """ runLog.info(f"Converting {r.core} using {self}") if r.core.geomType != geometry.GeomType.HEX: raise ValueError("Cannot use {} to convert {} reactor".format(self, str(r.core.geomType).upper())) self._sourceReactor = r self._setupSourceReactorForConversion() rztSpatialGrid = self._generateConvertedReactorMesh() runLog.info(rztSpatialGrid) self._setupConvertedReactor(rztSpatialGrid) self.convReactor.core.lib = self._sourceReactor.core.lib innerDiameter = 0.0 lowerRing = 1 radialMeshCm = [0.0] for radialIndex, upperRing in enumerate(self.meshConverter.radialMesh): lowerTheta = 0.0 # see notes self._previousRadialZoneAssemTypes = self._previousRadialZoneAssemTypes if lowerRing == upperRing else [] if lowerRing == upperRing: lowerRing = upperRing - 1 self._setNextAssemblyTypeInRadialZone(lowerRing, upperRing) self._setAssemsInRadialZone(radialIndex, lowerRing, upperRing) for thetaIndex, upperTheta in enumerate(self.meshConverter.thetaMesh): zoneAssems = self._getAssemsInRadialThetaZone(lowerRing, upperRing, lowerTheta, upperTheta) self._writeRadialThetaZoneHeader( radialIndex, lowerRing, upperRing, thetaIndex, lowerTheta, upperTheta, ) outerDiameter = self._createRadialThetaZone( innerDiameter, thetaIndex, radialIndex, lowerTheta, 
upperTheta, zoneAssems, ) lowerTheta = upperTheta innerDiameter = outerDiameter lowerRing = upperRing radialMeshCm.append(outerDiameter / 2.0) # replace temporary index-based ring indices with actual radial distances self.convReactor.core.spatialGrid._bounds = ( self.convReactor.core.spatialGrid._bounds[0], np.array(radialMeshCm), self.convReactor.core.spatialGrid._bounds[2], ) self.convReactor.core.updateAxialMesh() self.convReactor.core.summarizeReactorStats() # Track the new assemblies that were created when the converted reactor was # initialized so that the global assembly counter can be reset later. self._newAssembliesAdded = self.convReactor.core.getAssemblies() def _setNextAssemblyTypeInRadialZone(self, lowerRing, upperRing): """ Change the currently-active assembly type to the next active one based on a specific order. If this is called with the same (lowerRing, upperRing) twice, the next assembly type will be applied. This is useful, for instance, in putting control zones amidst fuel. """ sortedAssemTypes = self._getSortedAssemblyTypesInRadialZone(lowerRing, upperRing) for aType in sortedAssemTypes: if aType not in self._previousRadialZoneAssemTypes: self._previousRadialZoneAssemTypes.append(aType) self._currentRadialZoneType = aType break def _getSortedAssemblyTypesInRadialZone(self, lowerRing, upperRing): """ Retrieve assembly types in a radial zone between (lowerRing, upperRing), sort from highest occurrence to lowest. Notes ----- - Assembly types are based on the assembly names and not the direct composition within each assembly. For instance, if two assemblies are named `fuel 1` and `fuel 2` but they have the same composition at some reactor state then they will still be separated as two different assembly types. 
""" aCountByTypes = collections.Counter() for a in self._getAssembliesInCurrentRadialZone(lowerRing, upperRing): aCountByTypes[a.getType().lower()] += 1 # sort on tuple (int, str) to force consistent ordering of result when counts are tied sortedAssemTypes = sorted(aCountByTypes, key=lambda aType: (aCountByTypes[aType], aType), reverse=True) return sortedAssemTypes def _getAssembliesInCurrentRadialZone(self, lowerRing, upperRing): ringAssems = [] for ring in range(lowerRing, upperRing): ringAssems.extend(self._sourceReactor.core.getAssembliesInSquareOrHexRing(ring)) return ringAssems def _setupSourceReactorForConversion(self): self._sourceReactor.core.summarizeReactorStats() if self._expandSourceReactor: self._expandSourceReactorGeometry() def _setupConvertedReactor(self, grid): self.convReactor = reactors.Reactor("ConvertedReactor", self._sourceReactor.blueprints) core = reactors.Core("Core") if self._cs is not None: core.setOptionsFromCs(self._cs) self.convReactor.add(core) grid.symmetry = self._SYMMETRY_TYPE grid.geomType = self._GEOMETRY_TYPE grid.armiObject = self.convReactor.core self.convReactor.core.spatialGrid = grid self.convReactor.core.p.power = self._sourceReactor.core.p.power self.convReactor.core.name += " - {0}".format(self._GEOMETRY_TYPE) def _setAssemsInRadialZone(self, radialIndex, lowerRing, upperRing): """ Retrieve a list of assemblies in the reactor between (lowerRing, upperRing). Notes ----- self._assemsInRadialZone keeps track of the unique assemblies that are in each radial ring. 
This ensures that no assemblies are duplicated when using self._getAssemsInRadialThetaZone() """ lowerTheta = 0.0 for _thetaIndex, upperTheta in enumerate(self.meshConverter.thetaMesh): assemsInRadialThetaZone = self._getAssemsInRadialThetaZone(lowerRing, upperRing, lowerTheta, upperTheta) newAssemsInRadialZone = set(assemsInRadialThetaZone) oldAssemsInRadialZone = set(self._assemsInRadialZone[radialIndex]) self._assemsInRadialZone[radialIndex].extend( sorted(list(newAssemsInRadialZone.union(oldAssemsInRadialZone))) ) lowerTheta = upperTheta if not self._assemsInRadialZone[radialIndex]: raise ValueError( "No assemblies in radial zone {} between rings {} and {}".format( self._assemsInRadialZone[radialIndex], lowerRing, upperRing ) ) @staticmethod def _getAssembliesInSector(core, theta1, theta2): """ Locate assemblies in an angular sector. Parameters ---------- theta1, theta2 : float The angles (in degrees) in which assemblies shall be drawn. Returns ------- aList : list List of assemblies in this sector """ aList = [] converter = EdgeAssemblyChanger() converter.addEdgeAssemblies(core) for a in core: x, y, _ = a.spatialLocator.getLocalCoordinates() theta = math.atan2(y, x) if theta < 0.0: theta = math.tau + theta theta = math.degrees(theta) phi = theta if theta1 <= phi <= theta2 or abs(theta1 - phi) < 0.001 or abs(theta2 - phi) < 0.001: aList.append(a) converter.removeEdgeAssemblies(core.r.core) if not aList: raise ValueError("There are no assemblies in {} between angles of {} and {}".format(core, theta1, theta2)) return aList def _getAssemsInRadialThetaZone(self, lowerRing, upperRing, lowerTheta, upperTheta): """Retrieve list of assemblies in the reactor between (lowerRing, upperRing) and (lowerTheta, upperTheta). 
""" thetaAssems = self._getAssembliesInSector( self._sourceReactor.core, math.degrees(lowerTheta), math.degrees(upperTheta) ) ringAssems = self._getAssembliesInCurrentRadialZone(lowerRing, upperRing) if self._radialMeshConversionType == self._MESH_BY_RING_COMP: ringAssems = self._selectAssemsBasedOnType(ringAssems) ringAssems = set(ringAssems) thetaAssems = set(thetaAssems) assemsInRadialThetaZone = sorted(ringAssems.intersection(thetaAssems)) if not assemsInRadialThetaZone: raise ValueError( "No assemblies in radial-theta zone between rings {} and {} and theta bounds of {} and {}".format( lowerRing, upperRing, lowerTheta, upperTheta ) ) return assemsInRadialThetaZone def _selectAssemsBasedOnType(self, assems): """Retrieve a list of assemblies of a given type within a subset of an assembly list. Parameters ---------- assems: list Subset of assemblies in the reactor. """ selectedAssems = [] for a in assems: if a.getType().lower() == self._currentRadialZoneType: selectedAssems.append(a) return selectedAssems def _createRadialThetaZone(self, innerDiameter, thetaIndex, radialIndex, lowerTheta, upperTheta, zoneAssems): """ Add a new stack of circles to the TRZ reactor by homogenizing assems. 
Parameters ---------- innerDiameter : float The current innerDiameter of the radial-theta zone thetaIndex : float The theta index of the radial-theta zone radialIndex : float The radial index of the radial-theta zone lowerTheta : float The lower theta bound for the radial-theta zone upperTheta : float The upper theta bound for the radial-theta zone Returns ------- outerDiameter : float The outer diameter (in cm) of the radial zone just added """ newAssembly = assemblies.ThRZAssembly("mixtureAssem") newAssembly.spatialLocator = self.convReactor.core.spatialGrid[thetaIndex, radialIndex, 0] newAssembly.p.AziMesh = 2 newAssembly.spatialGrid = grids.AxialGrid.fromNCells(len(self.meshConverter.axialMesh), armiObject=newAssembly) lfp = lumpedFissionProduct.lumpedFissionProductFactory(self._cs) lowerAxialZ = 0.0 for axialIndex, upperAxialZ in enumerate(self.meshConverter.axialMesh): # Setup the new block data newBlockName = "B{:04d}{}".format(int(newAssembly.getNum()), chr(axialIndex + 65)) newBlock = blocks.ThRZBlock(newBlockName) # Compute the homogenized block data ( newBlockAtoms, newBlockType, newBlockTemp, newBlockVol, ) = self.createHomogenizedRZTBlock(newBlock, lowerAxialZ, upperAxialZ, zoneAssems) # Compute radial zone outer diameter axialSegmentHeight = upperAxialZ - lowerAxialZ radialZoneVolume = self._calcRadialRingVolume(lowerAxialZ, upperAxialZ, radialIndex) radialRingArea = radialZoneVolume / axialSegmentHeight * self._sourceReactor.core.powerMultiplier outerDiameter = blockConverters.getOuterDiamFromIDAndArea(innerDiameter, radialRingArea) # Set new homogenized block parameters material = materials.material.Material() material.name = "mixture" material.refDens = 1.0 # generic density. Will cancel out. 
dims = { "inner_radius": innerDiameter / 2.0, "radius_differential": (outerDiameter - innerDiameter) / 2.0, "inner_axial": lowerAxialZ, "height": axialSegmentHeight, "inner_theta": lowerTheta, "azimuthal_differential": (upperTheta - lowerTheta), "mult": 1.0, "Tinput": newBlockTemp, "Thot": newBlockTemp, } for nuc in self._sourceReactor.blueprints.allNuclidesInProblem: material.setMassFrac(nuc, 0.0) newComponent = components.DifferentialRadialSegment("mixture", material, **dims) newBlock.p.axMesh = int(axialSegmentHeight / BLOCK_AXIAL_MESH_SPACING) + 1 newBlock.p.zbottom = lowerAxialZ newBlock.p.ztop = upperAxialZ newBlock.setLumpedFissionProducts(lfp) # Assign the new block cross section type and burn up group newBlock.setType(newBlockType) newXsType, newEnvGroup = self._createBlendedXSID(newBlock) newBlock.p.xsType = newXsType newBlock.p.envGroup = newEnvGroup # Update the block dimensions and set the block densities newComponent.updateDims() # ugh. newBlock.p.height = axialSegmentHeight newBlock.clearCache() newBlock.add(newComponent) for nuc, atoms in newBlockAtoms.items(): newBlock.setNumberDensity(nuc, atoms / newBlockVol) self._writeRadialThetaZoneInfo(axialIndex + 1, axialSegmentHeight, newBlock) self._checkVolumeConservation(newBlock) newAssembly.add(newBlock) lowerAxialZ = upperAxialZ newAssembly.calculateZCoords() # builds mesh self.convReactor.core.add(newAssembly) return outerDiameter def _calcRadialRingVolume(self, lowerZ, upperZ, radialIndex): """Compute the total volume of a list of assemblies within a ring between two axial heights.""" ringVolume = 0.0 for assem in self._assemsInRadialZone[radialIndex]: for b, heightHere in assem.getBlocksBetweenElevations(lowerZ, upperZ): ringVolume += b.getVolume() * heightHere / b.getHeight() if not ringVolume: raise ValueError("Ring volume of ring {} is 0.0".format(radialIndex + 1)) return ringVolume def _checkVolumeConservation(self, newBlock): """Write the volume fractions of each hex block within the 
homogenized RZT block.""" newBlockVolumeFraction = 0.0 for hexBlock in self.blockMap[newBlock]: newBlockVolumeFraction += self.blockVolFracs[newBlock][hexBlock] if abs(newBlockVolumeFraction - 1.0) > 0.00001: raise ValueError( "The volume fraction of block {} is {} and not 1.0. An error occurred when " "converting the reactor geometry.".format(newBlock, newBlockVolumeFraction) ) def createHomogenizedRZTBlock(self, homBlock, lowerAxialZ, upperAxialZ, radialThetaZoneAssems): """ Create the homogenized RZT block by computing the average atoms in the zone. Additional calculations are performed to determine the homogenized block type, the block average temperature, and the volume fraction of each hex block that is in the new homogenized block. """ homBlockXsTypes = set() numHexBlockByType = collections.Counter() homBlockAtoms = collections.defaultdict(int) homBlockVolume = 0.0 homBlockTemperature = 0.0 for assem in radialThetaZoneAssems: blocksHere = assem.getBlocksBetweenElevations(lowerAxialZ, upperAxialZ) for b, heightHere in blocksHere: homBlockXsTypes.add(b.p.xsType) numHexBlockByType[b.getType().lower()] += 1 blockVolumeHere = b.getVolume() * heightHere / b.getHeight() if blockVolumeHere == 0.0: raise ValueError("Geometry conversion failed. Block {} has zero volume".format(b)) homBlockVolume += blockVolumeHere homBlockTemperature += b.getAverageTempInC() * blockVolumeHere numDensities = b.getNumberDensities() for nucName, nDen in numDensities.items(): homBlockAtoms[nucName] += nDen * blockVolumeHere self.blockMap[homBlock].append(b) self.blockVolFracs[homBlock][b] = blockVolumeHere # Notify if blocks with different xs types are being homogenized. May be undesired behavior. if len(homBlockXsTypes) > 1: msg = ( "Blocks {} with dissimilar XS IDs are being homogenized in {} between axial heights" " {} cm and {} cm. 
".format( self.blockMap[homBlock], self.convReactor.core, lowerAxialZ, upperAxialZ, ) ) if self._strictHomogenization: raise ValueError(msg + "Modify mesh converter settings before proceeding.") else: runLog.extra(msg) homBlockType = self._getHomogenizedBlockType(numHexBlockByType) homBlockTemperature = homBlockTemperature / homBlockVolume for b in self.blockMap[homBlock]: self.blockVolFracs[homBlock][b] = self.blockVolFracs[homBlock][b] / homBlockVolume return homBlockAtoms, homBlockType, homBlockTemperature, homBlockVolume def _getHomogenizedBlockType(self, numHexBlockByType): """ Generate the homogenized block mixture type based on the frequency of hex block types that were merged together. Notes ----- self._BLOCK_MIXTURE_TYPE_EXCLUSIONS: The normal function of this method is to assign the mixture name based on the number of occurrences of the block type. This list stops that and assigns the mixture based on the first occurrence. (i.e. if the mixture has a set of blocks but it comes across one with the name of 'control' the process will stop and the new mixture type will be set to 'mixture control'. 
def _getHomogenizedBlockType(self, numHexBlockByType):
    """
    Generate the homogenized block mixture type based on the frequency of hex block types that
    were merged together.

    Notes
    -----
    self._BLOCK_MIXTURE_TYPE_EXCLUSIONS:
        The normal function of this method is to assign the mixture name based on the number of
        occurrences of the block type. This list stops that and assigns the mixture based on the
        first occurrence. (i.e. if the mixture has a set of blocks but it comes across one with
        the name of 'control' the process will stop and the new mixture type will be set to
        'mixture control'.)

    self._BLOCK_MIXTURE_TYPE_MAP:
        A dictionary that provides the name of blocks that are condensed together
    """
    # Find any block type from the exclusions list present in the merged block types; exclusions
    # take precedence in their declared order, regardless of occurrence counts.
    excludedBlockTypesInBlock = {
        x for x in self._BLOCK_MIXTURE_TYPE_EXCLUSIONS for y in numHexBlockByType if x in y
    }
    if excludedBlockTypesInBlock:
        for blockType in self._BLOCK_MIXTURE_TYPE_EXCLUSIONS:
            if blockType in excludedBlockTypesInBlock:
                return "mixture " + blockType

    # Assign block type by the most common hex block type, tie-broken deterministically by
    # (highest count, then alphabetical name). The previous implementation applied sorted() to
    # most_common(1) -- a single-element list -- so the intended tie break was a no-op.
    mostCommonHexBlockType = min(numHexBlockByType.items(), key=lambda item: (-item[1], item[0]))[0]
    for mixtureType in sorted(self._BLOCK_MIXTURE_TYPE_MAP):
        validBlockTypesInMixture = self._BLOCK_MIXTURE_TYPE_MAP[mixtureType]
        for validBlockType in validBlockTypesInMixture:
            if validBlockType in mostCommonHexBlockType:
                return mixtureType

    # nothing matched the map; fall back to a generic structure mixture
    assignedMixtureBlockType = "mixture structure"
    runLog.debug(
        f"The mixture type for this homogenized block {mostCommonHexBlockType} "
        f"was not determined and is defaulting to {assignedMixtureBlockType}"
    )
    return assignedMixtureBlockType

def _createBlendedXSID(self, newBlock):
    """
    Generate the blended XS id using the most common XS id in the hexIdList.

    The micro suffix of each merged hex block is split character-wise into its XS type and
    environment group; the most common of each is returned.
    """
    ids = [hexBlock.getMicroSuffix() for hexBlock in self.blockMap[newBlock]]
    xsTypeList, envGroupList = zip(*ids)
    xsType, _count = collections.Counter(xsTypeList).most_common(1)[0]
    envGroup, _count = collections.Counter(envGroupList).most_common(1)[0]
    return xsType, envGroup

def _writeRadialThetaZoneHeader(self, radIdx, lowerRing, upperRing, thIdx, lowerTheta, upperTheta):
    """Log a header describing the radial-theta zone about to be created."""
    radialAssemType = "({})".format(self._currentRadialZoneType) if self._currentRadialZoneType is not None else ""
    runLog.info("Creating: Radial Zone {}, Theta Zone {} {}".format(radIdx + 1, thIdx + 1, radialAssemType))
    runLog.extra(
        "{} Hex Rings: [{}, {}), Theta Revolutions: [{:.2f}, {:.2f})".format(
            9 * STR_SPACE,
            lowerRing,
            upperRing,
            lowerTheta * units.RAD_TO_REV,
            upperTheta * units.RAD_TO_REV,
        )
    )
    runLog.debug(
        "{} Axial Zone - Axial Height (cm) Block Number Block Type XS ID : "
        "Original Hex Block XS ID(s)".format(9 * STR_SPACE)
    )
    runLog.debug(
        "{} ---------- - ----------------- ------------ ---------------------- ----- : "
        "---------------------------".format(9 * STR_SPACE)
    )

def _writeRadialThetaZoneInfo(self, axIdx, axialSegmentHeight, blockObj):
    """Create a summary of the mapping between the converted reactor block ids to the hex reactor block ids."""
    self._newBlockNum += 1
    hexBlockXsIds = []
    for hexBlock in self.blockMap[blockObj]:
        hexBlockXsIds.append(hexBlock.getMicroSuffix())
    runLog.debug(
        "{} {:<10} - {:<17.3f} {:<12} {:<22} {:<5} : {}".format(
            9 * STR_SPACE,
            axIdx,
            axialSegmentHeight,
            self._newBlockNum,
            blockObj.getType(),
            blockObj.getMicroSuffix(),
            hexBlockXsIds,
        )
    )

def _expandSourceReactorGeometry(self):
    """Expansion of the reactor geometry to build the R-Z-Theta core model."""
    runLog.info("Expanding source reactor core to a full core model")
    reactorExpander = ThirdCoreHexToFullCoreChanger(self._cs)
    reactorExpander.convert(self._sourceReactor)
    self._sourceReactor.core.summarizeReactorStats()
""" return plotting.plotConvertedRZTReactor(self.convReactor, fNameBase) def reset(self): """Clear out attribute data, including holding the state of the converted reactor core model.""" self.meshConverter = None self._radialMeshConversionType = None self._axialMeshConversionType = None self._previousRadialZoneAssemTypes = None self._currentRadialZoneType = None self._assemsInRadialZone = collections.defaultdict(list) self._newBlockNum = 0 self.blockMap = collections.defaultdict(list) self.blockVolFracs = collections.defaultdict(dict) self.convReactor = None super().reset() class HexToRZConverter(HexToRZThetaConverter): """ Create a new reactor with R-Z coordinates from the Hexagonal-Z reactor. This is a subclass of the HexToRZThetaConverter. See the HexToRZThetaConverter for explanation and setup of the converterSettings. """ _GEOMETRY_TYPE = geometry.GeomType.RZ class ThirdCoreHexToFullCoreChanger(GeometryChanger): """ Change third-core models to full core in place. Does not generate a new reactor object. Examples -------- >>> converter = ThirdCoreHexToFullCoreChanger() >>> converter.convert(myReactor) """ EXPECTED_INPUT_SYMMETRY = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC) def __init__(self, cs=None): GeometryChanger.__init__(self, cs) self.listOfAssemblyVolIntegratedParamsToScale = [] self.listOfBlockVolIntegratedParamsToScale = [] self.grid = None @staticmethod def _scaleVolIntegratedParams(obj, paramList, direction): if direction == "up": op = operator.mul elif direction == "down": op = operator.truediv for param in paramList: if obj.p[param] is None: continue if type(obj.p[param]) is list: # some params like volume-integrated mg flux are lists obj.p[param] = [op(val, 3) for val in obj.p[param]] else: obj.p[param] = op(obj.p[param], 3) def convert(self, r: reactors.Reactor): """ Run the conversion. .. impl:: Convert a one-third-core geometry to a full-core geometry. 
def convert(self, r: reactors.Reactor):
    """
    Run the conversion.

    .. impl:: Convert a one-third-core geometry to a full-core geometry.
        :id: I_ARMI_THIRD_TO_FULL_CORE0
        :implements: R_ARMI_THIRD_TO_FULL_CORE

        This method first checks if the input reactor is already full core. If full-core
        symmetry is detected, the input reactor is returned. If not, it then verifies that
        the input reactor has the expected one-third core symmetry and HEX geometry.

        Upon conversion, it loops over the assembly vector of the source one-third core
        model, copies and rotates each source assembly to create new assemblies, and adds
        them on the full-core grid. For the center assembly, it modifies its parameters.

        Finally, it sets the domain type to full core.

    Parameters
    ----------
    sourceReactor : Reactor object
        The reactor to convert.
    """
    self._sourceReactor = r
    if self._sourceReactor.core.isFullCore:
        # already full core from geometry file. No need to copy symmetry over.
        runLog.important("Detected that full core reactor already exists. Cannot expand.")
        return self._sourceReactor
    elif not (
        self._sourceReactor.core.symmetry == self.EXPECTED_INPUT_SYMMETRY
        and self._sourceReactor.core.geomType == geometry.GeomType.HEX
    ):
        raise ValueError(
            "ThirdCoreHexToFullCoreChanger requires the input to have third core hex geometry. "
            "Geometry received was {} {} {}".format(
                self._sourceReactor.core.symmetry.domain,
                self._sourceReactor.core.symmetry.boundary,
                self._sourceReactor.core.geomType,
            )
        )
    # edge assemblies would be duplicated by the symmetric copies; strip them first
    edgeChanger = EdgeAssemblyChanger()
    edgeChanger.removeEdgeAssemblies(self._sourceReactor.core)
    runLog.info("Expanding to full core geometry")
    # store a copy of the 1/3 geometry grid, so that we can use it to find symmetric
    # locations, while the core has a full-core grid so that it does not yell at us
    # for adding stuff outside of the first 1/3
    self.grid = copy.deepcopy(self._sourceReactor.core.spatialGrid)
    # Set the core grid's symmetry early, since the core uses it for error checks
    self._sourceReactor.core.symmetry = geometry.SymmetryType(
        geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY
    )
    for a in self._sourceReactor.core.getAssemblies():
        # make extras and add them too. since the input is assumed to be 1/3 core.
        otherLocs = self.grid.getSymmetricEquivalents(a.spatialLocator.indices)
        thisZone = (
            self._sourceReactor.core.zones.findZoneItIsIn(a) if len(self._sourceReactor.core.zones) > 0 else None
        )
        # rotation step between symmetric copies (e.g. 120 degrees for two extra copies)
        angle = 2 * math.pi / (len(otherLocs) + 1)
        count = 1
        for i, j in otherLocs:
            newAssem = copy.deepcopy(a)
            newAssem.makeUnique()
            newAssem.rotate(count * angle)
            count += 1
            self._sourceReactor.core.add(newAssem, self._sourceReactor.core.spatialGrid[i, j, 0])
            if thisZone:
                thisZone.addLoc(newAssem.getLocation())
            self._newAssembliesAdded.append(newAssem)
            # count - 2 indexes into otherLocs for the copy just placed
            self._updateThirdToFullCoreLocHist(newAssem, count - 2)
        if a.getLocation() == "001-001":
            # the center assembly is not copied; its volume-integrated params are scaled instead
            runLog.extra(f"Modifying parameters in central assembly {a} to convert from 1/3 to full core")
            if not self.listOfBlockVolIntegratedParamsToScale:
                # populate the list with all parameters that are VOLUME_INTEGRATED
                (
                    self.listOfBlockVolIntegratedParamsToScale,
                    _,
                ) = _generateListOfParamsToScale(self._sourceReactor.core.getFirstBlock(), paramsToScaleSubset=[])
            if not self.listOfAssemblyVolIntegratedParamsToScale:
                (self.listOfAssemblyVolIntegratedParamsToScale, _) = _generateListOfParamsToScale(
                    self._sourceReactor.core.getFirstAssembly(), paramsToScaleSubset=[]
                )
            self._scaleVolIntegratedParams(a, self.listOfAssemblyVolIntegratedParamsToScale, "up")
            for b in a:
                self._scaleVolIntegratedParams(b, self.listOfBlockVolIntegratedParamsToScale, "up")
    # set domain after expanding, because it isn't actually full core until it's
    # full core; setting the domain causes the core to clear its caches.
    self._sourceReactor.core.symmetry = geometry.SymmetryType(
        geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY
    )

def restorePreviousGeometry(self, r=None):
    """Undo the changes made by convert by going back to 1/3 core.

    .. impl:: Restore a one-third-core geometry to a full-core geometry.
        :id: I_ARMI_THIRD_TO_FULL_CORE1
        :implements: R_ARMI_THIRD_TO_FULL_CORE

        This method is a reverse process of the method ``convert``. It converts the full-core
        reactor model back to the original one-third core reactor model by removing the added
        assemblies and changing the parameters of the center assembly from full core to one
        third core.
    """
    r = r or self._sourceReactor
    # remove the assemblies that were added when the conversion happened.
    if bool(self._newAssembliesAdded):
        for a in self._newAssembliesAdded:
            r.core.removeAssembly(a, discharge=False)
        r.core.symmetry = geometry.SymmetryType.fromAny(self.EXPECTED_INPUT_SYMMETRY)
        # change the central assembly params back to 1/3
        a = r.core.getAssemblyWithStringLocation("001-001")
        runLog.extra(f"Modifying parameters in central assembly {a} to revert from full to 1/3 core")
        self._scaleVolIntegratedParams(a, self.listOfAssemblyVolIntegratedParamsToScale, "down")
        for b in a:
            self._scaleVolIntegratedParams(b, self.listOfBlockVolIntegratedParamsToScale, "down")
    self.reset()
""" newLocHist = [] for r, p in newAssembly.p.ringPosHist: if r not in assemblies.Assembly.NOT_IN_CORE: # ring/pos may come in as strings and need to be cast as ints. i, j = self.grid.getIndicesFromRingAndPos(int(r), int(p)) otherLocs = self.grid.getSymmetricEquivalents([i, j, 0]) otherLoc = otherLocs[otherLocIndex] r, p = self.grid.indicesToRingPos(*otherLoc) newLocHist.append((r, p)) newAssembly.p.ringPosHist = newLocHist class EdgeAssemblyChanger(GeometryChanger): """ Add/remove "edge assemblies" for Finite difference or MCNP cases. Examples -------- edgeChanger = EdgeAssemblyChanger() edgeChanger.removeEdgeAssemblies(reactor.core) """ def addEdgeAssemblies(self, core): """ Add the assemblies on the 120 degree symmetric line to 1/3 symmetric cases. Needs to be called before a finite difference (DIF3D, DIFNT) or MCNP calculation. .. impl:: Add assemblies along the 120-degree line to a reactor. :id: I_ARMI_ADD_EDGE_ASSEMS0 :implements: R_ARMI_ADD_EDGE_ASSEMS Edge assemblies on the 120-degree symmetric line of a one-third core reactor model are added because they are needed for DIF3D-finite difference or MCNP models. This is done by copying the assemblies from the lower boundary and placing them in their reflective positions on the upper boundary of the symmetry line. Parameters ---------- reactor : Reactor Reactor to modify See Also -------- removeEdgeAssemblies : removes the edge assemblies """ if core.isFullCore: return if self._newAssembliesAdded: runLog.important("Skipping addition of edge assemblies because they are already there") return assembliesOnLowerBoundary = core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES) assembliesOnUpperBoundary = [] for a in assembliesOnLowerBoundary: a.clearCache() # symmetry factors of these assemblies will change since they are now half assems. 
a2 = copy.deepcopy(a) a2.makeUnique() assembliesOnUpperBoundary.append(a2) if not assembliesOnUpperBoundary: runLog.extra("No edge assemblies to add") # Move the assemblies into their reflective position on symmetry line 3 for a in assembliesOnUpperBoundary: # loc will now be either an empty set [], or two different locations # in our case, we only want the first of the two locations locs = core.spatialGrid.getSymmetricEquivalents(a.spatialLocator) if locs: i, j = locs[0] spatialLocator = core.spatialGrid[i, j, 0] if core.childrenByLocator.get(spatialLocator): runLog.warning("Edge assembly already exists in {0}. Not adding.".format(locs[0])) continue # add the copied assembly to the reactor list runLog.debug("Adding edge assembly {0} to {1} to the reactor".format(a, spatialLocator)) core.add(a, spatialLocator) self._newAssembliesAdded.append(a) parameters.ALL_DEFINITIONS.resetAssignmentFlag(SINCE_LAST_GEOMETRY_TRANSFORMATION) def removeEdgeAssemblies(self, core): """ Remove the edge assemblies in preparation for the nodal diffusion approximation. This makes use of the assemblies knowledge of if it is in a region that it needs to be removed. .. impl:: Remove assemblies along the 120-degree line from a reactor. :id: I_ARMI_ADD_EDGE_ASSEMS1 :implements: R_ARMI_ADD_EDGE_ASSEMS This method is the reverse process of the method ``addEdgeAssemblies``. It is needed for the DIF3D-Nodal calculation. It removes the assemblies on the 120-degree symmetry line. See Also -------- addEdgeAssemblies : adds the edge assemblies """ if core.isFullCore: return assembliesOnLowerBoundary = core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES) # Don't use newAssembliesAdded b/c this may be BOL cleaning of a fresh case that has edge # assems. 
edgeAssemblies = core.getAssembliesOnSymmetryLine(grids.BOUNDARY_120_DEGREES) for a in edgeAssemblies: runLog.debug( "Removing edge assembly {} from {} from the reactor without discharging".format( a, a.spatialLocator.getRingPos() ) ) core.removeAssembly(a, discharge=False) if edgeAssemblies: for a in assembliesOnLowerBoundary: a.clearCache() # clear cached area since symmetry factor will change # Reset the SINCE_LAST_GEOMETRY_TRANSFORMATION flag, so that subsequent geometry # conversions don't erroneously think they've been changed inside this geometry # conversion pDefs = parameters.ALL_DEFINITIONS.unchanged_since(NEVER) pDefs.setAssignmentFlag(SINCE_LAST_GEOMETRY_TRANSFORMATION) else: runLog.debug("No edge assemblies to remove.") self.reset() @staticmethod def scaleParamsRelatedToSymmetry(core: reactors.Core, paramsToScaleSubset=None): """ Scale volume-dependent params like power to account for cut-off edges. These params are at half their full hex value. Scale them right before deleting their symmetric identicals. The two operations (scaling them and then removing others) is identical to combining two half-assemblies into a full one. 
@staticmethod
def scaleParamsRelatedToSymmetry(core: reactors.Core, paramsToScaleSubset=None):
    """
    Scale volume-dependent params like power to account for cut-off edges.

    These params are at half their full hex value. Scale them right before deleting their
    symmetric identicals. The two operations (scaling them and then removing others) is
    identical to combining two half-assemblies into a full one.

    See Also
    --------
    armi.reactor.converters.geometryConverter.EdgeAssemblyChanger.removeEdgeAssemblies
    armi.reactor.blocks.HexBlock.getSymmetryFactor
    """
    runLog.extra("Scaling edge-assembly parameters to account for full hexes instead of two halves")
    completeListOfBlockParamsToScale = _generateListOfParamsToScale(core.getFirstBlock(), paramsToScaleSubset)
    # assemblies on the 0-degree line pair up with their mirrors on the 120-degree line
    symmetricAssems = (
        core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES),
        core.getAssembliesOnSymmetryLine(grids.BOUNDARY_120_DEGREES),
    )
    if not all(symmetricAssems):
        runLog.extra("No edge-assemblies found to scale parameters for.")
    for a, aSymmetric in zip(*symmetricAssems):
        for b, bSymmetric in zip(a, aSymmetric):
            _scaleParamsInBlock(b, bSymmetric, completeListOfBlockParamsToScale)

def _generateListOfParamsToScale(obj: Union["Core", "Assembly", "Block"], paramsToScaleSubset):
    """
    Build the (volume-integrated, multigroup-flux) parameter-name lists for ``obj``.

    Only parameters assigned since the last geometry transformation are considered; the result
    may optionally be restricted to ``paramsToScaleSubset``.
    """
    fluxParamsToScale = (
        obj.p.paramDefs.inCategory(Category.fluxQuantities).inCategory(Category.multiGroupQuantities).names
    )
    listOfVolumeIntegratedParamsToScale = obj.p.paramDefs.atLocation(ParamLocation.VOLUME_INTEGRATED).since(
        SINCE_LAST_GEOMETRY_TRANSFORMATION
    )
    listOfVolumeIntegratedParamsToScale = listOfVolumeIntegratedParamsToScale.names
    if paramsToScaleSubset:
        listOfVolumeIntegratedParamsToScale = [
            pn for pn in paramsToScaleSubset if pn in listOfVolumeIntegratedParamsToScale
        ]
    return (listOfVolumeIntegratedParamsToScale, fluxParamsToScale)

def _scaleParamsInBlock(b, bSymmetric, completeListOfParamsToScale):
    """Scale volume-integrated params to include their identical symmetric assemblies."""
    listOfVolumeIntegratedParamsToScale, fluxParamsToScale = completeListOfParamsToScale
    # only touch params that have a non-trivial value on this block
    for paramName in [pn for pn in listOfVolumeIntegratedParamsToScale if np.any(b.p[pn])]:
        runLog.debug(
            "Scaling {} in symmetric identical assemblies".format(paramName),
            single=True,
        )
        if paramName in fluxParamsToScale:
            _scaleFluxValues(b, bSymmetric, paramName)  # updated volume weighted fluxes
        else:
            b.p[paramName] = b.p[paramName] + bSymmetric.p[paramName]
def _scaleFluxValues(b, bSymmetric, paramName):
    """
    Fold ``bSymmetric``'s multigroup flux parameter into ``b`` and refresh the derived total.

    Parameters
    ----------
    b : Block
        Block that will hold the combined values.
    bSymmetric : Block
        The symmetric identical block whose values are added into ``b``.
    paramName : str
        Name of the multigroup flux parameter being combined ("mgFlux", "adjMgFlux",
        or "mgFluxGamma" additionally update their scalar totals).
    """
    totalVol = b.getVolume() + bSymmetric.getVolume()
    # Combine the spectra group-by-group across the two symmetric blocks.
    b.p[paramName] = [f + fSymmetric for f, fSymmetric in zip(b.p[paramName], bSymmetric.p[paramName])]
    # presumably the multigroup param is volume-integrated, making this a
    # volume-averaged total over both blocks -- TODO confirm against the param definition.
    newTotalFlux = sum(b.p[paramName]) / totalVol
    if paramName == "mgFlux":
        b.p.flux = newTotalFlux
    elif paramName == "adjMgFlux":
        b.p.fluxAdj = newTotalFlux
    elif paramName == "mgFluxGamma":
        b.p.fluxGamma = newTotalFlux


================================================
FILE: armi/reactor/converters/meshConverters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh specifiers update the mesh structure of a reactor by increasing or decreasing the number of mesh coordinates."""

import collections
import itertools
import math

import numpy as np

from armi import runLog
from armi.reactor import grids
from armi.utils import units


class MeshConverter:
    """
    Base class for the reactor mesh conversions.

    Parameters
    ----------
    converterSettings : dict
        A set of str, value settings used in mesh conversion. Required settings are
        implementation specific.
    """

    def __init__(self, converterSettings: dict):
        self._converterSettings = converterSettings

    def generateMesh(self, r=None):
        # Subclasses build and return the new grid for reactor ``r``.
        raise NotImplementedError

    def writeMeshData(self):
        # Subclasses log a summary of the generated mesh.
        raise NotImplementedError


class RZThetaReactorMeshConverter(MeshConverter):
    """
    Handles mesh conversions for r-z-theta reactor geometries.
Attributes ---------- converterSettings: dict This is a dictionary of settings that are used for the RZThetaReactorMeshConverter. Required converter settings: ``uniformThetaMesh``,``thetaBins`` See Also -------- RZThetaReactorMeshConverterByRingCompositionAxialBins RZThetaReactorMeshConverterByRingCompositionAxialCoordinates """ def __init__(self, converterSettings): MeshConverter.__init__(self, converterSettings) self._useUniformThetaMesh = None self._numThetaMeshBins = None self._axialSegsPerBin = None self._ringsPerBin = None self._numRingsInCore = None self._assemsInCore = None self._coreAxialMeshCoords = None self.radialMesh = None self.axialMesh = None self.thetaMesh = None self.numRingBins = None self.numAxialMeshBins = None self.numThetaMeshBins = None def generateMesh(self, r=None): core = r.core converterSettings = self._converterSettings self._useUniformThetaMesh = converterSettings["uniformThetaMesh"] self._numThetaMeshBins = converterSettings["thetaBins"] self._converterSettings = converterSettings self._numRingsInCore = core.getNumHexRings() self._assemsInCore = core.getAssemblies() self._coreAxialMeshCoords = core.findAllAxialMeshPoints(applySubMesh=False) self.setAxialMesh(core) self._checkAxialMeshList() self.setThetaMesh() self._checkThetaMeshList() self.setRingsToConvert(core) self._checkRingList(core) self.numRingBins = len(self.radialMesh) self.numAxialMeshBins = len(self.axialMesh) self.numThetaMeshBins = len(self.thetaMesh) self.writeMeshData() # Build mesh reactor mesh # thetaMesh doesn't include the zero point so add it back in. # axial mesh is handled on assemblies so make this 2-D. mesh = grids.ThetaRZGrid(bounds=([0.0] + self.thetaMesh, self.radialMesh, (0.0, 0.0))) return mesh def writeMeshData(self): """ Write a summary table of the radial, axial, and theta bins that will be used for geometry conversion. Notes ----- This should be on the ``ThetaRZGrid`` object. 
""" binCombinations = self.numRingBins * self.numAxialMeshBins * self.numThetaMeshBins runLog.info("Total mesh bins (r, z, theta): {0}".format(binCombinations)) runLog.info( " Radial bins: {}\n Axial bins: {}\n Theta bins: {}".format( self.numRingBins, self.numAxialMeshBins, self.numThetaMeshBins ) ) self._writeMeshLogData() def _writeMeshLogData(self): self._logMeshData(self.radialMesh, "Radial ring indices:", "int") self._logMeshData(self.axialMesh, "Axial mesh coordinates:", "float") self._logMeshData(self.thetaMesh, "Theta mesh coordinates:", "float") def _logMeshData(self, listType, listName, listDataType): if listDataType == "float": listType = ["{:<8.3f}".format(floatValue) for floatValue in listType] runLog.extra("{0} {1}".format(listName, listType)) def setRingsToConvert(self, core): raise NotImplementedError def setAxialMesh(self, core): raise NotImplementedError def setThetaMesh(self): """Generate a uniform theta mesh in radians.""" if self._useUniformThetaMesh is None: raise ValueError("useUniformThetaMesh setting was not specified in the converterSettings") if self._numThetaMeshBins is None: raise ValueError("numThetaMeshBins were specified in the converterSettings") if self._useUniformThetaMesh: self._generateUniformThetaMesh() else: self._generateNonUniformThetaMesh() def _generateUniformThetaMesh(self): """Create a uniform theta mesh over 2*pi using the user specified number of theta bins.""" self.thetaMesh = list(np.linspace(0, 2 * math.pi, self._numThetaMeshBins + 1)[1:]) def _generateNonUniformThetaMesh(self): raise NotImplementedError("Non-uniform theta mesh not implemented. 
Use uniform theta mesh.") def _checkRingList(self, core): """Check for any errors in the radial rings.""" minRingNum = 1 self.radialMesh = sorted(self.radialMesh) rings = checkLastValueInList(self.radialMesh, "rings", self._numRingsInCore + 1, adjustLastValue=True) maxAssemsInOuterRing = core.getMaxAssembliesInHexRing(self._numRingsInCore) assemsInOuterRing = len(core.getAssembliesInSquareOrHexRing(self._numRingsInCore)) if (maxAssemsInOuterRing - assemsInOuterRing) > 0 and len(self.thetaMesh) > 1: self._combineLastTwoRadialBins() checkListBounds(rings, "rings", minRingNum, self._numRingsInCore + 1) def _combineLastTwoRadialBins(self): if (self.radialMesh[-1] - self.radialMesh[-2]) == 1: runLog.extra( "Outermost ring of the core {} is not fully filled and will be homogenized with the " "previous ring {}".format(self.radialMesh[-1], self.radialMesh[-2]) ) self.radialMesh.pop(-1) self.radialMesh.pop(-2) self.radialMesh.append(self.radialMesh[-1]) def _checkAxialMeshList(self): """Check for errors in the axial mesh coordinates.""" minAxialCoordInReactor = self._coreAxialMeshCoords[0] maxAxialCoordInReactor = self._coreAxialMeshCoords[-1] self.axialMesh = sorted(set(self.axialMesh)) checkListBounds(self.axialMesh, "axialMesh", minAxialCoordInReactor, maxAxialCoordInReactor) self.axialMesh = checkLastValueInList(self.axialMesh, "axialMesh", maxAxialCoordInReactor, adjustLastValue=True) def _checkThetaMeshList(self): """Check for errors in the theta mesh coordinates.""" self.thetaMesh = sorted(set(self.thetaMesh)) checkListBounds(self.thetaMesh, "thetaMesh", 0.0, 2 * math.pi) self.thetaMesh = checkLastValueInList(self.thetaMesh, "axialMesh", 2 * math.pi) class _RZThetaReactorMeshConverterByAxialCoordinates(RZThetaReactorMeshConverter): """Generate an axial mesh based on user provided axial mesh coordinates.""" def setAxialMesh(self, core): """Set up the reactor's new radial rings based on a user-specified axial coordinate list (axial mesh).""" self.axialMesh = 
self._converterSettings["axialMesh"] class _RZThetaReactorMeshConverterByAxialBins(RZThetaReactorMeshConverter): """ Generate an axial mesh based on user provided axial bins. Notes ----- The new mesh structure is formed by merging multiply "bins" together (i.e. numPerBin = 2 and the original mesh is [1, 2, 3, 4, 5, 6, 7, 8], the new mesh structure will be [2, 4, 6, 8]). """ def setAxialMesh(self, core): """ Set up axial mesh coordinates using user-specified number of axial segments per bins. Notes ----- Example: Original core axial mesh list - [25.0, 50.0, 75.0, 100.0, 175.0] cm axialSegsPerBin = 2 Merged core axial mesh list - [50.0, 100.0, 175.0] cm """ self._axialSegsPerBin = self._converterSettings["axialSegsPerBin"] self._mergeAxialMeshByAxialSegsPerBin() def _mergeAxialMeshByAxialSegsPerBin(self): axialStartNum = 0 totalAxialSegsInCore = len(self._coreAxialMeshCoords) - 1 axialMeshIndices = generateBins(totalAxialSegsInCore, self._axialSegsPerBin, axialStartNum) self.axialMesh = [0] * len(axialMeshIndices) for axialMeshIndex, locIndex in enumerate(axialMeshIndices): self.axialMesh[axialMeshIndex] = self._coreAxialMeshCoords[locIndex] class _RZThetaReactorMeshConverterByAxialFlags(RZThetaReactorMeshConverter): """Generate an axial mesh based on examining the block flags axially across the core.""" def setAxialMesh(self, core): """ Generate an axial mesh based on examining the block flags axially across the core. Notes ----- This approach is useful as it will create the largest material regions possible to minimize number of axially regions within the converted reactor core. This class not only looks at the block flags axially, but will add new mesh points for regions where the blocks of the same flag differ by XSID. 
""" axialMeshCoordinates = collections.defaultdict(set) for a in core.getAssemblies(): blockFlags = set([(b.p.flags, b.getMicroSuffix()) for b in a]) for flags, xsID in blockFlags: meshes = [] for b in a.iterBlocks(flags): # Skip this block if it has a different XS ID than the # current target. if b.getMicroSuffix() != xsID: continue # Neglect any zero mesh points as zero points are implicit if b.p.zbottom != 0.0: meshes.append(round(b.p.zbottom, units.FLOAT_DIMENSION_DECIMALS)) if b.p.ztop != 0.0: meshes.append(round(b.p.ztop, units.FLOAT_DIMENSION_DECIMALS)) axialMeshCoordinates[a].add(min(meshes)) axialMeshCoordinates[a].add(max(meshes)) self.axialMesh = sorted(set(itertools.chain(*axialMeshCoordinates.values()))) class _RZThetaReactorMeshConverterByRingComposition(RZThetaReactorMeshConverter): """Generate a new mesh based on the radial compositions in the core.""" def __init__(self, cs): RZThetaReactorMeshConverter.__init__(self, cs) self._ringCompositions = None def setRingsToConvert(self, core): """Set up the reactor's new radial rings based on the ring compositions (assembly types).""" self.radialMesh, self._ringCompositions = self._getCompositionTypesPerRing(core) def _getCompositionTypesPerRing(self, core): """Set composition of each ring in the reactor by the assembly type.""" ringIndices = [] ringCompositions = [] numRings = [r for r in range(1, self._numRingsInCore + 1)] for _i, ring in enumerate(numRings): # Note that this needs to be in a HEX ring - Circular ring mode # is not supported. 
assemsInRing = core.getAssembliesInSquareOrHexRing(ring) compsInRing = [] for a in assemsInRing: assemType = a.getType().lower() if assemType not in compsInRing: compsInRing.append(assemType) for c in compsInRing: ringIndices.append(ring + 1) ringCompositions.append(c) return ringIndices, ringCompositions def _checkRingList(self, core): """Check for initialization errors in the radial ring list provided by the user.""" minRingNum = 1 self.radialMesh = sorted(self.radialMesh) rings = checkLastValueInList(self.radialMesh, "rings", self._numRingsInCore + 1, adjustLastValue=True) checkListBounds(rings, "rings", minRingNum, self._numRingsInCore + 1) def _writeMeshLogData(self): radialIndices = [i + 1 for i in range(len(self.radialMesh))] self._logMeshData(radialIndices, "Radial ring indices:", "int") self._logMeshData(self._ringCompositions, "Radial ring compositions:", "str") self._logMeshData(self.axialMesh, "Axial mesh coordinates:", "float") self._logMeshData(self.thetaMesh, "Theta mesh coordinates:", "float") class RZThetaReactorMeshConverterByRingCompositionAxialBins( _RZThetaReactorMeshConverterByRingComposition, _RZThetaReactorMeshConverterByAxialBins, ): """ Generate a new mesh based on the radial compositions and axial bins in the core. See Also -------- _RZThetaReactorMeshConverterByRingComposition _RZThetaReactorMeshConverterByAxialBins """ pass class RZThetaReactorMeshConverterByRingCompositionAxialCoordinates( _RZThetaReactorMeshConverterByRingComposition, _RZThetaReactorMeshConverterByAxialCoordinates, ): """ Generate a new mesh based on the radial compositions and axial coordinates in the core. 
See Also -------- _RZThetaReactorMeshConverterByRingComposition _RZThetaReactorMeshConverterByAxialCoordinates """ pass class RZThetaReactorMeshConverterByRingCompositionAxialFlags( _RZThetaReactorMeshConverterByRingComposition, _RZThetaReactorMeshConverterByAxialFlags, ): """ Generate a new mesh based on the radial compositions and axial material (based on block flags) regions in the core. See Also -------- _RZThetaReactorMeshConverterByRingComposition _RZThetaReactorMeshConverterByAxialFlags """ pass def checkLastValueInList(inputList, listName, expectedValue, eps=0.001, adjustLastValue=False): """Check that the last value in the list is equal to the expected value within +/- eps.""" msg = "The last value in {} is {} and should be {}".format(listName, inputList[-1], expectedValue) if not np.isclose(inputList[-1], expectedValue, eps): if adjustLastValue: del inputList[-1] inputList.append(expectedValue) runLog.extra(msg) runLog.extra("Updating {} in {} to {}".format(inputList[-1], listName, expectedValue)) else: raise ValueError(msg) return inputList def checkListBounds(inputList, listName, minVal, maxVal, eps=0.001): """Ensure that each value in a list does not exceed the allowable bounds.""" for value in inputList: minDiff = value - minVal maxDiff = value - maxVal if minDiff < -eps or maxDiff > eps: raise ValueError( "Invalid values {} out of expected bounds {} to {}".format(listName, minVal - eps, maxVal + eps) ) def generateBins(totalNumDataPoints, numPerBin, minNum): """Fill in a list based on the total number of data points and the number of data points per bin.""" listToFill = [] if numPerBin >= totalNumDataPoints: listToFill.append(totalNumDataPoints) else: currentNum = 0 while currentNum < totalNumDataPoints: currentNum += numPerBin if currentNum > totalNumDataPoints: currentNum = totalNumDataPoints if currentNum > minNum: listToFill.append(currentNum) return listToFill ================================================ FILE: 
armi/reactor/converters/parameterSweeps/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Parameter Sweeps package.""" ================================================ FILE: armi/reactor/converters/parameterSweeps/generalParameterSweepConverters.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Module for general core parameter sweeps.""" from armi.physics.neutronics.settings import ( CONF_EPS_EIG, CONF_EPS_FSAVG, CONF_EPS_FSPOINT, ) from armi.reactor.converters.geometryConverters import GeometryConverter class ParameterSweepConverter(GeometryConverter): """Abstract parameter sweep converter object.""" PRIORITY = None def __init__(self, cs, parameter): GeometryConverter.__init__(self, cs) self._parameter = parameter def convert(self, r=None): self._sourceReactor = r class SettingsModifier(ParameterSweepConverter): """Modifies basic setting parameters.""" def __init__(self, cs, settingToModify, parameter): ParameterSweepConverter.__init__(self, cs, parameter) self.modifier = settingToModify def convert(self, r=None): ParameterSweepConverter.convert(self, r) sType = self._cs.getSetting(self.modifier).underlyingType if sType is not type(None): # NOTE: this won't work with "new-style" settings related to the plugin system. # Using the type of the setting._default may be more appropriate if there are issues. self._cs = self._cs.modified(newSettings={self.modifier: sType(self._parameter)}) class NeutronicConvergenceModifier(ParameterSweepConverter): """Adjusts the neutronics convergence parameters.""" def convert(self, r=None): ParameterSweepConverter.convert(self, r) fs = 1.0e-12 + self._parameter * 1.0e-3 newSettings = {} newSettings[CONF_EPS_FSAVG] = fs newSettings[CONF_EPS_FSPOINT] = fs newSettings[CONF_EPS_EIG] = 1.0e-14 + self._parameter * 1.0e-4 self._cs = self._cs.modified(newSettings=newSettings) ================================================ FILE: armi/reactor/converters/parameterSweeps/tests/__init__.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/reactor/converters/parameterSweeps/tests/test_paramSweepConverters.py ================================================ # Copyright 2021 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Module to test parameter sweep converters.""" import os import unittest from armi.physics.neutronics.settings import CONF_EPS_FSPOINT from armi.reactor.converters.parameterSweeps.generalParameterSweepConverters import ( NeutronicConvergenceModifier, ParameterSweepConverter, SettingsModifier, ) from armi.testing import loadTestReactor from armi.tests import TEST_ROOT THIS_DIR = os.path.dirname(__file__) class TestParamSweepConverters(unittest.TestCase): def setUp(self): self.o, self.r = loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml") self.cs = self.o.cs def test_paramSweepConverter(self): """Basic test of the param sweep converter.""" con = ParameterSweepConverter(self.cs, "FakeParam") self.assertEqual(con._parameter, "FakeParam") con.convert(self.r) self.assertEqual(con._sourceReactor, self.r) def test_neutronicConvergenceModifier(self): """Super basic test of the Neutronic Convergence Modifier.""" custom = NeutronicConvergenceModifier(self.cs, 1000) self.assertEqual(custom._parameter, 1000) custom.convert(self.r) self.assertAlmostEqual(custom._cs[CONF_EPS_FSPOINT], 1, delta=1e-3) def test_settingsModifier(self): """Super basic test of the Settings Modifier.""" con = SettingsModifier(self.cs, "comment", "FakeParam") self.assertEqual(con._parameter, "FakeParam") con.convert(self.r) self.assertEqual(con._sourceReactor, self.r) # NOTE: Settings objects are not modified, but we point to new objects self.assertIn("Simple test input", self.cs["comment"]) self.assertEqual(con._cs["comment"], "FakeParam") ================================================ FILE: armi/reactor/converters/pinTypeBlockConverters.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities that perturb specific types of block objects.

This code is relatively design-specific and will only work given certain object designs. At the
moment it only works on Block objects.

Notes
-----
These were once Block method and were moved here as part of an ongoing effort to remove
design-specific assumptions from the reactor model. These operations are shared by code that
modifies objects in place during runtime and also for inputModifiers that change inputs for
parameter sweeping.
"""

import math

from armi import runLog
from armi.reactor.flags import Flags


def adjustSmearDensity(obj, value, bolBlock=None):
    r"""
    Modifies the *cold* smear density of a fuel pin by adding or removing fuel dimension.

    Adjusts fuel dimension while keeping cladding ID constant

    sd = fuel_r**2/clad_ir**2 =(fuel_od/2)**2 / (clad_id/2)**2 = fuel_od**2 / clad_id**2

    new fuel_od = sqrt(sd*clad_id**2)

    useful for optimization cases

    Parameters
    ----------
    value : float
        new smear density as a fraction. This fraction must evaluate between 0.0 and 1.0

    bolBlock : Block, optional
        See completeInitialLoading. Required for ECPT cases

    Raises
    ------
    ValueError
        If ``value`` is not in (0.0, 1.0].
    """
    if value <= 0.0 or value > 1.0:
        raise ValueError("Cannot modify smear density of {0} to {1}. Must be a positive fraction".format(obj, value))
    fuel = obj.getComponent(Flags.FUEL)
    if not fuel:
        # Not a fuel block; warn once and leave the block untouched.
        runLog.warning(
            "Cannot modify smear density of {0} because it is not fuel".format(obj),
            single=True,
            label="adjust smear density",
        )
        return

    clad = obj.getComponent(Flags.CLAD)
    cladID = clad.getDimension("id", cold=True)
    fuelID = fuel.getDimension("id", cold=True)

    if fuelID > 0.0:  # Annular fuel (Adjust fuel ID to get new smear density)
        fuelOD = fuel.getDimension("od", cold=True)
        # NOTE(review): this branch references the fuel OD, not the clad ID, so the
        # resulting smear density equals ``value`` only when the fuel OD fills the
        # clad ID -- confirm this is intended for annular pins.
        newID = fuelOD * math.sqrt(1.0 - value)
        fuel.setDimension("id", newID)
    else:  # Slug fuel (Adjust fuel OD to get new smear density)
        newOD = math.sqrt(value * cladID**2)
        fuel.setDimension("od", newOD)

    # update things like hm at BOC and smear density parameters.
    obj.completeInitialLoading(bolBlock=bolBlock)


def adjustCladThicknessByOD(obj, value):
    """Modifies the cladding thickness by adjusting the cladding outer diameter."""
    clad = _getCladdingComponentToModify(obj, value)
    if clad is None:
        return
    innerDiam = clad.getDimension("id", cold=True)
    # Thickness applies on both sides of the pin, hence the factor of 2.
    clad.setDimension("od", innerDiam + 2.0 * value)


def adjustCladThicknessByID(obj, value):
    """
    Modifies the cladding thickness by adjusting the cladding inner diameter.

    Notes
    -----
    This WILL adjust the fuel smear density
    """
    clad = _getCladdingComponentToModify(obj, value)
    if clad is None:
        return
    od = clad.getDimension("od", cold=True)
    # Thickness applies on both sides of the pin, hence the factor of 2.
    clad.setDimension("id", od - 2.0 * value)


def _getCladdingComponentToModify(obj, value):
    # Return the clad component, or a falsy value (after warning) when the block
    # has none; callers are responsible for checking the result.
    clad = obj.getComponent(Flags.CLAD)
    if not clad:
        # NOTE(review): no early return here, so a negative ``value`` still raises
        # below even when there is no clad component -- confirm that is intended.
        runLog.warning("{} does not have a cladding component to modify.".format(obj))
    if value < 0.0:
        raise ValueError("Cannot modify {} on {} due to a negative modifier {}".format(clad, obj, value))
    return clad


================================================
FILE: armi/reactor/converters/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

================================================
FILE: armi/reactor/converters/tests/test_assemblyAxialLinkage.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io from typing import TYPE_CHECKING, Callable, Type from unittest import TestCase from armi.reactor.assemblies import HexAssembly, grids from armi.reactor.blocks import HexBlock from armi.reactor.blueprints import Blueprints from armi.reactor.components import UnshapedComponent from armi.reactor.components.basicShapes import Circle, Hexagon, Rectangle from armi.reactor.components.complexShapes import Helix from armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import ( AssemblyAxialLinkage, AxialLink, _checkOverlap, ) from armi.reactor.converters.tests.test_axialExpansionChanger import ( AxialExpansionTestBase, _buildDummySodium, buildTestAssemblyWithFakeMaterial, ) from armi.reactor.flags import Flags from armi.settings.caseSettings import Settings if TYPE_CHECKING: from armi.reactor.components import Component TWOPIN_BLOCK = """ fuel twoPin: &block_fuel_twoPin grid name: twoPin fuel 1: &component_fueltwoPin shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.8 latticeIDs: [1] fuel 2: <<: *component_fueltwoPin latticeIDs: [2] coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 16.0 mult: 1.0 op: 16.6 """ ONEPIN_BLOCK = """ fuel onePin: &block_fuel_onePin grid name: onePin fuel 1: <<: *component_fueltwoPin coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 16.0 mult: 1.0 op: 16.6 """ CORRECT_ASSEMBLY = """ fuel pass: specifier: LA blocks: [*block_fuel_twoPin, *block_fuel_twoPin] height: [25.0, 25.0] axial mesh points: [1, 1] xs types: [A, A] """ WRONG_ASSEMBLY = """ fuel fail: specifier: LA blocks: [*block_fuel_twoPin, *block_fuel_onePin] height: [25.0, 25.0] axial mesh points: [1, 1] xs types: [A, A] """ TWOPIN_GRID = """ twoPin: geom: hex_corners_up symmetry: full lattice map: | - - - 1 1 1 1 - - 1 1 2 1 1 - 1 1 1 1 1 1 1 2 1 2 1 2 1 1 1 1 1 1 1 1 1 2 1 1 1 1 
1 1 """ ONEPIN_GRID = """ onePin: geom: hex_corners_up symmetry: full lattice map: | - - - 1 1 1 1 - - 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 """ def createMultipinBlueprints(blockDef: list[str], assemDef: list[str], gridDef: list[str]) -> str: multiPinDef = "blocks:" for block in blockDef: multiPinDef += block multiPinDef += "\nassemblies:" for assem in assemDef: multiPinDef += assem multiPinDef += "\ngrids:" for grid in gridDef: multiPinDef += grid return multiPinDef class TestAxialLinkHelper(TestCase): """Tests for the AxialLink dataclass / namedtuple like class.""" @classmethod def setUpClass(cls): cls.LOWER_BLOCK = _buildDummySodium(98, 10) def test_override(self): """Test lower attribute can be set after construction.""" empty = AxialLink() self.assertIsNone(empty.lower) empty.lower = self.LOWER_BLOCK self.assertIs(empty.lower, self.LOWER_BLOCK) def test_construct(self): """Test lower attributes can be set at construction.""" link = AxialLink(self.LOWER_BLOCK) self.assertIs(link.lower, self.LOWER_BLOCK) class TestAreAxiallyLinked(AxialExpansionTestBase): """Provide test coverage for the different cases in assemblyAxialLinkage.areAxiallyLinked.""" def test_mismatchComponentType(self): """Case 4; component type mismatch.""" compDims = ("test", "FakeMat", 25.0, 25.0) # name, material, Tinput, Thot comp1 = Circle(*compDims, od=1.0, id=0.0) comp2 = Hexagon(*compDims, op=1.0, ip=0.0) self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(comp1, comp2)) def test_unshapedComponents(self): """Case 1; unshaped components.""" compDims = {"Tinput": 25.0, "Thot": 25.0} comp1 = UnshapedComponent("unshaped_1", "FakeMat", **compDims) comp2 = UnshapedComponent("unshaped_2", "FakeMat", **compDims) self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(comp1, comp2)) def test_componentMult(self): """Case 3; multiplicity based linking.""" compDims = ("test", "FakeMat", 25.0, 25.0) comp1 = Circle(*compDims, od=1.0, id=0.0) comp2 = Circle(*compDims, 
od=1.0, id=0.0) # mult are same, comp1 and comp2 are linked self.assertTrue(AssemblyAxialLinkage.areAxiallyLinked(comp1, comp2)) # mult is different, now they are not linked comp2.p.mult = 2 self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(comp1, comp2)) def test_multiIndexLocation(self): """Case 2; block-grid based linking.""" cs = Settings() multiPinBPs = createMultipinBlueprints([TWOPIN_BLOCK], [CORRECT_ASSEMBLY], [TWOPIN_GRID]) with io.StringIO(multiPinBPs) as stream: bps = Blueprints.load(stream) bps._prepConstruction(cs) lowerB: HexBlock = bps.assemblies["fuel pass"][0] upperB: HexBlock = bps.assemblies["fuel pass"][1] lowerFuel1, lowerFuel2 = lowerB.getComponents(Flags.FUEL) upperFuel1, _upperFuel2 = upperB.getComponents(Flags.FUEL) # same grid locs, are linked self.assertTrue(AssemblyAxialLinkage.areAxiallyLinked(lowerFuel1, upperFuel1)) # different grid locs, are not linked self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(lowerFuel2, upperFuel1)) def test_multiIndexLocation_Fail(self): """Case 2; block-grid based linking.""" cs = Settings() multiPinBPs = createMultipinBlueprints( [TWOPIN_BLOCK, ONEPIN_BLOCK], [WRONG_ASSEMBLY], [TWOPIN_GRID, ONEPIN_GRID] ) with io.StringIO(multiPinBPs) as stream: bps = Blueprints.load(stream) bps._prepConstruction(cs) lowerB: HexBlock = bps.assemblies["fuel fail"][0] upperB: HexBlock = bps.assemblies["fuel fail"][1] lowerFuel1, lowerFuel2 = lowerB.getComponents(Flags.FUEL) upperFuel1 = upperB.getComponent(Flags.FUEL) # different/not exact match grid locs, are not linked self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(lowerFuel1, upperFuel1)) # different/not exact match grid locs, are not linked self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(lowerFuel2, upperFuel1)) class TestCheckOverlap(AxialExpansionTestBase): """Test axial linkage between components via the AssemblyAxialLinkage._checkOverlap.""" def setUp(self): """Contains common dimensions for all component class types.""" super().setUp() 
self.common = ("test", "FakeMat", 25.0, 25.0) # name, material, Tinput, Thot def runTest( self, componentsToTest: dict[Type["Component"], dict[str, float]], assertion: Callable, ): """Runs various linkage tests. Parameters ---------- componentsToTest dictionary keys indicate the component type for ``typeA`` and ``typeB`` checks. the values indicate the neccessary geometry specifications of the ``typeA`` and ``typeB`` components. assertion unittest.TestCase assertion Notes ----- - components "typeA" and "typeB" are assumed to be candidates for axial linking - two assertions: 1) comparing "typeB" component to "typeA"; 2) comparing "typeA" component to "typeB" - the different assertions are particularly useful for comparing two annuli """ for method, dims in componentsToTest.items(): typeA = method(*self.common, **dims[0]) typeB = method(*self.common, **dims[1]) msg = f"{self._testMethodName} failed for component type {str(method)}!" assertion(_checkOverlap(typeA, typeB), msg=msg) assertion(_checkOverlap(typeB, typeA), msg=msg) def test_overlappingSolidPins(self): componentTypesToTest = { Circle: [{"od": 0.5, "id": 0.0}, {"od": 1.0, "id": 0.0}], Hexagon: [{"op": 0.5, "ip": 0.0}, {"op": 1.0, "ip": 0.0}], Rectangle: [ { "lengthOuter": 0.5, "lengthInner": 0.0, "widthOuter": 0.5, "widthInner": 0.0, }, { "lengthOuter": 1.0, "lengthInner": 0.0, "widthOuter": 1.0, "widthInner": 0.0, }, ], Helix: [ {"od": 0.5, "axialPitch": 1.0, "helixDiameter": 1.0}, {"od": 1.0, "axialPitch": 1.0, "helixDiameter": 1.0}, ], } self.runTest(componentTypesToTest, self.assertTrue) def test_solidPinNotOverlappingAnnulus(self): componentTypesToTest = { Circle: [{"od": 0.5, "id": 0.0}, {"od": 1.0, "id": 0.6}], } self.runTest(componentTypesToTest, self.assertFalse) def test_solidPinOverlappingWithAnnulus(self): componentTypesToTest = { Circle: [{"od": 0.7, "id": 0.0}, {"od": 1.0, "id": 0.6}], } self.runTest(componentTypesToTest, self.assertTrue) def test_annularPinNotOverlappingWithAnnulus(self): 
        componentTypesToTest = {
            Circle: [{"od": 0.6, "id": 0.3}, {"od": 1.0, "id": 0.6}],
        }
        self.runTest(componentTypesToTest, self.assertFalse)

    def test_annularPinOverlappingWithAnnuls(self):
        componentTypesToTest = {
            Circle: [{"od": 0.7, "id": 0.3}, {"od": 1.0, "id": 0.6}],
        }
        self.runTest(componentTypesToTest, self.assertTrue)

    def test_thinPinOverlapThickAnnulus(self):
        """Thin annular pin overlapping with a thick annulus."""
        componentTypesToTest = {Circle: [{"od": 0.7, "id": 0.3}, {"od": 0.6, "id": 0.5}]}
        self.runTest(componentTypesToTest, self.assertTrue)

    def test_AnnularHexOverlappingThickAnnularHex(self):
        componentTypesToTest = {Hexagon: [{"op": 1.0, "ip": 0.8}, {"op": 1.2, "ip": 0.8}]}
        self.runTest(componentTypesToTest, self.assertTrue)


class TestMultipleComponentLinkage(AxialExpansionTestBase):
    """Ensure that multiple component axial linkage can be caught."""

    def test_getLinkedComponents(self):
        """Test for multiple component axial linkage."""
        linked = AssemblyAxialLinkage(buildTestAssemblyWithFakeMaterial("FakeMat"))
        b = linked.a.getFirstBlockByType("fuel")
        fuelComp = b.getComponent(Flags.FUEL)
        cladComp = b.getComponent(Flags.CLAD)
        # widen the fuel so it overlaps both neighboring candidates, forcing ambiguity
        fuelComp.setDimension("od", 0.5 * (cladComp.getDimension("id") + cladComp.getDimension("od")))
        with self.assertRaisesRegex(
            RuntimeError,
            expected_regex="Multiple component axial linkages have been found for ",
        ):
            linked._getLinkedComponents(b, fuelComp)


class TestBlockLink(TestCase):
    """Test the ability to link blocks in an assembly."""

    def test_singleBlock(self):
        """Test an edge case where a single block exists."""
        b = _buildDummySodium(300, 50)
        links = AssemblyAxialLinkage.getLinkedBlocks([b])
        self.assertEqual(len(links), 1)
        self.assertIn(b, links)
        linked = links.pop(b)
        self.assertIsNone(linked.lower)

    def test_multiBlock(self):
        """Test links with multiple blocks."""
        N_BLOCKS = 5
        blocks = [_buildDummySodium(300, 50) for _ in range(N_BLOCKS)]
        links = AssemblyAxialLinkage.getLinkedBlocks(blocks)
        first = blocks[0]
        lowLink = links[first]
        # bottom block has no lower neighbor
        self.assertIsNone(lowLink.lower)
        # interior blocks each link downward to their immediate neighbor
        for ix in range(1, N_BLOCKS - 1):
            current = blocks[ix]
            below = blocks[ix - 1]
            link = links[current]
            self.assertIs(link.lower, below)
        # top block checked separately
        top = blocks[-1]
        lastLink = links[top]
        self.assertIs(lastLink.lower, blocks[-2])

    def test_emptyBlocks(self):
        """Test even smaller edge case when no blocks are passed."""
        with self.assertRaisesRegex(ValueError, "No blocks passed. Cannot determine links"):
            AssemblyAxialLinkage.getLinkedBlocks([])

    def test_onAssembly(self):
        """Test assembly behavior is the same as sequence of blocks."""
        assembly = HexAssembly("test")
        N_BLOCKS = 5
        assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=N_BLOCKS)
        assembly.spatialGrid.armiObject = assembly
        blocks = []
        for _ in range(N_BLOCKS):
            b = _buildDummySodium(300, 10)
            assembly.add(b)
            blocks.append(b)
        fromBlocks = AssemblyAxialLinkage.getLinkedBlocks(blocks)
        fromAssem = AssemblyAxialLinkage.getLinkedBlocks(assembly)
        self.assertSetEqual(set(fromBlocks), set(fromAssem))
        for b, bLink in fromBlocks.items():
            aLink = fromAssem[b]
            self.assertIs(aLink.lower, bLink.lower)


================================================
FILE: armi/reactor/converters/tests/test_axialExpansionChanger.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test axialExpansionChanger.""" import collections import copy import os import unittest from statistics import mean from typing import Callable from numpy import array, linspace, zeros from armi import materials from armi.materials import _MATERIAL_NAMESPACE_ORDER, custom from armi.reactor.assemblies import HexAssembly, grids from armi.reactor.blocks import HexBlock from armi.reactor.components import Component, DerivedShape from armi.reactor.components.basicShapes import Circle, Hexagon from armi.reactor.converters.axialExpansionChanger import ( AssemblyAxialLinkage, AxialExpansionChanger, ExpansionData, getSolidComponents, iterSolidComponents, ) from armi.reactor.flags import Flags from armi.testing import loadTestReactor from armi.tests import TEST_ROOT from armi.utils import units from armi.utils.customExceptions import InputError class AxialExpansionTestBase(unittest.TestCase): """Common methods and variables for unit tests.""" Steel_Component_Lst = [ Flags.DUCT, Flags.GRID_PLATE, Flags.HANDLING_SOCKET, Flags.INLET_NOZZLE, Flags.CLAD, Flags.WIRE, Flags.ACLP, Flags.GUIDE_TUBE, ] @classmethod def setUpClass(cls): cls.origNameSpace = _MATERIAL_NAMESPACE_ORDER # set namespace order for materials so that fake HT9 material can be found materials.setMaterialNamespaceOrder( [ "armi.reactor.converters.tests.test_axialExpansionChanger", "armi.materials", ] ) def setUp(self): self.obj = AxialExpansionChanger() self.componentMass = collections.defaultdict(list) self.componentDensity = collections.defaultdict(list) self.totalAssemblySteelMass = [] self.blockZtop = collections.defaultdict(list) @classmethod def tearDownClass(cls): # reset global namespace materials.setMaterialNamespaceOrder(cls.origNameSpace) def _getConservationMetrics(self, a): """Retrieves and stores various conservation metrics. - useful for verification and unittesting - Finds and stores: 1. mass and density of target components 2. mass of assembly steel 3. 
        block heights
        """
        totalSteelMass = 0.0
        for b in a:
            # store block ztop
            self.blockZtop[b].append(b.p.ztop)
            for c in iterSolidComponents(b):
                # store mass and density of component
                self.componentMass[c].append(c.getMass())
                self.componentDensity[c].append(c.material.getProperty("density", c.temperatureInK))
                # store steel mass for assembly
                if c.p.flags in self.Steel_Component_Lst:
                    totalSteelMass += c.getMass()
        self.totalAssemblySteelMass.append(totalSteelMass)


class Temperature:
    """Create and store temperature grid/field."""

    def __init__(
        self,
        L,
        coldTemp=100.0,
        hotInletTemp=360.0,
        numTempGridPts=25,
        tempSteps=100,
        uniform=False,
    ):
        """
        Parameters
        ----------
        L : float
            length of self.tempGrid. Should be the height of the corresponding assembly.
        coldTemp : float
            component as-built temperature
        hotInletTemp : float
            temperature closest to bottom of assembly. Interpreted as inlet temp at nominal
            operations.
        numTempGridPts : integer
            the number of temperature measurement locations along the z-axis of the assembly
        tempSteps : integer
            the number of temperatures to create (analogous to time steps)
        uniform : bool
            if True, each temperature step is axially uniform; otherwise an axially varying
            profile is generated (see _generateTempField)
        """
        self.tempSteps = tempSteps
        self.tempGrid = linspace(0.0, L, num=numTempGridPts)
        # one row of temperatures per step, one column per grid point
        self.tempField = zeros((tempSteps, numTempGridPts))
        self._generateTempField(coldTemp, hotInletTemp, uniform)

    def _generateTempField(self, coldTemp, hotInletTemp, uniform):
        """Generate temperature field and grid.
        - all temperatures are in C
        - temperature field : temperature readings (e.g., from T/H calculation)
        - temperature grid : physical locations in which temperature is measured
        """
        # Generate temp field; the first step is always at the as-built cold temperature
        self.tempField[0, :] = coldTemp
        if not uniform:
            # axially varying profile that also ramps with step index
            for i in range(1, self.tempSteps):
                self.tempField[i, :] = (
                    coldTemp
                    + (i + 1) / (self.tempSteps / 3) * self.tempGrid
                    + (hotInletTemp - coldTemp) * (i + 1) / self.tempSteps
                )
        else:
            # axially uniform; each step takes the next value on a cold->hot ramp
            tmp = linspace(coldTemp, hotInletTemp, self.tempSteps)
            for i in range(1, self.tempSteps):
                self.tempField[i, :] = tmp[i]


class TestAxialExpansionHeight(AxialExpansionTestBase):
    """Verify that test assembly is expanded correctly."""

    def setUp(self):
        super().setUp()
        self.a = buildTestAssemblyWithFakeMaterial(name="FakeMat")
        self.temp = Temperature(self.a.getTotalHeight(), numTempGridPts=11, tempSteps=10)
        # get the right/expected answer
        self._generateComponentWiseExpectedHeight()
        # do the axial expansion
        for idt in range(self.temp.tempSteps):
            self.obj.performThermalAxialExpansion(self.a, self.temp.tempGrid, self.temp.tempField[idt, :], setFuel=True)
            self._getConservationMetrics(self.a)

    def test_AssemblyAxialExpansionHeight(self):
        """Test the axial expansion gives correct heights for component-based expansion."""
        for idt in range(self.temp.tempSteps):
            for ib, b in enumerate(self.a):
                self.assertAlmostEqual(
                    self.trueZtop[ib, idt],
                    self.blockZtop[b][idt],
                    places=7,
                    msg=f"Block height is not correct. {b}; Temp Step = {idt}",
                )

    def _generateComponentWiseExpectedHeight(self):
        """Calculate the expected height, external of AssemblyAxialExpansion."""
        assem = buildTestAssemblyWithFakeMaterial(name="FakeMat")
        aveBlockTemp = zeros((len(assem), self.temp.tempSteps))
        self.trueZtop = zeros((len(assem), self.temp.tempSteps))
        self.trueHeight = zeros((len(assem), self.temp.tempSteps))
        self.trueZtop[-1, :] = assem[-1].p.ztop
        for idt in range(self.temp.tempSteps):
            # get average block temp
            for ib in range(len(assem)):
                aveBlockTemp[ib, idt] = self._getAveTemp(ib, idt, assem)
            # get block ztops
            for ib, b in enumerate(assem[:-1]):
                if ib > 0:
                    b.p.zbottom = assem[ib - 1].p.ztop
                if idt > 0:
                    # linear expansion fraction between consecutive average block temps
                    dll = (0.02 * aveBlockTemp[ib, idt] - 0.02 * aveBlockTemp[ib, idt - 1]) / (
                        100.0 + 0.02 * aveBlockTemp[ib, idt - 1]
                    )
                    thermExpansionFactor = 1.0 + dll
                    b.p.ztop = thermExpansionFactor * b.p.height + b.p.zbottom
                self.trueZtop[ib, idt] = b.p.ztop
            # get block heights
            for ib, b in enumerate(assem):
                b.p.height = b.p.ztop - b.p.zbottom
                self.trueHeight[ib, idt] = b.p.height

    def _getAveTemp(self, ib, idt, assem):
        # average the temperature-field points that fall within block ib's axial span
        tmpMapping = []
        for idz, z in enumerate(self.temp.tempGrid):
            if assem[ib].p.zbottom <= z <= assem[ib].p.ztop:
                tmpMapping.append(self.temp.tempField[idt][idz])
            if z > assem[ib].p.ztop:
                break
        return mean(tmpMapping)


class TestConservation(AxialExpansionTestBase):
    """Verify that conservation is maintained in assembly-level axial expansion."""

    def setUp(self):
        super().setUp()
        self.a = buildTestAssemblyWithFakeMaterial(name="FakeMat")

    def expandAssemForMassConservationTest(self):
        """Do the thermal expansion and store conservation metrics of interest."""
        # create a semi-realistic/physical variable temperature grid over the assembly
        temp = Temperature(self.a.getTotalHeight(), numTempGridPts=11, tempSteps=10)
        for idt in range(temp.tempSteps):
            self.obj.performThermalAxialExpansion(
                self.a,
                temp.tempGrid,
                temp.tempField[idt, :],
            )
            self._getConservationMetrics(self.a)

    def test_thermExpansContractConserv_simple(self):
        """Thermally expand and then contract to ensure original state is recovered.

        .. test:: Thermally expand and then contract to ensure original assembly is recovered.
            :id: T_ARMI_AXIAL_EXP_THERM0
            :tests: R_ARMI_AXIAL_EXP_THERM

        Notes
        -----
        Temperature field is always isothermal and initially at 25 C.
        """
        isothermalTempList = [100.0, 350.0, 250.0, 100.0]
        a = buildTestAssemblyWithFakeMaterial(name="HT9")
        origMesh = a.getAxialMesh()[:-1]
        origMasses, origNDens = self._getComponentMassAndNDens(a)
        origDetailedNDens = self._setComponentDetailedNDens(a, origNDens)
        axialExpChngr = AxialExpansionChanger(detailedAxialExpansion=True)
        tempGrid = linspace(0.0, a.getHeight())
        for temp in isothermalTempList:
            # compute expected change in number densities
            c = a[0][0]
            radialGrowthFrac = c.material.getThermalExpansionDensityReduction(
                prevTempInC=c.temperatureInC, newTempInC=temp
            )
            axialGrowthFrac = c.getThermalExpansionFactor(T0=c.temperatureInC, Tc=temp)
            totGrowthFrac = axialGrowthFrac / radialGrowthFrac
            # Set new isothermal temp and expand
            tempField = array([temp] * len(tempGrid))
            oldMasses, oldNDens = self._getComponentMassAndNDens(a)
            oldDetailedNDens = self._getComponentDetailedNDens(a)
            axialExpChngr.performThermalAxialExpansion(a, tempGrid, tempField)
            newMasses, newNDens = self._getComponentMassAndNDens(a)
            newDetailedNDens = self._getComponentDetailedNDens(a)
            self._checkMass(oldMasses, newMasses)
            self._checkNDens(oldNDens, newNDens, totGrowthFrac)
            self._checkDetailedNDens(oldDetailedNDens, newDetailedNDens, totGrowthFrac)
        # make sure that the assembly returned to the original state
        for orig, new in zip(origMesh, a.getAxialMesh()):
            self.assertAlmostEqual(orig, new, places=12)
        self._checkMass(origMasses, newMasses)
        self._checkNDens(origNDens, newNDens, 1.0)
        self._checkDetailedNDens(origDetailedNDens, newDetailedNDens, 1.0)

    def test_thermExpansContractionConserv_complex(self):
        """Thermally expand and then contract to ensure original state
        is recovered.

        Notes
        -----
        Assemblies with liners are not supported and not considered for conservation testing.
        """
        _oCold, rCold = loadTestReactor(
            os.path.join(TEST_ROOT, "detailedAxialExpansion"),
            customSettings={"inputHeightsConsideredHot": False},
        )
        assems = list(rCold.blueprints.assemblies.values())
        for a in assems:
            if a.hasFlags([Flags.MIDDLE, Flags.ANNULAR]):
                # assemblies with the above flags have liners and conservation of them is not currently supported
                continue
            self.complexConservationTest(a)

    def complexConservationTest(self, a: HexAssembly):
        """Heat up, then cool back down, asserting mass conservation at every step."""
        # get total assembly fluid mass pre-expansion
        preExpAssemFluidMass = self._getTotalAssemblyFluidMass(a)
        origMesh = a.getAxialMesh()[:-1]
        origMasses, origNDens = self._getComponentMassAndNDens(a)
        axialExpChngr = AxialExpansionChanger(detailedAxialExpansion=True)
        axialExpChngr.setAssembly(a)
        # +50 C twice, then -50 C twice: net zero temperature change
        tempAdjust = [50.0, 50.0, -50.0, -50.0]
        for temp in tempAdjust:
            # adjust component temperatures by temp
            for b in a:
                if "control" in str(b):
                    # skirting around a problem with test B4C temperature inputs
                    continue
                for c in iterSolidComponents(b):
                    axialExpChngr.expansionData.updateComponentTemp(c, c.temperatureInC + temp)
            # get U235/B10 and FE56 mass pre-expansion
            prevFE56Mass = a.getMass("FE56")
            if a.hasFlags([Flags.FUEL, Flags.CONTROL]):
                prevMass = a.getMass("U235" if a.hasFlags(Flags.FUEL) else "B10")
            # compute thermal expansion coeffs and expand
            axialExpChngr.expansionData.computeThermalExpansionFactors()
            axialExpChngr.axiallyExpandAssembly()
            # ensure that total U235/B10 and FE56 mass is conserved post-expansion
            newFE56Mass = a.getMass("FE56")
            self.assertAlmostEqual(newFE56Mass / prevFE56Mass, 1.0, places=14, msg=f"{a}")
            if a.hasFlags([Flags.FUEL, Flags.CONTROL]):
                newMass = a.getMass("U235" if a.hasFlags(Flags.FUEL) else "B10")
                self.assertAlmostEqual(newMass / prevMass, 1.0, places=14, msg=f"{a}")
        newMasses, newNDens = self._getComponentMassAndNDens(a)
        # make sure that the assembly returned to the original state
        for orig, new in zip(origMesh, a.getAxialMesh()):
            self.assertAlmostEqual(orig, new, places=12, msg=f"{a}")
        self._checkMass(origMasses, newMasses)
        self._checkNDens(origNDens, newNDens, 1.0)
        # get total assembly fluid mass post-expansion
        postExpAssemFluidMass = self._getTotalAssemblyFluidMass(a)
        # verify that the total assembly fluid mass is preserved through expansion
        self.assertAlmostEqual(preExpAssemFluidMass, postExpAssemFluidMass, places=11)

    def test_expansionContractionConservation(self):
        """Expand all components and then contract back to original state.

        .. test:: Expand all components and then contract back to original state.
            :id: T_ARMI_AXIAL_EXP_PRESC0
            :tests: R_ARMI_AXIAL_EXP_PRESC

        Notes
        -----
        - uniform expansion over all components within the assembly
        - 10 total expansion steps: 5 at +1.01 L1/L0, and 5 at -(1.01^-1) L1/L0
        """
        a = buildTestAssemblyWithFakeMaterial(name="FakeMat")
        axExpChngr = AxialExpansionChanger()
        origMesh = a.getAxialMesh()
        origMasses, origNDens = self._getComponentMassAndNDens(a)
        componentLst = [c for b in a for c in iterSolidComponents(b)]
        expansionGrowthFrac = 1.01
        contractionGrowthFrac = 1.0 / expansionGrowthFrac
        for i in range(0, 10):
            if i < 5:
                growthFrac = expansionGrowthFrac
                fracLst = growthFrac + zeros(len(componentLst))
            else:
                growthFrac = contractionGrowthFrac
                fracLst = growthFrac + zeros(len(componentLst))
            oldMasses, oldNDens = self._getComponentMassAndNDens(a)
            # do the expansion
            axExpChngr.performPrescribedAxialExpansion(a, componentLst, fracLst, setFuel=True)
            newMasses, newNDens = self._getComponentMassAndNDens(a)
            self._checkMass(oldMasses, newMasses)
            self._checkNDens(oldNDens, newNDens, growthFrac)
        # make sure that the assembly returned to the original state
        for orig, new in zip(origMesh, a.getAxialMesh()):
            self.assertAlmostEqual(orig, new, places=13)
        self._checkMass(origMasses, newMasses)
        self._checkNDens(origNDens, newNDens, 1.0)

    def _checkMass(self, prevMass, newMass):
        for prev, new in zip(prevMass.values(), newMass.values()):
            self.assertAlmostEqual(prev, new, places=11)

    def _checkNDens(self, prevNDen, newNDens, ratio):
        """Assert every non-zero number density changed by exactly ``ratio``."""
        for prevComp, newComp in zip(prevNDen.values(), newNDens.values()):
            self.assertEqual(len(prevComp), len(newComp))
            for nuc in prevComp.keys():
                # some ndens values are 0.0, only check non-zero values
                if prevComp[nuc]:
                    self.assertAlmostEqual(prevComp[nuc] / newComp[nuc], ratio)

    def _checkDetailedNDens(self, prevDetailedNDen, newDetailedNDens, ratio):
        """Check whether the detailedNDens of two input dictionaries containing the detailedNDens
        arrays for all components of an assembly are conserved.
        """
        for prevComp, newComp in zip(prevDetailedNDen.values(), newDetailedNDens.values()):
            for prev, new in zip(prevComp, newComp):
                if prev:
                    self.assertAlmostEqual(prev / new, ratio, msg=f"{prev} / {new}")

    @staticmethod
    def _getComponentMassAndNDens(a):
        """Snapshot mass and number densities for every solid component in ``a``."""
        masses = {}
        nDens = {}
        for b in a:
            for c in iterSolidComponents(b):
                masses[c] = c.getMass()
                nDens[c] = c.getNumberDensities()
        return masses, nDens

    @staticmethod
    def _setComponentDetailedNDens(a, nDens):
        """Returns a dictionary that contains detailedNDens for all components in an assembly object
        input which are set to the corresponding component number densities from a number density
        dictionary input.
        """
        detailedNDens = {}
        for b in a:
            for c in getSolidComponents(b):
                c.p.detailedNDens = copy.deepcopy([val for val in nDens[c].values()])
                detailedNDens[c] = c.p.detailedNDens
        return detailedNDens

    @staticmethod
    def _getComponentDetailedNDens(a):
        """Returns a dictionary containing all solid components and their corresponding
        detailedNDens from an assembly object input.
""" detailedNDens = {} for b in a: for c in getSolidComponents(b): detailedNDens[c] = copy.deepcopy(c.p.detailedNDens) return detailedNDens def test_targetComponentMassConservation(self): """Tests mass conservation for target components.""" self.expandAssemForMassConservationTest() for cName, masses in self.componentMass.items(): for i in range(1, len(masses)): self.assertAlmostEqual(masses[i], masses[i - 1], msg=f"{cName} mass not right") for cName, density in self.componentDensity.items(): for i in range(1, len(density)): self.assertLess(density[i], density[i - 1], msg=f"{cName} density not right.") for i in range(1, len(self.totalAssemblySteelMass)): self.assertAlmostEqual( self.totalAssemblySteelMass[i], self.totalAssemblySteelMass[i - 1], msg="Total assembly steel mass is not conserved.", ) def test_noMovementACLP(self): """Ensures the above core load pad (ACLP) does not move during fuel-only expansion. .. test:: Ensure the ACLP does not move during fuel-only expansion. :id: T_ARMI_AXIAL_EXP_PRESC1 :tests: R_ARMI_AXIAL_EXP_PRESC .. test:: Ensure the component volumes are correctly updated during prescribed expansion. 
            :id: T_ARMI_AXIAL_EXP_PRESC2
            :tests: R_ARMI_AXIAL_EXP_PRESC
        """
        # build test assembly with ACLP
        assembly = HexAssembly("testAssemblyType")
        assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1)
        assembly.spatialGrid.armiObject = assembly
        assembly.add(_buildTestBlock("shield", "FakeMat", 100.0, 10.0))
        assembly.add(_buildTestBlock("fuel", "FakeMat", 100.0, 10.0))
        assembly.add(_buildTestBlock("fuel", "FakeMat", 100.0, 10.0))
        assembly.add(_buildTestBlock("plenum", "FakeMat", 100.0, 10.0, True))
        assembly.add(_buildTestBlock("aclp", "FakeMat", 100.0, 10.0, True))  # "aclp plenum" also works
        assembly.add(_buildTestBlock("plenum", "FakeMat", 100.0, 10.0, True))
        assembly.add(_buildDummySodium(100.0, 10.0))
        assembly.calculateZCoords()
        assembly.reestablishBlockOrder()
        # get zCoords for aclp
        aclp = assembly.getChildrenWithFlags(Flags.ACLP)[0]
        aclpZTop = aclp.p.ztop
        aclpZBottom = aclp.p.zbottom
        # expand fuel
        # get fuel components
        cList = [c for b in assembly for c in b if c.hasFlags(Flags.FUEL)]
        # 1.01 L1/L0 growth of fuel components
        pList = zeros(len(cList)) + 1.01
        chngr = AxialExpansionChanger()
        chngr.performPrescribedAxialExpansion(assembly, cList, pList, setFuel=True)
        # do assertion
        self.assertEqual(
            aclpZBottom,
            aclp.p.zbottom,
            msg="ACLP zbottom has changed. It should not with fuel component only expansion!",
        )
        self.assertEqual(
            aclpZTop,
            aclp.p.ztop,
            msg="ACLP ztop has changed. It should not with fuel component only expansion!",
        )
        # verify that the component volumes are correctly updated
        for b in assembly:
            for c in b:
                self.assertAlmostEqual(
                    c.getArea() * b.getHeight(),
                    c.getVolume(),
                    places=12,
                )

    @staticmethod
    def _getTotalAssemblyFluidMass(assembly) -> float:
        """Sum the mass of every fluid-material component in the assembly."""
        totalAssemblyFluidMass = 0.0
        for b in assembly:
            for c in b:
                if isinstance(c.material, materials.material.Fluid):
                    totalAssemblyFluidMass += c.getMass()
        return totalAssemblyFluidMass

    def test_reset(self):
        self.obj.setAssembly(self.a)
        self.obj.reset()
        self.assertIsNone(self.obj.linked)
        self.assertIsNone(self.obj.expansionData)

    def test_computeThermalExpansionFactors(self):
        """Ensure expansion factors are as expected."""
        self.obj.setAssembly(self.a)
        stdThermExpFactor = {}
        newTemp = 500.0
        # apply new temp to the pin and clad components of each block
        for b in self.a:
            for c in b.iterComponents([Flags.FUEL, Flags.CLAD]):
                stdThermExpFactor[c] = c.getThermalExpansionFactor()
                self.obj.expansionData.updateComponentTemp(c, newTemp)
        self.obj.expansionData.computeThermalExpansionFactors()
        # skip dummy block, it's just coolant and doesn't expand.
        for b in self.a[:-1]:
            for c in b:
                if c.hasFlags([Flags.FUEL, Flags.CLAD]):
                    # heated components must have a new expansion factor
                    self.assertNotEqual(
                        stdThermExpFactor[c],
                        self.obj.expansionData.getExpansionFactor(c),
                        msg=f"Block {b}, Component {c}, thermExpCoeff not right.\n",
                    )
                else:
                    # untouched components keep the identity factor
                    self.assertEqual(
                        self.obj.expansionData.getExpansionFactor(c),
                        1.0,
                        msg=f"Block {b}, Component {c}, thermExpCoeff not right.\n",
                    )


class TestManageCoreMesh(unittest.TestCase):
    """Verify that manage core mesh unifies the mesh for detailedAxialExpansion: False."""

    def setUp(self):
        self.axialExpChngr = AxialExpansionChanger()
        _o, self.r = loadTestReactor(os.path.join(TEST_ROOT, "detailedAxialExpansion"))
        self.oldAxialMesh = self.r.core.p.axialMesh
        self.componentLst = []
        for b in self.r.core.refAssem:
            if b.hasFlags([Flags.FUEL, Flags.PLENUM]):
                self.componentLst.extend(getSolidComponents(b))
        # expand refAssem by 1.01 L1/L0
        expansionGrowthFracs = 1.01 + zeros(len(self.componentLst))
        (
            self.origDetailedNDens,
            self.origVolumes,
        ) = self._getComponentDetailedNDensAndVol(self.componentLst)
        self.axialExpChngr.performPrescribedAxialExpansion(
            self.r.core.refAssem, self.componentLst, expansionGrowthFracs, setFuel=True
        )

    def test_manageCoreMesh(self):
        self.axialExpChngr.manageCoreMesh(self.r)
        newAxialMesh = self.r.core.p.axialMesh
        # all solid components in fuel + plenum block expand so the first three points are not expected to change
        for old, new in zip(self.oldAxialMesh[3:-1], newAxialMesh[3:-1]):
            self.assertLess(old, new)

    def test_componentConservation(self):
        self.axialExpChngr.manageCoreMesh(self.r)
        newDetailedNDens, newVolumes = self._getComponentDetailedNDensAndVol(self.componentLst)
        for c in newVolumes.keys():
            self._checkMass(
                self.origDetailedNDens[c],
                self.origVolumes[c],
                newDetailedNDens[c],
                newVolumes[c],
                c,
            )

    def _getComponentDetailedNDensAndVol(self, componentLst):
        """Returns a tuple containing dictionaries of detailedNDens and volumes of all components
        from a component list input.
""" detailedNDens = {} volumes = {} for c in componentLst: c.p.detailedNDens = [val for val in c.getNumberDensities().values()] detailedNDens[c] = copy.deepcopy(c.p.detailedNDens) volumes[c] = c.getVolume() return (detailedNDens, volumes) def _checkMass(self, origDetailedNDens, origVolume, newDetailedNDens, newVolume, c): for prevMass, newMass in zip(origDetailedNDens * origVolume, newDetailedNDens * newVolume): if c.parent.hasFlags(Flags.FUEL): self.assertAlmostEqual(prevMass, newMass, delta=1e-12, msg=f"{c}, {c.parent}") else: # should not conserve mass here as it is structural material above active fuel self.assertAlmostEqual(newMass / prevMass, 1.00, msg=f"{c}, {c.parent}") class TestExceptions(AxialExpansionTestBase): """Verify exceptions are caught.""" def setUp(self): super().setUp() self.a = buildTestAssemblyWithFakeMaterial(name="FakeMatException") self.obj.setAssembly(self.a) def test_isTopDummyBlockPresent(self): # build test assembly without dummy assembly = HexAssembly("testAssemblyType") assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1) assembly.spatialGrid.armiObject = assembly assembly.add(_buildTestBlock("shield", "FakeMat", 100.0, 10.0)) assembly.calculateZCoords() assembly.reestablishBlockOrder() # create instance of expansion changer obj = AxialExpansionChanger(detailedAxialExpansion=True) with self.assertRaisesRegex( RuntimeError, "Cannot run detailedAxialExpansion without a dummy block at the top of the assembly!", ): obj.setAssembly(assembly) def test_setExpansionFactors(self): cList = self.a.getFirstBlock().getChildren() with self.assertRaisesRegex( RuntimeError, "Number of components and expansion fractions must be the same!", ): self.obj.expansionData.setExpansionFactors(cList, range(len(cList) + 1)) with self.assertRaisesRegex( RuntimeError, "L1/L0, is not physical. 
Expansion fractions should be greater than 0.0.", ): self.obj.expansionData.setExpansionFactors(cList, zeros(len(cList))) with self.assertRaisesRegex( RuntimeError, "L1/L0, is not physical. Expansion fractions should be greater than 0.0.", ): self.obj.expansionData.setExpansionFactors(cList, zeros(len(cList)) - 10.0) def test_updateCompTempsBy1DTempFieldValError(self): tempGrid = [5.0, 15.0, 35.0] tempField = linspace(100.0, 310.0, 3) with self.assertRaisesRegex(ValueError, "has no temperature points within it!"): self.obj.expansionData.updateComponentTempsBy1DTempField(tempGrid, tempField) def test_updateCompTempsBy1DTempFieldError(self): tempGrid = [5.0, 15.0, 35.0] tempField = linspace(100.0, 310.0, 10) with self.assertRaisesRegex(RuntimeError, "tempGrid and tempField must have the same length."): self.obj.expansionData.updateComponentTempsBy1DTempField(tempGrid, tempField) def test_AssemblyAxialExpansionException(self): """Test that negative height exception is caught.""" # manually set axial exp target component for code coverage self.a[0].p.axialExpTargetComponent = self.a[0][0].name temp = Temperature(self.a.getTotalHeight(), numTempGridPts=11, tempSteps=10) with self.assertRaisesRegex(ArithmeticError, "has a negative height"): for idt in range(temp.tempSteps): self.obj.expansionData.updateComponentTempsBy1DTempField(temp.tempGrid, 2 * temp.tempField[idt, :]) self.obj.expansionData.computeThermalExpansionFactors() self.obj.axiallyExpandAssembly() def test_isFuelLocked(self): """Ensures that the RuntimeError statement in ExpansionData::_isFuelLocked is raised appropriately. Notes ----- This is implemented by creating a fuel block that contains no fuel component and passing it to ExpansionData._isFuelLocked. 
""" expdata = ExpansionData(HexAssembly("testAssemblyType"), setFuel=True, expandFromTinputToThot=False) bNoFuel = HexBlock("fuel", height=10.0) shieldDims = { "Tinput": 100.0, "Thot": 100.0, "od": 0.76, "id": 0.00, "mult": 127.0, } shield = Circle("shield", "FakeMat", **shieldDims) bNoFuel.add(shield) with self.assertRaisesRegex(RuntimeError, f"No fuel component within {bNoFuel}!"): expdata._isFuelLocked(bNoFuel) class TestDetermineTargetComponent(AxialExpansionTestBase): """Verify determineTargetComponent method is properly updating _componentDeterminesBlockHeight.""" def setUp(self): super().setUp() self.expData = ExpansionData([], setFuel=True, expandFromTinputToThot=True) coolDims = {"Tinput": 100.0, "Thot": 100.0} self.coolant = DerivedShape("coolant", "Sodium", **coolDims) def test_getTargetComponent(self): b = HexBlock("fuel", height=10.0) fuelDims = {"Tinput": 100.0, "Thot": 100.0, "od": 0.76, "id": 0.00, "mult": 127.0} cladDims = {"Tinput": 100.0, "Thot": 100.0, "od": 0.80, "id": 0.77, "mult": 127.0} fuel = Circle("fuel", "FakeMat", **fuelDims) clad = Circle("clad", "FakeMat", **cladDims) b.add(fuel) b.add(clad) b.add(self.coolant) self.expData.setTargetComponent(b, True) self.assertEqual(fuel, self.expData.getTargetComponent(b)) def test_getTargetComponent_NoneFound(self): b = HexBlock("fuel", height=10.0) with self.assertRaisesRegex(RuntimeError, f"No target component found for {b} in"): self.expData.getTargetComponent(b) def test_determineTargetComponent(self): """Provides coverage for searching TARGET_FLAGS_IN_PREFERRED_ORDER.""" b = HexBlock("fuel", height=10.0) fuelDims = {"Tinput": 100.0, "Thot": 100.0, "od": 0.76, "id": 0.00, "mult": 127.0} cladDims = {"Tinput": 100.0, "Thot": 100.0, "od": 0.80, "id": 0.77, "mult": 127.0} fuel = Circle("fuel", "FakeMat", **fuelDims) clad = Circle("clad", "FakeMat", **cladDims) b.add(fuel) b.add(clad) b.add(self.coolant) self._checkTarget(b, fuel) def _checkTarget(self, b: HexBlock, expected: Component): """Call 
        determineTargetMethod and compare what we get with expected."""
        # Value unset initially
        self.assertFalse(b.p.axialExpTargetComponent)
        target = self.expData.determineTargetComponent(b)
        self.assertIs(target, expected)
        self.assertTrue(
            self.expData.isTargetComponent(target),
            msg=f"determineTargetComponent failed to recognize intended component: {expected}",
        )
        self.assertEqual(
            b.p.axialExpTargetComponent,
            expected.name,
            msg=f"determineTargetComponent failed to recognize intended component: {expected}",
        )

    def test_determineTargetCompBlockWithMultiFlags(self):
        """Provides coverage for searching TARGET_FLAGS_IN_PREFERRED_ORDER with multiple flags."""
        # build a block that has two flags as well as a component matching each
        b = HexBlock("fuel poison", height=10.0)
        fuelDims = {"Tinput": 100.0, "Thot": 100.0, "od": 0.9, "id": 0.5, "mult": 200.0}
        poisonDims = {"Tinput": 100.0, "Thot": 100.0, "od": 0.5, "id": 0.0, "mult": 10.0}
        fuel = Circle("fuel", "FakeMat", **fuelDims)
        poison = Circle("poison", "FakeMat", **poisonDims)
        b.add(fuel)
        b.add(poison)
        b.add(self.coolant)
        self._checkTarget(b, fuel)

    def test_specifyTargetComp_NotFound(self):
        """Ensure RuntimeError gets raised when no target component is found."""
        b = HexBlock("fuel", height=10.0)
        b.add(self.coolant)
        b.setType("fuel")
        with self.assertRaisesRegex(RuntimeError, "No target component found!"):
            self.expData.determineTargetComponent(b)
        with self.assertRaisesRegex(RuntimeError, "No target component found!"):
            self.expData.determineTargetComponent(b, Flags.FUEL)

    def test_specifyTargetComp_singleSolid(self):
        """Ensures that specifyTargetComponent is smart enough to set the only solid as the target component."""
        b = HexBlock("plenum", height=10.0)
        ductDims = {"Tinput": 100.0, "Thot": 100.0, "op": 17, "ip": 0.0, "mult": 1.0}
        duct = Hexagon("duct", "FakeMat", **ductDims)
        b.add(duct)
        b.add(self.coolant)
        b.getVolumeFractions()
        b.setType("plenum")
        self._checkTarget(b, duct)

    def test_specifyTargetComp_MultiFound(self):
        """Ensure
        RuntimeError is hit when multiple target components are found.

        Notes
        -----
        This can occur if a block has a mixture of fuel types. E.g., different fuel materials,
        or different fuel geometries.
        """
        b = HexBlock("fuel", height=10.0)
        fuelAnnularDims = {
            "Tinput": 100.0,
            "Thot": 100.0,
            "od": 0.9,
            "id": 0.5,
            "mult": 100.0,
        }
        fuelDims = {"Tinput": 100.0, "Thot": 100.0, "od": 1.0, "id": 0.0, "mult": 10.0}
        fuel = Circle("fuel", "FakeMat", **fuelDims)
        fuelAnnular = Circle("fuel annular", "FakeMat", **fuelAnnularDims)
        b.add(fuel)
        b.add(fuelAnnular)
        b.add(self.coolant)
        b.setType("FuelBlock")
        with self.assertRaisesRegex(
            RuntimeError,
            "Cannot have more than one component within a block that has the target flag!",
        ):
            self.expData.determineTargetComponent(b, flagOfInterest=Flags.FUEL)

    def test_manuallySetTargetComponent(self):
        """
        Ensures that target components can be manually set (is done in practice via blueprints).

        .. test:: Allow user-specified target axial expansion components on a given block.
            :id: T_ARMI_MANUAL_TARG_COMP
            :tests: R_ARMI_MANUAL_TARG_COMP
        """
        b = HexBlock("dummy", height=10.0)
        ductDims = {"Tinput": 100.0, "Thot": 100.0, "op": 17, "ip": 0.0, "mult": 1.0}
        duct = Hexagon("duct", "FakeMat", **ductDims)
        b.add(duct)
        b.add(self.coolant)
        b.getVolumeFractions()
        b.setType("duct")
        # manually set target component
        b.setAxialExpTargetComp(duct)
        self.assertEqual(
            b.p.axialExpTargetComponent,
            duct.name,
        )
        # check that target component is stored on expansionData object correctly
        self.expData._componentDeterminesBlockHeight[b.getComponentByName(b.p.axialExpTargetComponent)] = True
        self.assertTrue(self.expData.isTargetComponent(duct))


class TestGetSolidComponents(unittest.TestCase):
    """Verify that getSolidComponents returns just solid components."""

    def test_getSolidComponents(self):
        """Show that getSolidComponents produces a list of solids, and is consistent with iterSolidComponents."""
        a = buildTestAssemblyWithFakeMaterial(name="HT9")
        for b in a:
            solids = getSolidComponents(b)
            # compare by object identity so duplicate-looking components are distinguished
            ids = set(map(id, solids))
            for c in iterSolidComponents(b):
                self.assertNotEqual(c.material.name, "Sodium")
                self.assertIn(id(c), ids, msg=f"Found non-solid {c}")
                ids.remove(id(c))
            self.assertFalse(
                ids,
                msg="Inconsistency between getSolidComponents and iterSolidComponents",
            )

    def test_checkForBlocksWithoutSolids(self):
        a = buildTestAssemblyWithFakeMaterial(name="Sodium")
        changer = AxialExpansionChanger()
        changer.linked = AssemblyAxialLinkage(a)
        with self.assertRaisesRegex(
            InputError,
            expected_regex="is constructed improperly for use with the axial expansion changer",
        ):
            changer._checkForBlocksWithoutSolids()


class TestInputHeightsConsideredHot(unittest.TestCase):
    """Verify thermal expansion for process loading of core."""

    def setUp(self):
        """This test uses a different armiRun.yaml than the default."""
        o, r = loadTestReactor(
            os.path.join(TEST_ROOT, "detailedAxialExpansion"),
            customSettings={"inputHeightsConsideredHot": True},
        )
        self.stdAssems = list(r.core)
        oCold, rCold = loadTestReactor(
            os.path.join(TEST_ROOT, "detailedAxialExpansion"),
            customSettings={"inputHeightsConsideredHot": False},
        )
        self.testAssems = list(rCold.core)

    def test_coldAssemblyExpansion(self):
        """Block heights are cold and should be expanded.

        .. test:: Preserve the total height of a compatible ARMI assembly.
            :id: T_ARMI_ASSEM_HEIGHT_PRES
            :tests: R_ARMI_ASSEM_HEIGHT_PRES

        .. test:: Axial expansion can be prescribed in blueprints for core construction.
            :id: T_ARMI_INP_COLD_HEIGHT
            :tests: R_ARMI_INP_COLD_HEIGHT

        Notes
        -----
        For R_ARMI_INP_COLD_HEIGHT, the action of axial expansion occurs in setUp() during core
        construction, specifically in :py:meth:`constructAssem
        <armi.reactor.blueprints.Blueprints.constructAssem>`

        Two assertions here:
            1. total assembly height should be preserved (through use of top dummy block)
            2. in armi.tests.detailedAxialExpansion.refSmallReactorBase.yaml, Thot > Tinput
               resulting in a non-zero DeltaT.
Each block in the expanded case should therefore be a different height than that of the standard case. """ for aStd, aExp in zip(self.stdAssems, self.testAssems): self.assertAlmostEqual( aStd.getTotalHeight(), aExp.getTotalHeight(), msg="Std Assem {0} ({1}) and Exp Assem {2} ({3}) are not the same height!".format( aStd, aStd.getTotalHeight(), aExp, aExp.getTotalHeight() ), ) for bStd, bExp in zip(aStd, aExp): if any(isinstance(c.material, custom.Custom) for c in bStd): checkColdBlockHeight(bStd, bExp, self.assertAlmostEqual, "the same") else: checkColdBlockHeight(bStd, bExp, self.assertNotEqual, "different") if bStd.hasFlags(Flags.FUEL): self.checkColdHeightBlockMass(bStd, bExp, "U235") elif bStd.hasFlags(Flags.CONTROL): self.checkColdHeightBlockMass(bStd, bExp, "B10") for cExp in iterSolidComponents(bExp): if cExp.zbottom == bExp.p.zbottom and cExp.ztop == bExp.p.ztop: matDens = cExp.material.density(Tc=cExp.temperatureInC) compDens = cExp.density() msg = ( f"{cExp} {cExp.material} in {bExp} in {aExp} was not at correct density. \n" + f"expansion = {bExp.p.height / bStd.p.height} \n" + f"density = {matDens}, component density = {compDens} \n" ) self.assertAlmostEqual( matDens, compDens, places=12, msg=msg, ) def checkColdHeightBlockMass(self, bStd: HexBlock, bExp: HexBlock, nuclide: str): """Checks that nuclide masses for blocks with input cold heights and "inputHeightsConsideredHot": True are underpredicted. Notes ----- If blueprints have cold blocks heights with "inputHeightsConsideredHot": True in the inputs, then the nuclide densities are thermally expanded but the block height is not. This ultimately results in nuclide masses being underpredicted relative to the case where both nuclide densities and block heights are thermally expanded. 
""" self.assertGreater(bExp.getMass(nuclide), bStd.getMass(nuclide)) def checkColdBlockHeight(bStd: HexBlock, bExp: HexBlock, assertType: Callable, strForAssertion: str): assertType( bStd.getHeight(), bExp.getHeight(), msg="Assembly: {0} -- Std Block {1} ({2}) and Exp Block {3} ({4}) should have {5:s} heights!".format( bStd.parent, bStd, bStd.getHeight(), bExp, bExp.getHeight(), strForAssertion, ), ) def buildTestAssemblyWithFakeMaterial(name: str, hot: bool = False): """Create test assembly consisting of list of fake material. Parameters ---------- name : string determines which fake material to use """ if not hot: hotTemp = 100.0 height = 10.0 else: hotTemp = 200.0 height = 10.0 + 0.02 * (200.0 - 100.0) assembly = HexAssembly("testAssemblyType") assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1) assembly.spatialGrid.armiObject = assembly assembly.add(_buildTestBlock("shield", name, hotTemp, height)) assembly.add(_buildTestBlock("fuel", name, hotTemp, height)) assembly.add(_buildTestBlock("fuel", name, hotTemp, height)) assembly.add(_buildTestBlock("plenum", name, hotTemp, height, True)) assembly.add(_buildDummySodium(hotTemp, height)) assembly.calculateZCoords() assembly.reestablishBlockOrder() return assembly def _buildTestBlock(blockType: str, name: str, hotTemp: float, height: float, plenum: bool = False) -> HexBlock: """Return a simple pin type block filled with coolant and surrounded by duct. Parameters ---------- blockType determines which type of block you're building name determines which material to use hotTemp the hot temperature of the block. This is synonomous with Thot in blueprints. height the height of the block plenum boolean to indicate if this is a plenum. if true, the pin is replaced by an air-filled gap. Returns ------- HexBlock for testing. 
""" b = HexBlock(blockType, height=height) fuelDims = {"Tinput": 100.0, "Thot": hotTemp, "od": 0.76, "id": 0.00, "mult": 127.0} ductDims = {"Tinput": 100.0, "Thot": hotTemp, "op": 16, "ip": 15.3, "mult": 1.0} mainType = Circle(blockType, name, **fuelDims) bond = Circle("bond", "Sodium", Tinput=100.0, Thot=hotTemp, od=0.78, id=0.76, mult=127.0) clad = Circle("clad", name, Tinput=100.0, Thot=hotTemp, od=0.80, id=0.78, mult=127.0) duct = Hexagon("duct", name, **ductDims) coolant = DerivedShape("coolant", "Sodium", Tinput=100.0, Thot=hotTemp) intercoolant = Hexagon( "intercoolant", "Sodium", Tinput=100.0, Thot=hotTemp, op=17.0, ip=ductDims["op"], mult=1.0, ) if plenum: b.add(Circle("gap", "Air", **fuelDims)) else: b.add(mainType) b.add(bond) b.add(clad) b.add(duct) b.add(coolant) b.add(intercoolant) b.setType(blockType) b.getVolumeFractions() b.completeInitialLoading() return b def _buildDummySodium(hotTemp: float, height: float): """Build a dummy sodium block.""" b = HexBlock("dummy", height=height) dummy = Hexagon("dummy coolant", "Sodium", Tinput=100.0, Thot=hotTemp, op=17, ip=0.0, mult=1.0) b.add(dummy) b.getVolumeFractions() b.setType("dummy") return b class FakeMat(materials.ht9.HT9): """Fake material used to verify armi.reactor.converters.axialExpansionChanger. Notes ----- - specifically used in TestAxialExpansionHeight to verify axialExpansionChanger produces expected heights from hand calculation - also used to verify mass and height conservation resulting from even amounts of expansion and contraction. See TestConservation. """ name = "FakeMat" def linearExpansionPercent(self, Tk=None, Tc=None): """A fake linear expansion percent.""" Tc = units.getTc(Tc, Tk) return 0.02 * Tc class FakeMatException(materials.ht9.HT9): """Fake material used to verify TestExceptions. 
Notes ----- - the only difference between this and `class Fake(HT9)` above is that the thermal expansion factor is higher to ensure that a negative block height is caught in TestExceptions:test_AssemblyAxialExpansionException. """ name = "FakeMatException" def linearExpansionPercent(self, Tk=None, Tc=None): """A fake linear expansion percent.""" Tc = units.getTc(Tc, Tk) return 0.08 * Tc ================================================ FILE: armi/reactor/converters/tests/test_axialExpansionChanger_MultiPin.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import collections import copy import io from dataclasses import dataclass from typing import TYPE_CHECKING, Optional from unittest.mock import MagicMock from numpy import array, array_equal, full from armi.materials.material import Fluid from armi.reactor.blueprints import Blueprints from armi.reactor.components.component import Component from armi.reactor.converters.axialExpansionChanger.axialExpansionChanger import AxialExpansionChanger from armi.reactor.converters.axialExpansionChanger.expansionData import iterSolidComponents from armi.reactor.converters.axialExpansionChanger.redistributeMass import RedistributeMass from armi.reactor.converters.tests.test_axialExpansionChanger import AxialExpansionTestBase from armi.reactor.flags import Flags, TypeSpec from armi.settings.caseSettings import Settings from armi.testing.singleMixedAssembly import BLOCK_DEFINITIONS_2PIN, GRID_DEFINITION, buildMixedPinAssembly if TYPE_CHECKING: from armi.reactor.assemblies import HexAssembly from armi.reactor.blocks import HexBlock FINE_ASSEMBLY_DEF = """ assemblies: multi pin fuel: specifier: LA blocks: [ *block_grid_plate, *block_fuel_multiPin_axial_shield, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_mixed_multiPin, *block_mixed_multiPin, *block_aclp_multiPin, *block_plenum_multiPin, *block_duct, *block_dummy ] height: [ 1.0, 1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ] axial mesh points: [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ] xs types: [ A, A, B, B, B, B, B, B, B, B, C, C, D, D, A, A ] """ # noqa: E501 @dataclass class StoreMassAndTemp: cType: str mass: float HMmass: float HMmassBOL: float HMmolesBOL: float temp: float class TestMultiPinConservationBase(AxialExpansionTestBase): @classmethod def setUpClass(cls): super().setUpClass() cls.aRef = buildMixedPinAssembly() cls.places = 12 def setUp(self): 
self.a = copy.deepcopy(self.aRef)
        self.axialExpChngr = AxialExpansionChanger()
        self.axialExpChngr.setAssembly(self.a)
        self.initConservationValues()

    def initConservationValues(self):
        # get original masses for conservation checks
        self.origTotalCMassByFlag = self.getTotalCompMassByFlag(self.a)
        self.initialTotalHMMolesBOL = self.initialTotalHMMassBOL = 0.0
        for _, b in self._iterFuelBlocks():
            for c in b.iterChildrenWithFlags(Flags.FUEL):
                self.initialTotalHMMolesBOL += c.p.molesHmBOL
                self.initialTotalHMMassBOL += c.p.massHmBOL

    def getTotalCompMassByFlag(self, a: "HexAssembly") -> dict[TypeSpec, float]:
        """Get the total mass of all components in the assembly, except Bond components.

        Notes
        -----
        The axial expansion changer does not consider the expansion or contraction of fluids and
        therefore their conservation is not guaranteed. The conservation of fluid mass is expected
        only if each component type on a block has 1) uniform expansion rates and 2) axially
        isothermal fluid temperatures. For multipin assemblies, the former is generally not met for
        Bond components; however since there is only one coolant and intercoolant component in
        general, the conservation of mass for these components expected if axially isothermal fluid
        temperatures are present.
        """
        totalCMassByFlags: dict[Flags, float] = collections.defaultdict(float)
        for b in a:
            for c in iterSolidComponents(b):
                totalCMassByFlags[c.p.flags] += c.getMass()
            # fluids other than Bond (e.g. coolant/intercoolant) are still tallied
            for c in filter(self._isFluidButNotBond, b):
                totalCMassByFlags[c.p.flags] += c.getMass()
        return totalCMassByFlags

    @staticmethod
    def _isFluidButNotBond(c):
        """Determine if a component is a fluid, but not Bond."""
        return isinstance(c, Component) and isinstance(c.material, Fluid) and not c.hasFlags(Flags.BOND)

    def _iterTestFuelCompsOnBlock(self, b: "HexBlock"):
        """Iterate over components in b that exactly contain Flags.FUEL, Flags.TEST, and Flags.DEPLETABLE."""
        yield from b.iterChildrenWithFlags(Flags.FUEL | Flags.TEST | Flags.DEPLETABLE, exactMatch=True)

    def _iterFuelBlocks(self):
        """Iterate over blocks in self.a that have Flags.FUEL.

        Enumerator index starts at 1 to support scaling block-wise values.
        """
        yield from enumerate(filter(lambda b: b.hasFlags(Flags.FUEL), self.a), start=1)

    def checkConservation(self):
        """Conservation of axial expansion is measured by ensuring the following is the same post
        expansion: 1) total assembly mass per component flag, 2) total assembly height, and
        3) total moles heavy metal at BOL.
        """
        newTotalCMassByFlag = self.getTotalCompMassByFlag(self.a)
        for origMass, (cFlag, newMass) in zip(self.origTotalCMassByFlag.values(), newTotalCMassByFlag.items()):
            self.assertAlmostEqual(origMass, newMass, places=self.places, msg=f"{cFlag} are not the same!")
        self.assertAlmostEqual(self.aRef.getTotalHeight(), self.a.getTotalHeight(), places=self.places)

        totalHMMolesBOL = totalHMMassBOL = 0
        for _, b in self._iterFuelBlocks():
            for c in b.iterChildrenWithFlags(Flags.FUEL):
                totalHMMolesBOL += c.p.molesHmBOL
                totalHMMassBOL += c.p.massHmBOL
        self.assertAlmostEqual(totalHMMolesBOL, self.initialTotalHMMolesBOL, places=self.places)
        self.assertAlmostEqual(totalHMMassBOL, self.initialTotalHMMassBOL, places=self.places)


class TestRedistributeMass(TestMultiPinConservationBase):
    # b0/b1 are axially linked blocks; c0/c1 the linked "fuel test" components within them
    b0: "HexBlock"
    b1: "HexBlock"
    c0: Component
    origC0Temp: float
    c1: Component
    origC1Temp: float

    def setUp(self):
        super().setUp()
        self.b0 = self.a.getFirstBlock(Flags.FUEL)
        self.b1 = self.axialExpChngr.linked.linkedBlocks[self.b0].upper
        self.c0 = next(filter(lambda c: c.getType() == "fuel test", self.b0))
        self.c1 = self.axialExpChngr.linked.linkedComponents[self.c0].upper

    def test_getAllNucs(self):
        """_getAllNucs merges two nuclide lists without mutating either input."""
        nucsA = ["Zr90", "Zr91", "Zr92", "U235", "U238"]
        nucsB = ["Zr90", "Zr91", "Zr92", "U233", "U238", "I131", "XE131", "NP237", "AM242", "AM242M"]
        nucsC = RedistributeMass(MagicMock(), MagicMock(), MagicMock(), MagicMock(), initOnly=True)._getAllNucs(
            nucsA, nucsB
        )
        # ensure nucsA and nucsB haven't changed
        self.assertTrue(
            array_equal(
                array(nucsA),
                array(["Zr90", "Zr91", "Zr92", "U235", "U238"]),
            )
        )
        self.assertTrue(
            array_equal(
                array(nucsB),
                array(["Zr90", "Zr91", "Zr92", "U233", "U238", "I131", "XE131", "NP237", "AM242", "AM242M"]),
            )
        )
        # ensure nucsC is correct
        self.assertTrue(
            array_equal(
                array(nucsC),
                array(["Zr90", "Zr91", "Zr92", "I131", "XE131", "U233", "U235", "NP237", "U238", "AM242", "AM242M"]),
            )
        )

    def test_adjustMassParams(self):
        """BOL HM mass/mole params move from c0 to c1 but their totals are conserved."""
        self._initializeTest(1.05, fromComp=self.c0)
        # component-level params
        initialFromMassBOL = self.c0.p.massHmBOL
        initialFromMolesBOL = self.c0.p.molesHmBOL
        initialToMassBOL = self.c1.p.massHmBOL
        initialToMolesBOL = self.c1.p.molesHmBOL
        dist = RedistributeMass(
            fromComp=self.c0, toComp=self.c1, assemName=repr(self.a), deltaZTop=self.deltaZTop, initOnly=True
        )
        dist._adjustMassParams()
        self.assertLess(self.c0.p.massHmBOL, initialFromMassBOL)
        self.assertLess(self.c0.p.molesHmBOL, initialFromMolesBOL)
        self.assertGreater(self.c1.p.massHmBOL, initialToMassBOL)
        self.assertGreater(self.c1.p.molesHmBOL, initialToMolesBOL)
        self.assertAlmostEqual(self.c0.p.massHmBOL + self.c1.p.massHmBOL, initialFromMassBOL + initialToMassBOL)
        self.assertAlmostEqual(self.c0.p.molesHmBOL + self.c1.p.molesHmBOL, initialFromMolesBOL + initialToMolesBOL)
        # block-level params
        initialFromMassBOL = self.b0.p.massHmBOL
        initialFromMolesBOL = self.b0.p.molesHmBOL
        initialToMassBOL = self.b1.p.massHmBOL
        initialToMolesBOL = self.b1.p.molesHmBOL
        self.axialExpChngr._recomputeBlockMassParams(self.b0)
        self.axialExpChngr._recomputeBlockMassParams(self.b1)
        self.assertLess(self.b0.p.massHmBOL, initialFromMassBOL)
        self.assertLess(self.b0.p.molesHmBOL, initialFromMolesBOL)
        self.assertGreater(self.b1.p.massHmBOL, initialToMassBOL)
        self.assertGreater(self.b1.p.molesHmBOL, initialToMolesBOL)
        self.assertAlmostEqual(self.b0.p.massHmBOL + self.b1.p.massHmBOL, initialFromMassBOL + initialToMassBOL)
        self.assertAlmostEqual(self.b0.p.molesHmBOL + self.b1.p.molesHmBOL, initialFromMolesBOL + initialToMolesBOL)

    def test_shiftLinkedCompsForDelta(self):
        """Ensure that given a deltaZTop, component elevations are adjusted appropriately."""
        self._initializeTest(growFrac=1.0, fromComp=self.c0)  # setting fromComp is meaningless here
        # set what they should be after adjusting
        delta = 0.1
        refC0Height = self.c0.height + delta
        refC0Ztop = self.c0.ztop + delta
        refC1Height = self.c1.height - delta
        refC1Zbottom = self.c1.zbottom + delta
        self.axialExpChngr._shiftLinkedCompsForDelta(self.c0, self.c1, delta)
        self.assertAlmostEqual(refC0Height, self.c0.height, places=self.places)
        self.assertAlmostEqual(refC1Height, self.c1.height, places=self.places)
        self.assertAlmostEqual(refC0Ztop, self.c0.ztop, places=self.places)
        self.assertAlmostEqual(refC1Zbottom, self.c1.zbottom, places=self.places)

    def test_redistributeMassNonTargetExpNoTherm(self):
        """With no temperature changes anywhere, grow c0 by 10% and show that 10% of the c0 mass is moved to c1.

        Notes
        -----
        - C0 grows resulting in c0 giving 10% of its mass to c1. c1 height does not change so its
          mass gains 10%.
        - Additional assertions on temperature exist to ensure that the component temperatures are
          managed correctly during the transfer of mass. For this test, since this is not thermal
          expansion, we show that the component temperatures do not change.
        """
        growFrac = 1.10
        self._initializeTest(growFrac, fromComp=self.c0)
        self._redistributeMassWithTempAssert(fromComp=self.c0, toComp=self.c1, thermalExp=False)

    def test_addMassToCompNonTargetCompNoTherm(self):
        """With no temperature changes anywhere, shrink c0 by 10% and show that 10% of the c1 mass is moved to c0.

        Notes
        -----
        - C0 shrinks resulting in c1 giving 10% of its mass to c0. c1 height does not change so its
          mass loses 10%.
        - Additional assertions on temperature exist to ensure that the component temperatures are
          managed correctly during the transfer of mass. For this test, since this is not thermal
          expansion, we show that the component temperatures do not change.
        """
        growFrac = 0.9
        self._initializeTest(growFrac, fromComp=self.c1)
        self._redistributeMassWithTempAssert(fromComp=self.c1, toComp=self.c0, thermalExp=False)

    def test_addMassToCompNonTargetComprYesTherm(self):
        """Decrease c0 by 100 deg C and show that c1 mass is moved to c0.

        Notes
        -----
        - C0 shrinks resulting in c1 giving X% of its mass to c0. c1 height does not change so its
          mass loses X%.
        - Additional assertions on temperature exist to ensure that the component temperatures are
          managed correctly during the transfer of mass. For this test, we show that the
          temperature of c0 increases and the temperature of c1 does not change. The increase in
          temperature for c0 is due to the contribution from the hotter c1 component.
        """
        newTemp = self.c0.temperatureInC - 100.0
        # updateComponentTemp updates ndens for update in AREA only
        self.axialExpChngr.expansionData.updateComponentTemp(self.c0, newTemp)
        self.axialExpChngr.expansionData.computeThermalExpansionFactors()
        growFrac = self.axialExpChngr.expansionData.getExpansionFactor(self.c0)
        self._initializeTest(growFrac, fromComp=self.c1)
        self._redistributeMassWithTempAssert(fromComp=self.c1, toComp=self.c0, thermalExp=True)

    def test_addMassToCompNonTargetExpanYesTherm(self):
        """Increase c0 by 100 deg C and show that c0 mass is moved to c1.

        Notes
        -----
        - C0 expands resulting in c0 giving X% of its mass to c1. c0 height does not change so its
          mass loses X%.
        - Additional assertions on temperature exist to ensure that the component temperatures are
          managed correctly during the transfer of mass. For this test, we show that the
          temperature of c1 increases and the temperature of c0 does not change. The increase in
          temperature is due to the contribution from the hotter c0 component.
        """
        newTemp = self.c0.temperatureInC + 100.0
        # updateComponentTemp updates ndens for update in AREA only
        self.axialExpChngr.expansionData.updateComponentTemp(self.c0, newTemp)
        self.axialExpChngr.expansionData.computeThermalExpansionFactors()
        growFrac = self.axialExpChngr.expansionData.getExpansionFactor(self.c0)
        self._initializeTest(growFrac, fromComp=self.c0)
        self._redistributeMassWithTempAssert(fromComp=self.c0, toComp=self.c1, thermalExp=True)

    def _updateToCompElevations(self, toComp: Component):
        """Shift ``toComp`` based on expansion or contraction of ``fromComp``, as indicated by ``self.deltaZTop``.

        Notes
        -----
        If deltaZTop is negative, this indicates that ``fromComp`` has expanded and ``toComp``
        needs to be shifted upwards. If deltaZtop is positive, this indicates that ``fromComp``
        has contracted and ``toComp`` need to be shifted downwards.
        """
        if self.deltaZTop < 0.0:
            toComp.zbottom -= self.deltaZTop
            toComp.height -= self.deltaZTop
            toComp.ztop = toComp.zbottom + toComp.height
        else:
            toComp.ztop += self.deltaZTop
            toComp.height += self.deltaZTop
        # adjust b1 elevations based on c1
        toComp.parent.ztop = toComp.ztop
        toComp.parent.zbottom = toComp.zbottom
        toComp.parent.p.height = toComp.height
        toComp.parent.clearCache()

    def _updateFromCompElevations(self, fromComp: Component):
        # mirror of _updateToCompElevations: clamp fromComp back to its block boundary
        if self.deltaZTop < 0.0:
            # fromComp expanded beyond the block top; pull its top (and height) back down
            fromComp.ztop += self.deltaZTop
            fromComp.height += self.deltaZTop
        else:
            fromComp.zbottom += self.deltaZTop
            fromComp.height -= self.deltaZTop
        # adjust b0 elevations based on c0
        fromComp.parent.ztop = fromComp.ztop
        fromComp.parent.zbottom = fromComp.zbottom
        fromComp.parent.p.height = fromComp.parent.ztop - fromComp.parent.zbottom
        # clear the cache to update volume calculations
        fromComp.parent.clearCache()

    def _initializeTest(self, growFrac: float, fromComp: Component):
        """Initialize the tests.

        Notes
        -----
        1) Store reference mass and temperature information.
        2) Set elevations of components and blocks post-expansion.
        3) Store the amount of mass expected to be redistributed between components.
        """
        # set the original mass and temperature of the components post expansion and pre redistribution
        self.originalC0 = StoreMassAndTemp(
            self.c0.parent.name,
            self.c0.getMass(),
            self.c0.getHMMass(),
            self.c0.p.massHmBOL,
            self.c0.p.molesHmBOL,
            self.c0.temperatureInC,
        )
        self.originalC1 = StoreMassAndTemp(
            self.c1.parent.name,
            self.c1.getMass(),
            self.c1.getHMMass(),
            self.c1.p.massHmBOL,
            self.c1.p.molesHmBOL,
            self.c1.temperatureInC,
        )
        # adjust c0 elevations per growFrac
        self.c0.zbottom = self.b0.p.zbottom
        self.c0.height = self.b0.getHeight() * growFrac
        self.c0.ztop = self.c0.zbottom + self.c0.height
        # update the ndens of c0 for the change in height
        self.c0.changeNDensByFactor(1.0 / growFrac)
        # calculate deltaZTop to inform how much mass will be redistributed
        self.deltaZTop = self.b0.p.ztop - self.c0.ztop
        # initialize component elevations for self.b1
        for c in self.b1:
            c.zbottom = self.b1.p.zbottom
            c.height = self.b1.getHeight()
            c.ztop = c.zbottom + c.height
        self.b1.clearCache()
        if fromComp is self.c0:
            fromHeight = self.c0.height
            self.redistributedMass = self.originalC0.mass * abs(self.deltaZTop) / fromHeight
            self.redistributedBOLMass = self.originalC0.HMmassBOL * abs(self.deltaZTop) / fromHeight
            self.redistributedBOLMoles = self.originalC0.HMmolesBOL * abs(self.deltaZTop) / fromHeight
        else:
            fromHeight = self.c1.height
            self.redistributedMass = self.originalC1.mass * abs(self.deltaZTop) / fromHeight
            self.redistributedBOLMass = self.originalC1.HMmassBOL * abs(self.deltaZTop) / fromHeight
            self.redistributedBOLMoles = self.originalC1.HMmolesBOL * abs(self.deltaZTop) / fromHeight

    def _getReferenceData(self, fromComp: Component, toComp: Optional[Component]):
        """Pull the reference data needed for ``fromComp`` and ``toComp``."""
        # reference data is matched by the parent (block) name stored in cType
        fromCompRefData = self.originalC0 if fromComp.parent.name == self.originalC0.cType else self.originalC1
        if toComp is None:
            toCompRefData = None
        else:
            toCompRefData = self.originalC0 if toComp.parent.name == self.originalC0.cType else self.originalC1
        return fromCompRefData, toCompRefData

    def _redistributeMassWithTempAssert(self, fromComp: Component, toComp: Component, thermalExp: bool):
        """Perform the mass redistribution from ``fromComp`` to ``toComp``.

        Notes
        -----
        Two assertions are done:
        1) the correct amount of mass is moved to ``toComp``.
        2) the resulting temperatures for ``fromComp`` and ``toComp`` are correct.
        """
        # move mass from ``fromComp`` to ``toComp``
        RedistributeMass(fromComp=fromComp, toComp=toComp, assemName=repr(self.a), deltaZTop=self.deltaZTop)
        fromCompRefData, toCompRefData = self._getReferenceData(fromComp, toComp)
        self._updateToCompElevations(toComp=toComp)
        self._updateFromCompElevations(fromComp=fromComp)
        # ensure the toComp mass increases by amountBeingRedistributed
        self.assertAlmostEqual(
            toComp.getMass(),
            toCompRefData.mass + self.redistributedMass,
            places=self.places,
        )
        HMfrac = toCompRefData.HMmass / toCompRefData.mass
        self.assertAlmostEqual(
            toComp.getHMMass(),
            toCompRefData.HMmass + self.redistributedMass * HMfrac,
            places=self.places,
        )
        self.assertAlmostEqual(
            toComp.p.massHmBOL,
            toCompRefData.HMmassBOL + self.redistributedBOLMass,
            places=self.places,
        )
        self.assertAlmostEqual(
            toComp.p.molesHmBOL,
            toCompRefData.HMmolesBOL + self.redistributedBOLMoles,
            places=self.places,
        )
        # fromComp temperature should not change because we've only removed mass
        self.assertEqual(fromComp.temperatureInC, fromCompRefData.temp)
        # we expect the new temperature to be greater because we added mass from a
        # material with a higher temperature
        if thermalExp:
            self.assertGreater(toComp.temperatureInC, toCompRefData.temp)
        else:
            self.assertEqual(toComp.temperatureInC, toCompRefData.temp)
        # ensure the fromComp mass decreases by redistributedMass
        self.assertAlmostEqual(fromComp.getMass(), fromCompRefData.mass - self.redistributedMass, places=self.places)
        HMfrac = fromCompRefData.HMmass / fromCompRefData.mass
        self.assertAlmostEqual(
            fromComp.getHMMass(),
            fromCompRefData.HMmass - self.redistributedMass * HMfrac,
            places=self.places,
        )
        self.assertAlmostEqual(
            fromComp.p.massHmBOL,
            fromCompRefData.HMmassBOL - self.redistributedBOLMass,
            places=self.places,
        )
        self.assertAlmostEqual(
            fromComp.p.molesHmBOL,
            fromCompRefData.HMmolesBOL - self.redistributedBOLMoles,
            places=self.places,
        )


class TestMultiPinConservation(TestMultiPinConservationBase):
    def setUp(self):
        super().setUp()

    def test_expandThermalBothFuel(self):
        """Perform thermal expansion on both fuel and test fuel components.

        Notes
        -----
        - Each block is scaled by an increasing temperature to simulate a variable axial
          temperature distribution.
        - The test fuel and fuel components are scaled by different temperatures to simulate each
          pin design existing at different temperatures.
        - The 150 deg C and 50 deg C based temperature changes are arbitrarily chosen.
        """
        for i, b in self._iterFuelBlocks():
            for c in b.iterChildrenWithFlags(Flags.FUEL):
                if c.hasFlags(Flags.TEST):
                    newTemp = c.temperatureInC + 150.0 * i
                else:
                    newTemp = c.temperatureInC + 50.0 * i
                self.axialExpChngr.expansionData.updateComponentTemp(c, newTemp)
        self.axialExpChngr.expansionData.computeThermalExpansionFactors()
        self.axialExpChngr.axiallyExpandAssembly()
        self.checkConservation()

    def test_roundTripThermalBothFuel(self):
        """Perform thermal expansion on both fuel and test fuel components and ensure that mass
        and total assembly height is recovered.

        Notes
        -----
        - Each block is scaled by an increasing temperature to simulate a variable axial
          temperature distribution.
        - The test fuel and fuel components are scaled by different temperatures to simulate each
          pin design existing at different temperatures.
        - The 75 deg C and 50 deg C based temperature changes are arbitrarily chosen.
        """
        tempAdjust = [50, -50]
        for temp in tempAdjust:
            for i, b in self._iterFuelBlocks():
                for c in b.iterChildrenWithFlags(Flags.FUEL):
                    if c.hasFlags(Flags.TEST):
                        # test fuel runs 25 deg hotter/colder than standard fuel
                        testTemp = temp + 25 if temp > 0 else temp - 25
                        newTemp = c.temperatureInC + testTemp * i
                    else:
                        newTemp = c.temperatureInC + temp * i
                    self.axialExpChngr.expansionData.updateComponentTemp(c, newTemp)
            self.axialExpChngr.expansionData.computeThermalExpansionFactors()
            self.axialExpChngr.axiallyExpandAssembly()
            self.checkConservation()

    def test_expandThermal(self):
        """Perform thermal expansion on the test fuel component.

        Notes
        -----
        - Each block is scaled by an increasing temperature to simulate a variable axial
          temperature distribution.
        - The 100 deg C based temperature change is arbitrarily chosen.
        - An extra assertion is done in this test to ensure that isotopes uniquely found in each
          test are not dropped when moving mass between blocks. See the tables below for
          additional information on what is expected.

          ==========  ==================
          Component   Isotopes Present
          ==========  ==================
          0           XE131
          1           I131
          2           NP237
          3           CM242
          ==========  ==================

          then after the axial expansion routine, we show that the following exists,

          ==========  ==================
          Component   Isotopes Present
          ==========  ==================
          0           XE131
          1           I131, XE131
          2           NP237, I131
          3           CM242, NP237
          ==========  ==================
        """
        nucs = ["XE131", "I131", "NP237", "CM242"]
        for i, c in enumerate(self.a.iterComponents([Flags.FUEL, Flags.TEST, Flags.DEPLETABLE], exact=True)):
            self.assertEqual(c.getNumberDensity(nucs[i]), 0.0)
            c.setNumberDensity(nucs[i], 1e-3)
        # recalculate the initial mass with the new isotope additions
        self.origTotalCMassByFlag = self.getTotalCompMassByFlag(self.a)
        for i, b in self._iterFuelBlocks():
            for c in self._iterTestFuelCompsOnBlock(b):
                newTemp = c.temperatureInC + 100.0 * i
                self.axialExpChngr.expansionData.updateComponentTemp(c, newTemp)
        self.axialExpChngr.expansionData.computeThermalExpansionFactors()
        self.axialExpChngr.axiallyExpandAssembly()
        self.checkConservation()
        expectedNucsPresent = [["XE131"], ["XE131", "I131"], ["I131", "NP237"], ["NP237", "CM242"]]
        for i, c in enumerate(self.a.iterComponents([Flags.FUEL, Flags.TEST, Flags.DEPLETABLE], exact=True)):
            for nuc in expectedNucsPresent[i]:
                self.assertNotEqual(c.getNumberDensity(nuc), 0.0, msg=f"{nuc} not present in {c}!")

    def test_contractThermal(self):
        """Perform thermal contraction on the test fuel component.

        Notes
        -----
        - Each block is scaled by a decreasing temperature to simulate a variable axial
          temperature distribution.
        - The -100 deg C based temperature change is arbitrarily chosen.
        """
        for i, b in self._iterFuelBlocks():
            for c in self._iterTestFuelCompsOnBlock(b):
                newTemp = c.temperatureInC - 100.0 * i
                self.axialExpChngr.expansionData.updateComponentTemp(c, newTemp)
        self.axialExpChngr.expansionData.computeThermalExpansionFactors()
        self.axialExpChngr.axiallyExpandAssembly()
        self.checkConservation()

    def test_expandPrescribed(self):
        """Perform prescribed expansion on the test fuel component.

        Notes
        -----
        - The factor of 1.2 for component expansion is arbitrarily chosen. Note, if too large of a
          value is chosen, the upper block heights will go negative and the axial expansion
          changer will hit a RuntimeError.
        """
        cList = []
        for _i, b in self._iterFuelBlocks():
            for c in self._iterTestFuelCompsOnBlock(b):
                cList.append(c)
        pList = full(len(cList), 1.2)
        self.axialExpChngr.expansionData.setExpansionFactors(cList, pList)
        self.axialExpChngr.axiallyExpandAssembly()
        self.checkConservation()

    def test_contractPrescribed(self):
        """Perform prescribed contraction on the test fuel component.

        Notes
        -----
        - The factor of 0.9 for component contraction is arbitrarily chosen.
        """
        cList = []
        for _i, b in self._iterFuelBlocks():
            for c in self._iterTestFuelCompsOnBlock(b):
                cList.append(c)
        pList = full(len(cList), 0.9)
        self.axialExpChngr.expansionData.setExpansionFactors(cList, pList)
        self.axialExpChngr.axiallyExpandAssembly()
        self.checkConservation()

    def test_expandAndContractPrescribed(self):
        """Perform prescribed expansion and contraction on the test fuel component.

        Notes
        -----
        - Each block is scaled by a different value to simulate a variable axial expansion
          profile (e.g., burnup driven axial expansion commonly found in sodium fast reactors).
        - The factor of +/- 0.01 for component expansion/contraction is arbitrarily chosen.
          Note, if too large of a value is chosen, the upper block heights will go negative and
          the axial expansion changer will hit a RuntimeError.
        """
        cList = []
        pList = []
        for i, b in self._iterFuelBlocks():
            for c in b.iterChildrenWithFlags(Flags.FUEL):
                if c.hasFlags(Flags.TEST):
                    pList.append(1.0 + 0.01 * i)
                else:
                    pList.append(1.0 - 0.01 * i)
                cList.append(c)
        self.axialExpChngr.expansionData.setExpansionFactors(cList, pList)
        self.axialExpChngr.axiallyExpandAssembly()
        self.checkConservation()


class TestExceptionForMultiPin(TestMultiPinConservationBase):
    def setUp(self):
        # build a finer-mesh assembly directly from blueprints (no mass bookkeeping needed here)
        cs = Settings()
        with io.StringIO(BLOCK_DEFINITIONS_2PIN + FINE_ASSEMBLY_DEF + GRID_DEFINITION) as stream:
            blueprints = Blueprints.load(stream)
            blueprints._prepConstruction(cs)
        self.a = list(blueprints.assemblies.values())[0]
        self.axialExpChngr = AxialExpansionChanger()
        self.axialExpChngr.setAssembly(self.a)

    def test_failExpansionNegativeCompHeight(self):
        """Show that the negative component height check can be caught."""
        cList = []
        for _i, b in self._iterFuelBlocks():
            for c in b.iterChildrenWithFlags(Flags.FUEL | Flags.DEPLETABLE, exactMatch=True):
                cList.append(c)
        # 1.3 is large enough to drive an upper component height negative
        pList = full(len(cList), 1.3)
        self.axialExpChngr.expansionData.setExpansionFactors(cList, pList)
        with self.assertRaisesRegex(ArithmeticError, expected_regex="has a negative height"):
self.axialExpChngr.axiallyExpandAssembly() ================================================ FILE: armi/reactor/converters/tests/test_blockConverter.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test block conversions.""" import math import os import unittest import numpy as np from armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface import ( isDepletable, ) from armi.reactor import blocks, components, grids from armi.reactor.converters import blockConverters from armi.reactor.flags import Flags from armi.reactor.tests.test_blocks import buildLinkedFuelBlock, loadTestBlock from armi.testing import TEST_ROOT, loadTestReactor from armi.testing.singleMixedAssembly import buildMixedThreePinAssembly from armi.utils import hexagon from armi.utils.directoryChangers import TemporaryDirectoryChanger def buildSimpleFuelBlockNegativeArea(): """ Return a simple block containing fuel, clad, duct, and coolant. The block has a negative-area gap between fuel and cladding for testing. 
""" b = blocks.HexBlock("fuel", height=10.0) fuelDims = {"Tinput": 25, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 127.0} cladDims = {"Tinput": 25, "Thot": 600, "od": 0.80, "id": 0.76, "mult": 127.0} ductDims = {"Tinput": 25, "Thot": 600, "op": 16, "ip": 15.3, "mult": 1.0} intercoolantDims = { "Tinput": 400, "Thot": 400, "op": 17.0, "ip": ductDims["op"], "mult": 1.0, } coolDims = {"Tinput": 25.0, "Thot": 400} fuel = components.Circle("fuel", "UZr", **fuelDims) clad = components.Circle("clad", "HT9", **cladDims) gapDims = { "Tinput": 25, "Thot": 600, "od": "clad.id", "id": "fuel.od", "mult": 127.0, } gapDims["components"] = {"fuel": fuel, "clad": clad} gap = components.Circle("gap", "Void", **gapDims) duct = components.Hexagon("duct", "HT9", **ductDims) coolant = components.DerivedShape("coolant", "Sodium", **coolDims) intercoolant = components.Hexagon("intercoolant", "Sodium", **intercoolantDims) b.add(fuel) b.add(gap) b.add(clad) b.add(duct) b.add(coolant) b.add(intercoolant) b.getVolumeFractions() return b def buildSimpleFuelBlockNegativeAreaBond(): """ Return a simple block containing fuel, clad, duct, and coolant. The block has a negative-area bond between fuel and cladding for testing. 
""" b = blocks.HexBlock("fuel", height=10.0) fuelDims = {"Tinput": 25, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 127.0} cladDims = {"Tinput": 25, "Thot": 600, "od": 0.80, "id": 0.76, "mult": 127.0} ductDims = {"Tinput": 25, "Thot": 600, "op": 16, "ip": 15.3, "mult": 1.0} intercoolantDims = { "Tinput": 400, "Thot": 400, "op": 17.0, "ip": ductDims["op"], "mult": 1.0, } coolDims = {"Tinput": 25.0, "Thot": 400} fuel = components.Circle("fuel", "UZr", **fuelDims) clad = components.Circle("clad", "HT9", **cladDims) bondDims = { "Tinput": 25, "Thot": 600, "od": "clad.id", "id": "fuel.od", "mult": 127.0, } bondDims["components"] = {"fuel": fuel, "clad": clad} bond = components.Circle("bond", "Sodium", **bondDims) duct = components.Hexagon("duct", "HT9", **ductDims) coolant = components.DerivedShape("coolant", "Sodium", **coolDims) intercoolant = components.Hexagon("intercoolant", "Sodium", **intercoolantDims) b.add(fuel) b.add(bond) b.add(clad) b.add(duct) b.add(coolant) b.add(intercoolant) b.getVolumeFractions() return b class TestBlockConverter(unittest.TestCase): def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_dissolveWireIntoCoolant(self): """ Test dissolving wire into coolant. .. test:: Homogenize one component into another. :id: T_ARMI_BLOCKCONV0 :tests: R_ARMI_BLOCKCONV """ self._test_dissolve(loadTestBlock(), "wire", "coolant") hotBlock = loadTestBlock(cold=False) self._test_dissolve(hotBlock, "wire", "coolant") hotBlock = self._perturbTemps(hotBlock, "wire", 127, 700) self._test_dissolve(hotBlock, "wire", "coolant") def test_dissolveLinerIntoClad(self): """ Test dissolving liner into clad. .. test:: Homogenize one component into another. 
:id: T_ARMI_BLOCKCONV1 :tests: R_ARMI_BLOCKCONV """ self._test_dissolve(loadTestBlock(), "outer liner", "clad") hotBlock = loadTestBlock(cold=False) self._test_dissolve(hotBlock, "outer liner", "clad") hotBlock = self._perturbTemps(hotBlock, "outer liner", 127, 700) self._test_dissolve(hotBlock, "outer liner", "clad") def test_dissolveBondIntoClad(self): """ Test dissolving linked bond into coolant. .. test:: Homogenize a linked component into another. :id: T_ARMI_BLOCKCONV2 :tests: R_ARMI_BLOCKCONV """ self._test_dissolve(buildLinkedFuelBlock(), "bond", "clad") def _perturbTemps(self, block, cName, tCold, tHot): """Give the component different ref and hot temperatures than in test_Blocks.""" c = block.getComponent(Flags.fromString(cName)) c.refTemp, c.refHot = tCold, tHot c.setTemperature(tHot) return block def _test_dissolve(self, block, soluteName, solventName): converter = blockConverters.ComponentMerger(block, soluteName, solventName) convertedBlock = converter.convert() self.assertNotIn(soluteName, convertedBlock.getComponentNames()) self._checkAreaAndComposition(block, convertedBlock) def test_dissolveMultiple(self): """Test dissolving multiple components into another.""" self._test_dissolve_multi(loadTestBlock(), ["wire", "clad"], "coolant") self._test_dissolve_multi(loadTestBlock(), ["inner liner", "outer liner"], "clad") def test_dissolveMixedAssembly(self): """Test dissolving multiple components into another in a mixed assembly.""" mixedAssem = buildMixedThreePinAssembly() b = mixedAssem.getBlocks(Flags.FUEL)[1] annularPin = b.getComponents([Flags.ANNULAR, Flags.LINER, Flags.GAP]) testPin = [] hostPin = [] for c in b: if c in annularPin: continue if c.hasFlags([Flags.COOLANT, Flags.INTERCOOLANT, Flags.DUCT]): continue if c.hasFlags(Flags.TEST): testPin.append(c) hostPin.append(c) convertedBlock1 = self._test_dissolve_mixedAssembly(b, ["wire", "clad"], "coolant", hostPin) convertedBlock2 = self._test_dissolve_mixedAssembly(convertedBlock1, ["clad test"], 
"coolant", testPin) convertedBlock3 = self._test_dissolve_mixedAssembly( convertedBlock2, ["annular void"], "annular fuel test", testPin ) convertedBlock4 = self._test_dissolve_mixedAssembly( convertedBlock3, ["gap2", "liner", "gap1"], "annular clad test", testPin ) self._checkAreaAndComposition(b, convertedBlock4) def test_dissolveZeroArea(self): """Test dissolving a zero-area component into another.""" self._test_dissolve(loadTestBlock(), "gap2", "outer liner") def test_dissolveIntoZeroArea(self): """Test dissolving a component into a zero-area solvent (raises ValueError).""" with self.assertRaises(ValueError): self._test_dissolve(loadTestBlock(), "outer liner", "gap2") def test_dissolveNegativeArea(self): """Test dissolving a zero-area gap component into another.""" self._test_dissolve(buildSimpleFuelBlockNegativeArea(), "gap", "clad") def test_dissolveNegativeAreaBond(self): """Test dissolving a zero-area non-gap component into another.""" with self.assertRaises(ValueError): self._test_dissolve(buildSimpleFuelBlockNegativeAreaBond(), "bond", "clad") def test_dissolveIntoNegativeArea(self): """Test dissolving a zero-area component into another.""" with self.assertRaises(ValueError): self._test_dissolve(buildSimpleFuelBlockNegativeArea(), "clad", "gap") def _test_dissolve_multi(self, block, soluteNames, solventName): converter = blockConverters.MultipleComponentMerger(block, soluteNames, solventName) convertedBlock = converter.convert() for soluteName in soluteNames: self.assertNotIn(soluteName, convertedBlock.getComponentNames()) self._checkAreaAndComposition(block, convertedBlock) def _test_dissolve_mixedAssembly(self, block, soluteNames, solventName, pin): converter = blockConverters.MixedPinComponentMerger(block, soluteNames, solventName, pin) convertedBlock = converter.convert() for soluteName in soluteNames: self.assertNotIn(soluteName, convertedBlock.getComponentNames()) self._checkAreaAndComposition(block, convertedBlock) return convertedBlock def 
test_build_NthRing(self): """Test building of one ring.""" RING = 6 block = loadTestBlock(cold=False) block.spatialGrid = grids.HexGrid.fromPitch(1.0) numPinsInRing = 30 converter = blockConverters.HexComponentsToCylConverter(block) fuel, clad = _buildJoyoFuel() pinComponents = [fuel, clad] converter._buildFirstRing(pinComponents) converter.pinPitch = 0.76 converter._buildNthRing(pinComponents, RING) components = converter.convertedBlock self.assertEqual(components[3].name.split()[0], components[-1].name.split()[0]) self.assertAlmostEqual(clad.getNumberDensity("FE56"), components[1].getNumberDensity("FE56")) self.assertAlmostEqual( components[3].getArea() + components[-1].getArea(), clad.getArea() * numPinsInRing / clad.getDimension("mult"), ) def test_buildInsideDuct(self): """Test building inside the duct.""" block = loadTestBlock(cold=False) block.spatialGrid = grids.HexGrid.fromPitch(1.0) converter = blockConverters.HexComponentsToCylConverter(block) converter._buildInsideDuct() insideBlock = converter.convertedBlock ductIP = block.getComponent(Flags.DUCT).getDimension("ip") bondMass = block.getComponent(Flags.BOND).getMass("NA") coolantMass = block.getComponent(Flags.COOLANT).getMass("NA") self.assertAlmostEqual(insideBlock.getMass("U235"), block.getMass("U235")) self.assertAlmostEqual(insideBlock.getMass("NA"), bondMass + coolantMass) self.assertAlmostEqual(insideBlock.getArea(), ductIP**2 * math.sqrt(3) / 2) def test_convert(self): """Test conversion with no fuel driver. .. test:: Convert hex blocks to cylindrical blocks. 
:id: T_ARMI_BLOCKCONV_HEX_TO_CYL1 :tests: R_ARMI_BLOCKCONV_HEX_TO_CYL """ block = loadTestReactor(TEST_ROOT)[1].core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.FUEL) block.spatialGrid = grids.HexGrid.fromPitch(1.0) converter = blockConverters.HexComponentsToCylConverter(block) converter.convert() for compType in [Flags.FUEL, Flags.CLAD, Flags.DUCT]: self.assertAlmostEqual( block.getComponent(compType).getArea(), sum([component.getArea() for component in converter.convertedBlock if component.hasFlags(compType)]), ) for c in converter.convertedBlock.getComponents(compType): self.assertEqual(block.getComponent(compType).temperatureInC, c.temperatureInC) self.assertEqual(block.getHeight(), converter.convertedBlock.getHeight()) self._checkAreaAndComposition(block, converter.convertedBlock) self._checkCiclesAreInContact(converter.convertedBlock) def test_convertHexWithFuelDriver(self): """Test conversion with fuel driver. .. test:: Convert hex blocks to cylindrical blocks. :id: T_ARMI_BLOCKCONV_HEX_TO_CYL0 :tests: R_ARMI_BLOCKCONV_HEX_TO_CYL """ driverBlock = loadTestReactor(TEST_ROOT)[1].core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.FUEL) block = loadTestReactor(TEST_ROOT)[1].core.getFirstBlock(Flags.CONTROL) control = block.getComponent(Flags.CONTROL) # add depletable flag to see if it is carried control.p.flags |= Flags.DEPLETABLE driverBlock.spatialGrid = None block.spatialGrid = grids.HexGrid.fromPitch(1.0) convertedWithoutDriver = self._testConvertWithDriverRings( block, driverBlock, blockConverters.HexComponentsToCylConverter, hexagon.numPositionsInRing, ) self.assertEqual(5, len([c for c in convertedWithoutDriver if isDepletable(c)])) self.assertEqual(5, len([c for c in convertedWithoutDriver if c.hasFlags(Flags.CONTROL)])) self.assertEqual(9, len([c for c in convertedWithoutDriver if c.hasFlags(Flags.CLAD)])) # This should fail because a spatial grid is required on the block. 
driverBlock.spatialGrid = None block.spatialGrid = None with self.assertRaises(ValueError): self._testConvertWithDriverRings( block, driverBlock, blockConverters.HexComponentsToCylConverter, hexagon.numPositionsInRing, ) # The ``BlockAvgToCylConverter`` should work without any spatial grid defined because it assumes the grid based # on the block type. driverBlock.spatialGrid = None block.spatialGrid = None convertedWithoutDriver = self._testConvertWithDriverRings( block, driverBlock, blockConverters.BlockAvgToCylConverter, hexagon.numPositionsInRing, ) # block went to 1 component self.assertEqual(1, len([c for c in convertedWithoutDriver])) def test_convertHexWithFuelDrOnNegCompAreaBlock(self): """ Tests the conversion of a control block with linked components, where a component contains a negative area due to thermal expansion. """ driverBlock = loadTestReactor(TEST_ROOT)[1].core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.FUEL) block = buildControlBlockWithLinkedNegativeAreaComponent() areas = [c.getArea() for c in block] # Check that a negative area component exists. self.assertLess(min(areas), 0.0) driverBlock.spatialGrid = None block.spatialGrid = grids.HexGrid.fromPitch(1.0) converter = blockConverters.HexComponentsToCylConverter(block, driverFuelBlock=driverBlock, numExternalRings=2) convertedBlock = converter.convert() # The area is increased because the negative area components are # removed. 
self.assertGreater(convertedBlock.getArea(), block.getArea()) def test_convertCartesianLatticeWithFuelDriver(self): """Test conversion with fuel driver.""" r = loadTestReactor(TEST_ROOT, inputFileName="zpprTest.yaml")[1] driverBlock = r.core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.FUEL) block = r.core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.BLANKET) driverBlock.spatialGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0) block.spatialGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0) converter = blockConverters.BlockAvgToCylConverter self._testConvertWithDriverRings(block, driverBlock, converter, lambda n: (n - 1) * 8) def _testConvertWithDriverRings(self, block, driverBlock, converterToTest, getNumInRing): area = block.getArea() numExternalFuelRings = [1, 2, 3, 4] numBlocks = 1 for externalRings in numExternalFuelRings: numBlocks += getNumInRing(externalRings + 1) converter = converterToTest(block, driverFuelBlock=driverBlock, numExternalRings=externalRings) convertedBlock = converter.convert() self.assertAlmostEqual(area * numBlocks, convertedBlock.getArea()) self._checkCiclesAreInContact(convertedBlock) plotFile = "convertedBlock_{0}.svg".format(externalRings) converter.plotConvertedBlock(fName=plotFile) os.remove(plotFile) for c in list(reversed(convertedBlock))[:externalRings]: self.assertTrue(c.isFuel(), "c was {}".format(c.name)) # remove external driver rings in preparation to check composition convertedBlock.remove(c) convBlockWithoutDriver = convertedBlock self._checkAreaAndComposition(block, convBlockWithoutDriver) return convBlockWithoutDriver def _checkAreaAndComposition(self, block, convertedBlock): self.assertAlmostEqual(block.getArea(), convertedBlock.getArea()) unmergedNucs = block.getNumberDensities() convDens = convertedBlock.getNumberDensities() errorMessage = "" nucs = set(unmergedNucs) | set(convDens) for nucName in nucs: n1, n2 = unmergedNucs[nucName], convDens[nucName] try: self.assertAlmostEqual(n1, n2) except AssertionError: 
errorMessage += "\nnuc {} not equal. unmerged: {} merged: {}".format(nucName, n1, n2) self.assertTrue(not errorMessage, errorMessage) bMass = block.getMass() self.assertAlmostEqual(bMass, convertedBlock.getMass()) self.assertGreater(bMass, 0.0) # verify it isn't empty def _checkCiclesAreInContact(self, convertedCircleBlock): numComponents = len(convertedCircleBlock) self.assertGreater(numComponents, 1) self.assertTrue(all(isinstance(c, components.Circle) for c in convertedCircleBlock)) lastCompOD = None lastComp = None for c in sorted(convertedCircleBlock): thisID = c.getDimension("id") thisOD = c.getDimension("od") if lastCompOD is None: self.assertTrue( thisID == 0, "The inner component {} should have an ID of zero".format(c), ) else: self.assertTrue( thisID == lastCompOD, "The component {} with id {} was not in contact with the " "previous component ({}) that had od {}".format(c, thisID, lastComp, lastCompOD), ) lastCompOD = thisOD lastComp = c class TestToCircles(unittest.TestCase): def test_fromHex(self): actualRadii = blockConverters.radiiFromHexPitches([7.47, 7.85, 8.15]) expected = [3.92203, 4.12154, 4.27906] self.assertTrue(np.allclose(expected, actualRadii, rtol=1e-5)) def test_fromRingOfRods(self): # JOYO-LMFR-RESR-001, rev 1, Table A.2, 5th layer (ring 6) actualRadii = blockConverters.radiiFromRingOfRods(0.76 * 5, 6 * 5, [0.28, 0.315]) expected = [3.24034, 3.28553, 3.62584, 3.67104] self.assertTrue(np.allclose(expected, actualRadii, rtol=1e-5)) def _buildJoyoFuel(): """Build some JOYO components.""" fuel = components.Circle( name="fuel", material="UO2", Tinput=20.0, Thot=20.0, od=0.28 * 2, id=0.0, mult=91, ) clad = components.Circle( name="clad", material="HT9", Tinput=20.0, Thot=20.0, od=0.315 * 2, id=0.28 * 2, mult=91, ) return fuel, clad def buildControlBlockWithLinkedNegativeAreaComponent(): """ Return a block that contains a bond component that resolves to a negative area once the fuel and clad thermal expansion have occurred. 
""" b = blocks.HexBlock("control", height=10.0) controlDims = {"Tinput": 25.0, "Thot": 600, "od": 0.77, "id": 0.00, "mult": 127.0} bondDims = { "Tinput": 600, "Thot": 600, "od": "clad.id", "id": "control.od", "mult": 127.0, } cladDims = {"Tinput": 25.0, "Thot": 450, "od": 0.80, "id": 0.77, "mult": 127.0} wireDims = { "Tinput": 25.0, "Thot": 450, "od": 0.1, "id": 0.0, "mult": 127.0, "axialPitch": 30.0, "helixDiameter": 0.9, } ductDims = {"Tinput": 25.0, "Thot": 400, "op": 16, "ip": 15.3, "mult": 1.0} intercoolantDims = { "Tinput": 400, "Thot": 400, "op": 17.0, "ip": ductDims["op"], "mult": 1.0, } coolDims = {"Tinput": 25.0, "Thot": 400} control = components.Circle("control", "UZr", **controlDims) clad = components.Circle("clad", "HT9", **cladDims) # This sets up the linking of the bond to the fuel and the clad components. bond = components.Circle("bond", "Sodium", components={"control": control, "clad": clad}, **bondDims) wire = components.Helix("wire", "HT9", **wireDims) duct = components.Hexagon("duct", "HT9", **ductDims) coolant = components.DerivedShape("coolant", "Sodium", **coolDims) intercoolant = components.Hexagon("intercoolant", "Sodium", **intercoolantDims) b.add(control) b.add(bond) b.add(clad) b.add(wire) b.add(duct) b.add(coolant) b.add(intercoolant) return b ================================================ FILE: armi/reactor/converters/tests/test_geometryConverters.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Module to test geometry converters.""" import math import os import unittest from numpy.testing import assert_allclose from armi import runLog from armi.reactor import blocks, geometry, grids from armi.reactor.converters import geometryConverters, uniformMesh from armi.reactor.flags import Flags from armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings from armi.tests import TEST_ROOT, mockRunLogs from armi.utils import directoryChangers, plotting from armi.utils.directoryChangers import TemporaryDirectoryChanger THIS_DIR = os.path.dirname(__file__) class TestGeometryConverters(unittest.TestCase): def setUp(self): self.o, self.r = loadTestReactor(TEST_ROOT) self.cs = self.o.cs def test_addRing(self): """Tests that ``addRing`` adds the correct number of fuel assemblies to the test reactor.""" converter = geometryConverters.FuelAssemNumModifier(self.cs) converter.numFuelAssems = 7 converter.ringsToAdd = 1 * ["radial shield"] converter.convert(self.r) numAssems = len(self.r.core) self.assertEqual(numAssems, 13) # should end up with 6 reflector assemblies per 1/3rd Core locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(4, 1) shieldtype = self.r.core.childrenByLocator[locator].getType() self.assertEqual(shieldtype, "radial shield") # check that the right thing was added # one more test with an uneven number of rings converter.numFuelAssems = 8 converter.convert(self.r) numAssems = len(self.r.core) self.assertEqual(numAssems, 19) # should wind up with 11 reflector assemblies per 1/3rd core def test_setNumberOfFuelAssems(self): """Tests that ``setNumberOfFuelAssems`` properly changes the number of fuel assemblies.""" # tests ability to add fuel assemblies converter = geometryConverters.FuelAssemNumModifier(self.cs) converter.numFuelAssems = 60 converter.convert(self.r) numFuelAssems = 0 for assem in self.r.core: if 
assem.hasFlags(Flags.FUEL): numFuelAssems += 1 self.assertEqual(numFuelAssems, 60) # checks that existing fuel assemblies are preserved locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(1, 1) fueltype = self.r.core.childrenByLocator[locator].getType() self.assertEqual(fueltype, "igniter fuel") # checks that existing control rods are preserved locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(5, 1) controltype = self.r.core.childrenByLocator[locator].getType() self.assertEqual(controltype, "primary control") # checks that existing reflectors are overwritten with feed fuel locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(9, 5) oldshieldtype = self.r.core.childrenByLocator[locator].getType() self.assertEqual(oldshieldtype, "feed fuel") # checks that outer assemblies are removed locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(9, 1) with self.assertRaises(KeyError): _ = self.r.core.childrenByLocator[locator] # tests ability to remove fuel assemblies converter.numFuelAssems = 20 converter.convert(self.r) numFuelAssems = 0 for assem in self.r.core: if assem.hasFlags(Flags.FUEL): numFuelAssems += 1 self.assertEqual(numFuelAssems, 20) def test_getAssembliesInSector(self): allAssems = self.r.core.getAssemblies() fullSector = geometryConverters.HexToRZConverter._getAssembliesInSector(self.r.core, 0, 360) self.assertGreaterEqual(len(fullSector), len(allAssems)) # could be > due to edge assems third = geometryConverters.HexToRZConverter._getAssembliesInSector(self.r.core, 0, 30) # could solve this analytically based on test core size self.assertAlmostEqual(25, len(third)) oneLine = geometryConverters.HexToRZConverter._getAssembliesInSector(self.r.core, 0, 0.001) self.assertAlmostEqual(5, len(oneLine)) # same here class TestHexToRZConverter(unittest.TestCase): def setUp(self): self.o, self.r = loadTestReactor(TEST_ROOT) reduceTestReactorRings(self.r, self.o.cs, 2) self.cs = self.o.cs runLog.setVerbosity("extra") self._expandReactor = False 
self._massScaleFactor = 1.0 if not self._expandReactor: self._massScaleFactor = 3.0 def tearDown(self): del self.o del self.cs del self.r def test_convert(self): """Test HexToRZConverter.convert(). Notes ----- Ensure the converted reactor has 1) nuclides and nuclide masses that match the original reactor, 2) for a given (r,z,theta) location the expected block type exists, 3) the converted reactor has the right (r,z,theta) coordinates, and 4) the converted reactor blocks all have a single (homogenized) component. .. test:: Convert a 3D hex reactor core to an RZ-Theta core. :id: T_ARMI_CONV_3DHEX_TO_2DRZ :tests: R_ARMI_CONV_3DHEX_TO_2DRZ """ # make the reactor smaller, because of a test parallelization edge case for ring in [9, 8, 7, 6, 5, 4, 3]: self.r.core.removeAssembliesInRing(ring, self.o.cs) converterSettings = { "radialConversionType": "Ring Compositions", "axialConversionType": "Axial Coordinates", "uniformThetaMesh": True, "thetaBins": 1, "axialMesh": [25, 50, 75, 100, 150, 175], "thetaMesh": [2 * math.pi], } expectedMassDict, expectedNuclideList = self._getExpectedData() geomConv = geometryConverters.HexToRZConverter(self.cs, converterSettings, expandReactor=self._expandReactor) geomConv.convert(self.r) newR = geomConv.convReactor self._checkBlockComponents(newR) self._checkNuclidesMatch(expectedNuclideList, newR) self._checkNuclideMasses(expectedMassDict, newR) self._checkBlockAtMeshPoint(geomConv) self._checkReactorMeshCoordinates(geomConv) _figs = geomConv.plotConvertedReactor() with directoryChangers.TemporaryDirectoryChanger(): geomConv.plotConvertedReactor("fname") # bonus test: reset() works after converter has filled in values geomConv.reset() self.assertIsNone(geomConv.convReactor) self.assertIsNone(geomConv._radialMeshConversionType) self.assertIsNone(geomConv._axialMeshConversionType) self.assertIsNone(geomConv._currentRadialZoneType) self.assertEqual(geomConv._newBlockNum, 0) def _checkBlockAtMeshPoint(self, geomConv): b = 
plotting._getBlockAtMeshPoint(geomConv.convReactor, 0.0, 2.0 * math.pi, 0.0, 12.0, 50.0, 75.0) self.assertTrue(b.hasFlags(Flags.FUEL)) def _checkReactorMeshCoordinates(self, geomConv): thetaMesh, radialMesh, axialMesh = plotting._getReactorMeshCoordinates(geomConv.convReactor) expectedThetaMesh = [math.pi * 2.0] expectedAxialMesh = [25.0, 50.0, 75.0, 100.0, 150.0, 175.0] expectedRadialMesh = [ 8.794379, 23.26774, ] assert_allclose(expectedThetaMesh, thetaMesh) assert_allclose(expectedRadialMesh, radialMesh) assert_allclose(expectedAxialMesh, axialMesh) def _getExpectedData(self): """Retrieve the mass of all nuclides in the reactor prior to converting.""" expectedMassDict = {} expectedNuclideList = self.r.blueprints.allNuclidesInProblem for nuclide in sorted(expectedNuclideList): expectedMassDict[nuclide] = self.r.core.getMass(nuclide) return expectedMassDict, expectedNuclideList def _checkBlockComponents(self, newR): for b in newR.core.iterBlocks(): if len(b) != 1: raise ValueError("Block {} has {} components and should only have 1".format(b, len(b))) def _checkNuclidesMatch(self, expectedNuclideList, newR): """Check that the nuclide lists match before and after conversion.""" actualNuclideList = newR.blueprints.allNuclidesInProblem if set(expectedNuclideList) != set(actualNuclideList): diffList = sorted(set(expectedNuclideList).difference(actualNuclideList)) diffList += sorted(set(actualNuclideList).difference(expectedNuclideList)) runLog.warning(diffList) raise ValueError( "{0} nuclides do not match between the original and converted reactor".format(len(diffList)) ) def _checkNuclideMasses(self, expectedMassDict, newR): """Check that all nuclide masses in the new reactor are equivalent to before the conversion.""" massMismatchCount = 0 for nuclide in expectedMassDict.keys(): expectedMass = expectedMassDict[nuclide] actualMass = newR.core.getMass(nuclide) / self._massScaleFactor if round(abs(expectedMass - actualMass), 7) != 0.0: print("{:6s} {:10.2f} 
{:10.2f}".format(nuclide, expectedMass, actualMass)) massMismatchCount += 1 # Raise error if there are any inconsistent masses if massMismatchCount > 0: raise ValueError( "{0} nuclides have masses that are not consistent after the conversion".format(massMismatchCount) ) def test_createHomogenizedRZTBlock(self): newBlock = blocks.ThRZBlock("testBlock", self.cs) a = self.r.core[0] converterSettings = {} geomConv = geometryConverters.HexToRZConverter(self.cs, converterSettings, expandReactor=self._expandReactor) volumeExpected = a.getVolume() ( _atoms, _newBlockType, _newBlockTemp, newBlockVol, ) = geomConv.createHomogenizedRZTBlock(newBlock, 0, a.getHeight(), [a]) # The volume of the radialZone and the radialThetaZone should be equal for RZ geometry self.assertAlmostEqual(volumeExpected, newBlockVol) class TestEdgeAssemblyChanger(unittest.TestCase): def setUp(self): """Use the related setup in the testFuelHandlers module.""" self.o, self.r = loadTestReactor(TEST_ROOT) reduceTestReactorRings(self.r, self.o.cs, 3) def tearDown(self): del self.o del self.r def test_edgeAssemblies(self): """Sanity check on adding edge assemblies. .. test:: Test adding/removing assemblies from a reactor. :id: T_ARMI_ADD_EDGE_ASSEMS :tests: R_ARMI_ADD_EDGE_ASSEMS """ def getAssemByRingPos(ringPos: tuple): for a in self.r.core: if a.spatialLocator.getRingPos() == ringPos: return a return None numAssemsOrig = len(self.r.core) # assert that there is no assembly in the (3, 4) (ring, position). self.assertIsNone(getAssemByRingPos((3, 4))) # add the assembly converter = geometryConverters.EdgeAssemblyChanger() converter.addEdgeAssemblies(self.r.core) numAssemsWithEdgeAssem = len(self.r.core) # assert that there is an assembly in the (3, 4) (ring, position). 
self.assertIsNotNone(getAssemByRingPos((3, 4))) self.assertTrue(numAssemsWithEdgeAssem > numAssemsOrig) # try to add the assembly again (you can't) with mockRunLogs.BufferLog() as mock: converter.addEdgeAssemblies(self.r.core) self.assertIn("Skipping addition of edge assemblies", mock.getStdout()) self.assertTrue(numAssemsWithEdgeAssem, len(self.r.core)) # must be added after geom transform for b in self.o.r.core.iterBlocks(): b.p.power = 1.0 converter.scaleParamsRelatedToSymmetry(self.r.core) a = self.r.core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES)[0] self.assertTrue(all(b.p.power == 2.0 for b in a), "Powers were not scaled") # remove the assembly that was added converter.removeEdgeAssemblies(self.r.core) self.assertIsNone(getAssemByRingPos((3, 4))) self.assertEqual(numAssemsOrig, len(self.r.core)) class TestThirdCoreHexToFullCoreChanger(unittest.TestCase): def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() self.o, self.r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml" ) # initialize the block powers to a uniform power profile, accounting for the loaded reactor being 1/3 core numBlocksInFullCore = 0 for a in self.r.core: if a.getLocation() == "001-001": for b in a: numBlocksInFullCore += 1 else: for b in a: # account for the 1/3 symmetry numBlocksInFullCore += 3 for a in self.r.core: if a.getLocation() == "001-001": for b in a: b.p["power"] = self.o.cs["power"] / numBlocksInFullCore / 3 else: for b in a: b.p["power"] = self.o.cs["power"] / numBlocksInFullCore def tearDown(self): del self.o del self.r self.td.__exit__(None, None, None) def test_growToFullCoreFromThirdCore(self): """Test that a hex core can be converted from a third core to a full core geometry. .. test:: Convert a third-core to a full-core geometry and then restore it. 
            :id: T_ARMI_THIRD_TO_FULL_CORE0
            :tests: R_ARMI_THIRD_TO_FULL_CORE
        """

        def getLTAAssems():
            # Collect the lead test assemblies; their count/positions triple on full-core growth.
            aList = []
            for a in self.r.core:
                if a.getType() == "lta fuel":
                    aList.append(a)
            return aList

        # Check the initialization of the third core model
        self.assertFalse(self.r.core.isFullCore)
        self.assertEqual(
            self.r.core.symmetry,
            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),
        )
        initialNumBlocks = len(self.r.core.getBlocks())
        assems = getLTAAssems()
        expectedLoc = [(3, 2)]
        # set ringPosHist to be propagated to full core
        assem = self.r.core.getAssemblyWithStringLocation("003-002")
        assem.p.ringPosHist = [(3, 2), (3, 12), (2, 2), (3, 2)]
        for i, a in enumerate(assems):
            self.assertEqual(a.spatialLocator.getRingPos(), expectedLoc[i])
        # Third-core block powers should sum to one third of the case power.
        self.assertAlmostEqual(self.r.core.getTotalBlockParam("power"), self.o.cs["power"] / 3, places=5)
        self.assertGreater(
            self.r.core.getTotalBlockParam("power", calcBasedOnFullObj=True),
            self.o.cs["power"] / 3,
        )

        # Perform reactor conversion
        changer = geometryConverters.ThirdCoreHexToFullCoreChanger(self.o.cs)
        changer.convert(self.r)

        # Check the full core conversion is successful
        self.assertTrue(self.r.core.isFullCore)
        self.assertGreater(len(self.r.core.getBlocks()), initialNumBlocks)
        self.assertEqual(self.r.core.symmetry.domain, geometry.DomainType.FULL_CORE)
        assems = getLTAAssems()
        # The single LTA should now appear three times, rotated 120 degrees apart, and its
        # position history should be rotated along with it.
        expectedLoc = [(3, 2), (3, 6), (3, 10)]
        expectedRingPosHists = [
            [(3, 2), (3, 12), (2, 2), (3, 2)],
            [(3, 6), (3, 4), (2, 4), (3, 6)],
            [(3, 10), (3, 8), (2, 6), (3, 10)],
        ]
        for i, a in enumerate(assems):
            self.assertEqual(a.spatialLocator.getRingPos(), expectedLoc[i])
            self.assertListEqual(a.p.ringPosHist, expectedRingPosHists[i])
        # ensure that block power is handled correctly
        self.assertAlmostEqual(self.r.core.getTotalBlockParam("power"), self.o.cs["power"], places=5)
        self.assertAlmostEqual(
            self.r.core.getTotalBlockParam("power", calcBasedOnFullObj=True),
            self.o.cs["power"],
            places=5,
        )

        # Check that the geometry can be restored to a third core
        changer.restorePreviousGeometry(self.r)
        self.assertEqual(initialNumBlocks, len(self.r.core.getBlocks()))
        self.assertEqual(
            self.r.core.symmetry,
            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),
        )
        self.assertFalse(self.r.core.isFullCore)
        self.assertAlmostEqual(self.r.core.getTotalBlockParam("power"), self.o.cs["power"] / 3, places=5)
        assems = getLTAAssems()
        expectedLoc = [(3, 2)]
        for i, a in enumerate(assems):
            self.assertEqual(a.spatialLocator.getRingPos(), expectedLoc[i])

    def test_initNewFullReactor(self):
        """Test that initNewReactor will growToFullCore if necessary."""
        # Perform reactor conversion
        changer = geometryConverters.ThirdCoreHexToFullCoreChanger(self.o.cs)
        changer.convert(self.r)
        converter = uniformMesh.NeutronicsUniformMeshConverter(self.o.cs)
        newR = converter.initNewReactor(self.r, self.o.cs)

        # Check the full core conversion is successful
        self.assertTrue(self.r.core.isFullCore)
        self.assertTrue(newR.core.isFullCore)
        self.assertEqual(newR.core.symmetry.domain, geometry.DomainType.FULL_CORE)

    def test_skipGrowToFullCoreWhenAlreadyFullCore(self):
        """Test that hex core is not modified when third core to full core changer is called on an
        already full core geometry.

        .. test:: Convert a one-third core to full core and restore back to one-third core.
:id: T_ARMI_THIRD_TO_FULL_CORE2 :tests: R_ARMI_THIRD_TO_FULL_CORE """ # Check the initialization of the third core model and convert to a full core self.assertFalse(self.r.core.isFullCore) self.assertEqual( self.r.core.symmetry, geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC), ) numBlocksThirdCore = len(self.r.core.getBlocks()) # convert the third core to full core changer = geometryConverters.ThirdCoreHexToFullCoreChanger(self.o.cs) with mockRunLogs.BufferLog() as mock: changer.convert(self.r) self.assertIn("Expanding to full core geometry", mock.getStdout()) numBlocksFullCore = len(self.r.core.getBlocks()) self.assertEqual(self.r.core.symmetry.domain, geometry.DomainType.FULL_CORE) # try to convert to full core again (it shouldn't do anything) with mockRunLogs.BufferLog() as mock: changer.convert(self.r) self.assertIn( "Detected that full core reactor already exists. Cannot expand.", mock.getStdout(), ) self.assertEqual(self.r.core.symmetry.domain, geometry.DomainType.FULL_CORE) self.assertEqual(numBlocksFullCore, len(self.r.core.getBlocks())) # restore back to 1/3 core with mockRunLogs.BufferLog() as mock: changer.restorePreviousGeometry(self.r) self.assertIn("revert from full to 1/3 core", mock.getStdout()) self.assertEqual(numBlocksThirdCore, len(self.r.core.getBlocks())) self.assertEqual( self.r.core.symmetry, geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC), ) ================================================ FILE: armi/reactor/converters/tests/test_meshConverters.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests of RZ Mesh Converter.""" import math import unittest from armi.reactor.converters import geometryConverters, meshConverters from armi.testing import TESTING_ROOT, loadTestReactor class TestRZReactorMeshConverter(unittest.TestCase): """Loads a hex reactor and converts its mesh to RZTheta coordinates.""" def setUp(self): self.o, self.r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml" ) self._converterSettings = { "uniformThetaMesh": True, "thetaBins": 1, "thetaMesh": [2 * math.pi], "axialMesh": [25.0, 50.0, 174.0], "axialSegsPerBin": 1, } def test_meshByRingCompAxialBinsSmallCore(self): expectedRadialMesh = [2, 3, 4, 4] expectedAxialMesh = [15.0, 35.32, 226.46] expectedThetaMesh = [2 * math.pi] meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialBins(self._converterSettings) meshConvert.generateMesh(self.r) self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh) self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh) self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh) def test_meshByRingCompoAxialCoordsSmallCore(self): expectedRadialMesh = [2, 3, 4, 4] expectedAxialMesh = [25.0, 50.0, 226.46] expectedThetaMesh = [2 * math.pi] meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialCoordinates( self._converterSettings ) meshConvert.generateMesh(self.r) self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh) self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh) 
self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh) def test_meshByRingCompAxialFlagsSmallCore(self): expectedRadialMesh = [2, 3, 4, 4] expectedAxialMesh = [15.0, 35.32, 226.46] expectedThetaMesh = [2 * math.pi] meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialFlags(self._converterSettings) meshConvert.generateMesh(self.r) self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh) self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh) self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh) def _growReactor(self): modifier = geometryConverters.FuelAssemNumModifier(self.o.cs) modifier.numFuelAssems = 1 modifier.ringsToAdd = 3 * ["inner fuel"] + ["middle core fuel"] modifier.convert(self.r) self._converterSettingsLargerCore = { "uniformThetaMesh": True, "thetaBins": 1, "thetaMesh": [2 * math.pi], "axialMesh": [25.0, 30.0, 60.0, 90.0, 105.2151, 152.0, 174.0], "axialSegsPerBin": 2, } def test_meshByRingCompAxialBinsLargeCore(self): self._growReactor() expectedRadialMesh = [2, 3, 4, 5, 6] expectedAxialMesh = [35.32, 226.46] expectedThetaMesh = [2 * math.pi] meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialBins( self._converterSettingsLargerCore ) meshConvert.generateMesh(self.r) self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh) self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh) self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh) def test_meshByRingCompAxialCoordsLargeCore(self): self._growReactor() expectedRadialMesh = [2, 3, 4, 5, 6] expectedAxialMesh = [25.0, 30.0, 60.0, 90.0, 105.2151, 152.0, 226.46] expectedThetaMesh = [2 * math.pi] meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialCoordinates( self._converterSettingsLargerCore ) meshConvert.generateMesh(self.r) self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh) self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh) 
self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh) def test_meshByRingCompAxialFlagsLargeCore(self): self._growReactor() expectedRadialMesh = [2, 3, 4, 5, 6] expectedAxialMesh = [15.0, 35.32, 226.46] expectedThetaMesh = [2 * math.pi] meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialFlags( self._converterSettingsLargerCore ) meshConvert.generateMesh(self.r) self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh) self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh) self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh) ================================================ FILE: armi/reactor/converters/tests/test_pinTypeBlockConverters.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit tests for pin type block converters.""" import copy import unittest from armi.reactor.converters.pinTypeBlockConverters import ( adjustCladThicknessByID, adjustCladThicknessByOD, adjustSmearDensity, ) from armi.reactor.flags import Flags from armi.reactor.tests.test_blocks import buildSimpleFuelBlock, loadTestBlock class TestPinTypeConverters(unittest.TestCase): def setUp(self): self.block = loadTestBlock() def test_adjustCladThicknessByOD(self): thickness = 0.05 clad = self.block.getComponent(Flags.CLAD) ref = clad.getDimension("id", cold=True) + 2.0 * thickness adjustCladThicknessByOD(self.block, thickness) cur = clad.getDimension("od", cold=True) curThickness = (clad.getDimension("od", cold=True) - clad.getDimension("id", cold=True)) / 2.0 self.assertAlmostEqual(cur, ref) self.assertAlmostEqual(curThickness, thickness) def test_adjustCladThicknessByID(self): thickness = 0.05 clad = self.block.getComponent(Flags.CLAD) ref = clad.getDimension("od", cold=True) - 2.0 * thickness adjustCladThicknessByID(self.block, thickness) cur = clad.getDimension("id", cold=True) curThickness = (clad.getDimension("od", cold=True) - clad.getDimension("id", cold=True)) / 2.0 self.assertAlmostEqual(cur, ref) self.assertAlmostEqual(curThickness, thickness) class MassConservationTests(unittest.TestCase): r"""Tests designed to verify mass conservation during thermal expansion.""" def setUp(self): self.b = buildSimpleFuelBlock() def test_adjustSmearDensity(self): r"""Tests the getting, setting, and getting of smear density functions.""" bolBlock = copy.deepcopy(self.b) s = self.b.getSmearDensity(cold=False) fuel = self.b.getComponent(Flags.FUEL) clad = self.b.getComponent(Flags.CLAD) self.assertAlmostEqual(s, (fuel.getDimension("od") ** 2) / clad.getDimension("id") ** 2, 8) adjustSmearDensity(self.b, self.b.getSmearDensity(), bolBlock=bolBlock) s2 = self.b.getSmearDensity(cold=False) self.assertAlmostEqual(s, s2, 8) adjustSmearDensity(self.b, 0.733, bolBlock=bolBlock) 
self.assertAlmostEqual(0.733, self.b.getSmearDensity(), 8) # try annular fuel clad = self.b.getComponent(Flags.CLAD) fuel = self.b.getComponent(Flags.FUEL) fuel.setDimension("od", clad.getDimension("id", cold=True)) fuel.setDimension("id", 0.0001) adjustSmearDensity(self.b, 0.733, bolBlock=bolBlock) self.assertAlmostEqual(0.733, self.b.getSmearDensity(), 8) ================================================ FILE: armi/reactor/converters/tests/test_uniformMesh.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for the uniform mesh geometry converter.""" import collections import copy import os import random import unittest from unittest.mock import Mock import numpy as np from armi.nuclearDataIO.cccc import isotxs from armi.physics.neutronics.settings import CONF_XS_KERNEL from armi.reactor.converters import uniformMesh from armi.reactor.flags import Flags from armi.reactor.tests import test_assemblies, test_blocks from armi.settings.fwSettings.globalSettings import CONF_UNIFORM_MESH_MINIMUM_SIZE from armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings from armi.tests import ISOAA_PATH, TEST_ROOT _ISOTXS_CACHE = None def _getIsotxsLibrary(): """These tests don't modify the isotxs lib, so we only need to load it once.""" global _ISOTXS_CACHE if _ISOTXS_CACHE is None: _ISOTXS_CACHE = isotxs.readBinary(ISOAA_PATH) return _ISOTXS_CACHE class DummyFluxOptions: def __init__(self, cs): self.cs = cs self.photons = False self.calcReactionRatesOnMeshConversion = True class TestConverterFactory(unittest.TestCase): def setUp(self): self.o, self.r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", ) self.dummyOptions = DummyFluxOptions(self.o.cs) def test_converterFactory(self): self.dummyOptions.photons = False neutronConverter = uniformMesh.converterFactory(self.dummyOptions) self.assertTrue(neutronConverter, uniformMesh.NeutronicsUniformMeshConverter) self.dummyOptions.photons = True gammaConverter = uniformMesh.converterFactory(self.dummyOptions) self.assertTrue(gammaConverter, uniformMesh.GammaUniformMeshConverter) class TestAssemblyUniformMesh(unittest.TestCase): """ Tests individual operations of the uniform mesh converter. 
Uses the test reactor for detailedAxialExpansion """ @classmethod def setUpClass(cls): cls.o, cls.r = loadTestReactor(inputFilePath=os.path.join(TEST_ROOT, "detailedAxialExpansion")) cls.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=cls.o.cs) cls.converter._sourceReactor = cls.r cls.converter._setParamsToUpdate("in") def test_makeAssemWithUniformMesh(self): sourceAssem = self.r.core.getFirstAssembly(Flags.IGNITER) # assign different flags to test flag preservation sourceAssem.p.flags = Flags.FUEL | Flags.IGNITER | Flags.TEST self.converter._generateUniformMesh(minimumMeshSize=0.01) b = sourceAssem.getFirstBlock(Flags.FUEL) newAssem = self.converter.makeAssemWithUniformMesh( sourceAssem, self.converter._uniformMesh, paramMapper=uniformMesh.ParamMapper([], ["power"], b), mapNumberDensities=True, ) self.assertEqual(newAssem.p.flags, sourceAssem.p.flags) # chnage sourceAssem flags to verify that a unique copy was made sourceAssem.p.flags = Flags.FUEL | Flags.IGNITER self.assertNotEqual(newAssem.p.flags, sourceAssem.p.flags) prevB = None for newB in newAssem: sourceB = sourceAssem.getBlockAtElevation(newB.p.z) if newB.isFuel() and sourceB.isFuel(): self.assertEqual(newB.p["xsType"], sourceB.p["xsType"]) elif not newB.isFuel() and not sourceB.isFuel(): self.assertEqual(newB.p["xsType"], sourceB.p["xsType"]) elif newB.isFuel() and not sourceB.isFuel(): # a newB that is fuel can overwrite the xsType of a nonfuel sourceB; # this is the expected behavior immediately above the fuel block self.assertEqual(newB.p["xsType"], prevB.p["xsType"]) elif sourceB.isFuel() and not newB.isFuel(): raise ValueError( f"The source block {sourceB} is fuel but uniform mesh convertercreated a nonfuel block {newB}." 
) prevB = newB newAssemNumberDens = newAssem.getNumberDensities() for nuc, val in sourceAssem.getNumberDensities().items(): self.assertAlmostEqual(val, newAssemNumberDens[nuc]) for nuc, val in sourceAssem.getNumberDensities().items(): if not val: continue self.assertAlmostEqual(newAssem.getNumberOfAtoms(nuc) / sourceAssem.getNumberOfAtoms(nuc), 1.0) def test_makeAssemWithUniformMeshSubmesh(self): """If sourceAssem has submesh, check that newAssem splits into separate blocks.""" # assign axMesh to blocks randomly sourceAssem = self.r.core.refAssem for i, b in enumerate(sourceAssem): b.p.axMesh = i % 2 + 1 self.r.core.updateAxialMesh() newAssem = self.converter.makeAssemWithUniformMesh( sourceAssem, self.r.core.p.axialMesh[1:], paramMapper=uniformMesh.ParamMapper([], ["power"], b), ) self.assertNotEqual(len(newAssem), len(sourceAssem)) newHeights = [b.getHeight() for b in newAssem] sourceHeights = [b.getHeight() / b.p.axMesh for b in sourceAssem for i in range(b.p.axMesh)] self.assertListEqual(newHeights, sourceHeights) def test_makeAssemUniformMeshParams(self): """Tests creating a uniform mesh assembly while mapping both number densities and specified parameters.""" sourceAssem = self.r.core.getFirstAssembly(Flags.IGNITER) for b in sourceAssem: b.p.flux = 1.0 b.p.power = 10.0 b.p.mgFlux = [1.0, 2.0] # Create a new assembly that has the same mesh as the source assem, but also demonstrates the transfer of number # densities and parameter data as a 1:1 mapping without any volume integration/data migration based on a # differing mesh. 
bpNames = ["flux", "power", "mgFlux"] newAssem = self.converter.makeAssemWithUniformMesh( sourceAssem, sourceAssem.getAxialMesh(), paramMapper=uniformMesh.ParamMapper([], bpNames, b), mapNumberDensities=True, ) for b, origB in zip(newAssem, sourceAssem): self.assertEqual(b.p.flux, 1.0) self.assertEqual(b.p.power, 10.0) self.assertListEqual(list(b.p.mgFlux), [1.0, 2.0]) self.assertEqual(b.p.flux, origB.p.flux) self.assertEqual(b.p.power, origB.p.power) self.assertListEqual(list(b.p.mgFlux), list(origB.p.mgFlux)) originalNDens = origB.getNumberDensities() for nuc, val in b.getNumberDensities().items(): self.assertAlmostEqual(val, originalNDens[nuc]) # Now, let us update the flux, power, and mgFlux on the new assembly and test that it can be transferred back to # the source assembly. for b in newAssem: b.p.flux = 2.0 b.p.power = 20.0 b.p.mgFlux = [2.0, 4.0] bpNames = ["flux", "power", "mgFlux"] uniformMesh.UniformMeshGeometryConverter.setAssemblyStateFromOverlaps( sourceAssembly=newAssem, destinationAssembly=sourceAssem, paramMapper=uniformMesh.ParamMapper([], bpNames, b), ) for b, updatedB in zip(newAssem, sourceAssem): self.assertEqual(b.p.flux, 2.0) self.assertEqual(b.p.power, 20.0) self.assertListEqual(list(b.p.mgFlux), [2.0, 4.0]) self.assertEqual(b.p.flux, updatedB.p.flux) self.assertEqual(b.p.power, updatedB.p.power) self.assertListEqual(list(b.p.mgFlux), list(updatedB.p.mgFlux)) originalNDens = updatedB.getNumberDensities() for nuc, val in b.getNumberDensities().items(): self.assertAlmostEqual(val, originalNDens[nuc]) def test_clearAssemblyState(self): """Tests clearing the parameter state of an assembly and returning the cached parameters.""" sourceAssem = self.r.core.getFirstAssembly(Flags.IGNITER) for b in sourceAssem: b.p.flux = 1.0 b.p.power = 10.0 b.p.mgFlux = [1.0, 2.0] for b in sourceAssem: self.assertEqual(b.p.flux, 1.0) self.assertEqual(b.p.power, 10.0) self.assertListEqual(list(b.p.mgFlux), [1.0, 2.0]) # Let's test the clearing of the assigned 
parameters on the source assembly. cachedBlockParams = uniformMesh.UniformMeshGeometryConverter.clearStateOnAssemblies( [sourceAssem], blockParamNames=["flux", "power", "mgFlux"], cache=True, ) for b in sourceAssem: self.assertEqual(b.p.flux, b.p.pDefs["flux"].default) self.assertEqual(b.p.power, b.p.pDefs["flux"].default) self.assertEqual(b.p.mgFlux, b.p.pDefs["mgFlux"].default) self.assertEqual(cachedBlockParams[b]["flux"], 1.0) self.assertEqual(cachedBlockParams[b]["power"], 10.0) self.assertListEqual(list(cachedBlockParams[b]["mgFlux"]), [1.0, 2.0]) class TestUniformMeshGenerator(unittest.TestCase): @classmethod def setUpClass(cls): newSettings = {CONF_XS_KERNEL: "MC2v2", CONF_UNIFORM_MESH_MINIMUM_SIZE: 3.0} cls.o, cls.r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", customSettings=newSettings, ) cls.r.core.lib = _getIsotxsLibrary() # make the mesh a little non-uniform a4 = cls.r.core[4] a4[2].setHeight(a4[2].getHeight() * 1.05) a3 = cls.r.core[3] a3[2].setHeight(a3[2].getHeight() * 1.20) def setUp(self): self.generator = uniformMesh.UniformMeshGenerator(self.r, self.o.cs[CONF_UNIFORM_MESH_MINIMUM_SIZE]) def test_computeAverageAxialMesh(self): refMesh = self.r.core.findAllAxialMeshPoints([self.r.core.getFirstAssembly(Flags.FUEL)])[1:] self.generator._computeAverageAxialMesh() avgMesh = self.generator._commonMesh self.assertEqual(len(refMesh), len(avgMesh)) self.assertEqual(refMesh[0], avgMesh[0]) self.assertNotEqual(refMesh[4], avgMesh[4], "Not equal above the fuel.") def test_filterMesh(self): """ Test that the mesh can be correctly filtered. .. test:: Produce a uniform mesh with a size no smaller than a user-specified value. 
:id: T_ARMI_UMC_MIN_MESH1 :tests: R_ARMI_UMC_MIN_MESH """ meshList = [1.0, 3.0, 4.0, 7.0, 9.0, 12.0, 16.0, 19.0, 20.0] anchorPoints = [4.0, 16.0] combinedMesh = self.generator._filterMesh( meshList, self.generator.minimumMeshSize, anchorPoints, preference="bottom", ) self.assertListEqual(combinedMesh, [1.0, 4.0, 7.0, 12.0, 16.0, 19.0]) combinedMesh = self.generator._filterMesh( meshList, self.generator.minimumMeshSize, anchorPoints, preference="top", ) self.assertListEqual(combinedMesh, [1.0, 4.0, 9.0, 12.0, 16.0, 20.0]) anchorPoints = [3.0, 4.0] with self.assertRaises(ValueError): self.generator._filterMesh( meshList, self.generator.minimumMeshSize, anchorPoints, preference="top", ) def test_filteredTopAndBottom(self): fuelBottoms, fuelTops = self.generator._getFilteredMeshTopAndBottom(Flags.FUEL) self.assertListEqual(fuelBottoms, [15.0]) self.assertListEqual(fuelTops, [35.32]) # ctrlAndFuelBottoms and ctrlAndFuelTops include the fuelBottoms and fuelTops, respectively ( ctrlAndFuelBottoms, ctrlAndFuelTops, ) = self.generator._getFilteredMeshTopAndBottom(Flags.CONTROL, fuelBottoms, fuelTops) self.assertListEqual(ctrlAndFuelBottoms, [15.0]) self.assertListEqual(ctrlAndFuelTops, [35.32]) def test_generateCommonMesh(self): """ Covers generateCommonmesh() and _decuspAxialMesh(). .. test:: Produce a uniform mesh with a size no smaller than a user-specified value. :id: T_ARMI_UMC_MIN_MESH0 :tests: R_ARMI_UMC_MIN_MESH .. test:: Preserve the boundaries of fuel and control material. 
:id: T_ARMI_UMC_NON_UNIFORM0 :tests: R_ARMI_UMC_NON_UNIFORM """ self.generator.generateCommonMesh() expectedMesh = [ 15.0, 25.16, 35.32, 60.06580357142856, 84.81160714285714, 109.55741071428572, 134.3032142857143, 159.04901785714287, 183.79482142857142, 208.540625, 233.2864285714286, ] for i, item in enumerate(list(self.generator._commonMesh)): self.assertAlmostEqual(item, expectedMesh[i]) class TestUniformMeshComponents(unittest.TestCase): """Tests individual operations of the uniform mesh converter.""" @classmethod def setUpClass(cls): cls.o, cls.r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", ) cls.r.core.lib = _getIsotxsLibrary() # make the mesh a little non-uniform a = cls.r.core[4] a[2].setHeight(a[2].getHeight() * 1.05) def setUp(self): self.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=self.o.cs) self.converter._sourceReactor = self.r def test_blueprintCopy(self): """Ensure that necessary blueprint attributes are set.""" convReactor = self.converter.initNewReactor(self.converter._sourceReactor, self.o.cs) converted = convReactor.blueprints original = self.converter._sourceReactor.blueprints # NOTE: items within toCompare must be list or "list-like", like an ordered set toCompare = ["activeNuclides", "allNuclidesInProblem", "elementsToExpand", "inertNuclides"] for attr in toCompare: for c, o in zip(getattr(converted, attr), getattr(original, attr)): self.assertEqual(c, o) # ensure that the assemblies were copied over self.assertTrue(converted.assemblies, msg="Assembly objects not copied!") def applyNonUniformHeightDistribution(reactor): """Modifies some assemblies to have non-uniform axial meshes.""" for a in reactor.core: delta = 0.0 for b in a[:-1]: origHeight = b.getHeight() newHeight = origHeight * (1 + 0.03 * random.uniform(-1, 1)) b.setHeight(newHeight) delta += newHeight - origHeight a[-1].setHeight(a[-1].getHeight() - delta) a.calculateZCoords() class 
TestUniformMesh(unittest.TestCase): """Tests full uniform mesh converter, using a smaller test reactor.""" @classmethod def setUpClass(cls): # random seed to support random mesh in unit tests below random.seed(987324987234) cls.o, cls.r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", customSettings={CONF_XS_KERNEL: "MC2v2"}, ) cls.r.core.lib = _getIsotxsLibrary() cls.r.core.p.keff = 1.0 cls.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=cls.o.cs, calcReactionRates=True) # reactor parameters cls.r.core.p.beta = 700 cls.r.core.p.betaComponents = [100, 150, 150, 100, 100, 100] cls.r.core.p.power = 10 cls.reactorParamNames = ["beta", "betaComponents", "power", "keff", "keffUnc"] cls.converter._cachedReactorCoreParamData = {"powerDensity": 1.0} cls.paramMapper = uniformMesh.ParamMapper(cls.reactorParamNames, [], cls.r.core.getFirstBlock()) def test_convertNumberDensities(self): """ Test the reactor mass before and after conversion. .. test:: Make a copy of the reactor where the new reactor core has a uniform axial mesh. 
:id: T_ARMI_UMC :tests: R_ARMI_UMC """ refMass = self.r.core.getMass("U235") # perturb the heights of the assemblies -> changes the mass of everything in the core applyNonUniformHeightDistribution(self.r) perturbedCoreMass = self.r.core.getMass("U235") self.assertNotEqual(refMass, perturbedCoreMass) self.converter.convert(self.r) uniformReactor = self.converter.convReactor uniformMass = uniformReactor.core.getMass("U235") # conversion conserved mass self.assertAlmostEqual(perturbedCoreMass, uniformMass) # conversion didn't change source reactor mass self.assertAlmostEqual(self.r.core.getMass("U235"), perturbedCoreMass) # conversion results in uniform axial mesh refAssemMesh = self.converter.convReactor.core.refAssem.getAxialMesh() for a in self.converter.convReactor.core: mesh = a.getAxialMesh() for ref, check in zip(refAssemMesh, mesh): self.assertEqual(ref, check) class TestUniformMeshLargeReactor(unittest.TestCase): """Tests full uniform mesh converter, using a larger test reactor.""" @classmethod def setUpClass(cls): # random seed to support random mesh in unit tests below random.seed(987324987234) cls.o, cls.r = loadTestReactor(TEST_ROOT, customSettings={CONF_XS_KERNEL: "MC2v2"}) reduceTestReactorRings(cls.r, cls.o.cs, 2) cls.r.core.lib = _getIsotxsLibrary() cls.r.core.p.keff = 1.0 cls.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=cls.o.cs, calcReactionRates=True) # reactor parameters cls.r.core.p.beta = 700 cls.r.core.p.betaComponents = [100, 150, 150, 100, 100, 100] cls.r.core.p.power = 10 cls.reactorParamNames = ["beta", "betaComponents", "power", "keff", "keffUnc"] cls.converter._cachedReactorCoreParamData = {"powerDensity": 1.0} cls.paramMapper = uniformMesh.ParamMapper(cls.reactorParamNames, [], cls.r.core.getFirstBlock()) def test_applyStateToOriginal(self): """ Test applyStateToOriginal() to revert mesh conversion. .. test:: Map select parameters from composites on the new mesh to the original mesh. 
:id: T_ARMI_UMC_PARAM_BACKWARD0 :tests: R_ARMI_UMC_PARAM_BACKWARD """ applyNonUniformHeightDistribution(self.r) # NOTE: this perturbs the ref mass self.converter.convert(self.r) for ib, b in enumerate(self.converter.convReactor.core.iterBlocks()): b.p.mgFlux = list(range(1, 34)) b.p.adjMgFlux = list(range(1, 34)) b.p.fastFlux = 2.0 b.p.flux = 5.0 b.p.power = 5.0 b.p.pdens = 0.5 b.p.fluxPeak = 10.0 + (-1) ** ib # check integral and density params assemblyPowers = [a.calcTotalParam("power") for a in self.converter.convReactor.core] totalPower = self.converter.convReactor.core.calcTotalParam("power", generationNum=2) totalPower2 = self.converter.convReactor.core.calcTotalParam("pdens", volumeIntegrated=True, generationNum=2) self.converter.applyStateToOriginal() for b in self.r.core.iterBlocks(): self.assertAlmostEqual(b.p.fastFlux, 2.0) self.assertAlmostEqual(b.p.flux, 5.0) self.assertAlmostEqual(b.p.pdens, 0.5) # fluxPeak is mapped differently as a ParamLocation.MAX value # make sure that it's one of the two exact possible values self.assertIn(b.p.fluxPeak, [9.0, 11.0]) for expectedPower, a in zip(assemblyPowers, self.r.core): self.assertAlmostEqual(a.calcTotalParam("power"), expectedPower) self.assertAlmostEqual( self.r.core.calcTotalParam("pdens", volumeIntegrated=True, generationNum=2), totalPower2, ) self.assertAlmostEqual(self.r.core.calcTotalParam("power", generationNum=2), totalPower) self.converter.updateReactionRates() for a in self.r.core: for b in a: self.assertTrue(b.p.rateAbs) self.assertTrue(b.p.rateCap) # reactor parameters self.assertEqual(self.r.core.p.power, 10) self.assertEqual(self.r.core.p.beta, 700) self.assertEqual(self.r.core.p.powerDensity, 1.0) self.assertEqual(self.r.core.p.keff, 1.0) self.assertEqual(self.r.core.p.keffUnc, 0.0) self.assertListEqual(self.r.core.p.betaComponents, [100, 150, 150, 100, 100, 100]) class TestCalcReationRates(unittest.TestCase): def test_calcReactionRatesBlockList(self): """ Test that the efficient reaction rate 
code executes and sets a param > 0.0. .. test:: Return the reaction rates for a given list of ArmiObjects. :id: T_ARMI_FLUX_RX_RATES_BY_XS_ID :tests: R_ARMI_FLUX_RX_RATES """ b = test_blocks.loadTestBlock() test_blocks.applyDummyData(b) self.assertAlmostEqual(b.p.rateAbs, 0.0) blockList = [copy.deepcopy(b) for _i in range(3)] xsID = b.getMicroSuffix() xsNucDict = {nuc: b.core.lib.getNuclide(nuc, xsID) for nuc in b.getNuclides()} uniformMesh.UniformMeshGeometryConverter._calcReactionRatesBlockList(blockList, 1.01, xsNucDict) for b in blockList: self.assertGreater(b.p.rateAbs, 0.0) vfrac = b.getComponentAreaFrac(Flags.FUEL) self.assertEqual(b.p.fisDens, b.p.rateFis / vfrac) self.assertEqual(b.p.fisDensHom, b.p.rateFis) class TestGammaUniformMesh(unittest.TestCase): """Tests gamma uniform mesh converter.""" @classmethod def setUpClass(cls): # random seed to support random mesh in unit tests below random.seed(987324987234) cls.o, cls.r = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", customSettings={CONF_XS_KERNEL: "MC2v2"}, ) cls.r.core.lib = _getIsotxsLibrary() cls.r.core.p.keff = 1.0 cls.converter = uniformMesh.GammaUniformMeshConverter(cs=cls.o.cs) def test_convertNumberDensities(self): refMass = self.r.core.getMass("U235") applyNonUniformHeightDistribution(self.r) # this changes the mass of everything in the core perturbedCoreMass = self.r.core.getMass("U235") self.assertNotEqual(refMass, perturbedCoreMass) self.converter.convert(self.r) uniformReactor = self.converter.convReactor uniformMass = uniformReactor.core.getMass("U235") self.assertAlmostEqual(perturbedCoreMass, uniformMass) # conversion conserved mass # conversion didn't change source reactor mass self.assertAlmostEqual(self.r.core.getMass("U235"), perturbedCoreMass) def test_applyStateToOriginal(self): """ Test applyStateToOriginal() to revert mesh conversion. .. 
test:: Map select parameters from composites on the new mesh to the original mesh. :id: T_ARMI_UMC_PARAM_BACKWARD1 :tests: R_ARMI_UMC_PARAM_BACKWARD """ applyNonUniformHeightDistribution(self.r) # note: this perturbs the ref. mass # set original parameters on pre-mapped core with non-uniform assemblies for b in self.r.core.iterBlocks(): b.p.mgFlux = list(range(33)) b.p.adjMgFlux = list(range(33)) b.p.fastFlux = 2.0 b.p.flux = 5.0 b.p.power = 5.0 b.p.linPow = 2.0 # set new parameters on core with uniform assemblies (emulate a physics kernel) self.converter.convert(self.r) for b in self.converter.convReactor.core.iterBlocks(): b.p.powerGamma = 0.5 b.p.powerNeutron = 0.5 b.p.linPow = 10.0 b.p.power = b.p.powerGamma + b.p.powerNeutron # check integral and density params assemblyPowers = [a.calcTotalParam("power") for a in self.converter.convReactor.core] assemblyGammaPowers = [a.calcTotalParam("powerGamma") for a in self.converter.convReactor.core] totalPower = self.converter.convReactor.core.calcTotalParam("power", generationNum=2) totalPowerGamma = self.converter.convReactor.core.calcTotalParam("powerGamma", generationNum=2) self.converter.applyStateToOriginal() for b in self.r.core.iterBlocks(): # equal to original value because these were never mapped self.assertEqual(b.p.fastFlux, 2.0) self.assertEqual(b.p.flux, 5.0) # not equal because blocks are different size self.assertNotEqual(b.p.powerGamma, 0.5) self.assertNotEqual(b.p.powerNeutron, 0.5) self.assertNotEqual(b.p.power, 1.0) # has updated value self.assertAlmostEqual(b.p.linPow, 10.0) # equal because these are mapped for expectedPower, expectedGammaPower, a in zip(assemblyPowers, assemblyGammaPowers, self.r.core): self.assertAlmostEqual(a.calcTotalParam("power"), expectedPower) self.assertAlmostEqual(a.calcTotalParam("powerGamma"), expectedGammaPower) self.assertAlmostEqual(self.r.core.calcTotalParam("powerGamma", generationNum=2), totalPowerGamma) self.assertAlmostEqual(self.r.core.calcTotalParam("power", 
class TestParamConversion(unittest.TestCase):
    def setUp(self):
        """
        Build two assemblies.

        The source assembly has two blocks, heights 3 and 7 cm. The destination has one big block
        that's 10 cm. Flux is set to 5 and 10 respectively on the two source blocks. They are
        populated with arbitrary flux and pdens values.
        """
        self.sourceAssem, self.destinationAssem = test_assemblies.buildTestAssemblies()[2:]
        self.height1 = 3.0
        self.height2 = 7.0
        self.sourceAssem[0].setHeight(self.height1)
        self.sourceAssem[0].p.flux = 5.0
        self.sourceAssem[1].setHeight(self.height2)
        self.sourceAssem[1].p.flux = 10.0
        self.sourceAssem.calculateZCoords()
        self.destinationAssem[0].setHeight(self.height1 + self.height2)
        self.destinationAssem.calculateZCoords()

        # This sets up a caching for the `mgNeutronVelocity` block parameter on each of the blocks
        # of the destination assembly without setting the data on the blocks of the source assembly
        # to demonstrate that only new parameters set on the source assembly will be mapped to the
        # destination assembly. This ensures that parameters that are not being set on the source
        # assembly are not cleared out on the destination assembly with
        # `setAssemblyStateFromOverlaps` is called.
        self._cachedBlockParamData = collections.defaultdict(dict)
        for b in self.destinationAssem:
            self._cachedBlockParamData[b]["mgNeutronVelocity"] = [1.0] * 33
            b.p["mgNeutronVelocity"] = self._cachedBlockParamData[b]["mgNeutronVelocity"]

    def test_setStateFromOverlaps(self):
        """
        Test that state is translated correctly from source to dest assems.

        Here we set flux and pdens to 3 on the source blocks.

        .. test:: Map select parameters from composites on the original mesh to the new mesh.
            :id: T_ARMI_UMC_PARAM_FORWARD
            :tests: R_ARMI_UMC_PARAM_FORWARD
        """
        paramList = ["flux", "pdens"]
        for pName in paramList:
            for b in self.sourceAssem:
                b.p[pName] = 3

        bpNames = paramList + ["mgNeutronVelocity"]
        # NOTE(review): `b` here is the loop variable left over from the loop above —
        # presumably just used as a template block for ParamMapper; confirm intent.
        uniformMesh.UniformMeshGeometryConverter.setAssemblyStateFromOverlaps(
            self.sourceAssem,
            self.destinationAssem,
            paramMapper=uniformMesh.ParamMapper([], bpNames, b),
        )

        for paramName in paramList:
            sourceVal1 = self.sourceAssem[0].p[paramName]
            sourceVal2 = self.sourceAssem[1].p[paramName]
            # destination value is the height-weighted average of the two source blocks
            self.assertAlmostEqual(
                self.destinationAssem[0].p[paramName],
                (sourceVal1 * self.height1 + sourceVal2 * self.height2) / (self.height1 + self.height2),
            )

        for b in self.sourceAssem:
            self.assertIsNone(b.p.mgNeutronVelocity)

        for b in self.destinationAssem:
            # unmapped parameter must keep its cached value (not be cleared by the mapping)
            self.assertListEqual(
                b.p.mgNeutronVelocity,
                self._cachedBlockParamData[b]["mgNeutronVelocity"],
            )


class TestUMNonUAssemFlags(unittest.TestCase):
    """Tests a reactor conversion with only a subset of assemblies being defined as having a non-uniform mesh."""

    @classmethod
    def setUpClass(cls):
        # random seed to support random mesh in unit tests below
        random.seed(987324987234)
        cls.o, cls.r = loadTestReactor(
            inputFilePath=TESTING_ROOT,
            inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml",
            customSettings={
                CONF_XS_KERNEL: "MC2v2",
                # only primary-control assemblies are flagged as non-uniform here
                "nonUniformAssemFlags": ["primary control"],
            },
        )
        cls.r.core.lib = _getIsotxsLibrary()
        cls.r.core.p.keff = 1.0
        cls.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=cls.o.cs, calcReactionRates=True)

    def test_reactorConversion(self):
        """Tests the reactor conversion to and from the original reactor."""
        self.assertTrue(self.converter._hasNonUniformAssems)
        self.assertTrue(self.r.core.lib)
        self.assertEqual(self.r.core.p.keff, 1.0)

        controlAssems = self.r.core.getAssemblies(Flags.PRIMARY | Flags.CONTROL)
        # Add a bunch of multi-group flux to the control assemblies in the core to demonstrate
        # that data can be mapped back to the original control rod assemblies if they are changed.
        # Additionally, this will check that block-level reaction rates are being calculated
        # (i.e., `rateAbs`).
        for a in controlAssems:
            for b in a:
                b.p.mgFlux = [1.0] * 33
                self.assertFalse(b.p.rateAbs)

        self.converter.convert(self.r)
        self.assertEqual(len(controlAssems), len(self.converter._nonUniformAssemStorage))

        self.converter.applyStateToOriginal()
        self.assertEqual(len(self.converter._nonUniformAssemStorage), 0)

        for a in controlAssems:
            for b in a:
                self.assertTrue(all(b.getMgFlux()))
                self.assertTrue(b.p.rateAbs)

        # recomputing reaction rates on the original reactor should populate capture rates too
        self.converter.updateReactionRates()
        for a in controlAssems:
            for b in a:
                self.assertTrue(b.p.rateCap)
                self.assertTrue(b.p.rateAbs)
class TestParamMapper(unittest.TestCase):
    """Test how the ParamMapper maps params."""

    def setUp(self):
        sourceAssem, destinationAssem = test_assemblies.buildTestAssemblies()[2:]
        self.sourceBlock = sourceAssem.getBlocks()[0]
        self.destinationBlock = destinationAssem.getBlocks()[0]

        # volume integrated parameters
        self.sourceBlock.p.power = 2.0
        self.sourceBlock.p.mgFlux = np.array([2.0, 2.0, 2.0])
        self.volumeIntegratedParameterNames = ["power", "mgFlux"]

        # non-volume integrated parameters
        self.sourceBlock.p.rateFis = 2.0
        self.sourceBlock.p.linPowByPin = np.array([2.0, 2.0, 2.0])
        self.regularParameterNames = ["rateFis", "linPowByPin"]

        self.allParameterNames = self.volumeIntegratedParameterNames + self.regularParameterNames

        # Mock symmetry factors so each test can control the source/destination ratio directly.
        self.sourceBlock.getSymmetryFactor = Mock()
        self.destinationBlock.getSymmetryFactor = Mock()

    def mappingTestHelper(self, expectedRatioVolumeIntegrated):
        """
        Test helper to run block comparison when mapping parameters.

        Parameters
        ----------
        expectedRatioVolumeIntegrated : int, float
            The ratio expected for volume integrated parameters when dividing the destination
            value by the source value.
        """
        paramMapper = uniformMesh.ParamMapper([], self.allParameterNames, self.sourceBlock)
        sourceValues = paramMapper.paramGetter(self.sourceBlock, self.allParameterNames)
        paramMapper.paramSetter(self.destinationBlock, sourceValues, self.allParameterNames)
        # volume-integrated params scale by the symmetry-factor ratio...
        for paramName in self.volumeIntegratedParameterNames:
            ratio = self.destinationBlock.p[paramName] / self.sourceBlock.p[paramName]
            np.testing.assert_equal(ratio, expectedRatioVolumeIntegrated)
        # ...while non-volume-integrated params map through unchanged
        for paramName in self.regularParameterNames:
            ratio = self.destinationBlock.p[paramName] / self.sourceBlock.p[paramName]
            np.testing.assert_equal(ratio, 1)

    def test_mappingSameSymmetry(self):
        """Test mapping parameters between blocks with similar and dissimilar symmetry factors."""
        self.sourceBlock.getSymmetryFactor.return_value = 3
        self.destinationBlock.getSymmetryFactor.return_value = 3
        self.mappingTestHelper(1)

    def test_mappingDifferentSymmetry(self):
        """Test mapping parameters between blocks with similar and dissimilar symmetry factors."""
        self.sourceBlock.getSymmetryFactor.return_value = 3
        self.destinationBlock.getSymmetryFactor.return_value = 1
        self.mappingTestHelper(3)
Useful for preparing inputs for physics codes that require structured meshes from a more flexible ARMI reactor mesh. This is implemented generically but includes a concrete subclass for neutronics-specific parameters. This is used for build input files for codes like DIF3D which require axially uniform meshes. Requirements ------------ 1. Build an average reactor with aligned axial meshes from a reactor with arbitrarily unaligned axial meshes in a way that conserves nuclide mass 2. Translate state information computed on the uniform mesh back to the unaligned mesh. 3. For neutronics cases, all neutronics-related block params should be translated, as well as the multigroup real and adjoint flux. .. warning:: This procedure can cause numerical diffusion in some cases. For example, if a control rod tip block has a large coolant block below it, things like peak absorption rate can get lost into it. We recalculate some but not all reaction rates in the re-mapping process based on a flux remapping. To avoid this, finer meshes will help. Always perform mesh sensitivity studies to ensure appropriate convergence for your needs. Examples -------- converter = uniformMesh.NeutronicsUniformMeshConverter() converter.convert(reactor) uniformReactor = converter.convReactor # do calcs, then: converter.applyStateToOriginal() The mesh mapping happens as described in the figure: .. 
figure:: /.static/axial_homogenization.png """ import collections import copy import typing from timeit import default_timer as timer import numpy as np from armi import runLog from armi.physics.neutronics.globalFlux import RX_ABS_MICRO_LABELS, RX_PARAM_NAMES from armi.reactor import grids, parameters from armi.reactor.converters.geometryConverters import GeometryConverter from armi.reactor.flags import Flags from armi.reactor.reactors import Core, Reactor from armi.settings.fwSettings.globalSettings import CONF_UNIFORM_MESH_MINIMUM_SIZE from armi.utils import plotting from armi.utils.mathematics import average1DWithinTolerance if typing.TYPE_CHECKING: from armi.reactor.blocks import Block HEAVY_METAL_PARAMS = ["molesHmBOL", "massHmBOL"] def converterFactory(globalFluxOptions): if globalFluxOptions.photons: return GammaUniformMeshConverter(globalFluxOptions.cs) else: return NeutronicsUniformMeshConverter( globalFluxOptions.cs, calcReactionRates=globalFluxOptions.calcReactionRatesOnMeshConversion, ) class UniformMeshGenerator: """ This class generates a common axial mesh to for the uniform mesh converter to use. The generation algorithm starts with the simple ``average1DWithinTolerance`` utility function to compute a representative "average" of the assembly meshes in the reactor. It then modifies that mesh to more faithfully represent important material boundaries of fuel and control absorber material. The decusping feature is controlled with the case setting ``uniformMeshMinimumSize``. If no value is provided for this setting, the uniform mesh generator will skip the decusping step and just provide the result of ``_computeAverageAxialMesh``. """ def __init__(self, r, minimumMeshSize=None): """ Initialize an object to generate an appropriate common axial mesh to use for uniform mesh conversion. Parameters ---------- r : :py:class:`Reactor <armi.reactor.reactors.Reactor>` object. 
    def __init__(self, r, minimumMeshSize=None):
        """
        Initialize an object to generate an appropriate common axial mesh to use for uniform mesh
        conversion.

        Parameters
        ----------
        r : :py:class:`Reactor <armi.reactor.reactors.Reactor>` object.
            Reactor for which a common mesh is generated
        minimumMeshSize : float, optional
            Minimum allowed separation between axial mesh points in cm
            If no minimum mesh size is provided, no "decusping" is performed
        """
        self._sourceReactor = r
        self.minimumMeshSize = minimumMeshSize
        # populated by generateCommonMesh(); numpy array of axial mesh coordinates
        self._commonMesh = None

    def generateCommonMesh(self):
        """
        Generate a common axial mesh to use.

        .. impl:: Try to preserve the boundaries of fuel and control material.
            :id: I_ARMI_UMC_NON_UNIFORM
            :implements: R_ARMI_UMC_NON_UNIFORM

            A core-wide mesh is computed via ``_computeAverageAxialMesh`` which operates by first
            collecting all the mesh points for every assembly (``allMeshes``) and then averaging
            them together using ``average1DWithinTolerance``. An attempt to preserve fuel and
            control material boundaries is accomplished by moving fuel region boundaries to
            accommodate control rod boundaries. Note this behavior only occurs by calling
            ``_decuspAxialMesh`` which is dependent on ``minimumMeshSize`` being defined (this is
            controlled by the ``uniformMeshMinimumSize`` setting).

        .. impl:: Produce a mesh with a size no smaller than a user-specified value.
            :id: I_ARMI_UMC_MIN_MESH
            :implements: R_ARMI_UMC_MIN_MESH

            If a minimum mesh size ``minimumMeshSize`` is provided, calls ``_decuspAxialMesh`` on
            the core-wide mesh to maintain that minimum size while still attempting to honor fuel
            and control material boundaries. Relies ultimately on ``_filterMesh`` to remove mesh
            points that violate the minimum size. Note that ``_filterMesh`` will always respect
            the minimum mesh size, even if this means losing a mesh point that represents a fuel
            or control material boundary.

        Notes
        -----
        Attempts to reduce the effect of fuel and control rod absorber smearing ("cusping"
        effect) by keeping important material boundaries in the common mesh.
        """
        self._computeAverageAxialMesh()
        # decusping is opt-in: it only runs when a minimum mesh size was supplied
        if self.minimumMeshSize is not None:
            self._decuspAxialMesh()

    def _computeAverageAxialMesh(self, includeSubMesh: bool = True):
        """
        Computes an average axial mesh based on the core's reference assembly.

        Parameters
        ----------
        includeSubMesh: bool, optional
            Whether to include the computational axial submesh in the average mesh.

        Notes
        -----
        This iterates over all the assemblies in the core and collects all assembly meshes that
        have the same number of fine-mesh points as the `refAssem` for the core. Based on this,
        the proposed uniform mesh will be some average of many assemblies in the core. The reason
        for this is to account for the fact that multiple assemblies (i.e., fuel assemblies) may
        have a different mesh due to differences in thermal and/or burn-up expansion.

        Averaging all the assembly meshes that have the same number of points can be undesirable
        in certain corner cases because no preference is assigned based on assembly type. For
        example: if the reflector assemblies have the same number of mesh points as the fuel
        assemblies but the size of the blocks is slightly different, the reflector mesh can
        influence the uniform mesh and effectively pull it away from the fuel mesh boundaries,
        potentially resulting in smearing (i.e., homogenization) of fuel with non-fuel materials.
        This is an undesirable outcome. In the future, it may be advantageous to determine a
        better way of sorting and prioritizing assembly meshes for generating the uniform mesh.
        """
        src = self._sourceReactor
        refAssem = src.core.refAssem
        refNumPoints = len(src.core.findAllAxialMeshPoints([refAssem], applySubMesh=includeSubMesh)[1:])
        allMeshes = []
        for a in src.core:
            # Get the mesh points of the assembly, neglecting the first coordinate
            # (typically zero).
            aMesh = src.core.findAllAxialMeshPoints([a], applySubMesh=includeSubMesh)[1:]
            # only assemblies whose mesh matches the reference count participate in the average
            if len(aMesh) == refNumPoints:
                allMeshes.append(aMesh)
        averageMesh = average1DWithinTolerance(np.array(allMeshes))
        self._commonMesh = np.array(averageMesh)

    def _decuspAxialMesh(self):
        """
        Preserve control rod material boundaries to reduce control rod cusping effect.

        Notes
        -----
        Uniform mesh conversion can lead to axial smearing of control assembly material, which
        causes a pronounced control rod "cusping" affect in the differential rod worth. This
        function modifies the uniform mesh to honor fuel and control rod material boundaries
        while avoiding excessively small mesh sizes. If adding control rod material boundaries to
        the mesh creates excessively small mesh regions, this function will move internal fuel
        region boundaries to make room for the control rod boundaries.

        This function operates by filtering out mesh points that are too close together while
        always holding on to the specified "anchor" points in the mesh. The anchor points are
        built up progressively as the appropriate bottom and top boundaries of fuel and control
        assemblies are determined.
        """
        # filter fuel material boundaries to minimum mesh size
        filteredBottomFuel, filteredTopFuel = self._getFilteredMeshTopAndBottom(Flags.FUEL)
        # control boundaries are filtered against the already-kept fuel boundaries as anchors
        materialBottoms, materialTops = self._getFilteredMeshTopAndBottom(
            Flags.CONTROL, filteredBottomFuel, filteredTopFuel
        )
        # combine the bottoms and tops into one list with bottom preference
        allMatBounds = materialBottoms + materialTops
        materialAnchors = self._filterMesh(
            allMatBounds,
            self.minimumMeshSize,
            filteredBottomFuel + filteredTopFuel,
            preference="bottom",
            warn=True,
        )
        runLog.extra(
            "Attempting to honor control and fuel material boundaries in uniform mesh "
            f"for {self} while also keeping minimum mesh size of {self.minimumMeshSize}. "
            f"Material boundaries are: {allMatBounds}"
        )
        # combine material bottom boundaries with full mesh using bottom preference
        meshWithBottoms = self._filterMesh(
            list(self._commonMesh) + materialBottoms,
            self.minimumMeshSize,
            materialBottoms,
            preference="bottom",
        )
        # combine material top boundaries with full mesh using top preference
        meshWithTops = self._filterMesh(
            list(self._commonMesh) + materialTops,
            self.minimumMeshSize,
            materialTops,
            preference="top",
        )
        # combine all mesh points using all material boundaries as anchors with top preference
        # top vs. bottom preference is somewhat arbitrary here
        combinedMesh = self._filterMesh(
            list(set(meshWithBottoms + meshWithTops)),
            self.minimumMeshSize,
            materialAnchors,
            preference="top",
        )
        self._commonMesh = np.array(combinedMesh)
" f"Material boundaries are: {allMatBounds}" ) # combine material bottom boundaries with full mesh using bottom preference meshWithBottoms = self._filterMesh( list(self._commonMesh) + materialBottoms, self.minimumMeshSize, materialBottoms, preference="bottom", ) # combine material top boundaries with full mesh using top preference meshWithTops = self._filterMesh( list(self._commonMesh) + materialTops, self.minimumMeshSize, materialTops, preference="top", ) # combine all mesh points using all material boundaries as anchors with top preference # top vs. bottom preference is somewhat arbitrary here combinedMesh = self._filterMesh( list(set(meshWithBottoms + meshWithTops)), self.minimumMeshSize, materialAnchors, preference="top", ) self._commonMesh = np.array(combinedMesh) def _filterMesh(self, meshList, minimumMeshSize, anchorPoints, preference="bottom", warn=False): """ Check for mesh violating the minimum mesh size and remove them if necessary. Parameters ---------- meshList : list of float, required List of mesh points to be filtered by minimum mesh size minimumMeshSize : float, required Minimum allowed separation between axial mesh points in cm anchorPoints : list of float, required These mesh points will not be removed. Note that the anchor points must be separated by at least the ``minimumMeshSize``. preference : str, optional When neither mesh point is in the list of ``anchorPoints``, which mesh point is given preference ("bottom" or "top") warn : bool, optional Whether to log a warning when a mesh is removed. This is true if a control material boundary is removed, but otherwise it is false. """ if preference == "bottom": meshList = sorted(list(set(meshList))) elif preference == "top": meshList = sorted(list(set(meshList)), reverse=True) else: raise ValueError( f"Mesh filtering preference {preference} is not an option! 
Preference must be either bottom or top" ) while True: for i in range(len(meshList) - 1): difference = abs(meshList[i + 1] - meshList[i]) if difference < minimumMeshSize: if meshList[i] in anchorPoints and meshList[i + 1] in anchorPoints: errorMsg = ( "Attempting to remove two anchor points!\n" "The uniform mesh minimum size for decusping is smaller than the " "gap between anchor points, which cannot be removed:\n" f"{meshList[i]}, {meshList[i + 1]}, gap = {abs(meshList[i] - meshList[i + 1])}" ) runLog.error(errorMsg) raise ValueError(errorMsg) if meshList[i + 1] in anchorPoints: removeIndex = i else: removeIndex = i + 1 if warn: runLog.warning( f"{meshList[i + 1]} is too close to {meshList[i]}! " f"Difference = {difference} is less than mesh size " f"tolerance of {minimumMeshSize}. The uniform mesh will " f"remove {meshList[removeIndex]}." ) break else: return sorted(meshList) meshList.pop(removeIndex) def _getFilteredMeshTopAndBottom(self, flags, bottoms=None, tops=None): """ Get the bottom and top boundaries of fuel assemblies and filter them based on the ``minimumMeshSize``. 
Parameters ---------- flags : armi.reactor.flags.Flags The assembly and block flags for which to preserve material boundaries ``getAssemblies()`` and ``getBlocks()`` are both called with the default, ``exact=False`` bottoms : list[float], optional Mesh "anchors" for material bottom boundaries tops : list[float], optional Mesh "anchors" for material top boundaries Returns ------- filteredBottoms : the bottom of assembly materials, filtered to a minimum separation of ``minimumMeshSize`` with preference for the lowest bounds filteredTops : the top of assembly materials, filtered to a minimum separation of ``minimumMeshSize`` with preference for the top bounds """ def firstBlockBottom(a, flags): return a.getFirstBlock(flags).p.zbottom def lastBlockTop(a, flags): return a.getBlocks(flags)[-1].p.ztop filteredBoundaries = dict() for meshList, preference, meshGetter, extreme in [ (bottoms, "bottom", firstBlockBottom, min), (tops, "top", lastBlockTop, max), ]: matBoundaries = set(meshList) if meshList is not None else set() for a in self._sourceReactor.core.getAssemblies(flags): matBoundaries.add(meshGetter(a, flags)) anchors = meshList if meshList is not None else [extreme(matBoundaries)] filteredBoundaries[preference] = self._filterMesh( matBoundaries, self.minimumMeshSize, anchors, preference=preference ) return filteredBoundaries["bottom"], filteredBoundaries["top"] class UniformMeshGeometryConverter(GeometryConverter): """ This geometry converter can be used to change the axial mesh structure of the reactor core. Notes ----- There are several staticmethods available on this class that allow for: - Creation of a new reactor without applying a new uniform axial mesh. See: `<UniformMeshGeometryConverter.initNewReactor>` - Creation of a new assembly with a new axial mesh applied. See: `<UniformMeshGeometryConverter.makeAssemWithUniformMesh>` - Resetting the parameter state of an assembly back to the defaults for the provided block parameters. 
class UniformMeshGeometryConverter(GeometryConverter):
    """
    This geometry converter can be used to change the axial mesh structure of the reactor core.

    Notes
    -----
    There are several staticmethods available on this class that allow for:

    - Creation of a new reactor without applying a new uniform axial mesh. See:
      `<UniformMeshGeometryConverter.initNewReactor>`
    - Creation of a new assembly with a new axial mesh applied. See:
      `<UniformMeshGeometryConverter.makeAssemWithUniformMesh>`
    - Resetting the parameter state of an assembly back to the defaults for the provided block
      parameters. See: `<UniformMeshGeometryConverter.clearStateOnAssemblies>`
    - Mapping number densities and block parameters between one assembly to another. See:
      `<UniformMeshGeometryConverter.setAssemblyStateFromOverlaps>`

    This class is meant to be extended for specific physics calculations that require a uniform
    mesh. The child types of this class should define custom `reactorParamsToMap` and
    `blockParamsToMap` attributes, and the `_setParamsToUpdate` method to specify the precise
    parameters that need to be mapped in each direction between the non-uniform and uniform mesh
    assemblies. The definitions should avoid mapping block parameters in both directions because
    the mapping process will cause numerical diffusion. The behavior of
    `setAssemblyStateFromOverlaps` is dependent on the direction in which the mapping is being
    applied to prevent the numerical diffusion problem.

    - "in" is used when mapping parameters into the uniform assembly from the non-uniform
      assembly.
    - "out" is used when mapping parameters from the uniform assembly back to the non-uniform
      assembly.

    .. warning::
        If a parameter is calculated by a physics solver while the reactor is in its converted
        (uniform mesh) state, that parameter *must* be included in the list of
        `reactorParamNames` or `blockParamNames` to be mapped back to the non-uniform reactor;
        otherwise, it will be lost. These lists are defined through the `_setParamsToUpdate`
        method, which uses the `reactorParamMappingCategories` and `blockParamMappingCategories`
        attributes and applies custom logic to create a list of parameters to be mapped in each
        direction.
    """

    # subclasses populate these with the parameter categories mapped in each direction
    reactorParamMappingCategories = {
        "in": [],
        "out": [],
    }
    blockParamMappingCategories = {
        "in": [],
        "out": [],
    }
    # suffix appended to source assembly names while they are parked in temporary storage
    _TEMP_STORAGE_NAME_SUFFIX = "-TEMP"

    def __init__(self, cs=None):
        GeometryConverter.__init__(self, cs)
        self._uniformMesh = None
        self.calcReactionRates = False
        self.includePinCoordinates = False
        self.paramMapper = None

        # These dictionaries represent back-up data from the source reactor
        # that can be recovered if the data is not being brought back from
        # the uniform mesh reactor when ``applyStateToOriginal`` to called.
        # This prevents clearing out data on the original reactor that should
        # be preserved since no changes were applied.
        self._cachedReactorCoreParamData = {}

        self._nonUniformMeshFlags = None
        self._hasNonUniformAssems = None
        self._nonUniformAssemStorage = set()
        self._minimumMeshSize = None

        if cs is not None:
            self._nonUniformMeshFlags = [Flags.fromStringIgnoreErrors(f) for f in cs["nonUniformAssemFlags"]]
            self._hasNonUniformAssems = any(self._nonUniformMeshFlags)
            self._minimumMeshSize = cs[CONF_UNIFORM_MESH_MINIMUM_SIZE]

    def convert(self, r=None):
        """
        Create a new reactor core with a uniform mesh.

        .. impl:: Make a copy of the reactor where the new core has a uniform axial mesh.
            :id: I_ARMI_UMC
            :implements: R_ARMI_UMC

            Given a source Reactor, ``r``, as input and when ``_hasNonUniformAssems`` is
            ``False``, a new Reactor is created in ``initNewReactor``. This new Reactor contains
            copies of select information from the input source Reactor (e.g., Operator,
            Blueprints, cycle, timeNode, etc). The uniform mesh to be applied to the new Reactor
            is calculated in ``_generateUniformMesh`` (see :need:`I_ARMI_UMC_NON_UNIFORM` and
            :need:`I_ARMI_UMC_MIN_MESH`). New assemblies with this uniform mesh are created in
            ``_buildAllUniformAssemblies`` and added to the new Reactor. Core-level parameters
            are then mapped from the source Reactor to the new Reactor in
            ``_mapStateFromReactorToOther``. Finally, the core-wide axial mesh is updated on the
            new Reactor via ``updateAxialMesh``.

        .. impl:: Map select parameters from composites on the original mesh to the new mesh.
            :id: I_ARMI_UMC_PARAM_FORWARD
            :implements: R_ARMI_UMC_PARAM_FORWARD

            In ``_mapStateFromReactorToOther``, Core-level parameters are mapped from the source
            Reactor to the new Reactor. If requested, block-level parameters can be mapped using
            an averaging equation as described in ``setAssemblyStateFromOverlaps``.
        """
        if r is None:
            raise ValueError(f"No reactor provided in {self}")

        completeStartTime = timer()
        self._sourceReactor = r
        self._setParamsToUpdate("in")

        # Here we are taking a short cut to homogenizing the core by only focusing on the
        # core assemblies that need to be homogenized. This will have a large speed up
        # since we don't have to create an entirely new reactor perform the data mapping.
        if self._hasNonUniformAssems:
            runLog.extra(
                f"Replacing non-uniform assemblies in reactor {r}, "
                "with assemblies whose axial mesh is uniform with "
                f"the core's reference assembly mesh: {r.core.refAssem.getAxialMesh()}"
            )
            # in-place mode: the "converted" reactor IS the source reactor
            self.convReactor = self._sourceReactor
            self.convReactor.core.updateAxialMesh()
            for assem in self.convReactor.core.getAssemblies(self._nonUniformMeshFlags):
                homogAssem = self.makeAssemWithUniformMesh(
                    assem,
                    self.convReactor.core.p.axialMesh[1:],
                    paramMapper=self.paramMapper,
                    includePinCoordinates=self.includePinCoordinates,
                )
                homogAssem.spatialLocator = assem.spatialLocator

                # Remove this assembly from the core and add it to the temporary storage
                # so that it can be replaced with the homogenized assembly. Note that we
                # do not call `removeAssembly()` because this will delete the core
                # assembly from existence rather than only stripping its spatialLocator.
                if assem.spatialLocator in self.convReactor.core.childrenByLocator:
                    self.convReactor.core.childrenByLocator.pop(assem.spatialLocator)
                self.convReactor.core.remove(assem)
                self.convReactor.core.assembliesByName.pop(assem.getName(), None)
                for b in assem:
                    self.convReactor.core.blocksByName.pop(b.getName(), None)

                assem.setName(assem.getName() + self._TEMP_STORAGE_NAME_SUFFIX)
                self._nonUniformAssemStorage.add(assem)
                self.convReactor.core.add(homogAssem)
        else:
            runLog.extra(f"Building copy of {r} with a uniform axial mesh.")
            self.convReactor = self.initNewReactor(r, self._cs)
            self._generateUniformMesh(minimumMeshSize=self._minimumMeshSize)
            self._buildAllUniformAssemblies()
            self._mapStateFromReactorToOther(self._sourceReactor, self.convReactor, mapBlockParams=False)
            self._newAssembliesAdded = self.convReactor.core.getAssemblies()

        self.convReactor.core.updateAxialMesh()
        self.convReactor.core.zones = self._sourceReactor.core.zones
        self._checkConversion()
        completeEndTime = timer()
        runLog.extra(f"Reactor core conversion time: {completeEndTime - completeStartTime} seconds")
    def _generateUniformMesh(self, minimumMeshSize):
        """
        Generate a common axial mesh to use for uniform mesh conversion.

        Parameters
        ----------
        minimumMeshSize : float, required
            Minimum allowed separation between axial mesh points in cm
        """
        # delegate the mesh computation (and optional decusping) to UniformMeshGenerator
        generator = UniformMeshGenerator(self._sourceReactor, minimumMeshSize=minimumMeshSize)
        generator.generateCommonMesh()
        self._uniformMesh = generator._commonMesh

    @staticmethod
    def initNewReactor(sourceReactor, cs):
        """Build a new, yet empty, reactor with the same settings as sourceReactor.

        Parameters
        ----------
        sourceReactor : :py:class:`Reactor <armi.reactor.reactors.Reactor>`
            original reactor object to be copied
        cs: Setting
            Complete settings object
        """
        # developer note: deepcopy on the blueprint object ensures that all relevant blueprints
        # attributes are set. Simply calling blueprints.loadFromCs() just initializes
        # a blueprints object and may not set all necessary attributes. E.g., some
        # attributes are set when assemblies are added in coreDesign.construct(), however
        # since we skip that here, they never get set; therefore the need for the deepcopy.
        bp = copy.deepcopy(sourceReactor.blueprints)
        newReactor = Reactor(sourceReactor.name, bp)
        coreDesign = bp.systemDesigns["core"]

        # loadComps=False: build the core structure without loading component state
        coreDesign.construct(cs, bp, newReactor, loadComps=False)
        newReactor.p.cycle = sourceReactor.p.cycle
        newReactor.p.timeNode = sourceReactor.p.timeNode
        newReactor.p.maxAssemNum = sourceReactor.p.maxAssemNum
        newReactor.core.p.coupledIteration = sourceReactor.core.p.coupledIteration
        newReactor.core.lib = sourceReactor.core.lib
        newReactor.core.setPitchUniform(sourceReactor.core.getAssemblyPitch())
        newReactor.o = sourceReactor.o  # This is needed later for geometry transformation

        # check if the sourceReactor has been modified from the blueprints
        if sourceReactor.core.isFullCore and not newReactor.core.isFullCore:
            _geometryConverter = newReactor.core.growToFullCore(cs)

        return newReactor
""" runLog.extra(f"Applying uniform neutronics results from {self.convReactor} to {self._sourceReactor}") completeStartTime = timer() # map the block parameters back to the non-uniform assembly self._setParamsToUpdate("out") # If we have non-uniform mesh assemblies then we need to apply a # different approach to undo the geometry transformations on an # assembly by assembly basis. if self._hasNonUniformAssems: for assem in self._sourceReactor.core.getAssemblies(self._nonUniformMeshFlags): for storedAssem in self._nonUniformAssemStorage: if storedAssem.getName() == assem.getName() + self._TEMP_STORAGE_NAME_SUFFIX: self.setAssemblyStateFromOverlaps( assem, storedAssem, self.paramMapper, mapNumberDensities=False, calcReactionRates=self.calcReactionRates, ) # Remove the stored assembly from the temporary storage list # and replace the current assembly with it. storedAssem.spatialLocator = assem.spatialLocator storedAssem.setName(assem.getName()) self._nonUniformAssemStorage.remove(storedAssem) self._sourceReactor.core.removeAssembly(assem, discharge=False) self._sourceReactor.core.add(storedAssem) break else: runLog.error( f"No assembly matching name {assem.getName()} " f"was found in the temporary storage list. {assem} " "will persist as an axially unified assembly. " "This is likely not intended." ) self._sourceReactor.core.updateAxialMesh() else: # Clear the state of the original source reactor to ensure that # a clean mapping between the converted reactor for data that has been # changed. In this case, we cache the original reactor's data so that # after the mapping has been applied, we can recover data from any # parameters that did not change. 
self._cachedReactorCoreParamData = {} self._clearStateOnReactor(self._sourceReactor, cache=True) self._mapStateFromReactorToOther(self.convReactor, self._sourceReactor) # We want to map the converted reactor core's library to the source reactor # because in some instances this has changed (i.e., when generating cross sections). self._sourceReactor.core.lib = self.convReactor.core.lib completeEndTime = timer() runLog.extra(f"Parameter remapping time: {completeEndTime - completeStartTime} seconds") @staticmethod def makeAssemWithUniformMesh( sourceAssem, newMesh, paramMapper=None, mapNumberDensities=True, includePinCoordinates=False, ): """ Build new assembly based on a source assembly but apply the uniform mesh. Notes ----- This creates a new assembly based on the provided source assembly, applies a new uniform mesh and then maps number densities and block-level parameters to the new assembly from the source assembly. Parameters ---------- sourceAssem : `Assembly <armi.reactor.assemblies.Assembly>` object Assembly that is used to map number densities and block-level parameters to a new mesh structure. newMesh : List[float] A list of the new axial mesh coordinates of the blocks. Note that these mesh coordinates are in cm and should represent the top axial mesh coordinates of the new blocks. paramMapper : ParamMapper Object that contains list of parameters to be mapped and has methods for mapping mapNumberDensities : bool, optional If True, number densities will be mapped from the source assembly to the new assembly. This is True by default, but this can be set to False to only map block-level parameters if the names are provided in `blockParamNames`. It can be useful to set this to False in circumstances where the ``setNumberDensitiesFromOverlaps`` does not conserve mass and for some edge cases. This can show up in specific instances with moving meshes (i.e., control rods) in some applications. 
In those cases, the mapping of number densities can be treated independent of this more general implementation. See Also -------- setAssemblyStateFromOverlaps This can be used to reverse the number density and parameter mappings between two assemblies. """ newAssem = UniformMeshGeometryConverter._createNewAssembly(sourceAssem) newAssem.p.assemNum = sourceAssem.p.assemNum runLog.debug(f"Creating a uniform mesh of {newAssem}") bottom = 0.0 def checkPriorityFlags(b): """ Check that a block has the flags that are prioritized for uniform mesh conversion. Also check that it's not different type of block that is a superset of the priority flags, like "Flags.FUEL | Flags.PLENUM" """ priorityFlags = [Flags.FUEL, Flags.CONTROL, Flags.SHIELD | Flags.RADIAL] return b.hasFlags(priorityFlags) and not b.hasFlags(Flags.PLENUM) for topMeshPoint in newMesh: overlappingBlockInfo = sourceAssem.getBlocksBetweenElevations(bottom, topMeshPoint) # This is not expected to occur given that the assembly mesh is consistent with # the blocks within it, but this is added for defensive programming and to # highlight a developer issue. if not overlappingBlockInfo: raise ValueError( f"No blocks found between {bottom:.3f} and {topMeshPoint:.3f} in {sourceAssem}. " f"Ensure a valid mesh is provided. Mesh given: {newMesh}" ) # Iterate over the blocks that are within this region and # select one as a "source" for determining which cross section # type to use. This uses the following rules: # 1. Determine the total height corresponding to each XS type that # appears for blocks with FUEL, CONTROL, or SHIELD|RADIAL flags in this domain. # 2. Determine the single XS type that represents the largest fraction # of the total height of FUEL, CONTROL, or SHIELD|RADIAL cross sections. # 3. Use the first block of the majority XS type as the source block. # 4. If none of the special block types are present(fuelOrAbsorber == False), # use the xs type that represents the largest fraction of the destination block. 
typeHeight = collections.defaultdict(float) blocks = [b for b, _h in overlappingBlockInfo] fuelOrAbsorber = any(checkPriorityFlags(b) for b in blocks) for b, h in overlappingBlockInfo: if checkPriorityFlags(b) or not fuelOrAbsorber: typeHeight[b.p.xsType] += h sourceBlock = None # xsType is the one with the majority of overlap xsType = next(k for k, v in typeHeight.items() if v == max(typeHeight.values())) for b in blocks: if checkPriorityFlags(b) or not fuelOrAbsorber: if b.p.xsType == xsType: sourceBlock = b break if len(typeHeight) > 1: if sourceBlock: totalHeight = sum(typeHeight.values()) runLog.debug( f"Multiple XS types exist between {bottom} and {topMeshPoint}. " f"Using the XS type from the largest region, {xsType}" ) for xs, h in typeHeight.items(): heightFrac = h / totalHeight runLog.debug(f"XSType {xs}: {heightFrac:.4f}") block = sourceBlock.createHomogenizedCopy(includePinCoordinates) block.p.xsType = xsType block.setHeight(topMeshPoint - bottom) block.p.axMesh = 1 newAssem.add(block) bottom = topMeshPoint newAssem.reestablishBlockOrder() newAssem.calculateZCoords() UniformMeshGeometryConverter.setAssemblyStateFromOverlaps( sourceAssem, newAssem, paramMapper, mapNumberDensities, ) return newAssem @staticmethod def setAssemblyStateFromOverlaps( sourceAssembly, destinationAssembly, paramMapper, mapNumberDensities=False, calcReactionRates=False, ): r""" Set state data (i.e., number densities and block-level parameters) on a assembly based on a source assembly with a different axial mesh. This solves an averaging equation from the source to the destination. .. math:: <P> = \frac{\int_{z_1}^{z_2} P(z) dz}{\int_{z_1}^{z_2} dz} which can be solved piecewise for z-coordinates along the source blocks. Notes ----- * If the parameter is volume integrated (e.g., flux, linear power) then calculate the fractional contribution from the source block. 
* If the parameter is not volume integrated (e.g., volumetric reaction rate) then calculate the fraction contribution on the destination block. This smears the parameter over the destination block. Parameters ---------- sourceAssembly : Assembly assem that has the state destinationAssembly : Assembly assem that has is getting the state from sourceAssembly paramMapper : ParamMapper Object that contains list of parameters to be mapped and has methods for mapping mapNumberDensities : bool, optional If True, number densities will be mapped from the source assembly to the destination assembly. This is True by default, but this can be set to False to only map block-level parameters if the names are provided in `blockParamNames`. It can be useful to set this to False in circumstances where the ``setNumberDensitiesFromOverlaps`` does not conserve mass and for some edge cases. This can show up in specific instances with moving meshes (i.e., control rods) in some applications. In those cases, the mapping of number densities can be treated independent of this more general implementation. calcReactionRates : bool, optional If True, the neutron reaction rates will be calculated on each block within the destination assembly. Note that this will skip the reaction rate calculations for a block if it does not contain a valid multi-group flux. See Also -------- setNumberDensitiesFromOverlaps : does this but does smarter caching for number densities. """ for destBlock in destinationAssembly: zLower = destBlock.p.zbottom zUpper = destBlock.p.ztop destinationBlockHeight = destBlock.getHeight() # Determine which blocks in the uniform mesh source assembly are # within the lower and upper bounds of the destination block. 
sourceBlocksInfo = sourceAssembly.getBlocksBetweenElevations(zLower, zUpper) if abs(zUpper - zLower) < 1e-6 and not sourceBlocksInfo: continue elif not sourceBlocksInfo: raise ValueError( "An error occurred when attempting to map to the " f"results from {sourceAssembly} to {destinationAssembly}. " f"No blocks in {sourceAssembly} exist between the axial " f"elevations of {zLower:<12.5f} cm and {zUpper:<12.5f} cm. " "This a major bug in the uniform mesh converter that should " "be reported to the developers." ) if mapNumberDensities: setNumberDensitiesFromOverlaps(destBlock, sourceBlocksInfo) # Iterate over each of the blocks that were found in the uniform mesh # source assembly within the lower and upper bounds of the destination # block and perform the parameter mapping. if paramMapper is not None: updatedDestVals = collections.defaultdict(float) for sourceBlock, sourceBlockOverlapHeight in sourceBlocksInfo: sourceBlockVals = paramMapper.paramGetter( sourceBlock, paramMapper.blockParamNames, ) sourceBlockHeight = sourceBlock.getHeight() for paramName, sourceBlockVal in zip(paramMapper.blockParamNames, sourceBlockVals): if sourceBlockVal is None: continue if paramMapper.isPeak[paramName]: updatedDestVals[paramName] = max(sourceBlockVal, updatedDestVals[paramName]) else: if paramMapper.isVolIntegrated[paramName]: denominator = sourceBlockHeight else: denominator = destinationBlockHeight integrationFactor = sourceBlockOverlapHeight / denominator updatedDestVals[paramName] += sourceBlockVal * integrationFactor paramMapper.paramSetter(destBlock, updatedDestVals.values(), updatedDestVals.keys()) # If requested, the reaction rates will be calculated based on the # mapped neutron flux and the XS library. if calcReactionRates: if paramMapper is None: runLog.warning( f"Reaction rates requested for {destinationAssembly}, but no ParamMapper " "was provided to setAssemblyStateFromOverlaps(). 
Reaction rates calculated " "will reflect the intended result without new parameter values being mapped in." ) core = sourceAssembly.getAncestor(lambda c: isinstance(c, Core)) if core is not None: UniformMeshGeometryConverter._calculateReactionRates( lib=core.lib, keff=core.p.keff, assem=destinationAssembly ) else: runLog.warning( f"Reaction rates requested for {destinationAssembly}, but no core object " "exists. This calculation will be skipped.", single=True, label="Block reaction rate calculation skipped due to insufficient multi-group flux data.", ) def clearStateOnAssemblies(assems, blockParamNames=None, cache=True): """ Clears the parameter state of blocks for a list of assemblies. Parameters ---------- assems : List[`Assembly <armi.reactor.assemblies.Assembly>`] List of assembly objects. blockParamNames : List[str], optional A list of block parameter names to clear on the given assemblies. cache : bool If True, the block parameters that were cleared are stored and returned as a dictionary of ``{b: {param1: val1, param2: val2}, b2: {...}, ...}`` """ if blockParamNames is None: blockParamNames = [] cachedBlockParamData = collections.defaultdict(dict) if not assems: return cachedBlockParamData blocks = [] for a in assems: blocks.extend(a) firstBlock = blocks[0] for paramName in blockParamNames: defaultValue = firstBlock.p.pDefs[paramName].default for b in blocks: if cache: cachedBlockParamData[b][paramName] = b.p[paramName] b.p[paramName] = defaultValue return cachedBlockParamData def plotConvertedReactor(self): """Generate a radial layout image of the converted reactor core. A pass-through to preserve the API.""" plotting.plotRadialReactorLayouts(self.convReactor) def reset(self): """Clear out stored attributes and reset the global assembly number.""" self._cachedReactorCoreParamData = {} super().reset() def _setParamsToUpdate(self, direction): """ Activate conversion of the specified parameters. 
        Notes
        -----
        The parameters mapped into and out of the uniform mesh will vary depending on
        the physics kernel using the uniform mesh. The parameters to be mapped in each
        direction are defined as a class attribute. New options can be created by
        extending the base class with different class attributes for parameters to map,
        and applying special modifications to these categorized lists with the
        `_setParamsToUpdate` method.

        This base class `_setParamsToUpdate()` method should not be called, so this
        raises a NotImplementedError.

        Parameters
        ----------
        direction : str
            "in" or "out". The direction of mapping; "in" to the uniform mesh assembly,
            or "out" of it. Different parameters are mapped in each direction.

        Raises
        ------
        NotImplementedError
        """
        raise NotImplementedError

    def _checkConversion(self):
        """Perform checks to ensure conversion occurred properly."""
        pass

    @staticmethod
    def _createNewAssembly(sourceAssembly):
        # Build an empty assembly of the same class and type as the source, with
        # an axial grid sized to the source's block count and matching name/flags.
        a = sourceAssembly.__class__(sourceAssembly.getType())
        a.spatialGrid = grids.AxialGrid.fromNCells(len(sourceAssembly))
        a.setName(sourceAssembly.getName())
        a.p.flags = sourceAssembly.p.flags
        return a

    def _buildAllUniformAssemblies(self):
        """
        Loop through each new block for each mesh point and apply conservation of atoms.

        We use the submesh and allow blocks to be as small as the smallest submesh to
        avoid unnecessarily diffusing small blocks into huge ones (e.g. control blocks
        into plenum).
        """
        runLog.debug(
            f"Creating new assemblies from {self._sourceReactor.core} with a uniform mesh of {self._uniformMesh}"
        )
        for sourceAssem in self._sourceReactor.core:
            newAssem = self.makeAssemWithUniformMesh(
                sourceAssem,
                self._uniformMesh,
                paramMapper=self.paramMapper,
                includePinCoordinates=self.includePinCoordinates,
            )
            # Place the new assembly at the same radial (i, j) position in the
            # converted core's grid.
            src = sourceAssem.spatialLocator
            newLoc = self.convReactor.core.spatialGrid[src.i, src.j, 0]
            self.convReactor.core.add(newAssem, newLoc)

    def _clearStateOnReactor(self, reactor, cache):
        """
        Delete existing state that will be updated so they don't increment.

        The summations should start at zero but will happen for all overlaps.
        """
        runLog.debug("Clearing params from source reactor that will be converted.")
        for rp in self.paramMapper.reactorParamNames:
            if cache:
                self._cachedReactorCoreParamData[rp] = reactor.core.p[rp]
            reactor.core.p[rp] = 0.0

    def _mapStateFromReactorToOther(self, sourceReactor, destReactor, mapNumberDensities=False, mapBlockParams=True):
        """
        Map parameters from one reactor to another.

        Notes
        -----
        This is a basic parameter mapping routine that can be used by most sub-classes.
        If special mapping logic is required, this method can be defined on sub-classes
        as necessary.
        """
        # Map reactor core parameters
        for paramName in self.paramMapper.reactorParamNames:
            # Check if the source reactor has a value assigned for this
            # parameter and if so, then apply it. Otherwise, revert back to
            # the original (cached) value. The ndarray check is needed because
            # truth-testing an array directly is ambiguous.
            paramDefined = isinstance(sourceReactor.core.p[paramName], np.ndarray) or sourceReactor.core.p[paramName]
            if paramDefined or paramName not in self._cachedReactorCoreParamData:
                val = sourceReactor.core.p[paramName]
            else:
                val = self._cachedReactorCoreParamData[paramName]
            destReactor.core.p[paramName] = val

        if mapBlockParams:
            # Map block parameters assembly-by-assembly, matched by name.
            for aSource in sourceReactor.core:
                aDest = destReactor.core.getAssemblyByName(aSource.getName())
                UniformMeshGeometryConverter.setAssemblyStateFromOverlaps(
                    aSource,
                    aDest,
                    self.paramMapper,
                    mapNumberDensities,
                    calcReactionRates=False,
                )

        # If requested, the reaction rates will be calculated based on the
        # mapped neutron flux and the XS library.
        if self.calcReactionRates:
            self._calculateReactionRatesEfficient(destReactor.core, sourceReactor.core.p.keff)

        # Clear the cached data after it has been mapped to prevent issues with
        # holding on to block data long-term.
        self._cachedReactorCoreParamData = {}

    @staticmethod
    def _calculateReactionRatesEfficient(core, keff):
        """
        First, sort blocks into groups by XS type. Then, we just need to grab micros for each XS type once.
        Iterate over list of blocks with the given XS type; calculate reaction rates for these blocks
        """
        # Group blocks by their cross-section (micro suffix) ID so the XS library
        # lookups are done once per group instead of once per block.
        xsTypeGroups = collections.defaultdict(list)
        for b in core.iterBlocks():
            xsTypeGroups[b.getMicroSuffix()].append(b)

        for xsID, blockList in xsTypeGroups.items():
            # Collect the union of nuclides with nonzero density across the group.
            nucSet = set()
            for b in blockList:
                nucSet.update(nuc for nuc, ndens in b.getNumberDensities().items() if ndens > 0.0)
            xsNucDict = {nuc: core.lib.getNuclide(nuc, xsID) for nuc in nucSet}
            UniformMeshGeometryConverter._calcReactionRatesBlockList(blockList, keff, xsNucDict)

    @staticmethod
    def _calculateReactionRates(lib, keff, assem):
        """
        Calculates the neutron reaction rates on the given assembly.

        Notes
        -----
        If a block in the assembly does not contain any multi-group flux than the
        reaction rate calculation for this block will be skipped.
        """
        from armi.physics.neutronics.globalFlux import globalFluxInterface

        for b in assem:
            # Checks if the block has a multi-group flux defined and if it
            # does not then this will skip the reaction rate calculation. This
            # is captured by the TypeError, due to a `NoneType` divide by float
            # error.
            try:
                b.getMgFlux()
            except TypeError:
                continue
            globalFluxInterface.calcReactionRates(b, keff, lib)

    @staticmethod
    def _calcReactionRatesBlockList(objList, keff, xsNucDict):
        r"""
        Compute 1-group reaction rates for the objects in objList (usually a block).

        :meta public:

        .. impl:: Return the reaction rates for a given ArmiObject
            :id: I_ARMI_FLUX_RX_RATES_BY_XS_ID
            :implements: R_ARMI_FLUX_RX_RATES

            This is an alternative implementation of :need:`I_ARMI_FLUX_RX_RATES` that
            is more efficient when computing reaction rates for a large set of blocks
            that share a common set of microscopic cross sections. For more detail on
            the reaction rate calculations, see :need:`I_ARMI_FLUX_RX_RATES`.

        Parameters
        ----------
        objList : List[Block]
            The list of objects to compute reaction rates on. Notionally this could be
            upgraded to be any kind of ArmiObject but with params defined as they are
            it currently is only implemented for a block.
        keff : float
            The keff of the core. This is required to get the neutron production rate
            correct via the neutron balance statement (since nuSigF has a 1/keff term).
        xsNucDict: Dict[str, XSNuclide]
            Microscopic cross sections to use in computing the reaction rates. Keys are
            nuclide names (e.g., "U235") and values are the associated XSNuclide objects
            from the cross section library, which contain the microscopic cross section
            data for a given nuclide in the current cross section group.
        """
        for obj in objList:
            rate = collections.defaultdict(float)
            numberDensities = obj.getNumberDensities()
            # Skip objects without a valid multi-group flux (None raises TypeError).
            try:
                mgFlux = np.array(obj.getMgFlux())
            except TypeError:
                continue
            for nucName, numberDensity in numberDensities.items():
                if numberDensity == 0.0:
                    continue
                nucRate = collections.defaultdict(float)
                micros = xsNucDict[nucName].micros

                # absorption is fission + capture (no n2n here)
                for name in RX_ABS_MICRO_LABELS:
                    volumetricRR = numberDensity * mgFlux.dot(micros[name])
                    nucRate["rateAbs"] += volumetricRR
                    if name != "fission":
                        nucRate["rateCap"] += volumetricRR
                    else:
                        nucRate["rateFis"] += volumetricRR

                # scale nu by keff.
                nusigmaF = micros["fission"] * micros.neutronsPerFission
                nucRate["rateProdFis"] += numberDensity * mgFlux.dot(nusigmaF) / keff
                nucRate["rateProdN2n"] += 2.0 * numberDensity * mgFlux.dot(micros.n2n)

                for rx in RX_PARAM_NAMES:
                    if nucRate[rx]:
                        rate[rx] += nucRate[rx]

            for paramName in RX_PARAM_NAMES:
                obj.p[paramName] = rate[paramName]  # put in #/cm^3/s

            if rate["rateFis"] > 0.0:
                fuelVolFrac = obj.getComponentAreaFrac(Flags.FUEL)
                # fisDens is undefined (NaN) when there is fission but no fuel area.
                obj.p.fisDens = np.nan if fuelVolFrac == 0 else rate["rateFis"] / fuelVolFrac
                obj.p.fisDensHom = rate["rateFis"]
            else:
                obj.p.fisDens = 0.0
                obj.p.fisDensHom = 0.0

    def updateReactionRates(self):
        """
        Update reaction rates on converted assemblies.

        Notes
        -----
        In some cases, we may want to read flux into a converted reactor from a
        pre-existing physics output instead of mapping it in from the pre-conversion
        source reactor. This method can be called after reading that flux in to
        calculate updated reaction rates derived from that flux.
        """
        if self._hasNonUniformAssems:
            for assem in self.convReactor.core.getAssemblies(self._nonUniformMeshFlags):
                self._calculateReactionRates(self.convReactor.core.lib, self.convReactor.core.p.keff, assem)
        else:
            self._calculateReactionRatesEfficient(self.convReactor.core, self.convReactor.core.p.keff)


class NeutronicsUniformMeshConverter(UniformMeshGeometryConverter):
    """
    A uniform mesh converter that specifically maps neutronics parameters.

    Notes
    -----
    This uniform mesh converter is intended for setting up an eigenvalue
    (fission-source) neutronics solve. There are no block parameters that need
    to be mapped in for a basic eigenvalue calculation, just number densities.
    The results of the calculation are mapped out (i.e., back to the non-uniform
    mesh). The results mapped out include things like flux, power, and reaction
    rates.

    .. warning::
        If a parameter is calculated by a physics solver while the reactor is in
        its converted (uniform mesh) state, that parameter *must* be included in
        the list of `reactorParamNames` or `blockParamNames` to be mapped back to
        the non-uniform reactor; otherwise, it will be lost. These lists are
        defined through the `_setParamsToUpdate` method, which uses the
        `reactorParamMappingCategories` and `blockParamMappingCategories`
        attributes and applies custom logic to create a list of parameters to be
        mapped in each direction.
""" reactorParamMappingCategories = { "in": [parameters.Category.neutronics], "out": [parameters.Category.neutronics], } blockParamMappingCategories = { "in": [], "out": [ parameters.Category.detailedAxialExpansion, parameters.Category.multiGroupQuantities, parameters.Category.pinQuantities, ], } def __init__(self, cs=None, calcReactionRates=True): """ Parameters ---------- cs : obj, optional Case settings object. calcReactionRates : bool, optional Set to True by default, but if set to False the reaction rate calculation after the neutron flux is remapped will not be calculated. """ UniformMeshGeometryConverter.__init__(self, cs) self.calcReactionRates = calcReactionRates def _setParamsToUpdate(self, direction): """ Activate conversion of the specified parameters. Notes ----- For the fission-source neutronics calculation, there are no block parameters that need to be mapped in. This function applies additional filters to the list of categories defined in `blockParamMappingCategories[out]` to avoid mapping out cumulative parameters like DPA or burnup. These parameters should not exist on the neutronics uniform mesh assembly anyway, but this filtering provides an added layer of safety to prevent data from being inadvertently overwritten. Parameters ---------- direction : str "in" or "out". The direction of mapping; "in" to the uniform mesh assembly, or "out" of it. Different parameters are mapped in each direction. 
""" reactorParamNames = [] blockParamNames = [] for category in self.reactorParamMappingCategories[direction]: reactorParamNames.extend(self._sourceReactor.core.p.paramDefs.inCategory(category).names) b = self._sourceReactor.core.getFirstBlock() excludedCategories = [parameters.Category.gamma] if direction == "out": excludedCategories.append(parameters.Category.cumulative) excludedCategories.append(parameters.Category.cumulativeOverCycle) excludedParamNames = [] for category in excludedCategories: excludedParamNames.extend(b.p.paramDefs.inCategory(category).names) for category in self.blockParamMappingCategories[direction]: blockParamNames.extend( [name for name in b.p.paramDefs.inCategory(category).names if name not in excludedParamNames] ) if direction == "in": # initial heavy metal masses are needed to calculate burnup in MWd/kg blockParamNames.extend(HEAVY_METAL_PARAMS) # remove any duplicates (from parameters that have multiple categories) blockParamNames = list(set(blockParamNames)) self.paramMapper = ParamMapper(reactorParamNames, blockParamNames, b) class GammaUniformMeshConverter(UniformMeshGeometryConverter): """ A uniform mesh converter that specifically maps gamma parameters. Notes ----- This uniform mesh converter is intended for setting up a fixed-source gamma transport solve. Some block parameters from the neutronics solve, such as `b.p.mgFlux`, may need to be mapped into the uniform mesh reactor so that the gamma source can be calculated by the ARMI plugin performing gamma transport. Parameters that are updated with gamma transport results, such as `powerGenerated`, `powerNeutron`, and `powerGamma`, need to be mapped back to the non-uniform reactor. .. warning:: If a parameter is calculated by a physics solver while the reactor is in its converted (uniform mesh) state, that parameter *must* be included in the list of `reactorParamNames` or `blockParamNames` to be mapped back to the non-uniform reactor; otherwise, it will be lost. 
These lists are defined through the `_setParamsToUpdate` method, which uses the `reactorParamMappingCategories` and `blockParamMappingCategories` attributes and applies custom logic to create a list of parameters to be mapped in each direction. """ reactorParamMappingCategories = { "in": [parameters.Category.neutronics], "out": [parameters.Category.neutronics], } blockParamMappingCategories = { "in": [ parameters.Category.multiGroupQuantities, ], "out": [ parameters.Category.gamma, parameters.Category.neutronics, ], } def _setParamsToUpdate(self, direction): """ Activate conversion of the specified parameters. Notes ----- For gamma transport, only a small subset of neutronics parameters need to be mapped out. The set is defined in this method. There are conditions on the output blockParamMappingCategories: only non-cumulative, gamma parameters are mapped out. This avoids numerical diffusion of cumulative parameters or those created by the initial eigenvalue neutronics solve from being mapped in both directions by the mesh converter for the fixed-source gamma run. Parameters ---------- direction : str "in" or "out". The direction of mapping; "in" to the uniform mesh assembly, or "out" of it. Different parameters are mapped in each direction. 
""" reactorParamNames = [] blockParamNames = [] for category in self.reactorParamMappingCategories[direction]: reactorParamNames.extend(self._sourceReactor.core.p.paramDefs.inCategory(category).names) b = self._sourceReactor.core.getFirstBlock() if direction == "out": excludeList = ( b.p.paramDefs.inCategory(parameters.Category.cumulative).names + b.p.paramDefs.inCategory(parameters.Category.cumulativeOverCycle).names ) else: excludeList = b.p.paramDefs.inCategory(parameters.Category.gamma).names for category in self.blockParamMappingCategories[direction]: blockParamNames.extend( [name for name in b.p.paramDefs.inCategory(category).names if name not in excludeList] ) # remove any duplicates (from parameters that have multiple categories) blockParamNames = list(set(blockParamNames)) self.paramMapper = ParamMapper(reactorParamNames, blockParamNames, b) class ParamMapper: """ Utility for parameter setters/getters that can be used when transferring data from one assembly to another during the mesh conversion process. Stores some data like parameter defaults and properties to save effort of accessing paramDefs many times for the same data. """ def __init__(self, reactorParamNames: list[str], blockParamNames: list[str], b: "Block"): """ Initialize the list of parameter defaults. The ParameterDefinitionCollection lookup is very slow, so this we do it once and store it as a hashed list. """ self.paramDefaults = {paramName: b.p.pDefs[paramName].default for paramName in blockParamNames} # Determine which parameters are volume integrated self.isVolIntegrated = { paramName: b.p.paramDefs[paramName].atLocation(parameters.ParamLocation.VOLUME_INTEGRATED) for paramName in blockParamNames } # determine which parameters are peak/max # Unfortunately, these parameters don't tell you WHERE in the block the peak # value occurs. So when mapping block parameters in setAssemblyStateFromOverlaps(), # we will just grab the maximum value over all of the source blocks. 
This effectively # assumes that all of the source blocks overlap 100% with the destination block, # although this is rarely actually the case. self.isPeak = { paramName: b.p.paramDefs[paramName].atLocation(parameters.ParamLocation.MAX) for paramName in blockParamNames } self.reactorParamNames = reactorParamNames self.blockParamNames = blockParamNames def paramSetter(self, block: "Block", vals: list, paramNames: list[str]): """Sets block parameter data.""" for paramName, val in zip(paramNames, vals): # Skip setting None values. if val is None: continue if isinstance(val, (tuple, list, np.ndarray)): self._arrayParamSetter(block, [val], [paramName]) else: self._scalarParamSetter(block, [val], [paramName]) def paramGetter(self, block: "Block", paramNames: list[str]): """Returns block parameter values as an array in the order of the parameter names given.""" paramVals = [] symmetryFactor = block.getSymmetryFactor() for paramName in paramNames: multiplier = self.getFactorSymmetry(paramName, symmetryFactor) val = block.p[paramName] # list-like should be treated as a numpy array if val is None: paramVals.append(val) elif isinstance(val, (tuple, list, np.ndarray)): paramVals.append(np.array(val) * multiplier if len(val) > 0 else None) else: paramVals.append(val * multiplier) return np.array(paramVals, dtype=object) def _scalarParamSetter(self, block: "Block", vals: list, paramNames: list[str]): """Assigns a set of float/integer/string values to a given set of parameters on a block.""" symmetryFactor = block.getSymmetryFactor() for paramName, val in zip(paramNames, vals): if val is None: block.p[paramName] = val else: block.p[paramName] = val / self.getFactorSymmetry(paramName, symmetryFactor) def _arrayParamSetter(self, block: "Block", arrayVals: list, paramNames: list[str]): """Assigns a set of list/array values to a given set of parameters on a block.""" symmetryFactor = block.getSymmetryFactor() for paramName, vals in zip(paramNames, arrayVals): if vals is None: continue 
block.p[paramName] = np.array(vals) / self.getFactorSymmetry(paramName, symmetryFactor) def getFactorSymmetry(self, paramName: str, symmetryFactor: int): """Returns the symmetry factor if the parameter is volume integrated, returns 1 otherwise.""" if self.isVolIntegrated[paramName]: return symmetryFactor else: return 1 def setNumberDensitiesFromOverlaps(block, overlappingBlockInfo): r""" Set number densities on a block based on overlapping blocks. A conservation of number of atoms technique is used to map the non-uniform number densities onto the uniform neutronics mesh. When the number density of a height :math:`H` neutronics mesh block :math:`N^{\prime}` is being computed from one or more blocks in the ARMI mesh with number densities :math:`N_i` and heights :math:`h_i`, the following formula is used: .. math:: N^{\prime} = \sum_i N_i \frac{h_i}{H} """ totalDensities = collections.defaultdict(float) block.clearNumberDensities() blockHeightInCm = block.getHeight() for overlappingBlock, overlappingHeightInCm in overlappingBlockInfo: heightScaling = overlappingHeightInCm / blockHeightInCm for nucName, numberDensity in overlappingBlock.getNumberDensities().items(): totalDensities[nucName] += numberDensity * heightScaling block.setNumberDensities(dict(totalDensities)) # Set the volume of each component in the block to `None` so that the # volume of each component is recomputed. for c in block: c.p.volume = None ================================================ FILE: armi/reactor/cores.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Core is a high-level object in the data model in ARMI. A Core frequently contain assemblies which in turn contain more refinement in representing the physical reactor. """ import collections import copy import itertools import os import time from typing import Callable, Iterator, Optional import numpy as np from ruamel.yaml import YAML from armi import getPluginManagerOrFail, nuclearDataIO, runLog from armi.nuclearDataIO import xsLibraries from armi.reactor import ( assemblies, blocks, composites, flags, geometry, grids, parameters, reactorParameters, zones, ) from armi.reactor.flags import Flags from armi.reactor.zones import Zone, Zones from armi.settings.fwSettings.globalSettings import ( CONF_CIRCULAR_RING_PITCH, CONF_DETAILED_AXIAL_EXPANSION, CONF_FRESH_FEED_TYPE, CONF_MIN_MESH_SIZE_RATIO, CONF_NON_UNIFORM_ASSEM_FLAGS, CONF_STATIONARY_BLOCK_FLAGS, CONF_TRACK_ASSEMS, CONF_ZONE_DEFINITIONS, CONF_ZONES_FILE, ) from armi.utils import createFormattedStrWithDelimiter, tabulate, units from armi.utils.iterables import Sequence from armi.utils.mathematics import average1DWithinTolerance class Core(composites.Composite): """ Reactor structure made up of assemblies. Could be a Core, spent fuel pool, reactor head, etc. This has the bulk of the data management operations. Attributes ---------- params : dict Core-level parameters are scalar values that have time dependence. Examples are keff, maxPercentBu, etc. 
    assemblies : list
        List of assembly objects that are currently in the core
    """

    # Class-level parameter definitions shared by all Core instances.
    pDefs = reactorParameters.defineCoreParameters()

    def __init__(self, name):
        """
        Initialize the reactor object.

        Parameters
        ----------
        name : str
            Name of the object. Flags will inherit from this.
        """
        composites.Composite.__init__(self, name)
        self.assembliesByName = {}
        self.circularRingList = {}
        self.blocksByName = {}  # lookup tables
        self.numRings = 0
        self.spatialGrid = None
        self.xsIndex = {}
        self.p.numMoves = 0
        self._lib = None  # placeholder for ISOTXS object
        self.locParams = {}  # location-based parameters
        # overridden in case.py to include pre-reactor time.
        self.timeOfStart = time.time()
        self.zones = zones.Zones()  # initialize with empty Zones object
        # initialize the list that holds all shuffles
        self.moves = {}
        self.scalarVals = {}
        self._nuclideCategories = {}
        self.typeList = []  # list of block types to convert name - to -number.

        # leftover default "settings" that are intended to eventually be elsewhere.
        self._freshFeedType = "feed fuel"
        self._trackAssems = False
        self._circularRingMode = False
        self._circularRingPitch = 1.0
        self._minMeshSizeRatio = 0.15
        self._detailedAxialExpansion = False

    def setOptionsFromCs(self, cs):
        """Pull user-modifiable modeling options off the case settings object onto this Core."""
        # function-scoped import; presumably avoids an import cycle with the physics
        # package at module load time -- NOTE(review): confirm
        from armi.physics.fuelCycle.settings import (
            CONF_CIRCULAR_RING_MODE,
            CONF_JUMP_RING_NUM,
        )

        # these are really "user modifiable modeling constants"
        self.p.jumpRing = cs[CONF_JUMP_RING_NUM]
        self._freshFeedType = cs[CONF_FRESH_FEED_TYPE]
        self._trackAssems = cs[CONF_TRACK_ASSEMS]
        self._circularRingMode = cs[CONF_CIRCULAR_RING_MODE]
        self._circularRingPitch = cs[CONF_CIRCULAR_RING_PITCH]
        self._minMeshSizeRatio = cs[CONF_MIN_MESH_SIZE_RATIO]
        self._detailedAxialExpansion = cs[CONF_DETAILED_AXIAL_EXPANSION]

    def __getstate__(self):
        """Return the picklable state; delegates entirely to the Composite base class."""
        state = composites.Composite.__getstate__(self)
        return state

    def __setstate__(self, state):
        # After restoring Composite state, the name/locator lookup tables must be
        # regenerated since they reference child objects.
        composites.Composite.__setstate__(self, state)
        self.regenAssemblyLists()

    def __deepcopy__(self, memo):
        # Register the copy in `memo` *before* deep-copying state so recursive
        # references back to this Core resolve to the new object.
        memo[id(self)] = newC = self.__class__.__new__(self.__class__)
        newC.__setstate__(copy.deepcopy(self.__getstate__(), memo))
        newC.name = self.name + "-copy"
        return newC

    def __repr__(self):
        return "<{}: {} id:{}>".format(self.__class__.__name__, self.name, id(self))

    def __iter__(self):
        """Override the base Composite __iter__ to produce stable sort order."""
        return iter(self._children)

    @property
    def r(self):
        """The parent Reactor, or None if this Core is not attached to a Reactor."""
        from armi.reactor.reactors import Reactor

        if isinstance(self.parent, Reactor):
            return self.parent

        return None

    @property
    def symmetry(self) -> geometry.SymmetryType:
        """Getter for symmetry type.

        .. impl:: Get core symmetry.
            :id: I_ARMI_R_SYMM
            :implements: R_ARMI_R_SYMM

            This property getter returns the symmetry attribute of the spatialGrid instance
            attribute. The spatialGrid is an instance of a child of the abstract base class
            :py:class:`Grid <armi.reactor.grids.grid.Grid>` type. The symmetry attribute is an
            instance of the :py:class:`SymmetryType <armi.reactor.geometry.SymmetryType>` class,
            which is a wrapper around the :py:class:`DomainType
            <armi.reactor.geometry.DomainType>` and :py:class:`BoundaryType
            <armi.reactor.geometry.BoundaryType>` enumerations used to classify the domain (e.g.,
            1/3 core, quarter core, full core) and symmetry boundary conditions (e.g., periodic,
            reflective, none) of a reactor, respectively.

            Only specific combinations of :py:class:`Grid <armi.reactor.grids.grid.Grid>` type,
            :py:class:`DomainType <armi.reactor.geometry.DomainType>`, and :py:class:`BoundaryType
            <armi.reactor.geometry.BoundaryType>` are valid. The validity of a user-specified
            geometry and symmetry is verified by a settings
            :py:class:`Inspector <armi.settings.settingsValidation.Inspector`.
""" if not self.spatialGrid: raise ValueError("Cannot access symmetry before a spatialGrid is attached.") return self.spatialGrid.symmetry @symmetry.setter def symmetry(self, val: str): """Setter for symmetry type.""" self.spatialGrid.symmetry = str(val) self.clearCache() @property def geomType(self) -> geometry.GeomType: if not self.spatialGrid: raise ValueError("Cannot access geomType before a spatialGrid is attached.") return self.spatialGrid.geomType @property def powerMultiplier(self): """ Symmetry factor for this model. 1 for full core, 3 for 1/3 core, etc. Notes ----- This should not be a state variable because it just reflects the current geometry. It changes automatically if the symmetry changes (e.g. from a geometry conversion). """ return self.symmetry.symmetryFactor() @property def lib(self) -> Optional[xsLibraries.IsotxsLibrary]: """ Return the microscopic cross section library, if one exists. - If there is a library currently associated with the Core, it will be returned - Otherwise, an ``ISOTXS`` file will be searched for in the working directory, opened as ``ISOTXS`` object and returned. If possible, it will find the correct file for the current cycle and timeNode. - Finally, if no ``ISOTXS`` file exists in the working directory, a None value will be returned. 
""" # determine the current cycle and timeNode cycle = None node = None if self.r is not None: cycle = self.r.p.cycle node = self.r.p.timeNode # if self._lib is None, try to find a local file isotxsFileName = nuclearDataIO.getExpectedISOTXSFileName(cycle, node) if self._lib is None and os.path.exists(isotxsFileName): # try to find the file for this specific cycle/node runLog.info(f"Loading microscopic cross section library `{isotxsFileName}` at {cycle}/{node}") self._lib = nuclearDataIO.isotxs.readBinary(isotxsFileName) elif self._lib is None: # try to find any local file, not labeled by cycle/node isotxsFileName = nuclearDataIO.getExpectedISOTXSFileName() if os.path.exists(isotxsFileName): runLog.info(f"Loading microscopic cross section library `{isotxsFileName}`") self._lib = nuclearDataIO.isotxs.readBinary(isotxsFileName) return self._lib @lib.setter def lib(self, value): """Set the microscopic cross section library.""" runLog.extra(f"Updating cross section library on {self}.\nInitial: {self._lib}\nUpdated: {value}.") self._lib = value def hasLib(self): """Check if the microscopic cross section library is set. Since the property ``lib`` will attempt to auto-load from a given ISOTXS file in the working directory, checking ``r.core.lib is not None`` may result in unexpected behavior. Use this instead. """ return self._lib is not None @property def isFullCore(self): """Return True if reactor is full core, otherwise False.""" # Avoid using `not core.isFullCore` to check if third core geometry # use `core.symmetry.domain == geometry.DomainType.THIRD_CORE return self.symmetry.domain == geometry.DomainType.FULL_CORE @property def refAssem(self): """ Return the "reference" assembly for this Core. The reference assembly is defined as the center-most assembly with a FUEL flag, if any are present, or the center-most of any assembly otherwise. Warning ------- The convenience of this property should be weighed against it's somewhat arbitrary nature for any particular client. 
        The center-most fueled assembly is not particularly representative of the state of the
        core as a whole.
        """
        # assemblies sort by ring/position, so the first one is the center-most
        key = lambda a: a.spatialLocator.getRingPos()
        assems = self.getAssemblies(Flags.FUEL, sortKey=key)
        if not assems:
            # no fueled assemblies; fall back to the center-most of any assembly
            assems = self.getAssemblies(sortKey=key)
        return assems[0]

    def sortAssemsByRing(self):
        """Sorts the reactor assemblies by ring and position."""
        sortKey = lambda a: a.spatialLocator.getRingPos()
        self._children = sorted(self._children, key=sortKey)

    def summarizeReactorStats(self):
        """Writes a summary of the reactor to check the mass and volume of all of the blocks."""
        totalMass = 0.0
        fissileMass = 0.0
        heavyMetalMass = 0.0
        totalVolume = 0.0
        numBlocks = 0
        for block in self.iterBlocks():
            totalMass += block.getMass()
            fissileMass += block.getFissileMass()
            heavyMetalMass += block.getHMMass()
            totalVolume += block.getVolume()
            numBlocks += 1

        # scale symmetric-model quantities up to the whole core; masses g -> kg
        totalMass = totalMass * self.powerMultiplier / 1000.0
        fissileMass = fissileMass * self.powerMultiplier / 1000.0
        heavyMetalMass = heavyMetalMass * self.powerMultiplier / 1000.0
        totalVolume = totalVolume * self.powerMultiplier

        runLog.extra(
            "Summary of {}\n".format(self)
            + tabulate.tabulate(
                [
                    ("Number of Blocks", numBlocks),
                    ("Total Volume (cc)", totalVolume),
                    ("Total Mass (kg)", totalMass),
                    ("Fissile Mass (kg)", fissileMass),
                    ("Heavy Metal Mass (kg)", heavyMetalMass),
                ],
                tableFmt="armi",
            )
        )

    def setPowerFromDensity(self):
        """Set the power from the powerDensity."""
        self.p.power = self.p.powerDensity * self.getHMMass()

    def setPowerIfNecessary(self):
        """Set the core power, from the power density.

        If the power density is set, but the power isn't, calculate the total heavy metal mass of
        the reactor, and set the total power. Which will then be the real source of truth again.
        """
        if self.p.power == 0 and self.p.powerDensity > 0:
            self.setPowerFromDensity()

    def setBlockMassParams(self):
        """Set the parameters kgHM and kgFis for each block and calculate Pu fraction."""
        for b in self.iterBlocks():
            b.p.kgHM = b.getHMMass() / units.G_PER_KG
            b.p.kgFis = b.getFissileMass() / units.G_PER_KG
            # guard against division by zero when a block has no heavy metal at BOL
            b.p.puFrac = b.getPuMoles() / b.p.molesHmBOL if b.p.molesHmBOL > 0.0 else 0.0

    def getScalarEvolution(self, key):
        """Return the stored time-evolution values for the given scalar key."""
        return self.scalarVals[key]

    def locateAllAssemblies(self):
        """
        Store the current location of all assemblies.

        This is required for shuffle printouts, repeat shuffling, and MCNP shuffling.
        """
        for a in self.getAssemblies(includeAll=True):
            a.lastLocationLabel = a.getLocation()

    def removeAssembly(self, a1, discharge=True, addToSFP=False):
        """
        Takes an assembly and puts it out of core.

        Parameters
        ----------
        a1 : assembly
            The assembly to remove

        discharge : bool, optional
            Discharge the assembly, including adding it to the SFP. Default: True

        addToSFP : bool, optional
            Store the discharged assembly in the SFP regardless of the ``trackAssems`` setting.
            Default: False

        Notes
        -----
        Please expect this method will delete your assembly (instead of moving it into a Spent
        Fuel Pool) unless you set ``trackAssems`` to True or ``addToSFP`` is set to True.

        Originally, this held onto all assemblies in the Spend Fuel Pool. However, they use
        memory. And it is possible to have the history interface record only the parameters you
        need.
""" from armi.reactor.reactors import Reactor paramDefs = set(parameters.ALL_DEFINITIONS) paramDefs.difference_update(set(parameters.forType(Core))) paramDefs.difference_update(set(parameters.forType(Reactor))) for paramDef in paramDefs: if paramDef.assigned & parameters.SINCE_ANYTHING: paramDef.assigned = parameters.SINCE_ANYTHING if discharge: runLog.debug(f"Removing {a1} from {self}") else: runLog.debug(f"Purging {a1} from {self}") self.childrenByLocator.pop(a1.spatialLocator) a1.p.dischargeTime = self.r.p.time self.remove(a1) if discharge and (self._trackAssems or addToSFP): if self.parent.excore.get("sfp") is not None: self.parent.excore.sfp.add(a1) else: runLog.info("No Spent Fuel Pool is found, can't track assemblies.") else: self._removeListFromAuxiliaries(a1) def removeAssembliesInRing(self, ringNum, cs, overrideCircularRingMode=False): """ Removes all of the assemblies in a given ring. Parameters ---------- ringNum : int The ring to remove cs: Settings A relevant settings object overrideCircularRingMode : bool, optional False ~ default: use circular/square/hex rings, just as the reactor defines them True ~ Turn off circular ring mode, and instead use square or hex. See Also -------- getAssembliesInRing : definition of a ring """ for a in self.getAssembliesInRing(ringNum, overrideCircularRingMode=overrideCircularRingMode): self.removeAssembly(a) self.processLoading(cs) def _removeListFromAuxiliaries(self, assembly): """ Remove an assembly from all auxiliary reference tables and lists. Otherwise it will get added back into assembliesByName, etc. History will fail if it tries to summarize an assembly that has been purged. """ del self.assembliesByName[assembly.getName()] for b in assembly: try: del self.blocksByName[b.getName()] except KeyError: runLog.warning( "Cannot delete block {0}. 
It is not in the Core.blocksByName structure".format(b), single=True, label="cannot dereference: lost block", ) def normalizeNames(self, startIndex=0): """ Renumber and rename all the Assemblies and Blocks. Parameters ---------- startIndex : int, optional The default is to start counting at zero. But if you are renumbering assemblies across the entire Reactor, you may want to start at a different number. Returns ------- int The new max Assembly number. """ ind = startIndex for a in self: oldName = a.getName() newName = a.makeNameFromAssemNum(ind) if oldName == newName: ind += 1 continue a.p.assemNum = ind a.setName(newName) for b in a: axialIndex = int(b.name.split("-")[-1]) b.name = b.makeName(ind, axialIndex) ind += 1 self.normalizeInternalBookeeping() return ind def normalizeInternalBookeeping(self): """Update some bookkeeping dictionaries of assembly and block names in this Core.""" self.assembliesByName = {} self.blocksByName = {} for assem in self: self.assembliesByName[assem.getName()] = assem for b in assem: self.blocksByName[b.getName()] = b def add(self, a, spatialLocator=None): """ Adds an assembly to the reactor. An object must be added before it is placed in a particular cell in the reactor's spatialGrid. When an object is added to a Reactor it get placed in a generic location at the center of the Reactor unless a spatialLocator is passed in as well. Parameters ---------- a : ArmiObject The object to add to the reactor spatialLocator : SpatialLocator object, optional The location in the reactor to add the new object to. Must be unoccupied. 
        See Also
        --------
        removeAssembly : removes an assembly
        """
        from armi.reactor.reactors import Reactor

        # Negative assembly IDs are placeholders, and we need to renumber the assembly
        if a.p.assemNum < 0:
            a.renumber(self.r.incrementAssemNum())

        # resetting .assigned forces database to be rewritten for shuffled core
        paramDefs = set(parameters.ALL_DEFINITIONS)
        paramDefs.difference_update(set(parameters.forType(Core)))
        paramDefs.difference_update(set(parameters.forType(Reactor)))
        for paramDef in paramDefs:
            if paramDef.assigned & parameters.SINCE_ANYTHING:
                paramDef.assigned = parameters.SINCE_ANYTHING

        # could speed up output by passing format args as an arg and only process if verb good.
        runLog.debug("Adding {0} to {1}".format(a, self))
        composites.Composite.add(self, a)
        aName = a.getName()

        spatialLocator = spatialLocator or a.spatialLocator
        if spatialLocator is not None and spatialLocator in self.childrenByLocator:
            raise ValueError(
                "Cannot add {} because location {} is already filled by {}.".format(
                    aName, a.spatialLocator, self.childrenByLocator[a.spatialLocator]
                )
            )

        if spatialLocator is not None:
            # transfer spatialLocator to Core one
            spatialLocator = self.spatialGrid[tuple(spatialLocator.indices)]
            if not self.spatialGrid.locatorInDomain(spatialLocator, symmetryOverlap=True):
                raise LookupError(
                    "Location `{}` outside of the represented domain: `{}`".format(
                        spatialLocator, self.spatialGrid.symmetry.domain
                    )
                )
            a.moveTo(spatialLocator)

        self.childrenByLocator[spatialLocator] = a
        # build a lookup table for history tracking.
        if aName in self.assembliesByName and self.assembliesByName[aName] != a:
            # try to keep assem numbering correct
            runLog.error(
                "The assembly {1} in the reactor already has the name {0}.\nCannot add {2}. "
                "Current assemNum is {3}"
                "".format(aName, self.assembliesByName[aName], a, self.r.p.maxAssemNum)
            )
            raise RuntimeError("Core already contains an assembly with the same name.")
        self.assembliesByName[aName] = a
        for b in a:
            self.blocksByName[b.getName()] = b

        a.orientBlocks(parentSpatialGrid=self.spatialGrid)

        if self.geomType == geometry.GeomType.HEX:
            # adding an assembly in a new outer ring grows the tracked ring count
            ring, _loc = self.spatialGrid.getRingPos(a.spatialLocator.getCompleteIndices())
            if ring > self.numRings:
                self.numRings = ring

        # track the highest assem Num so when we load from a DB the future assemNums remain
        # constant
        aNum = a.p.assemNum
        if aNum > self.p.maxAssemNum:
            self.p.maxAssemNum = aNum

        if a.lastLocationLabel != a.DATABASE:
            # time the assembly enters the core in days
            a.p.chargeTime = self.r.p.time
            # cycle that the assembly enters the core
            a.p.chargeCycle = self.r.p.cycle
            # convert to kg
            a.p.chargeFis = a.getFissileMass() / 1000.0
            a.p.chargeBu = a.getMaxParam("percentBu")

    def genAssembliesAddedThisCycle(self):
        """
        Yield the assemblies that have been added in the current cycle.

        This uses the reactor's cycle parameter and the assemblies' chargeCycle parameters.
        """
        for a in self:
            if a.p.chargeCycle == self.r.p.cycle:
                yield a

    def getNumRings(self, indexBased=False):
        """
        Returns the number of rings in this reactor. Based on location, so indexing will start at
        1.

        Circular ring shuffling changes the interpretation of this result.

        Warning
        -------
        If you loop through range(maxRing) then ring+1 is the one you want!

        Parameters
        ----------
        indexBased : bool, optional
            If true, will force location-index interpretation, even if "circular shuffling" is
            enabled.
        """
        if self.circularRingList and not indexBased:
            return max(self.circularRingList)
        else:
            return self.getNumHexRings()

    def getNumHexRings(self):
        """Return the number of hex rings in the core.
Based on location so indexing starts at 1.""" maxRing = 0 for a in self: ring, _pos = self.spatialGrid.getRingPos(a.spatialLocator) maxRing = max(maxRing, ring) return maxRing def getNumAssembliesWithAllRingsFilledOut(self, nRings): """ Returns nAssmWithBlanks (see description immediately below). Parameters ---------- nRings : int The number of hex assembly rings in this core, including non-ful) rings. Returns ------- nAssmWithBlanks: int The number of assemblies that WOULD exist in this core if all outer assembly hex rings were "filled out". """ if self.powerMultiplier == 1: return 3 * nRings * (nRings - 1) + 1 else: return nRings * (nRings - 1) + (nRings + 1) // 2 def getNumEnergyGroups(self): """ Return the number of energy groups used in the problem. See Also -------- armi.nuclearDataIO.ISOTXS.read1D : reads the number of energy groups off the ISOTXS library. """ return self.lib.numGroups def countBlocksWithFlags(self, blockTypeSpec, assemTypeSpec=None): """ Return the total number of blocks in an assembly in the reactor that meets the specified type. Parameters ---------- blockTypeSpec : Flags or list of Flags The types of blocks to be counted in a single assembly assemTypeSpec : Flags or list of Flags The types of assemblies that are to be examine for the blockTypes of interest. None is every assembly. Returns ------- maxBlocks : int The maximum number of blocks of the specified types in a single assembly in the core. """ assems = self.getAssemblies(typeSpec=assemTypeSpec) try: return max(sum(b.hasFlags(blockTypeSpec) for b in a) for a in assems) except ValueError: # In case assems is empty return 0 def countFuelAxialBlocks(self): """ Return the maximum number of fuel type blocks in any assembly in the core. 
See Also -------- getFirstFuelBlockAxialNode """ fuelblocks = (a.getBlocks(Flags.FUEL) for a in self.getAssemblies(includeBolAssems=True)) try: return max(len(fuel) for fuel in fuelblocks) except ValueError: # thrown when iterator is empty return 0 def getFirstFuelBlockAxialNode(self): """ Determine the offset of the fuel from the grid plate in the assembly with the lowest fuel block. This assembly will dictate at what block level the SASSYS reactivity coefficients will start to be generated """ try: return min( i for a in self.getAssemblies(includeBolAssems=True) for (i, b) in enumerate(a) if b.hasFlags(Flags.FUEL) ) except ValueError: # ValueError is thrown if min is called on an empty sequence. return float("inf") def getAssembliesInRing( self, ring, typeSpec=None, exactType=False, exclusions=None, overrideCircularRingMode=False, ) -> list[assemblies.Assembly]: """ Returns the assemblies in a specified ring. Definitions of rings can change with problem parameters. This function acts as a switch between two separate functions that define what a ring is based on a cs setting 'circularRingMode' Parameters ---------- ring : int The ring number typeSpec : str, list a string or list of assembly types of interest exactType : bool flag to match the assembly type exactly exclusions : list of assemblies list of assemblies that are not to be considered overrideCircularRingMode : bool, optional False ~ default: use circular/square/hex rings, just as the reactor defines them True ~ If you know you don't want to use the circular ring mode, and instead want square or hex. 
        Returns
        -------
        aList : list of assemblies
            A list of assemblies that match the criteria within the ring
        """
        # dispatch to the ring definition selected by the circularRingMode setting
        if self._circularRingMode and not overrideCircularRingMode:
            getter = self.getAssembliesInCircularRing
        else:
            getter = self.getAssembliesInSquareOrHexRing

        return getter(ring=ring, typeSpec=typeSpec, exactType=exactType, exclusions=exclusions)

    def getMaxAssembliesInHexRing(self, ring, fullCore=False):
        """
        Returns the maximum number of assemblies possible for a given Hexagonal ring.

        ring - The ring of interest to calculate the maximum number of assemblies.
        numEdgeAssems - The number of edge assemblies in the reactor model (1/3 core).

        Notes
        -----
        Assumes that odd rings do not have an edge assembly in third core geometry.
        """
        numAssemsUpToOuterRing = self.getNumAssembliesWithAllRingsFilledOut(ring)
        numAssemsUpToInnerRing = self.getNumAssembliesWithAllRingsFilledOut(ring - 1)
        maxAssemsInRing = numAssemsUpToOuterRing - numAssemsUpToInnerRing

        # See note*
        if not fullCore:
            ringMod = ring % 2
            if ringMod == 1:
                maxAssemsInRing -= 1

        return maxAssemsInRing

    def getAssembliesInSquareOrHexRing(
        self, ring, typeSpec=None, exactType=False, exclusions=None
    ) -> list[assemblies.Assembly]:
        """
        Returns the assemblies in a specified ring. Definitions of rings can change with problem
        parameters.

        Parameters
        ----------
        ring : int
            The ring number

        typeSpec : Flags or [Flags], optional
            a Flags instance or list of Flags with assembly types of interest

        exactType : bool
            flag to match the assembly type exactly

        exclusions : list of assemblies
            list of assemblies that are not to be considered

        Returns
        -------
        assems : list of assemblies
            A list of assemblies that match the criteria within the ring
        """
        assems = Sequence(self)

        if exclusions:
            exclusions = set(exclusions)
            assems.drop(lambda a: a in exclusions)

        # filter based on geomType
        if self.geomType == geometry.GeomType.CARTESIAN:
            # a ring in cartesian is basically a square.
            assems.select(lambda a: any(xy == ring for xy in abs(a.spatialLocator.indices[:2])))
        else:
            assems.select(lambda a: (a.spatialLocator.getRingPos()[0] == ring))

        # filter based on typeSpec
        if typeSpec:
            assems.select(lambda a: a.hasFlags(typeSpec, exact=exactType))

        return list(assems)

    def getAssembliesInCircularRing(
        self, ring, typeSpec=None, exactType=False, exclusions=None
    ) -> list[assemblies.Assembly]:
        """
        Gets an assemblies within a circular range of the center of the core.

        This function allows for more circular styled assembly shuffling instead of the current
        hex approach.

        Parameters
        ----------
        ring : int
            The ring number

        typeSpec : Flags or list of Flags
            a Flags instance or list of Flags with assembly types of interest

        exactType : bool
            flag to match the assembly type exactly

        exclusions : list of assemblies
            list of assemblies that are not to be considered

        Returns
        -------
        assems : list of assemblies
            A list of assemblies that match the criteria within the ring
        """
        if self.geomType == geometry.GeomType.CARTESIAN:
            # a ring in cartesian is basically a square.
            raise RuntimeError("A circular ring in cartesian coordinates has not been defined yet.")

        # determine if the circularRingList has been generated
        if not self.circularRingList:
            self.circularRingList = self.buildCircularRingDictionary(self._circularRingPitch)

        assems = Sequence(self)

        # Remove exclusions
        if exclusions:
            exclusions = set(exclusions)
            assems.drop(lambda a: a in exclusions)

        # get assemblies at locations
        locSet = self.circularRingList[ring]
        assems.select(lambda a: a.getLocation() in locSet)

        if typeSpec:
            assems.select(lambda a: a.hasFlags(typeSpec, exact=exactType))

        return list(assems)

    def buildCircularRingDictionary(self, ringPitch=1.0):
        """
        Builds a dictionary of all circular rings in the core.

        This is required information for getAssembliesInCircularRing.

        The purpose of this function is to allow for more circular core shuffling in the hex
        design.
        Parameters
        ----------
        ringPitch : float, optional
            The relative pitch that should be used to define the spacing between each ring.
        """
        runLog.extra("Building a circular ring dictionary with ring pitch {}".format(ringPitch))
        # measure distances from the assembly at the grid origin
        referenceAssembly = self.childrenByLocator[self.spatialGrid[0, 0, 0]]
        refLocation = referenceAssembly.spatialLocator
        pitchFactor = ringPitch / self.spatialGrid.pitch

        circularRingDict = collections.defaultdict(set)
        for a in self:
            dist = a.spatialLocator.distanceTo(refLocation)
            # To reduce numerical sensitivity, round distance to 6 decimal places
            # before truncating.
            index = int(round(dist * pitchFactor, 6)) or 1  # 1 is the smallest ring.
            circularRingDict[index].add(a.getLocation())

        return circularRingDict

    def _getAssembliesByName(self):
        """
        If the assembly name-to-assembly object map is deleted or out of date, then this will
        regenerate it.
        """
        runLog.extra("Generating assemblies-by-name map.")

        # NOTE: eliminated unnecessary repeated lookups in self for self.assembliesByName
        self.assembliesByName = assymap = {}
        # don't includeAll b/c detailed ones are not ready yet
        for assem in self.getAssemblies(includeBolAssems=True, includeSFP=True):
            aName = assem.getName()
            if aName in assymap and assymap[aName] != assem:
                # dangerous situation that can occur in restart runs where the global assemNum
                # isn't updated. !=assem clause added because sometimes an assem is in one of the
                # includeAll lists that is also in the core and that's ok.
                runLog.error(
                    "Two (or more) assemblies in the reactor (and associated lists) have the name "
                    "{0},\nincluding {1} and {2}.".format(aName, assem, assymap[aName])
                )
                raise RuntimeError("Assembly name collision.")

            assymap[aName] = assem

    def getAssemblyByName(self, name: str) -> assemblies.Assembly:
        """
        Find the assembly that has this name.

        .. impl:: Get assembly by name.
            :id: I_ARMI_R_GET_ASSEM0
            :implements: R_ARMI_R_GET_ASSEM

            This method returns the :py:class:`assembly
            <armi.reactor.core.assemblies.Assembly>` with a name matching the value provided as an
            input parameter to this function. The ``name`` of an assembly is based on the
            ``assemNum`` parameter.

        Parameters
        ----------
        name : str
            the assembly name e.g. 'A0001'

        Returns
        -------
        Assembly

        See Also
        --------
        getAssembly : more general version of this method
        """
        return self.assembliesByName[name]

    def getAssemblies(
        self,
        typeSpec=None,
        sortKey=None,
        includeBolAssems=False,
        includeSFP=False,
        includeAll=False,
        zones=None,
        exact=False,
    ) -> list[assemblies.Assembly]:
        """
        Return a list of all the assemblies in the reactor.

        Assemblies from the Core are sorted based on the location-based Assembly comparison
        operators. This is done so that two reactors with physically identical properties are more
        likely to behave similarly when their assemblies may have been added in different orders.
        (In the future this will likely be replaced by sorting the _children list itself
        internally, as there is still opportunity for inconsistencies.)

        Parameters
        ----------
        typeSpec : Flags or iterable of Flags, optional
            List of assembly types that will be returned

        sortKey : callable, optional
            Sort predicate to use when sorting the assemblies.

        includeBolAssems : bool, optional
            Include the BOL assemblies as well as the ones that are in the core. Default: False

        includeSFP : bool, optional
            Include assemblies in the SFP

        includeAll : bool, optional
            Will include ALL assemblies.
zones : iterable, optional Only include assemblies that are in this these zones """ if includeAll: includeBolAssems = includeSFP = True assems = [] if includeBolAssems and self.parent is not None and self.parent.blueprints is not None: assems.extend(self.parent.blueprints.assemblies.values()) assems.extend(a for a in sorted(self, key=sortKey)) if includeSFP and self.parent is not None and self.parent.excore.get("sfp") is not None: assems.extend(self.parent.excore.sfp.getChildren()) if typeSpec: assems = [a for a in assems if a.hasFlags(typeSpec, exact=exact)] if zones: zoneLocs = self.zones.getZoneLocations(zones) assems = [a for a in assems if a.getLocation() in zoneLocs] return assems def getNozzleTypes(self): r""" Get a dictionary of all of the assembly ``nozzleType``\ s in the core. Returns ------- nozzles : dict A dictionary of ``{nozzleType: nozzleID}`` pairs, where the nozzleIDs are numbers corresponding to the alphabetical order of the ``nozzleType`` names. Notes ----- Getting the ``nozzleID`` by alphabetical order could cause a problem if a new ``nozzleType`` is added during a run. This problem should not occur with the ``includeBolAssems=True`` argument provided. """ nozzleList = list(set(a.p.nozzleType for a in self.getAssemblies(includeBolAssems=True))) return {nozzleType: i for i, nozzleType in enumerate(sorted(nozzleList))} def getBlockByName(self, name: str) -> blocks.Block: """ Finds a block based on its name. Parameters ---------- name : str Block name e.g. A0001A Returns ------- Block : the block with the name Notes ----- The blocksByName structure must be up to date for this to work properly. 
""" try: return self.blocksByName[name] except AttributeError: self._genBlocksByName() return self.blocksByName[name] def getBlocksByIndices(self, indices) -> list[blocks.Block]: """Get blocks in assemblies by block indices.""" blocks = [] for i, j, k in indices: assem = self.childrenByLocator[self.spatialGrid[i, j, 0]] blocks.append(assem[k]) return blocks def _genBlocksByName(self): """If self.blocksByName is deleted, then this will regenerate it.""" self.blocksByName = {block.getName(): block for block in self.getBlocks(includeAll=True)} # This will likely fail, but it will help diagnose why property approach wasn't working # correctly def genBlocksByLocName(self): """If self.blocksByLocName is deleted, then this will regenerate it or update it if things change.""" self.blocksByLocName = {block.getLocation(): block for block in self.getBlocks(includeAll=True)} def getBlocks(self, bType=None, **kwargs) -> list[blocks.Block]: """ Returns an iterator over all blocks in the reactor in order. Parameters ---------- bType : list or Flags, optional Restrict results to a specific block type such as Flags.FUEL, Flags.SHIELD, etc. includeBolAssems : bool, optional Include the BOL-Assembly blocks as well. These blocks are created at BOL and used to create new assemblies, etc. If true, the blocks in these assemblies will be returned as well as the ones in the reactor. kwargs : dict Any keyword argument from :meth:`getAssemblies` Returns ------- blocks : iterator all blocks in the reactor (or of type requested) See Also -------- * :meth:`iterBlocks`: iterator over blocks with limited filtering. * :meth:`getAssemblies` : locates the assemblies in the search """ blocks = [b for a in self.getAssemblies(**kwargs) for b in a] if bType: blocks = [b for b in blocks if b.hasFlags(bType)] return blocks def getFirstBlock(self, blockType=None, exact=False) -> blocks.Block: """ Return the first block of the requested type in the reactor, or return first block. 
        exact=True will only match fuel, not testfuel, for example.

        Parameters
        ----------
        blockType : Flags, optional
            The type of block to return

        exact : bool, optional
            Requires an exact match on blockType

        Returns
        -------
        b : Block object (or None if no such block exists)
        """
        for a in self:
            for b in a:
                if b.hasFlags(blockType, exact):
                    return b
        return None

    def getFirstAssembly(self, typeSpec=None, exact=False) -> assemblies.Assembly:
        """
        Gets the first assembly in the reactor.

        Warning
        -------
        This function should be used with great care. There are **very** few circumstances in
        which one wants the "first" of a given sort of assembly, `whichever that may happen to
        be`. Precisely which assembly is returned is sensitive to all sorts of implementation
        details in Grids, etc., which make the concept of "first" rather slippery. Prefer using
        some sort of precise logic to pick a specific assembly from the Core.

        Parameters
        ----------
        typeSpec : Flags or iterable of Flags, optional
        """
        if typeSpec:
            try:
                return next(a for a in self if a.hasFlags(typeSpec, exact))
            except StopIteration:
                runLog.warning("No assem of type {0} in reactor".format(typeSpec))
                return None

        # Assumes at least one assembly in `self`
        return next(iter(self))

    def regenAssemblyLists(self):
        """
        If the attribute lists which contain assemblies are deleted (such as by
        reactors.detachAllAssemblies), then this function will call the other functions to regrow
        them.
        """
        self._getAssembliesByName()
        self._genBlocksByName()
        self._genChildByLocationLookupTable()

    def getAllXsSuffixes(self):
        """Return all XS suffices (e.g. AA, AB, etc.) in the core."""
        return sorted(set(b.getMicroSuffix() for b in self.iterBlocks()))

    def getNuclideCategories(self):
        """
        Categorize nuclides as coolant, fuel and structure.

        Notes
        -----
        This is used to categorize nuclides for Doppler broadening. Control nuclides are treated
        as structure.

        The categories are defined in the following way:

        1. Add nuclides from coolant components to coolantNuclides
        2. Add nuclides from fuel components to fuelNuclides (this may be incomplete, e.g. at BOL
           there are no fission products)
        3. Add nuclides from all other components to structureNuclides
        4. Since fuelNuclides may be incomplete, add anything else the user wants to model that
           isn't already listed in coolantNuclides or structureNuclides.

        Returns
        -------
        coolantNuclides : set
            set of nuclide names

        fuelNuclides : set
            set of nuclide names

        structureNuclides : set
            set of nuclide names
        """
        # lazily computed once and cached in self._nuclideCategories
        if not self._nuclideCategories:
            coolantNuclides = set()
            fuelNuclides = set()
            structureNuclides = set()
            for c in self.iterComponents():
                compNuclides = []
                # get only nuclides with non-zero number density
                # nuclides could be present at 0.0 density just for XS generation
                if c.p.numberDensities is None:
                    continue
                for nuc, dens in zip(c.p.nuclides, c.p.numberDensities):
                    if dens > 0.0:
                        # nuclide names are stored as bytes; decode to str
                        compNuclides.append(nuc.decode())
                if c.getName() == "coolant":
                    coolantNuclides.update(compNuclides)
                elif "fuel" in c.getName():
                    fuelNuclides.update(compNuclides)
                else:
                    structureNuclides.update(compNuclides)
            # coolant/fuel assignment wins over structure
            structureNuclides -= coolantNuclides
            structureNuclides -= fuelNuclides
            # everything else the user models is lumped into the fuel category (step 4 above)
            remainingNuclides = set(self.parent.blueprints.allNuclidesInProblem) - structureNuclides - coolantNuclides
            fuelNuclides.update(remainingNuclides)
            self._nuclideCategories["coolant"] = coolantNuclides
            self._nuclideCategories["fuel"] = fuelNuclides
            self._nuclideCategories["structure"] = structureNuclides
            self.summarizeNuclideCategories()

        return (
            self._nuclideCategories["coolant"],
            self._nuclideCategories["fuel"],
            self._nuclideCategories["structure"],
        )

    def summarizeNuclideCategories(self):
        """Write summary table of the various nuclide categories within the reactor."""
        runLog.info(
            "Nuclide categorization for cross section temperature assignments:\n"
            + tabulate.tabulate(
                [
                    (
                        "Fuel",
                        createFormattedStrWithDelimiter(self._nuclideCategories["fuel"]),
                    ),
                    (
                        "Coolant",
                        createFormattedStrWithDelimiter(self._nuclideCategories["coolant"]),
                    ),
                    (
                        "Structure",
                        createFormattedStrWithDelimiter(self._nuclideCategories["structure"]),
                    ),
                ],
                headers=["Nuclide Category", "Nuclides"],
                tableFmt="armi",
            )
        )

    def getLocationContents(self, locs, assemblyLevel=False, locContents=None):
        """
        Given a list of locations, this goes through and finds the blocks or assemblies.

        Parameters
        ----------
        locs : list of location objects or strings
            The locations you'd like to find assemblies in

        assemblyLevel : bool, optional
            If True, will find assemblies rather than blocks

        locContents : dict, optional
            A lookup table with location string keys and block/assembly values useful if you want
            to call this function many times and would like a speedup.

        Returns
        -------
        blockList : iterable
            List of blocks or assemblies that correspond to the locations passed in

        Notes
        -----
        Useful in reading the db.

        See Also
        --------
        makeLocationLookup : allows caching to speed this up if you call it a lot.
        """
        # Why isn't locContents an attribute of reactor? It could be another
        # property that is generated on demand
        if not locContents:
            locContents = self.makeLocationLookup(assemblyLevel)
        try:
            # now look 'em up
            return [locContents[str(loc)] for loc in locs]
        except KeyError as e:
            raise KeyError("There is nothing in core location {0}.".format(e))

    def makeLocationLookup(self, assemblyLevel=False):
        """
        Build a location-keyed lookup table to figure out which block (or assembly, if
        assemblyLevel=True) is in which location.

        Used within getLocationContents, but can also be used to pre-build a cache for that
        function, speeding the lookup with a cache.

        See Also
        --------
        getLocationContents : can use this lookup table to go faster.
        """
        # build a lookup table one time.
        if assemblyLevel:
            return {a.getLocation(): a for a in self}
        else:
            return {b.getLocation(): b for a in self for b in a}

    def getFluxVector(self, energyOrder=0, adjoint=False, extSrc=False, volumeIntegrated=True):
        """
        Return the multigroup real or adjoint flux of the entire reactor as a vector.
Order of meshes is based on getBlocks Parameters ---------- energyOrder : int, optional A value of 0 implies that the flux will have all energy groups for the first mesh point, and then all energy groups for the next mesh point, etc. A value of 1 implies that the flux will have values for all mesh points of the first energy group first, followed by all mesh points for the second energy group, etc. adjoint : bool, optional If True, will return adjoint flux instead of real flux. extSrc : bool, optional If True, will return external source instead of real flux. volumeIntegrated : bool, optional If true (default), flux units will be #-cm/s. If false, they will be #-cm^2/s Returns ------- vals : list The values you requested. length is NxG. """ flux = [] groups = range(self.lib.numGroups) # build in order 0 for b in self.iterBlocks(): if adjoint: vals = b.p.adjMgFlux elif extSrc: vals = b.p.extSrc else: vals = b.p.mgFlux if not volumeIntegrated: vol = b.getVolume() vals = [v / vol for v in vals] flux.extend(vals) if energyOrder == 1: # swap order newFlux = [] for g in groups: oneGroup = [flux[i] for i in range(g, len(flux), len(groups))] newFlux.extend(oneGroup) flux = newFlux return np.array(flux) def getAssembly(self, assemNum=None, locationString=None, assemblyName=None, *args, **kwargs): """ Finds an assembly in the core. 
Parameters ---------- assemNum : int, optional Returns the assembly with this assemNum locationString : str A location string assemblyName : str, optional The assembly name *args : additional optional arguments for self.getAssemblies Returns ------- a : Assembly The assembly that matches, or None if nothing is found See Also -------- getAssemblyByName getAssemblyWithStringLocation getLocationContents : a much more efficient way to look up assemblies in a list of locations """ if assemblyName: return self.getAssemblyByName(assemblyName) for a in self.getAssemblies(*args, **kwargs): if a.getLocation() == locationString: return a if a.getNum() == assemNum: return a return None def getAssemblyWithAssemNum(self, assemNum): """ Retrieve assembly with a particular assembly number from the core. Parameters ---------- assemNum : int The assembly number of interest Returns ------- foundAssembly : Assembly object or None The assembly found, or None """ return self.getAssembly(assemNum=assemNum) def getAssemblyWithStringLocation(self, locationString): """Returns an assembly or none if given a location string like '001-001'. .. impl:: Get assembly by location. :id: I_ARMI_R_GET_ASSEM1 :implements: R_ARMI_R_GET_ASSEM This method returns the :py:class:`assembly <armi.reactor.core.assemblies.Assembly>` located in the requested location. The location is provided to this method as an input parameter in a string with the format "001-001". For a :py:class:`HexGrid <armi.reactor.grids.hexagonal.HexGrid>`, the first number indicates the hexagonal ring and the second number indicates the position within that ring. For a :py:class:`CartesianGrid <armi.reactor.grids.cartesian.CartesianGrid>`, the first number represents the x index and the second number represents the y index. If there is no assembly in the grid at the requested location, this method returns None. 
""" ring, pos, _ = grids.locatorLabelToIndices(locationString) loc = self.spatialGrid.getLocatorFromRingAndPos(ring, pos) assem = self.childrenByLocator.get(loc) return assem def _checkIfAssemAtRingPosCycle(self, a, ring, pos, cycleNum): """ Interrogate location history param of specified assembly object. Return True if assembly was at specified (ring, pos) at specified cycleNum BOC. """ nCycles = len(a.p.ringPosHist) if nCycles > cycleNum: # requested cycleNum has data populated rp = a.p.ringPosHist[cycleNum] if rp[0] not in a.NOT_IN_CORE: if (int(rp[0]), int(rp[1])) == (ring, pos): return True return False def getAssemblyWithRingPosHist(self, ring, pos, cycleNum): """ Search the Core and SFP for assembly which resided at specified ring and position at specified cycle. This is an alternative to getting an assembly by number or string location. """ # search core for a in self: if self._checkIfAssemAtRingPosCycle(a, ring, pos, cycleNum): return a # search sfp if self.parent.excore.get("sfp") is not None: for a in list(self.r.excore["sfp"]): if self._checkIfAssemAtRingPosCycle(a, ring, pos, cycleNum): return a return None def getAssemblyPitch(self): """ Find the assembly pitch for the whole core. This returns the pitch according to the spatialGrid. To capture any thermal/hydraulic feedback of the core pitch, T/H modules will need to modify the grid pitch directly based on the relevant mechanical assumptions. Returns ------- pitch : float The assembly pitch. """ return self.spatialGrid.pitch def findNeighbors(self, a, showBlanks=True, duplicateAssembliesOnReflectiveBoundary=False): r""" Find assemblies that are next to this assembly. Return a list of neighboring assemblies. For a hexagonal grid, the list begins from the 30 degree point (point 1) then moves counterclockwise around. For a Cartesian grid, the order of the neighbors is east, north, west, south. .. impl:: Retrieve neighboring assemblies of a given assembly. 
:id: I_ARMI_R_FIND_NEIGHBORS :implements: R_ARMI_R_FIND_NEIGHBORS This method takes an :py:class:`Assembly <armi.reactor.assemblies.Assembly>` as an input parameter and returns a list of the assemblies neighboring that assembly. There are 6 neighbors in a hexagonal grid and 4 neighbors in a Cartesian grid. The (i, j) indices of the neighbors are provided by :py:meth:`getNeighboringCellIndices <armi.reactor.grids.StructuredGrid.getNeighboringCellIndices>`. For a hexagonal grid, the (i, j) indices are converted to (ring, position) indexing using the ``core.spatialGrid`` instance attribute. The ``showBlanks`` option determines whether non-existing assemblies will be indicated with a ``None`` in the list or just excluded from the list altogether. The ``duplicateAssembliesOnReflectiveBoundary`` setting only works for 1/3 core symmetry with periodic boundary conditions. For these types of geometries, if this setting is ``True``\ , neighbor lists for assemblies along a periodic boundary will include the assemblies along the opposite periodic boundary that are effectively neighbors. Parameters ---------- a : Assembly object The assembly to find neighbors of. showBlanks : Boolean, optional If True, the returned array of 6 neighbors will return "None" for neighbors that do not explicitly exist in the 1/3 core model (including many that WOULD exist in a full core model). If False, the returned array will not include the "None" neighbors. If one or more neighbors does not explicitly exist in the 1/3 core model, the returned array will have a length of less than 6. duplicateAssembliesOnReflectiveBoundary : Boolean, optional If True, findNeighbors duplicates neighbor assemblies into their "symmetric identicals" so that even assemblies that border symmetry lines will have 6 neighbors. The only assemblies that will have fewer than 6 neighbors are those that border the outer core boundary (usually vacuum). 
If False, findNeighbors returns None for assemblies that do not exist in a 1/3 core model (but WOULD exist in a full core model). For example, applying findNeighbors for the central assembly (ring, pos) = (1, 1) in 1/3 core symmetry (with duplicateAssembliesOnReflectiveBoundary = True) would return a list of 6 assemblies, but those 6 would really only be assemblies (2, 1) and (2, 2) repeated 3 times each. Note that the value of duplicateAssembliesOnReflectiveBoundary only really matters if showBlanks == True. This will have no effect if the model is full core since asymmetric models could find many duplicates in the other thirds Notes ----- The duplicateAssembliesOnReflectiveBoundary setting only works for third core symmetry. This uses the 'mcnp' index map (MCNP GEODST hex coordinates) instead of the standard (ring, pos) map. because neighbors have consistent indices this way. We then convert over to (ring, pos) using the lookup table that a reactor has. Returns ------- neighbors : list of assembly objects This is a list of "nearest neighbors" to assembly a. If showBlanks = False, it will return fewer than the maximum number of neighbors if not all neighbors explicitly exist in the core model. For a hexagonal grid, the maximum number of neighbors is 6. For a Cartesian grid, the maximum number is 4. If showBlanks = True and duplicateAssembliesOnReflectiveBoundary = False, it will have a "None" for assemblies that do not exist in the 1/3 model. If showBlanks = True and duplicateAssembliesOnReflectiveBoundary = True, it will return the existing "symmetric identical" assembly of a non-existing assembly. It will only return "None" for an assembly when that assembly is non-existing AND has no existing "symmetric identical". 
        See Also
        --------
        grids.Grid.getSymmetricEquivalents
        """
        neighborIndices = self.spatialGrid.getNeighboringCellIndices(*a.spatialLocator.getCompleteIndices())
        # duplicate-reflector lookup is only meaningful for 1/3-core periodic symmetry
        dupReflectors = (
            self.symmetry.domain == geometry.DomainType.THIRD_CORE
            and self.symmetry.boundary == geometry.BoundaryType.PERIODIC
            and duplicateAssembliesOnReflectiveBoundary
        )

        neighbors = []
        for iN, jN, kN in neighborIndices:
            neighborLoc = self.spatialGrid[iN, jN, kN]
            neighbor = self.childrenByLocator.get(neighborLoc)
            if neighbor is not None:
                neighbors.append(neighbor)
            elif showBlanks:
                # no assembly at that location in this (possibly partial-core) model
                if dupReflectors:
                    # substitute the symmetric identical from across the periodic boundary (may be None)
                    symmetricAssem = self._getReflectiveDuplicateAssembly(neighborLoc)
                    neighbors.append(symmetricAssem)
                else:
                    neighbors.append(None)

        return neighbors

    def _getReflectiveDuplicateAssembly(self, neighborLoc):
        """
        Return duplicate assemblies across symmetry line.

        Notes
        -----
        If an existing symmetric identical has been found, return it.
        If an existing symmetric identical has NOT been found, return a None (it's empty).
        """
        duplicates = []
        otherTwoLocations = self.spatialGrid.getSymmetricEquivalents(neighborLoc)
        for i, j in otherTwoLocations:
            neighborLocation2 = self.spatialGrid[i, j, 0]
            duplicateAssem = self.childrenByLocator.get(neighborLocation2)
            if duplicateAssem is not None:
                duplicates.append(duplicateAssem)

        # should always be 0 or 1
        nDuplicates = len(duplicates)
        if nDuplicates == 1:
            return duplicates[0]
        elif nDuplicates > 1:
            raise ValueError("Too many neighbors found!")

        return None

    def setMoveList(self, cycle, oldLoc, newLoc, enrichList, assemblyType, ringPosCycle=None):
        """Tracks the movements in terms of locations and enrichments."""
        # local import; presumably avoids a circular import at module load — TODO confirm
        from armi.physics.fuelCycle.fuelHandlers import AssemblyMove

        data = AssemblyMove(oldLoc, newLoc, enrichList, assemblyType, ringPosCycle)

        if self.moves.get(cycle) is None:
            self.moves[cycle] = []

        if data in self.moves[cycle]:
            # remove the old version and throw the new one at the end.
self.moves[cycle].remove(data) self.moves[cycle].append(data) def createFreshFeed(self, cs=None): """ Creates a new feed assembly. Parameters ---------- cs : Settings Global settings for the case See Also -------- createAssemblyOfType: creates an assembly """ return self.createAssemblyOfType(assemType=self._freshFeedType, cs=cs) def createAssemblyOfType(self, assemType=None, enrichList=None, cs=None): """ Create an assembly of a specific type and apply enrichments if they are specified. Parameters ---------- assemType : str The assembly type to create enrichList : list weight percent enrichments of each block cs : Settings Global settings for the case Returns ------- a : Assembly A new assembly Notes ----- This and similar fuel shuffle-enabling functionality on the Core are responsible for coupling between the Core and Blueprints. Technically, it should not be required to involve Blueprints at all in the construction of a Reactor model. Therefore in some circumstances, this function will not work. Ultimately, this should be purely the domain of blueprints themselves, and may be migrated out of Core in the future. See Also -------- armi.fuelHandler.doRepeatShuffle : uses this to repeat shuffling """ a = self.parent.blueprints.constructAssem(cs, name=assemType) # check to see if a default bol assembly is being used or we are adding more information if enrichList: # got an enrichment list that should be the same height as the fuel blocks if isinstance(enrichList, float): # make endlessly iterable if float was passed in enrichList = itertools.cycle([enrichList]) elif len(a) != len(enrichList): raise RuntimeError("{0} and enrichment list do not have the same number of blocks.".format(a)) for b, enrich in zip(a, enrichList): if enrich == 0.0: # don't change blocks when enrich specified as 0 continue if abs(b.getUraniumMassEnrich() - enrich) > 1e-10: # only adjust block enrichment if it's different. 
# WARNING: If this is not fresh fuel, this messes up the number of moles of HM at BOL and # therefore breaks the burnup metric. b.adjustUEnrich(enrich) if not self._detailedAxialExpansion: # if detailedAxialExpansion: False, make sure that the assembly being created has the correct core mesh a.setBlockMesh(self.p.referenceBlockAxialMesh[1:], conserveMassFlag="auto") # pass [1:] to skip 0.0 return a def saveAllFlux(self, fName="allFlux.txt"): """Dump all flux to file for debugging purposes.""" groups = range(self.lib.numGroups) with open(fName, "w") as f: for block in self.iterBlocks(): for gi in groups: f.write( "{:10s} {:10d} {:12.5E} {:12.5E} {:12.5E}\n".format( block.getName(), gi, block.p.mgFlux[gi], block.p.adjMgFlux[gi], block.getVolume(), ) ) if len(block.p.mgFlux) > len(groups) or len(block.p.adjMgFlux) > len(groups): raise ValueError( "Too many flux values: {}\n{}\n{}".format(block, block.p.mgFlux, block.p.adjMgFlux) ) def getAssembliesOnSymmetryLine(self, symmetryLineID): """Find assemblies that are on a symmetry line in a symmetric core.""" assembliesOnLine = [] for a in self: if a.isOnWhichSymmetryLine() == symmetryLineID: assembliesOnLine.append(a) # in order of innermost to outermost (for averaging) assembliesOnLine.sort(key=lambda a: a.spatialLocator.getRingPos()) return assembliesOnLine def getCoreRadius(self): """Returns a radius that the core would fit into.""" return self.getNumRings(indexBased=True) * self.getFirstBlock().getPitch() def findAllMeshPoints(self, assems=None, applySubMesh=True): """ Return all mesh positions in core including both endpoints. .. impl:: Construct a mesh based on core blocks. :id: I_ARMI_R_MESH :implements: R_ARMI_R_MESH This method iterates through all of the assemblies provided, or all assemblies in the core if no list of ``assems`` is provided, and constructs a tuple of three lists which contain the unique i, j, and k mesh coordinates, respectively. 
        The ``applySubMesh`` setting controls whether the mesh will include the submesh coordinates.

        For a standard assembly-based reactor geometry with a hexagonal or Cartesian assembly grid,
        this method is only used to produce axial (k) mesh points. If multiple assemblies are
        provided with different axial meshes, the axial mesh list will contain the union of all
        unique mesh points. Duplicate mesh points are removed.

        Parameters
        ----------
        assems : list, optional
            assemblies to consider when determining the mesh points. If not given, all in-core
            assemblies are used.
        applySubMesh : bool, optional
            Apply submeshing parameters to make mesh points smaller than blocks. Default=True.

        Returns
        -------
        meshVals : tuple
            ((i-vals), (j-vals,), (k-vals,))

        See Also
        --------
        armi.reactor.assemblies.Assembly.getAxialMesh : get block mesh

        Notes
        -----
        These include all mesh points, not just block boundaries. There may be multiple mesh points
        per block.

        If a large block with multiple mesh points is in the same core as arbitrarily-expanded fuel
        blocks from fuel performance, an imbalanced axial mesh may result.

        There is a challenge with TRZ blocks because we need the mesh centroid in terms of RZT,
        not XYZ.

        When determining the submesh, it is important to not use too small of a rounding precision.
        It was found that when using a precision of units.FLOAT_DIMENSION_DECIMALS, the division in
        `step` can produce mesh points that are the same up to the 9th or 10th digit, resulting in a
        repeated mesh point. This repetition results in problems in downstream methods, such as the
        uniform mesh converter.
""" runLog.debug("Finding all mesh points.") if assems is None: assems = list(self) iMesh, jMesh, kMesh = set(), set(), set() for a in assems: for b in a: # these params should be combined into a new b.p.meshSubdivisions tuple numPoints = (a.p.AziMesh, a.p.RadMesh, b.p.axMesh) if applySubMesh else (1, 1, 1) base = b.spatialLocator.getGlobalCellBase() # make sure this is in mesh coordinates (important to have TRZ, not XYZ in TRZ cases top = b.spatialLocator.getGlobalCellTop() for axis, (collection, subdivisions) in enumerate(zip((iMesh, jMesh, kMesh), numPoints)): axisVal = float(base[axis]) # convert from np.float64 step = float(top[axis] - axisVal) / subdivisions for _subdivision in range(subdivisions): collection.add(round(axisVal, units.FLOAT_DIMENSION_DECIMALS)) axisVal += step # add top too (only needed for last point) collection.add(round(axisVal, units.FLOAT_DIMENSION_DECIMALS)) iMesh, jMesh, kMesh = map(sorted, (iMesh, jMesh, kMesh)) return iMesh, jMesh, kMesh def findAllAxialMeshPoints(self, assems=None, applySubMesh=True): """Return a list of all z-mesh positions in the core including zero and the top.""" _i, _j, k = self.findAllMeshPoints(assems, applySubMesh) return k def updateAxialMesh(self): """ Update axial mesh based on perturbed meshes of the assemblies that are linked to the ref assem. Notes ----- While processLoading finds *all* axial mesh points, this method only updates the values of the known mesh with the current assembly heights. **This does not change the number of mesh points**. If ``detailedAxialExpansion`` is active, the global axial mesh param still only tracks the refAssem. Otherwise, thousands upon thousands of mesh points would get created. See Also -------- processLoading : sets up the primary mesh that this perturbs. """ # most of the time, we want fuel, but they should mostly have the same number of blocks # if this becomes a problem, we might find either the # 1. mode: (len(a) for a in self).mode(), or # 2. 
max: max(len(a) for a in self) # depending on what makes the most sense refAssem = self.refAssem refMesh = self.findAllAxialMeshPoints([refAssem]) avgHeight = average1DWithinTolerance( np.array( [ [h for b in a for h in [(b.p.ztop - b.p.zbottom) / b.p.axMesh] * b.p.axMesh] for a in self if self.findAllAxialMeshPoints([a]) == refMesh ] ) ) self.p.axialMesh = list(np.append([0.0], avgHeight.cumsum())) def findAxialMeshIndexOf(self, heightCm): """ Return the axial index of the axial node corresponding to this height. If the height lies on the boundary between two nodes, the lower node index is returned. Parameters ---------- heightCm : float The height (cm) from the assembly bottom. Returns ------- zIndex : int The axial index (beginning with 0) of the mesh node containing the given height. """ for zi, currentHeightCm in enumerate(self.p.axialMesh[1:]): if currentHeightCm >= heightCm: return zi raise ValueError( "The value {} cm is not within range of the reactor axial mesh with max {}".format( heightCm, currentHeightCm ) ) def addMoreNodes(self, meshList): """Add additional mesh points in the the meshList so that the ratio of mesh sizes does not vary too fast.""" ratio = self._minMeshSizeRatio for i, innerMeshVal in enumerate(meshList[1:-1], start=1): dP0 = innerMeshVal - meshList[i - 1] dP1 = meshList[i + 1] - innerMeshVal if dP0 / (dP0 + dP1) < ratio: runLog.warning("Mesh gap too small. Adjusting mesh to be more reasonable.") meshList.append(innerMeshVal + dP1 * ratio) meshList.sort() return meshList, False elif dP0 / (dP0 + dP1) > (1.0 - ratio): runLog.warning("Mesh gap too large. Adjusting mesh to be more reasonable.") meshList.append(meshList[i - 1] + dP0 * (1.0 - ratio)) meshList.sort() return meshList, False return meshList, True def findAllAziMeshPoints(self, extraAssems=None, applySubMesh=True): """ Returns a list of all azimuthal (theta)-mesh positions in the core. 
        Parameters
        ----------
        extraAssems : list
            additional assemblies to consider when determining the mesh points. They may be useful
            in the MCPNXT models to represent the fuel management dummies.
        applySubMesh : bool
            generates submesh points to further discretize the theta reactor mesh
        """
        i, _, _ = self.findAllMeshPoints(extraAssems, applySubMesh)
        return i

    def findAllRadMeshPoints(self, extraAssems=None, applySubMesh=True):
        """
        Return a list of all radial-mesh positions in the core.

        Parameters
        ----------
        extraAssems : list
            additional assemblies to consider when determining the mesh points. They may be useful
            in the MCPNXT models to represent the fuel management dummies.
        applySubMesh : bool (not implemented)
            generates submesh points to further discretize the radial reactor mesh
        """
        _, j, _ = self.findAllMeshPoints(extraAssems, applySubMesh)
        return j

    def getMaxBlockParam(self, *args, **kwargs):
        """Get max param over blocks."""
        if "generationNum" in kwargs:
            raise ValueError("Cannot getMaxBlockParam over anything but blocks. Prefer `getMaxParam`.")
        # generationNum=2 restricts the parent's search to the block level (Core -> Assembly -> Block)
        kwargs["generationNum"] = 2
        return self.getMaxParam(*args, **kwargs)

    def getTotalBlockParam(self, *args, **kwargs):
        """Get total param over blocks."""
        if "generationNum" in kwargs:
            raise ValueError("Cannot getTotalBlockParam over anything but blocks. Prefer `calcTotalParam`.")
        # generationNum=2 restricts the parent's search to the block level (Core -> Assembly -> Block)
        kwargs["generationNum"] = 2
        return self.calcTotalParam(*args, **kwargs)

    def getMaxNumPins(self):
        """Find max number of pins of any block in the reactor."""
        return max(b.getNumPins() for b in self.iterBlocks())

    def getMinimumPercentFluxInFuel(self, target=0.005):
        """
        Starting with the outer ring, this method goes through the entire Reactor to determine what
        percentage of flux occurs at each ring.
        Parameters
        ----------
        target : float
            This is the fraction of the total reactor fuel flux compared to the flux in a specific
            assembly in a ring

        Returns
        -------
        targetRing, fraction of flux : tuple
            targetRing is the ring with the fraction of flux that best meets the target.
        """
        # get the total number of assembly rings
        numRings = self.getNumRings()

        # old target assembly fraction
        fluxFraction = 0
        targetRing = numRings

        allFuelBlocks = self.getBlocks(Flags.FUEL)

        # loop there all of the rings, from outermost inward
        for ringNumber in range(numRings, 0, -1):
            # Compare to outer most ring. flatten list into one list of all blocks
            blocksInRing = list(
                itertools.chain.from_iterable([a.iterBlocks(Flags.FUEL) for a in self.getAssembliesInRing(ringNumber)])
            )
            # NOTE(review): despite the local names, these totals are of the "flux" param, not power
            totalPower = self.getTotalBlockParam("flux", objs=allFuelBlocks)
            ringPower = self.getTotalBlockParam("flux", objs=blocksInRing)
            # make sure that there is a non zero return
            if fluxFraction == 0 and ringPower > 0:
                fluxFraction = ringPower / totalPower
                targetRing = ringNumber
            # this will only get the leakage if the target fraction isn't too low
            if ringPower / totalPower < target and ringPower / totalPower > fluxFraction:
                fluxFraction = ringPower / totalPower
                targetRing = ringNumber
        return targetRing, fluxFraction

    def getAvgTemp(self, typeSpec, blockList=None, flux2Weight=False):
        """
        Get the volume-average fuel, cladding, coolant temperature in core.

        Parameters
        ----------
        typeSpec : Flags or list of Flags
            Component types to consider. If typeSpec is a list, then you get the volume average
            temperature of all components. For instance, getAvgTemp([Flags.CLAD, Flags.WIRE,
            Flags.DUCT]) returns the avg. structure temperature.
        blockList : list, optional
            Blocks to consider. If None, all blocks in core will be considered
        flux2Weight : bool, optional
            If true, will weight temperature against flux**2

        Returns
        -------
        avgTemp : float
            The average temperature in C.

        Raises
        ------
        RuntimeError
            If no components matching ``typeSpec`` contribute any volume (denominator is zero).
        """
        num = 0.0
        denom = 0.0
        if not blockList:
            blockList = self.getBlocks()

        for b in blockList:
            if flux2Weight:
                weight = b.p.flux**2.0
            else:
                weight = 1.0
            for c in b.iterComponents(typeSpec):
                # volume-weighted (and optionally flux^2-weighted) average
                vol = c.getVolume()
                num += c.temperatureInC * vol * weight
                denom += vol * weight

        if denom:
            return num / denom
        else:
            raise RuntimeError("no temperature average for {0}".format(typeSpec))

    def growToFullCore(self, cs):
        """Copies symmetric assemblies to build a full core model out of a 1/3 core model.

        Returns
        -------
        converter : GeometryConverter
            Geometry converter used to do the conversion.
        """
        # local import; presumably avoids a circular import at module load — TODO confirm
        from armi.reactor.converters.geometryConverters import (
            ThirdCoreHexToFullCoreChanger,
        )

        converter = ThirdCoreHexToFullCoreChanger(cs)
        converter.convert(self.r)
        return converter

    def setPitchUniform(self, pitchInCm):
        """Set the pitch in all blocks."""
        for b in self.iterBlocks():
            b.setPitch(pitchInCm)

        # have to update the 2-D reactor mesh too.
        self.spatialGrid.changePitch(pitchInCm)

    def calcBlockMaxes(self):
        """
        Searches all blocks for maximum values of key params.

        See Also
        --------
        armi.physics.optimize.OptimizationInterface.interactBOL : handles these maxes in optimization cases
        """
        # restrict to fuel
        for k in self.p.paramDefs.inCategory("block-max").names:
            try:
                # strip the "max" prefix to get the underlying block param name
                maxVal = self.getMaxBlockParam(k.replace("max", ""), Flags.FUEL)
                if maxVal != 0.0:
                    self.p[k] = maxVal
            except KeyError:
                continue

        # add maxes based on pin-level max if it exists, block level max otherwise.
        self.p.maxBuF = max(
            (a.getMaxParam("percentBu") for a in self.getAssemblies(Flags.FEED | Flags.FUEL)),
            default=0.0,
        )
        self.p.maxBuI = max(
            (
                a.getMaxParam("percentBu")
                for a in self.getAssemblies(
                    [
                        Flags.IGNITER | Flags.FUEL,
                        Flags.DRIVER | Flags.FUEL,
                        Flags.STARTER | Flags.FUEL,
                    ]
                )
            ),
            default=0.0,
        )

    def getFuelBottomHeight(self):
        """
        Obtain the height of the lowest fuel in the core.

        This is the "axial coordinate shift" between ARMI and SASSYS.
While ARMI sets z=0 at the bottom of the lowest block (usually the grid plate), SASSYS sets z=0 at the bottom of the fuel. Returns ------- lowestFuelHeightInCm : float The height (cm) of the lowest fuel in this core model. """ lowestFuelHeightInCm = self[0].getHeight() fuelBottoms = [] for a in self.getAssemblies(Flags.FUEL): fuelHeightInCm = 0.0 for b in a: if b.hasFlags(Flags.FUEL): break else: fuelHeightInCm += b.getHeight() if fuelHeightInCm < lowestFuelHeightInCm: lowestFuelHeightInCm = fuelHeightInCm fuelBottoms.append(fuelHeightInCm) return lowestFuelHeightInCm def processLoading(self, cs, dbLoad: bool = False): """ After nuclide densities are loaded, this goes through and prepares the reactor. Notes ----- This does a few operations : * It process boosters, * sets axial snap lists, * checks the geometry, * sets up location tables (tracks where the initial feeds were (for moderation or something) See Also -------- updateAxialMesh : Perturbs the axial mesh originally set up here. """ self.setOptionsFromCs(cs) runLog.header("=========== Initializing Mesh, Assembly Zones, and Nuclide Categories =========== ") for b in self.iterBlocks(): if b.p.molesHmBOL > 0.0: break else: # Good easter egg, but sometimes a user will want to use the framework do # only decay analyses and heavy metals are not required. runLog.warning( "The system has no heavy metal and therefore is not a nuclear reactor.\n" "Please make sure that this is intended and not a input error." ) if dbLoad: # reactor.blueprints.assemblies need to be populated this normally happens during # blueprint constructAssem. But for DB load, this is not called so it must be here. 
            self.parent.blueprints._prepConstruction(cs)
        else:
            # set reactor level meshing params
            nonUniformAssems = [Flags.fromStringIgnoreErrors(t) for t in cs[CONF_NON_UNIFORM_ASSEM_FLAGS]]
            # Some assemblies, like control assemblies, have a non-conforming mesh and should not be
            # included in self.p.referenceBlockAxialMesh and self.p.axialMesh
            uniformAssems = [a for a in self.getAssemblies() if not any(a.hasFlags(f) for f in nonUniformAssems)]
            self.p.referenceBlockAxialMesh = self.findAllAxialMeshPoints(
                assems=uniformAssems,
                applySubMesh=False,
            )
            self.p.axialMesh = self.findAllAxialMeshPoints(
                assems=uniformAssems,
                applySubMesh=True,
            )

        self.getNuclideCategories()

        # Generate list of flags that are to be stationary during assembly shuffling
        stationaryBlockFlags = []
        for stationaryBlockFlagString in cs[CONF_STATIONARY_BLOCK_FLAGS]:
            stationaryBlockFlags.append(Flags.fromString(stationaryBlockFlagString))
        self.stationaryBlockFlagsList = stationaryBlockFlags

        self.setBlockMassParams()

        self.p.maxAssemNum = self.getMaxParam("assemNum")

        # let plugins react to the freshly-loaded core
        getPluginManagerOrFail().hook.onProcessCoreLoading(core=self, cs=cs, dbLoad=dbLoad)

    def buildManualZones(self, cs):
        """
        Build the Zones that are defined in the given Settings, in the `zoneDefinitions` or
        `zonesFile` case setting.

        Parameters
        ----------
        cs : Settings
            The standard ARMI settings object

        Examples
        --------
        Manual zones will be defined in a special string format, e.g.:

        >>> zoneDefinitions:
        >>> - "ring-1: 001-001"
        >>> - "ring-2: 002-001, 002-002"
        >>> - "ring-3: 003-001, 003-002, 003-003"

        Notes
        -----
        This function will just define the Zones it sees in the settings, it does not do any
        validation against a Core object to ensure those manual zones make sense.
""" if cs[CONF_ZONE_DEFINITIONS]: runLog.info(f"Building Zones by manual definitions in {CONF_ZONE_DEFINITIONS} setting") stripper = lambda s: s.strip() self.zones = zones.Zones() # parse the special input string for zone definitions for zoneString in cs[CONF_ZONE_DEFINITIONS]: zoneName, zoneLocs = zoneString.split(":") zoneLocs = zoneLocs.split(",") zone = zones.Zone(zoneName.strip()) zone.addLocs(map(stripper, zoneLocs)) self.zones.addZone(zone) elif cs[CONF_ZONES_FILE]: runLog.info(f"Custom zoning strategy applied from {CONF_ZONES_FILE}.") self.zones = Zones() with open(cs[CONF_ZONES_FILE]) as stream: zonesDict = YAML(typ="safe").load(stream) for location, zoneName in zonesDict["customZonesMap"].items(): # if the the zoneName isn't already a Zones key, then add a new Zone if zoneName not in self.zones: self.zones.addZone(Zone(zoneName, [location])) # if the zoneName is already a Zones key, then add the location to the existing Zone else: self.zones[zoneName].addLoc(location) # sort the Zones self.zones.sortZones() else: runLog.warning(f"No zones defined in either {CONF_ZONE_DEFINITIONS} or {CONF_ZONES_FILE} settings") def iterBlocks( self, typeSpec: Optional[flags.TypeSpec] = None, exact=False, predicate: Callable[[blocks.Block], bool] = None, ) -> Iterator[blocks.Block]: """Iterate over the blocks in the core. Useful for operations that just want to find all the blocks in the core with light filtering. Parameters ---------- typeSpec: armi.reactor.flags.TypeSpec, optional Limit the traversal to blocks that have these flags. exact: bool, optional Strictness on the usage of ``typeSpec`` used in :meth:`armi.reactor.composites.hasFlags` predicate: f(block) -> bool, optional Limit the traversal to blocks that pass this predicate. Can be used in addition to ``typeSpec`` to perform more advanced filtering. Returns ------- iterator[Block] Iterator over blocks in the core that meet the conditions provided. Examples -------- >>> for b in r.core.iterBlocks(Flags.FUEL): ... 
pass See Also -------- The :py:meth:`getBlocks` has more control over what is included in the returned list including looking at the spent fuel pool and assemblies that may not exist now but existed at BOL (via :meth:`getAssemblies`). But if you're just interested in the blocks in the core now, maybe with a flag attached to that block, this is what you should use. Notes ----- Assumes your composite tree is structured ``Core`` -> ``Assembly`` -> ``Block``. If this is not the case, consider using :meth:`iterChildren`. """ if typeSpec is not None: typeChecker = lambda b: b.hasFlags(typeSpec, exact=exact) else: typeChecker = lambda _: True if predicate is not None: blockChecker = lambda b: typeChecker(b) and predicate(b) else: blockChecker = typeChecker return self.iterChildren(generationNum=2, predicate=blockChecker) ================================================ FILE: armi/reactor/excoreStructure.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module provides the simplest base-class tools for representing reactor objects that are outside the reactor core. The idea here is that all ex-core objects will be represented first as a spatial grid, and then arbitrary ArmiObjects can be added to that grid. """ import copy from armi.reactor.composites import Composite class ExcoreStructure(Composite): """This is meant as the simplest baseclass needed to represent an ex-core reactor thing. 
An ex-core structure is expected to: - be a child of the Reactor, - have a grid associated with it, - contain a hierarchical set of ArmiObjects. """ def __init__(self, name, parent=None): Composite.__init__(self, name) self.parent = parent self.spatialGrid = None def __repr__(self): return "<{}: {} id:{}>".format(self.__class__.__name__, self.name, id(self)) @property def r(self): return self.getAncestor(fn=lambda x: x.__class__.__name__ == "Reactor") def add(self, obj, loc=None): """Add an ArmiObject to a particular grid location, in this structure. Parameters ---------- assem : ArmiObject Any generic ArmiObject to add to the structure. loc : LocationBase, optional The location on this structure's grid. If omitted, will come from the object. """ # if a location is not provided, we demand the object has one if loc is None: loc = obj.spatialLocator if loc.grid is not self.spatialGrid: raise ValueError(f"An Composite cannot be added to {self} using a spatial locator from another grid.") # If an assembly is added and it has a negative ID, that is a placeholder, fix it. if "assemNum" in obj.p and obj.p.assemNum < 0: # update the assembly count in the Reactor newNum = self.r.incrementAssemNum() obj.renumber(newNum) obj.spatialLocator = loc super().add(obj) class ExcoreCollection(dict): """ A collection that allows ex-core structures to be accessed like a dict, or class attributes. 
class ExcoreCollection(dict):
    """A collection that allows ex-core structures to be accessed like a dict, or class attributes.

    Examples
    --------
    Build some sample data::

        >>> sfp = ExcoreStructure("sfp")
        >>> ivs = ExcoreStructure("ivs")

    Build THIS collection::

        >>> excore = ExcoreCollection()

    Now you can add data to this collection like it were a dictionary, and access freely::

        >>> excore["sfp"] = sfp
        >>> excore["sfp"]
        <ExcoreStructure: sfp id:2311582653024>
        >>> excore.sfp
        <ExcoreStructure: sfp id:2311582653024>

    Or you can add data as if it were a class attribute, and still have dual access::

        >>> excore.ivs = ivs
        >>> excore.ivs
        <ExcoreStructure: ivs id:2311590971136>
        >>> excore["ivs"]
        <ExcoreStructure: ivs id:2311590971136>
    """

    def __getattr__(self, key):
        """Override the class attribute getter.

        First check if the class attribute exists. If not, check if the key is
        in the dictionary. If neither, raise ``AttributeError`` so that
        ``getattr(obj, key, default)`` and ``hasattr`` behave normally.
        """
        try:
            # try to get a real class attribute
            return self.__dict__[key]
        except KeyError:
            try:
                # if it's not a class attribute, maybe it is a dictionary key?
                return self.__getitem__(key)
            except Exception:
                pass
        # Raise AttributeError (not KeyError, as before) so the attribute
        # protocol (hasattr/getattr-with-default) works for missing names.
        raise AttributeError(f"{self.__class__.__name__} has no attribute or key '{key}'")

    def __setattr__(self, key, value):
        """Override the class attribute setting.

        If the value is an ExcoreStructure, assume we want to store this in the
        dictionary; anything else becomes a plain instance attribute.
        """
        # isinstance (rather than an exact type() check) so ExcoreStructure
        # subclasses are also stored as dictionary items
        if isinstance(value, ExcoreStructure):
            self.__setitem__(key, value)
        else:
            self.__dict__[key] = value

    def __getstate__(self):
        """Needed to support pickling and unpickling the Reactor."""
        return self.__dict__.copy()

    def __setstate__(self, state):
        """Needed to support pickling and unpickling the Reactor."""
        self.__dict__.update(state)

    def __deepcopy__(self, memo):
        """Needed to support pickling and unpickling the Reactor.

        NOTE(review): only ``__dict__`` state is deep-copied here; the
        dictionary *items* (the stored structures) are not carried over into
        the copy — confirm this is intended by the Reactor copy machinery.
        """
        memo[id(self)] = newE = self.__class__.__new__(self.__class__)
        newE.__setstate__(copy.deepcopy(self.__getstate__(), memo))
        return newE

    def __repr__(self):
        # A dict subclass has no `name` attribute; show the stored keys instead.
        # (The previous implementation referenced self.name and always raised.)
        return "<{}: keys={} id:{}>".format(self.__class__.__name__, list(self.keys()), id(self))
However, specific Flags may be specified within blueprints, in which case the name is ignored and only the explicitly-requested Flags are applied (see :ref:`bp-input-file` for more details). Individual Flags tend to be various nouns and adjectives that describe common objects that go into a reactor (e.g. "fuel", "shield", "control", "duct", "plenum", etc.). In addition, there are some generic Flags (e.g., "A", "B", "C", etc.) that aid in disambiguating between objects that need to be targeted separately but would otherwise have the same Flags. Flags are stored as integer bitfields within the parameter system, allowing them to be combined arbitrarily on any ARMI object. Since they are stored in bitfields, each new flag definition requires widening this bitfield; therefore, the number of defined Flags should be kept relatively small, and each flag should provide maximum utility. Within the code, Flags are usually combined into a "type specification (``TypeSpec``)", which is either a single combination of Flags, or a list of Flag combinations. More information about how ``TypeSpec`` is interpreted can be found in :py:meth:`armi.reactor.composites.ArmiObject.hasFlags`. Flags are intended to describe `what something is`, rather than `what something should do`. Historically, Flags have been used to do both, which has led to confusion. The binding of specific behavior to certain Flags should ideally be controlled through settings with reasonable defaults, rather than being hard-coded. Currently, much of the code still uses hard-coded ``TypeSpecs``, and certain Flags are clearly saying `what something should do` (e.g., ``Flags.DEPLETABLE``). .. note:: Flags have a rather storied history. Way back when, code that needed to operate on specific objects would do substring searches against object names to decide if they were relevant. This was very prone to error, and led to all sorts of surprising behavior based on the names used in input files. 
To improve the situation, Flags were developed to better formalize which strings mattered, and to define canonical names for things. Still almost all flag checks were hard-coded, and aside from up-front error checks, many of the original issues persisted. For instance, developing a comprehensive manual of which Flags lead to which behavior was very difficult. Migrating the `meaning` of Flags into settings will allow us to better document how those Flags/settings affect ARMI's behavior. As mentioned above, plenty of code still hard-codes Flag ``TypeSpecs``, and certain Flags do not follow the `what something is` convention. Future work should improve upon this as possible. Things that Flags are used for include: * **Fuel management**: Different kinds of assemblies (LTAs, fuel, reflectors) have different shuffling operations and must be distinguished. Certain blocks in an assembly are stationary, and shouldn't be moved along with the rest of the assembly when shuffling is performed. Filtering for stationary blocks can also be done using Flags (e.g., ``Flags.GRID_PLATE``). * **Fuel performance**: Knowing what's fuel (``Flags.FUEL``) and what isn't (e.g., ``Flags.PLENUM``) is important to figure out what things to grow and where to move fission gas to. * **Fluid fuel** reactors need to find all the fuel that ever circulates through the reactor so it can be depleted with the average flux. * **Core Mechanical** analyses often need to know if an object is solid, fluid, or void (material subclassing can handle this). * **T/H** needs to find the pin bundle in different kinds of assemblies (*radial shield* block in *radial shield* assemblies, *fuel* in *fuel*, etc.). Also needs to generate 3-layer pin models with pin (fuel/control/shield/slug), then gap (liners/gap/bond), then clad. 
Examples -------- >>> block.hasFlags(Flags.PRIMARY | Flags.TEST | Flags.FUEL) True >>> block.hasFlags([Flags.PRIMARY, Flags.TEST, Flags.FUEL]) True >>> block.getComponent(Flags.INTERDUCTCOOLANT) <component InterDuctCoolant> >>> block.getComponents(Flags.FUEL) [<component fuel1>, <component fuel2>, ...] """ import re from typing import Optional, Sequence, Union from armi.utils.flags import Flag, FlagType, auto # Type alias used for passing type specifications to many of the composite methods. See # Composite::hasFlags() to understand the semantics for how TypeSpecs are interpreted. # Anything that interprets a TypeSpec should apply the same semantics. TypeSpec = Optional[Union[FlagType, Sequence[FlagType]]] def __fromStringGeneral(cls, typeSpec, updateMethod): """Helper method to minimize code repeat in other fromString methods.""" result = cls(0) typeSpec = typeSpec.upper() for conversion in _CONVERSIONS: m = conversion.search(typeSpec) if m: typeSpec = re.sub(conversion, "", typeSpec) result |= _CONVERSIONS[conversion] for name in typeSpec.split(): try: # first, check for an exact match, to cover flags with digits result |= cls[name] except KeyError: # ignore numbers so we don't have to define flags up to the number of pins/assem typeSpecWithoutNumbers = "".join([c for c in name if not c.isdigit()]) if not typeSpecWithoutNumbers: continue result |= updateMethod(typeSpecWithoutNumbers) return result def _fromStringIgnoreErrors(cls, typeSpec): """ Convert string into a set of flags. Each word can be its own flag. Notes ----- This ignores words in the typeSpec that are not valid flags. Complications arise when: a. multiple-word flags are used such as *grid plate* or *inlet nozzle* so we use lookups. b. Some flags have digits in them. We just strip those off. 
""" def updateMethodIgnoreErrors(typeSpec): try: return cls[typeSpec] except KeyError: return cls(0) return __fromStringGeneral(cls, typeSpec, updateMethodIgnoreErrors) def _fromString(cls, typeSpec): """Make flag from string and fail if any unknown words are encountered.""" def updateMethod(typeSpec): try: return cls[typeSpec] except KeyError: raise InvalidFlagsError( f"The requested type specification `{typeSpec}` is invalid. See armi.reactor.flags documentation." ) return __fromStringGeneral(cls, typeSpec, updateMethod) def _toString(cls, typeSpec): """ Make flag from string and fail if any unknown words are encountered. Notes ----- This converts a flag from ``Flags.A|B`` to ``'A B'`` """ strings = str(typeSpec).split("{}.".format(cls.__name__))[1] return " ".join(sorted(strings.split("|"))) class Flags(Flag): """Defines the valid flags used in the framework.""" # basic classifiers PRIMARY = auto() SECONDARY = auto() TERTIARY = auto() ANNULAR = auto() # ideally this info would be inferred from shape A = auto() B = auto() C = auto() D = auto() E = auto() HIGH = auto() MEDIUM = auto() LOW = auto() # general kinds of assemblies or blocks MATERIAL = auto() FUEL = auto() TEST = auto() CONTROL = auto() ULTIMATE = auto() SHUTDOWN = auto() SHIELD = auto() SHIELD_BLOCK = auto() SLUG = auto() REFLECTOR = auto() # different kinds of fuel DRIVER = auto() IGNITER = auto() FEED = auto() STARTER = auto() BLANKET = auto() BOOSTER = auto() TARGET = auto() MOX = auto() # radial positions INNER = auto() MIDDLE = auto() OUTER = auto() RADIAL = auto() # axial positions AXIAL = auto() UPPER = auto() LOWER = auto() # assembly parts (including kinds of pins) DUCT = auto() GRID_PLATE = auto() HANDLING_SOCKET = auto() INLET_NOZZLE = auto() PLENUM = auto() BOND = auto() # not empty LINER = auto() # Use PRIMARY or SECONDARY to get multiple liners CLAD = auto() PIN = auto() # the "meat" inside the clad GAP = auto() # generally empty WIRE = auto() COOLANT = auto() INTERCOOLANT = auto() 
LOAD_PAD = auto() ACLP = auto() # above core load pad SKID = auto() VOID = auto() INTERDUCTCOOLANT = auto() DSPACERINSIDE = auto() GUIDE_TUBE = auto() FISSION_CHAMBER = auto() MODERATOR = auto() COLLAR = auto() # more parts CORE_BARREL = auto() DUMMY = auto() BATCHMASSADDITION = auto() POISON = auto() STRUCTURE = auto() DEPLETABLE = auto() # Allows movement of lower plenum with control rod MOVEABLE = auto() @classmethod def fromStringIgnoreErrors(cls, typeSpec): return _fromStringIgnoreErrors(cls, typeSpec) @classmethod def fromString(cls, typeSpec): """ Retrieve flag from a string. .. impl:: Retrieve flag from a string. :id: I_ARMI_FLAG_TO_STR0 :implements: R_ARMI_FLAG_TO_STR For a string passed as ``typeSpec``, first converts the whole string to uppercase. Then tries to parse the string for any special phrases, as defined in the module dictionary ``_CONVERSIONS``, and converts those phrases to flags directly. Then it splits the remaining string into words based on spaces. Looping over each of the words, if any word exactly matches a flag name. Otherwise, any numbers are stripped out and the remaining string is matched up to any class attribute names. If any matches are found these are returned as flags. """ return _fromString(cls, typeSpec) @classmethod def toString(cls, typeSpec): """ Convert a flag to a string. .. impl:: Convert a flag to string. :id: I_ARMI_FLAG_TO_STR1 :implements: R_ARMI_FLAG_TO_STR This converts the representation of a bunch of flags from ``typeSpec``, which might look like ``Flags.A|B``, into a string with spaces in between the flag names, which would look like ``'A B'``. This is done via nesting string splitting and replacement actions. """ return _toString(cls, typeSpec) class InvalidFlagsError(KeyError): """Raised when code attempts to look for an undefined flag.""" pass # string conversions for multiple-word flags # Beware of how these may interact with the standard flag names! 
E.g., make sure NOZZLE # doesn't eat the NOZZLE in INLET_NOZZLE. Make sure that words that would otherwise be a # substring of a valid flag are wrapped in word-boundary `\b`s _CONVERSIONS = { re.compile(r"\bGRID\s+PLATE\b"): Flags.GRID_PLATE, re.compile(r"\bGRID\b"): Flags.GRID_PLATE, re.compile(r"\bINLET\s+NOZZLE\b"): Flags.INLET_NOZZLE, re.compile(r"\bNOZZLE\b"): Flags.INLET_NOZZLE, re.compile(r"\bLOAD\s+PAD\b"): Flags.LOAD_PAD, re.compile(r"\bHANDLING\s+SOCKET\b"): Flags.HANDLING_SOCKET, re.compile(r"\bGUIDE\s+TUBE\b"): Flags.GUIDE_TUBE, re.compile(r"\bFISSION\s+CHAMBER\b"): Flags.FISSION_CHAMBER, re.compile(r"\bSOCKET\b"): Flags.HANDLING_SOCKET, re.compile(r"\bSHIELD\s+BLOCK\b"): Flags.SHIELD_BLOCK, re.compile(r"\bSHIELDBLOCK\b"): Flags.SHIELD_BLOCK, re.compile(r"\bCORE\s+BARREL\b"): Flags.CORE_BARREL, re.compile(r"\bINNERDUCT\b"): Flags.INNER | Flags.DUCT, re.compile(r"\bGAP1\b"): Flags.GAP | Flags.A, re.compile(r"\bGAP2\b"): Flags.GAP | Flags.B, re.compile(r"\bGAP3\b"): Flags.GAP | Flags.C, re.compile(r"\bGAP4\b"): Flags.GAP | Flags.D, re.compile(r"\bGAP5\b"): Flags.GAP | Flags.E, re.compile(r"\bLINER1\b"): Flags.LINER | Flags.A, re.compile(r"\bLINER2\b"): Flags.LINER | Flags.B, } ================================================ FILE: armi/reactor/geometry.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" This module contains constants and enumerations that are useful for describing system geometry. """ import enum from typing import Optional, Union class GeomType(enum.Enum): """ Enumeration of geometry types. Historically, ARMI has used strings to specify and express things like geometry type and symmetry conditions. This makes interpretation of user input straightforward, but is less ergonomic, less efficient, and more error-prone within the code. For instance: * is "quarter reflective" the same as "reflective quarter"? Should it be? * code that needs to interpret these need to use string operations, which are non-trivial compared to enum comparisons. * rules about mutual exclusion (hex and Cartesian can't both be used in the same context) and composability (geometry type + domain + symmetry type) are harder to enforce. Instead, we hope to parse user input into a collection of enumerations and use those internally throughout the code. Future work should expand this to satisfy all needs of the geometry system and refactor to replace use of the string constants. """ HEX = 1 CARTESIAN = 2 RZT = 3 RZ = 4 @classmethod def fromAny(cls, source: Union[str, "GeomType"]) -> "GeomType": """ Safely convert from string representation, no-op if already an enum instance. This is useful as we transition to using enumerations more throughout the code. There will remain situations where a geomType may be provided in string or enum form, in which the consuming code would have to check the type before proceeding. This function serves two useful purposes: * Relieve client code from having to if/elif/else on ``isinstance()`` checks * Provide a location to instrument these conversions for when we actually try to deprecate the strings. E.g., produce a warning when this is called, or eventually forbidding the conversion entirely. 
""" if isinstance(source, GeomType): return source elif isinstance(source, str): return cls.fromStr(source) else: raise TypeError("Expected str or GeomType; got {}".format(type(source))) @classmethod def fromStr(cls, geomStr: str) -> "GeomType": # case-insensitive canonical = geomStr.lower().strip() if canonical in (HEX, HEX_CORNERS_UP): # corners-up is used to rotate grids, but shouldn't be needed after the grid # is appropriately oriented, so we collapse to HEX in the enumeration. If # there is a good reason to make corners-up HEX its own geom type, we will # need to figure out how to design around that. return cls.HEX elif canonical == CARTESIAN: return cls.CARTESIAN elif canonical == RZT: return cls.RZT elif canonical == RZ: return cls.RZ # use the original geomStr with preserved capitalization for better # error-finding. errorMsg = "Unrecognized geometry type {}. Valid geometry options are: ".format(geomStr) errorMsg += ", ".join([f"{geom}" for geom in geomTypes]) raise ValueError(errorMsg) @property def label(self): """Human-presentable label.""" if self == self.HEX: return "Hexagonal" elif self == self.CARTESIAN: return "Cartesian" elif self == self.RZT: return "R-Z-Theta" else: return "R-Z" def __str__(self): """Inverse of fromStr().""" if self == self.HEX: return HEX elif self == self.CARTESIAN: return CARTESIAN elif self == self.RZT: return RZT else: return RZ class DomainType(enum.Enum): """Enumeration of shape types.""" NULL = 0 FULL_CORE = 1 THIRD_CORE = 3 QUARTER_CORE = 4 EIGHTH_CORE = 8 SIXTEENTH_CORE = 16 @classmethod def fromAny(cls, source: Union[str, "DomainType"]) -> "DomainType": if isinstance(source, DomainType): return source elif isinstance(source, str): return cls.fromStr(source) else: raise TypeError("Expected str or DomainType; got {}".format(type(source))) @classmethod def fromStr(cls, shapeStr: str) -> "DomainType": # case-insensitive canonical = shapeStr.lower().strip() if canonical == FULL_CORE: return cls.FULL_CORE elif canonical == 
THIRD_CORE: return cls.THIRD_CORE elif canonical == QUARTER_CORE: return cls.QUARTER_CORE elif canonical == EIGHTH_CORE: return cls.EIGHTH_CORE elif canonical == SIXTEENTH_CORE: return cls.SIXTEENTH_CORE elif canonical == "": return cls.NULL errorMsg = "{} is not a valid domain option. Valid domain options are:".format(str(canonical)) errorMsg += ", ".join([f"{sym}" for sym in domainTypes]) raise ValueError(errorMsg) @property def label(self): """Human-presentable label.""" if self == self.FULL_CORE: return "Full" elif self == self.THIRD_CORE: return "Third" elif self == self.QUARTER_CORE: return "Quarter" elif self == self.EIGHTH_CORE: return "Eighth" elif self == self.SIXTEENTH_CORE: return "Sixteenth" else: # is NULL return "" def __str__(self): """Inverse of fromStr().""" if self == self.FULL_CORE: return FULL_CORE elif self == self.THIRD_CORE: return THIRD_CORE elif self == self.QUARTER_CORE: return QUARTER_CORE elif self == self.EIGHTH_CORE: return EIGHTH_CORE elif self == self.SIXTEENTH_CORE: return SIXTEENTH_CORE else: # is NULL return "" def symmetryFactor(self) -> float: if self in (self.FULL_CORE, self == self.NULL): return 1.0 elif self == self.THIRD_CORE: return 3.0 elif self == self.QUARTER_CORE: return 4.0 elif self == self.EIGHTH_CORE: return 8.0 elif self == self.SIXTEENTH_CORE: return 16.0 else: raise ValueError("Could not calculate symmetry factor for domain size {}. 
update logic.".format(self.label)) class BoundaryType(enum.Enum): """Enumeration of boundary types.""" NO_SYMMETRY = 0 PERIODIC = 1 REFLECTIVE = 2 @classmethod def fromAny(cls, source: Union[str, "BoundaryType"]) -> "BoundaryType": if isinstance(source, BoundaryType): return source elif isinstance(source, str): return cls.fromStr(source) else: raise TypeError("Expected str or BoundaryType; got {}".format(type(source))) @classmethod def fromStr(cls, symmetryStr: str) -> "BoundaryType": # case-insensitive canonical = symmetryStr.lower().strip() if canonical == NO_SYMMETRY: return cls.NO_SYMMETRY elif canonical == PERIODIC: return cls.PERIODIC elif canonical == REFLECTIVE: return cls.REFLECTIVE errorMsg = "{} is not a valid boundary option. Valid boundary options are:".format(str(canonical)) errorMsg += ", ".join([f"{sym}" for sym in boundaryTypes]) raise ValueError(errorMsg) @property def label(self): """Human-presentable label.""" if self == self.NO_SYMMETRY: return "No Symmetry" elif self == self.REFLECTIVE: return "Reflective" else: return "Periodic" def __str__(self): """Inverse of fromStr().""" if self == self.NO_SYMMETRY: return "" elif self == self.PERIODIC: return PERIODIC else: return REFLECTIVE def hasSymmetry(self): return self != self.NO_SYMMETRY class SymmetryType: """ A wrapper for DomainType and BoundaryType enumerations. The goal of this class is to provide simple functions for storing these options in enumerations and using them to check symmetry conditions, while also providing a standard string representation of the options that facilitates interfacing with yaml and/or the database nicely. 
""" VALID_SYMMETRY = { (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY, False), (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY, True), (DomainType.THIRD_CORE, BoundaryType.PERIODIC, False), (DomainType.QUARTER_CORE, BoundaryType.PERIODIC, False), (DomainType.QUARTER_CORE, BoundaryType.REFLECTIVE, False), (DomainType.QUARTER_CORE, BoundaryType.PERIODIC, True), (DomainType.QUARTER_CORE, BoundaryType.REFLECTIVE, True), (DomainType.EIGHTH_CORE, BoundaryType.PERIODIC, False), (DomainType.EIGHTH_CORE, BoundaryType.REFLECTIVE, False), (DomainType.EIGHTH_CORE, BoundaryType.PERIODIC, True), (DomainType.EIGHTH_CORE, BoundaryType.REFLECTIVE, True), (DomainType.SIXTEENTH_CORE, BoundaryType.PERIODIC, False), (DomainType.SIXTEENTH_CORE, BoundaryType.REFLECTIVE, False), } @staticmethod def _checkIfThroughCenter(centerString: str) -> bool: return THROUGH_CENTER_ASSEMBLY in centerString def __init__( self, domainType: "DomainType" = DomainType.THIRD_CORE, boundaryType: "BoundaryType" = BoundaryType.PERIODIC, throughCenterAssembly: Optional[bool] = False, ): self.domain = domainType self.boundary = boundaryType self.isThroughCenterAssembly = throughCenterAssembly if not self.checkValidSymmetry(): errorMsg = "{} is not a valid symmetry option. 
Valid symmetry options are: ".format(str(self)) errorMsg += ", ".join([f"{sym}" for sym in self.createValidSymmetryStrings()]) raise ValueError(errorMsg) @classmethod def createValidSymmetryStrings(cls): """Create a list of valid symmetry strings based on the set of tuples in VALID_SYMMETRY.""" return [cls(domain, boundary, isThroughCenter) for domain, boundary, isThroughCenter in cls.VALID_SYMMETRY] @classmethod def fromStr(cls, symmetryString: str) -> "SymmetryType": """Construct a SymmetryType object from a valid string.""" canonical = symmetryString.lower().strip() # ignore "assembly" since it is unnecessary and overly-verbose and too specific noAssembly = canonical.replace("assembly", "").strip() isThroughCenter = cls._checkIfThroughCenter(canonical) coreString = noAssembly.replace(THROUGH_CENTER_ASSEMBLY, "").strip() trimmedString = coreString.replace("core", "").strip() pieces = trimmedString.split() domain = DomainType.fromStr(pieces[0]) if len(pieces) == 1: # set the BoundaryType to a default for the DomainType if domain == DomainType.FULL_CORE: boundary = BoundaryType.NO_SYMMETRY elif domain == DomainType.THIRD_CORE: boundary = BoundaryType.PERIODIC else: boundary = BoundaryType.REFLECTIVE elif len(pieces) == 2: boundary = BoundaryType.fromStr(pieces[1]) else: errorMsg = "{} [{}] is not a valid symmetry option. 
Valid symmetry options are:".format( symmetryString, trimmedString ) errorMsg += ", ".join([f"{sym}" for sym in cls.createValidSymmetryStrings()]) raise ValueError(errorMsg) return cls(domain, boundary, isThroughCenter) @classmethod def fromAny(cls, source: Union[str, "SymmetryType"]) -> "SymmetryType": if isinstance(source, SymmetryType): return source elif isinstance(source, str): return cls.fromStr(source) else: raise TypeError("Expected str or SymmetryType; got {}".format(type(source))) def __str__(self): """Combined string of domain and boundary symmetry type.""" strList = [str(self.domain)] if self.boundary.hasSymmetry(): strList.append(str(self.boundary)) if self.isThroughCenterAssembly: strList.append(THROUGH_CENTER_ASSEMBLY) return " ".join(strList) def __eq__(self, other): """Compare two SymmetryType instances. False if other is not a SymmetryType.""" if isinstance(other, SymmetryType): return ( self.domain == other.domain and self.boundary == other.boundary and self.isThroughCenterAssembly == other.isThroughCenterAssembly ) elif isinstance(other, str): otherSym = SymmetryType.fromStr(other) return ( self.domain == otherSym.domain and self.boundary == otherSym.boundary and self.isThroughCenterAssembly == otherSym.isThroughCenterAssembly ) else: raise NotImplementedError def __hash__(self): """Hash a SymmetryType object based on a tuple of its options.""" return hash((self.domain, self.boundary, self.isThroughCenterAssembly)) def checkValidSymmetry(self) -> bool: """Check if the tuple representation of the SymmetryType can be found in VALID_SYMMETRY.""" return ( self.domain, self.boundary, self.isThroughCenterAssembly, ) in self.VALID_SYMMETRY def symmetryFactor(self) -> float: return self.domain.symmetryFactor() def checkValidGeomSymmetryCombo( geomType: Union[str, "GeomType"], symmetryInput: Union[str, "SymmetryType"], ) -> bool: """ Check if the given combination of GeomType and SymmetryType is valid. 
Return a boolean indicating the outcome of the check. """ symmetry = SymmetryType.fromAny(symmetryInput) if (symmetry.domain, symmetry.boundary) in VALID_GEOM_SYMMETRY[GeomType.fromAny(geomType)]: return True else: raise ValueError( "GeomType: {} and SymmetryType: {} is not a valid combination!".format(str(geomType), str(symmetry)) ) SYSTEMS = "systems" VERSION = "version" HEX = "hex" HEX_CORNERS_UP = "hex_corners_up" RZT = "thetarz" RZ = "rz" CARTESIAN = "cartesian" DODECAGON = "dodecagon" REC_PRISM = "RecPrism" HEX_PRISM = "HexPrism" CONCENTRIC_CYLINDER = "ConcentricCylinder" ANNULUS_SECTOR_PRISM = "AnnulusSectorPrism" VALID_GEOMETRY_TYPE = {HEX, HEX_CORNERS_UP, RZT, RZ, CARTESIAN} VALID_GEOM_SYMMETRY = { GeomType.HEX: [ (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY), (DomainType.THIRD_CORE, BoundaryType.PERIODIC), ], GeomType.CARTESIAN: [ (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY), (DomainType.QUARTER_CORE, BoundaryType.PERIODIC), (DomainType.EIGHTH_CORE, BoundaryType.PERIODIC), (DomainType.QUARTER_CORE, BoundaryType.REFLECTIVE), (DomainType.EIGHTH_CORE, BoundaryType.REFLECTIVE), ], GeomType.RZT: [ (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY), (DomainType.THIRD_CORE, BoundaryType.PERIODIC), (DomainType.QUARTER_CORE, BoundaryType.PERIODIC), (DomainType.EIGHTH_CORE, BoundaryType.PERIODIC), (DomainType.SIXTEENTH_CORE, BoundaryType.PERIODIC), (DomainType.QUARTER_CORE, BoundaryType.REFLECTIVE), (DomainType.EIGHTH_CORE, BoundaryType.REFLECTIVE), (DomainType.SIXTEENTH_CORE, BoundaryType.REFLECTIVE), ], GeomType.RZ: [(DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY)], } FULL_CORE = "full" THIRD_CORE = "third" QUARTER_CORE = "quarter" EIGHTH_CORE = "eighth" SIXTEENTH_CORE = "sixteenth" REFLECTIVE = "reflective" PERIODIC = "periodic" NO_SYMMETRY = "no symmetry" # through center assembly applies only to cartesian THROUGH_CENTER_ASSEMBLY = "through center" geomTypes = {HEX, CARTESIAN, RZT, RZ} domainTypes = {FULL_CORE, THIRD_CORE, QUARTER_CORE, EIGHTH_CORE, 
SIXTEENTH_CORE} boundaryTypes = {NO_SYMMETRY, PERIODIC, REFLECTIVE} ================================================ FILE: armi/reactor/grids/__init__.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" This contains structured meshes in multiple geometries and spatial locators (i.e. locations). :py:class:`Grids <Grid>` are objects that map indices (i, j, k) to spatial locations (x,y,z) or (t,r,z). They are useful for arranging things in reactors, such as: * Fuel assemblies in a reactor * Plates in a heat exchanger * Pins in a fuel assembly * Blocks in a fuel assembly (1-D) Fast reactors often use a hexagonal grid, while other reactors may be better suited for Cartesian or RZT grids. This module contains representations of all these. ``Grid``\ s can be defined by any arbitrary combination of absolute grid boundaries and unit step directions. Associated with grids are :py:class:`IndexLocations <IndexLocation>`. Each of these maps to a single cell in a grid, or to an arbitrary point in the continuous space represented by a grid. When a `Grid`` is built, it builds a collection of ``IndexLocation``\ s, one for each cell. In the ARMI :py:mod:`armi.reactor` module, each object is assigned a locator either from a grid or in arbitrary, continuous space (using a :py:class:`CoordinateLocation`) on the ``spatialLocator`` attribute. 
Below is a basic example of how to use a 2-D grid:: >>> grid = CartesianGrid.fromRectangle(1.0, 1.0) # 1 cm square-pitch Cartesian grid >>> location = grid[1,2,0] >>> location.getGlobalCoordinates() array([ 1., 2., 0.]) Grids can be chained together in a parent-child relationship. This is often used in ARMI where a 1-D axial grid (e.g. in an assembly) is being positioned in a core or spent-fuel pool. See example in :py:meth:`armi.reactor.tests.test_grids.TestSpatialLocator.test_recursion`. The "radial" (ring, position) indexing used in DIF3D can be converted to and from the more quasi-Cartesian indexing in a hex mesh easily with the utility methods :py:meth:`HexGrid.getRingPos` and :py:func:`indicesToRingPos`. This module is designed to satisfy the spatial arrangement requirements of :py:mod:`the Reactor package <armi.reactor>`. Throughout the module, the term **global** refers to the top-level coordinate system while the word **local** refers to within the current coordinate system defined by the current grid. """ # ruff: noqa: F401 from typing import Optional, Tuple from armi.reactor.grids.axial import AxialGrid from armi.reactor.grids.cartesian import CartesianGrid from armi.reactor.grids.constants import ( BOUNDARY_0_DEGREES, BOUNDARY_60_DEGREES, BOUNDARY_120_DEGREES, BOUNDARY_CENTER, ) from armi.reactor.grids.grid import Grid from armi.reactor.grids.hexagonal import COS30, SIN30, TRIANGLES_IN_HEXAGON, HexGrid from armi.reactor.grids.locations import ( CoordinateLocation, IndexLocation, LocationBase, MultiIndexLocation, addingIsValid, ) from armi.reactor.grids.structuredGrid import GridParameters, StructuredGrid, _tuplify from armi.reactor.grids.thetarz import TAU, ThetaRZGrid def locatorLabelToIndices(label: str) -> Tuple[int, int, Optional[int]]: """ Convert a locator label to numerical i,j,k indices. 
If there are only i,j indices, make the last item None """ intVals = tuple(int(idx) for idx in label.split("-")) if len(intVals) == 2: intVals = (intVals[0], intVals[1], None) return intVals ================================================ FILE: armi/reactor/grids/axial.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, List, NoReturn, Optional import numpy as np from armi.reactor.grids.locations import IJType, LocationBase from armi.reactor.grids.structuredGrid import StructuredGrid if TYPE_CHECKING: from armi.reactor.composites import ArmiObject class AxialGrid(StructuredGrid): """1-D grid in the k-direction (z). .. note:: It is recommended to use :meth:`fromNCells` rather than calling the ``__init_`` constructor directly """ @classmethod def fromNCells(cls, numCells: int, armiObject: Optional["ArmiObject"] = None) -> "AxialGrid": """Produces an unit grid where each bin is 1-cm tall. ``numCells + 1`` mesh boundaries are added, since one block would require a bottom and a top. 
""" # Need float bounds or else we truncate integers return cls( bounds=(None, None, np.arange(numCells + 1, dtype=np.float64)), armiObject=armiObject, ) @staticmethod def getSymmetricEquivalents(indices: IJType) -> List[IJType]: return [] @staticmethod def locatorInDomain(locator: LocationBase, symmetryOverlap: Optional[bool] = False) -> NoReturn: raise NotImplementedError @staticmethod def getIndicesFromRingAndPos(ring: int, pos: int) -> NoReturn: raise NotImplementedError @staticmethod def getMinimumRings(n: int) -> NoReturn: raise NotImplementedError @staticmethod def getPositionsInRing(ring: int) -> NoReturn: raise NotImplementedError @staticmethod def overlapsWhichSymmetryLine(indices: IJType) -> None: return None @property def pitch(self) -> float: """Grid spacing in the z-direction. Returns ------- float Pitch in cm """ ================================================ FILE: armi/reactor/grids/cartesian.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from typing import NoReturn, Optional, Tuple import numpy as np from armi.reactor import geometry from armi.reactor.grids.locations import IJType from armi.reactor.grids.structuredGrid import StructuredGrid class CartesianGrid(StructuredGrid): """ Grid class representing a conformal Cartesian mesh. 
It is recommended to call :meth:`fromRectangle` to construct, rather than directly constructing with ``__init__`` Notes ----- In Cartesian, (i, j, k) indices map to (x, y, z) coordinates. In an axial plane (i, j) are as follows:: (-1, 1)(0, 1)(1, 1) (-1, 0)(0, 0)(1, 0) (-1, -1)(0, -1)(1, -1) The concepts of ring and position are a bit tricker in Cartesian grids than in Hex, because unlike in the Hex case, there is no guaranteed center location. For example, when using a CartesianGrid to lay out assemblies in a core, there is only a single central location if the number of assemblies in the core is odd-by-odd; in an even-by-even case, there are four center-most assemblies. Therefore, the number of locations per ring will vary depending on the "through center" nature of ``symmetry``. Furthermore, notice that in the "through center" (odd-by-odd) case, the central index location, (0,0) is typically centered at the origin (0.0, 0.0), whereas with the "not through center" (even-by-even) case, the (0,0) index location is offset, away from the origin. These concepts are illustrated in the example drawings below. .. figure:: ../.static/through-center.png :width: 400px :align: center Grid example where the axes pass through the "center assembly" (odd-by-odd). Note that ring 1 only has one location in it. .. figure:: ../.static/not-through-center.png :width: 400px :align: center Grid example where the axes lie between the "center assemblies" (even-by-even). Note that ring 1 has four locations, and that the center of the (0, 0)-index location is offset from the origin. """ @classmethod def fromRectangle(cls, width, height, numRings=5, symmetry="", isOffset=False, armiObject=None): """ Build a finite step-based 2-D Cartesian grid based on a width and height in cm. 
Parameters ---------- width : float Width of the unit rectangle height : float Height of the unit rectangle numRings : int Number of rings that the grid should span symmetry : str The symmetry condition (see :py:mod:`armi.reactor.geometry`) isOffset : bool If True, the origin of the Grid's coordinate system will be placed at the bottom-left corner of the center-most cell. Otherwise, the origin will be placed at the center of the center-most cell. armiObject : ArmiObject An object in a Composite model that the Grid should be bound to. """ unitSteps = ((width, 0.0, 0.0), (0.0, height, 0.0), (0, 0, 0)) offset = np.array((width / 2.0, height / 2.0, 0.0)) if isOffset else None return cls( unitSteps=unitSteps, unitStepLimits=((-numRings, numRings), (-numRings, numRings), (0, 1)), offset=offset, armiObject=armiObject, symmetry=symmetry, ) def overlapsWhichSymmetryLine(self, indices: IJType) -> None: """Return lines of symmetry position at a given index can be found. .. warning:: This is not really implemented, but parts of ARMI need it to not fail, so it always returns None. """ return None def getRingPos(self, indices): """ Return ring and position from indices. Ring is the Manhattan distance from (0, 0) to the passed indices. Position counts up around the ring counter-clockwise from the quadrant 1 diagonal, like this:: 7 6 5 4 3 2 1 8 | 24 9 | 23 10 -------|------ 22 11 | 21 12 | 20 13 14 15 16 17 18 19 Grids that split the central locations have 1 location in in inner-most ring, whereas grids without split central locations will have 4. Notes ----- This is needed to support GUI, but should not often be used. i, j (0-based) indices are much more useful. 
For example: >>> locator = core.spatialGrid[i, j, 0] # 3rd index is 0 for assembly >>> a = core.childrenByLocator[locator] >>> a = core.childrenByLocator[core.spatialGrid[i, j, 0]] # one liner """ i, j = indices[0:2] split = self._isThroughCenter() if not split: i += 0.5 j += 0.5 ring = max(abs(int(i)), abs(int(j))) if not split: ring += 0.5 if j == ring: # region 1 pos = -i + ring elif i == -ring: # region 2 pos = 3 * ring - j elif j == -ring: # region 3 pos = 5 * ring + i else: # region 4 pos = 7 * ring + j return (int(ring) + 1, int(pos) + 1) @staticmethod def getIndicesFromRingAndPos(ring: int, pos: int) -> NoReturn: """Not implemented for Cartesian-see getRingPos notes.""" raise NotImplementedError( "Cartesian should not need need ring/pos, use i, j indices." "See getRingPos doc string notes for more information/example." ) def getMinimumRings(self, n: int) -> int: """Return the minimum number of rings needed to fit ``n`` objects.""" numPositions = 0 ring = 0 for ring in itertools.count(1): ringPositions = self.getPositionsInRing(ring) numPositions += ringPositions if numPositions >= n: break return ring def getPositionsInRing(self, ring: int) -> int: """ Return the number of positions within a ring. Parameters ---------- ring : int Ring in question Notes ----- The number of positions within a ring will change depending on whether the central position in the grid is at origin, or if origin is the point where 4 positions meet (i.e., the ``_isThroughCenter`` method returns True). """ if ring == 1: ringPositions = 1 if self._isThroughCenter() else 4 else: ringPositions = (ring - 1) * 8 if not self._isThroughCenter(): ringPositions += 4 return ringPositions def locatorInDomain(self, locator, symmetryOverlap: Optional[bool] = False): if self.symmetry.domain == geometry.DomainType.QUARTER_CORE: return locator.i >= 0 and locator.j >= 0 else: return True def changePitch(self, xw: float, yw: float): """ Change the pitch of a Cartesian grid. This also scales the offset. 
""" xwOld = self._unitSteps[0][0] ywOld = self._unitSteps[1][1] self._unitSteps = np.array(((xw, 0.0, 0.0), (0.0, yw, 0.0), (0, 0, 0)))[self._stepDims] newOffsetX = self._offset[0] * xw / xwOld newOffsetY = self._offset[1] * yw / ywOld self._offset = np.array((newOffsetX, newOffsetY, 0.0)) def getSymmetricEquivalents(self, indices): symmetry = self.symmetry # construct the symmetry object once up top isRotational = symmetry.boundary == geometry.BoundaryType.PERIODIC i, j = indices[0:2] if symmetry.domain == geometry.DomainType.FULL_CORE: return [] elif symmetry.domain == geometry.DomainType.QUARTER_CORE: if symmetry.isThroughCenterAssembly: # some locations lie on the symmetric boundary if i == 0 and j == 0: # on the split corner, so the location is its own symmetric # equivalent return [] elif i == 0: if isRotational: return [(j, i), (i, -j), (-j, i)] else: return [(i, -j)] elif j == 0: if isRotational: return [(j, i), (-i, j), (j, -i)] else: return [(-i, j)] else: # Math is a bit easier for the split case, since there is an actual # center location for (0, 0) if isRotational: return [(-j, i), (-i, -j), (j, -i)] else: return [(-i, j), (-i, -j), (i, -j)] else: # most objects have 3 equivalents. the bottom-left corner of Quadrant I # is (0, 0), so to reflect, add one and negate each index in # combination. 
To rotate, first flip the indices for the Quadrant II and # Quadrant IV if isRotational: # rotational # QII QIII QIV return [(-j - 1, i), (-i - 1, -j - 1), (j, -i - 1)] else: # reflective # QII QIII QIV return [(-i - 1, j), (-i - 1, -j - 1), (i, -j - 1)] elif symmetry.domain == geometry.DomainType.EIGHTH_CORE: raise NotImplementedError("Eighth-core symmetry isn't fully implemented for grids yet!") else: raise NotImplementedError( "Unhandled symmetry condition for {}: {}".format(type(self).__name__, symmetry.domain) ) def _isThroughCenter(self): """Return whether the central cells are split through the middle for symmetry.""" return all(self._offset == [0, 0, 0]) @property def pitch(self) -> Tuple[float, float]: """Grid pitch in the x and y dimension. Returns ------- float x-pitch (cm) float y-pitch (cm) """ pitch = (self._unitSteps[0][0], self._unitSteps[1][1]) if pitch[0] == 0: raise ValueError(f"Grid {self} does not have a defined pitch.") return pitch ================================================ FILE: armi/reactor/grids/constants.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Some constants often used in grid manipulation.""" BOUNDARY_0_DEGREES = 1 BOUNDARY_60_DEGREES = 2 BOUNDARY_120_DEGREES = 3 BOUNDARY_CENTER = 4 ================================================ FILE: armi/reactor/grids/grid.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Union import numpy as np from armi.reactor import geometry from armi.reactor.grids.locations import IJKType, IJType, IndexLocation, LocationBase if TYPE_CHECKING: from armi.reactor.composites import ArmiObject class Grid(ABC): """Base class that defines the interface for grids. Most work will be done with structured grids, e.g., hexagonal grid, Cartesian grids, but some physics codes accept irregular or unstructured grids. Consider a Cartesian grid but with variable stepping between cells, where ``dx`` may not be constant. So here, we define an interface so things that rely on grids can worry less about how the location data are stored. .. impl:: Grids can nest. :id: I_ARMI_GRID_NEST :implements: R_ARMI_GRID_NEST The reactor will usually have (i,j,k) coordinates to define a simple mesh for locating objects in the reactor. But inside that mesh can be a smaller mesh to define the layout of pins in a reactor, or fuel pellets in a pin, or the layout of some intricate ex-core structure. 
Every time the :py:class:`armi.reactor.grids.locations.IndexLocation` of an object in the reactor is returned, ARMI will look to see if the grid this object is in has a :py:meth:`parent <armi.reactor.grids.locations.IndexLocation.parentLocation>`, and if so, ARMI will try to sum the :py:meth:`indices <armi.reactor.grids.locations.IndexLocation.indices>` of the two nested grids to give a resultant, more finely-grained grid position. ARMI can only handle grids nested 3 deep. Parameters ---------- geomType : str or armi.reactor.geometry.GeomType Underlying geometric representation symmetry : str or armi.reactor.geometry.SymmetryType Symmetry conditions armiObject : optional, armi.reactor.composites.ArmiObject If given, what is this grid attached to or what does it describe? Something like a :class:`armi.reactor.Core` """ _geomType: str _symmetry: str armiObject: Optional["ArmiObject"] def __init__( self, geomType: Union[str, geometry.GeomType] = "", symmetry: Union[str, geometry.SymmetryType] = "", armiObject: Optional["ArmiObject"] = None, ): # geometric metadata encapsulated here because it's related to the grid. # They do not impact the grid object itself. # Notice that these are stored using their string representations, rather than # the GridType enum. This avoids the danger of deserializing an enum value from # an old version of the code that may have had different numeric values. self.geomType = geomType self.symmetry = symmetry self.armiObject = armiObject self._backup = None @property def geomType(self) -> geometry.GeomType: """Geometric representation.""" return geometry.GeomType.fromStr(self._geomType) @geomType.setter def geomType(self, geomType: Union[str, geometry.GeomType]): if geomType: self._geomType = str(geometry.GeomType.fromAny(geomType)) else: self._geomType = "" @property def symmetry(self) -> str: """Symmetry applied to the grid. .. impl:: Grids shall be able to represent 1/3 and full core symmetries. 
:id: I_ARMI_GRID_SYMMETRY0 :implements: R_ARMI_GRID_SYMMETRY Every grid contains a :py:class:`armi.reactor.geometry.SymmetryType` or string that defines a grid as full core or a partial core: 1/3, 1/4, 1/8, or 1/16 core. The idea is that the user can define 1/3 or 1/4 of the reactor, so the analysis can be run faster on a smaller reactor. And if a non-full core reactor grid is defined, the boundaries of the grid can be reflective or periodic, to determine what should happen at the boundaries of the reactor core. It is important to note, that not all of these geometries will apply to every reactor or core. If your core is made of hexagonal assemblies, then a 1/3 core grid would make sense, but not if your reactor core was made up of square assemblies. Likewise, a hexagonal core would not make be able to support a 1/4 grid. You want to leave assemblies (and other objects) whole when dividing a grid up fractionally. """ return geometry.SymmetryType.fromStr(self._symmetry) @symmetry.setter def symmetry(self, symmetry: Union[str, geometry.SymmetryType]): if symmetry: self._symmetry = str(geometry.SymmetryType.fromAny(symmetry)) else: self._symmetry = "" def __getstate__(self) -> Dict: """ Pickling removes reference to ``armiObject``. Removing the ``armiObject`` allows us to pickle an assembly without pickling the entire reactor. An ``Assembly.spatialLocator.grid.armiObject`` is the reactor, by removing the link here, we still have spatial orientation, but are not required to pickle the entire reactor to pickle an assembly. This relies on the ``armiObject.__setstate__`` to assign itself. """ state = self.__dict__.copy() state["armiObject"] = None return state def __setstate__(self, state: Dict): """ Pickling removes reference to ``armiObject``. This relies on the ``ArmiObject.__setstate__`` to assign itself. 
""" self.__dict__.update(state) for _index, locator in self.items(): locator._grid = self @property @abstractmethod def isAxialOnly(self) -> bool: """Indicate to parts of ARMI if this Grid handles only axial cells.""" @abstractmethod def __len__(self) -> int: """Number of items in the grid.""" @abstractmethod def items(self) -> Iterable[Tuple[IJKType, IndexLocation]]: """Return list of ((i, j, k), IndexLocation) tuples.""" @abstractmethod def locatorInDomain(self, locator: LocationBase, symmetryOverlap: Optional[bool] = False) -> bool: """ Return whether the passed locator is in the domain represented by the Grid. For instance, if we have a 1/3rd core hex grid, this would return False for locators that are outside of the first third of the grid. Parameters ---------- locator : LocationBase The location to test symmetryOverlap : bool, optional Whether grid locations along the symmetry line should be considered "in the represented domain". This can be useful when assemblies are split along the domain boundary, with fractions of the assembly on either side. Returns ------- bool If the given locator is within the given grid """ @abstractmethod def getSymmetricEquivalents(self, indices: IJType) -> List[IJType]: """ Return a list of grid indices that contain matching contents based on symmetry. The length of the list will depend on the type of symmetry being used, and potentially the location of the requested indices. E.g., third-core will return the two sets of indices at the matching location in the other two thirds of the grid, unless it is the central location, in which case no indices will be returned. """ @abstractmethod def overlapsWhichSymmetryLine(self, indices: IJType) -> Optional[int]: """Return lines of symmetry position at a given index can be found. Parameters ---------- indices : tuple of [int, int] Indices for the requested object Returns ------- None or int None if not line of symmetry goes through the object at the requested index. 
Otherwise, some grid constants like ``BOUNDARY_CENTER`` will be returned. """ @abstractmethod def getCoordinates( self, indices: Union[IJKType, List[IJKType]], nativeCoords: bool = False, ) -> np.ndarray: pass @abstractmethod def backUp(self): """Subclasses should modify the internal backup variable.""" @abstractmethod def restoreBackup(self): """Restore state from backup.""" @abstractmethod def getCellBase(self, indices: IJKType) -> np.ndarray: """Return the lower left case of this cell in cm.""" @abstractmethod def getCellTop(self, indices: IJKType) -> np.ndarray: """Get the upper right of this cell in cm.""" @staticmethod def getLabel(indices): """ Get a string label from a 0-based spatial locator. Returns a string representing i, j, and k indices of the locator """ i, j = indices[:2] label = f"{i:03d}-{j:03d}" if len(indices) == 3: label += f"-{indices[2]:03d}" return label @abstractmethod def reduce(self) -> Tuple[Hashable, ...]: """ Return the set of arguments used to create this Grid. This is very much like the argument tuple from ``__reduce__``, but we do not implement ``__reduce__`` for real, because we are generally happy with ``__getstate__`` and ``__setstate__`` for pickling purposes. However, getting these arguments to ``__init__`` is useful for storing Grids to the database, as they are more stable (less likely to change) than the actual internal state of the objects. The return value should be hashable, such that a set of these can be created. 
The return type should be symmetric such that a similar grid can be created just with the outputs of ``Grid.reduce``, e.g., ``type(grid)(*grid.reduce())`` Notes ----- For consistency, the second to last argument **must** be the geomType """ ================================================ FILE: armi/reactor/grids/hexagonal.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import deque from math import isclose, sqrt from typing import List, Optional, Tuple import numpy as np from armi.reactor import geometry from armi.reactor.grids.constants import ( BOUNDARY_0_DEGREES, BOUNDARY_60_DEGREES, BOUNDARY_120_DEGREES, BOUNDARY_CENTER, ) from armi.reactor.grids.locations import IJKType, IJType, IndexLocation from armi.reactor.grids.structuredGrid import StructuredGrid from armi.utils import hexagon COS30 = sqrt(3) / 2.0 SIN30 = 1.0 / 2.0 # going counter-clockwise from "position 1" (top right) TRIANGLES_IN_HEXAGON = np.array( [ (+COS30, SIN30), (+0, 1.0), (-COS30, SIN30), (-COS30, -SIN30), (+0, -1.0), (+COS30, -SIN30), ] ) class HexGrid(StructuredGrid): r""" Has 6 neighbors in plane. It is recommended to use :meth:`fromPitch` rather than calling the ``__init__`` onstructor directly. .. impl:: Construct a hexagonal lattice. :id: I_ARMI_GRID_HEX :implements: R_ARMI_GRID_HEX This class represents a hexagonal ``StructuredGrid``, that is one where the mesh maps to real, physical coordinates. 
        This hexagonal grid is 2D, and divides the plane up into regular hexagons.
        That is, each hexagon is symmetric and is precisely flush with six
        neighboring hexagons. This class only allows for two rotational options:
        flats up (where two sides of the hexagons are parallel with the X-axis),
        and points up (where two sides are parallel with the Y-axis).

    Notes
    -----
    In an axial plane (i, j) are as follows (flats up)::

                _____
               /     \
         _____/  0,1  \_____
        /     \       /     \
       / -1,1  \_____/  1,0  \
       \       /     \       /
        \_____/  0,0  \_____/
        /     \       /     \
       / -1,0  \_____/  1,-1 \
       \       /     \       /
        \_____/  0,-1 \_____/
              \       /
               \_____/

    In an axial plane (i, j) are as follows (corners up)::

           / \     / \
         /     \ /     \
        |  0,1  |  1,0  |
        |       |       |
       / \     / \     / \
     /     \ /     \ /     \
    |  -1,1 |  0,0  |  1,-1 |
    |       |       |       |
     \     / \     / \     /
       \ /     \ /     \ /
        |  -1,0 |  0,-1 |
        |       |       |
         \     / \     /
           \ /     \ /

    Basic hexagon geometry::

        - pitch = sqrt(3) * side
        - long diagonal = 2 * side
        - Area = 6 * (sqrt(3) / 4) * side^2
        - perimeter = 6 * side
    """

    @property
    def cornersUp(self) -> bool:
        """
        Check whether the hexagonal grid is "corners up" or "flats up".

        See the armi.reactor.grids.HexGrid class documentation for an illustration
        of the two types of grid indexing.
        """
        return self._unitSteps[0][1] != 0.0

    @staticmethod
    def fromPitch(pitch, numRings=25, armiObject=None, cornersUp=False, symmetry=""):
        """
        Build a finite step-based 2D hex grid from a hex pitch in cm.

        .. impl:: Hexagonal grids can be points-up or flats-up.
            :id: I_ARMI_GRID_HEX_TYPE
            :implements: R_ARMI_GRID_HEX_TYPE

            When this method creates a ``HexGrid`` object, it can create a
            hexagonal grid with one of two rotations: flats up (where two sides of
            the hexagons are parallel with the X-axis), and points up (where two
            sides are parallel with the Y-axis). While it is possible to imagine
            the hexagons being rotated at other arbitrary angles, those are not
            supported here.

        .. impl:: When creating a hexagonal grid, the user can specify the symmetry.
            :id: I_ARMI_GRID_SYMMETRY1
            :implements: R_ARMI_GRID_SYMMETRY

            When this method creates a ``HexGrid`` object, it takes as an input the
            symmetry of the resultant grid. This symmetry can be a string (e.g.
            "full") or a ``SymmetryType`` object (e.g. ``FULL_CORE``). If the grid
            is not full-core, the method ``getSymmetricEquivalents()`` will be
            usable to map any possible grid cell to the ones that are being modeled
            in the sub-grid.

        Parameters
        ----------
        pitch : float
            Hex pitch (flat-to-flat) in cm
        numRings : int, optional
            The number of rings in the grid to pre-populate with locatator
            objects. Even if positions are not pre-populated, locators will be
            generated there on the fly.
        armiObject : ArmiObject, optional
            The object that this grid is anchored to (i.e. the reactor for a grid
            of assemblies)
        cornersUp : bool, optional
            Rotate the hexagons 30 degrees so that the corners point up instead of
            the flat faces.
        symmetry : string, optional
            A string representation of the symmetry options for the grid.

        Returns
        -------
        HexGrid
            A functional hexagonal grid object.
        """
        unitSteps = HexGrid._getRawUnitSteps(pitch, cornersUp)

        # NOTE(review): this local name shadows the builtin ``hex``; rename candidate.
        hex = HexGrid(
            unitSteps=unitSteps,
            unitStepLimits=((-numRings, numRings), (-numRings, numRings), (0, 1)),
            armiObject=armiObject,
            symmetry=symmetry,
        )
        return hex

    @property
    def pitch(self) -> float:
        """
        Get the hex-pitch of a regular hexagonal array.

        See Also
        --------
        armi.reactor.grids.HexGrid.fromPitch
        """
        return sqrt(self._unitSteps[0][0] ** 2 + self._unitSteps[1][0] ** 2)

    @staticmethod
    def indicesToRingPos(i: int, j: int) -> Tuple[int, int]:
        """
        Convert spatialLocator indices to ring/position.

        One benefit it has is that it never has negative numbers.

        Notes
        -----
        Ring, pos index system goes in counterclockwise hex rings.
""" if i > 0 and j >= 0: edge = 0 ring = i + j + 1 offset = j elif i <= 0 and j > -i: edge = 1 ring = j + 1 offset = -i elif i < 0 and j > 0: edge = 2 ring = -i + 1 offset = -j - i elif i < 0: edge = 3 ring = -i - j + 1 offset = -j elif i >= 0 and j < -i: edge = 4 ring = -j + 1 offset = i else: edge = 5 ring = i + 1 offset = i + j positionBase = 1 + edge * (ring - 1) return ring, positionBase + offset @staticmethod def getMinimumRings(n: int) -> int: """ Return the minimum number of rings needed to fit ``n`` objects. Notes ----- ``self`` is not used because hex grids always behave the same w.r.t. rings/positions. """ return hexagon.numRingsToHoldNumCells(n) @staticmethod def getPositionsInRing(ring: int) -> int: """Return the number of positions within a ring.""" return hexagon.numPositionsInRing(ring) def getNeighboringCellIndices(self, i: int, j: int = 0, k: int = 0) -> List[IJKType]: """ Return the indices of the immediate neighbors of a mesh point in the plane. Note that these neighbors are ordered counter-clockwise beginning from the 30 or 60 degree direction. Exact direction is dependent on cornersUp arg. """ return [ (i + 1, j, k), (i, j + 1, k), (i - 1, j + 1, k), (i - 1, j, k), (i, j - 1, k), (i + 1, j - 1, k), ] def getLabel(self, indices): """ Hex labels start at 1, and are ring/position based rather than i,j. This difference is partially because ring/pos is easier to understand in hex geometry, and partially because it is used in some codes ARMI originally was focused on. """ ring, pos = self.getRingPos(indices) if len(indices) == 2: return super().getLabel((ring, pos)) else: return super().getLabel((ring, pos, indices[2])) @staticmethod def _indicesAndEdgeFromRingAndPos(ring, position): """Given the ring and position, return the (I,J) coordinates, and which edge the grid cell is on. Parameters ---------- ring : int Starting with 1 (not zero), the ring of the grid cell. 
        position : int
            Starting with 1 (not zero), the position of the grid cell, in the ring.

        Returns
        -------
        (int, int, int) : I coordinate, J coordinate, which edge of the hex ring

        Notes
        -----
        - Edge indicates which edge of the ring in which the hexagon resides.
        - Edge 0 is the NE edge, edge 1 is the N edge, etc.
        - Offset is (0-based) index of the hexagon in that edge. For instance,
          ring 3, pos 12 resides in edge 5 at index 1; it is the second hexagon
          in ring 3, edge 5.
        """
        # The inputs start counting at 1, but the grid starts counting at zero.
        ring = ring - 1
        pos = position - 1

        # Handle the center grid cell.
        if ring == 0:
            if pos != 0:
                raise ValueError(f"Position in center ring must be 1, not {position}")
            return 0, 0, 0

        # find the edge and offset (pos//ring or pos%ring)
        edge, offset = divmod(pos, ring)

        # find (I,J) based on the ring, edge, and offset
        if edge == 0:
            i = ring - offset
            j = offset
        elif edge == 1:
            i = -offset
            j = ring
        elif edge == 2:
            i = -ring
            j = ring - offset
        elif edge == 3:
            i = offset - ring
            j = -offset
        elif edge == 4:
            i = offset
            j = -ring
        elif edge == 5:
            i = ring
            j = offset - ring
        else:
            raise ValueError(f"Edge {edge} is invalid. From ring {ring}, pos {pos}")

        return i, j, edge

    @staticmethod
    def getIndicesFromRingAndPos(ring: int, pos: int) -> IJType:
        r"""Given the ring and position, return the (I,J) coordinates in the hex grid.

        Parameters
        ----------
        ring : int
            Starting with 1 (not zero), the ring of the grid cell.
        pos : int
            Starting with 1 (not zero), the position of the grid cell, in the ring.
Returns ------- (int, int) : I coordinate, J coordinate Notes ----- In an axial plane, the (ring, position) coordinates are as follows:: Flat-to-Flat Corners Up _____ / \ / \ / \ _____/ 2,2 \_____ / \ / \ / \ / \ | 2,2 | 2,1 | / 2,3 \_____/ 2,1 \ | | | \ / \ / / \ / \ / \ \_____/ 1,1 \_____/ / \ / \ / \ / \ / \ | 2,3 | 1,1 | 2,6 | / 2,4 \_____/ 2,6 \ | | | | \ / \ / \ / \ / \ / \_____/ 2,5 \_____/ \ / \ / \ / \ / | 2,4 | 2,5 | \_____/ | | | \ / \ / \ / \ / """ i, j, _edge = HexGrid._indicesAndEdgeFromRingAndPos(ring, pos) return i, j def getRingPos(self, indices: IJKType) -> Tuple[int, int]: """ Get 1-based ring and position from normal indices. See Also -------- getIndicesFromRingAndPos : does the reverse """ i, j = indices[:2] return self.indicesToRingPos(i, j) def overlapsWhichSymmetryLine(self, indices: IJType) -> Optional[int]: """Return a list of which lines of symmetry this is on. Parameters ---------- indices : tuple of [int, int] Indices for the requested object Returns ------- None or int None if not line of symmetry goes through the object at the requested index. Otherwise, some grid constants like ``BOUNDARY_CENTER`` will be returned. Notes ----- - Only the 1/3 core view geometry is actually coded in here right now. - Being "on" a symmetry line means the line goes through the middle of you. """ i, j = indices[:2] if i == 0 and j == 0: symmetryLine = BOUNDARY_CENTER elif i > 0 and i == -2 * j: # edge 1: 1/3 symmetry line (bottom horizontal side in 1/3 core view, theta = 0) symmetryLine = BOUNDARY_0_DEGREES elif i == j and i > 0 and j > 0: # edge 2: 1/6 symmetry line (bisects 1/3 core view, theta = pi/3) symmetryLine = BOUNDARY_60_DEGREES elif j == -2 * i and j > 0: # edge 3: 1/3 symmetry line (left oblique side in 1/3 core view, theta = 2*pi/3) symmetryLine = BOUNDARY_120_DEGREES else: symmetryLine = None return symmetryLine def getSymmetricEquivalents(self, indices: IJKType) -> List[IJType]: """Retrieve the equivalent indices. 
If full core return nothing, if 1/3-core grid, return the symmetric equivalents, if any other grid, raise an error. .. impl:: Equivalent contents in thrid-core geometries are retrievable. :id: I_ARMI_GRID_EQUIVALENTS :implements: R_ARMI_GRID_EQUIVALENTS This method takes in (I,J,K) indices, and if this ``HexGrid`` is full core, it returns nothing. If this ``HexGrid`` is third-core, this method will return the third-core symmetric equivalent of just (I,J). If this grid is any other kind, this method will just return an error; a hexagonal grid with any other symmetry is probably an error. """ if ( self.symmetry.domain == geometry.DomainType.THIRD_CORE and self.symmetry.boundary == geometry.BoundaryType.PERIODIC ): return self._getSymmetricIdenticalsThird(indices) elif self.symmetry.domain == geometry.DomainType.FULL_CORE: return [] else: raise NotImplementedError(f"Unhandled symmetry condition for HexGrid: {self.symmetry}") @staticmethod def _getSymmetricIdenticalsThird(indices) -> List[IJType]: """This works by rotating the indices by 120 degrees twice, counterclockwise.""" i, j = indices[:2] if i == 0 and j == 0: return [] identicals = [(-i - j, i), (j, -i - j)] return identicals def triangleCoords(self, indices: IJKType) -> np.ndarray: """ Return 6 coordinate pairs representing the centers of the 6 triangles in a hexagon centered here. Ignores z-coordinate and only operates in 2D for now. """ xy = self.getCoordinates(indices)[:2] scale = self.pitch / 3.0 return xy + scale * TRIANGLES_IN_HEXAGON @staticmethod def _getRawUnitSteps(pitch, cornersUp=False): """Get the raw unit steps (ignore step dimensions), for a hex grid. Parameters ---------- pitch : float The short diameter of the hexagons (flat to flat). cornersUp : bool, optional If True, the hexagons have a corner pointing in the Y direction. Default: False Returns ------- tuple : The full 3D set of derivatives of X,Y,Z in terms of i,j,k. 
""" side = hexagon.side(pitch) if cornersUp: # rotated 30 degrees counter-clockwise from normal # increases in i moves you in x and y # increases in j also moves you in x and y unitSteps = ( (pitch / 2.0, -pitch / 2.0, 0), (1.5 * side, 1.5 * side, 0), (0, 0, 0), ) else: # x direction is only a function of i because j-axis is vertical. # y direction is a function of both. unitSteps = ((1.5 * side, 0.0, 0.0), (pitch / 2.0, pitch, 0.0), (0, 0, 0)) return unitSteps def changePitch(self, newPitchCm: float): """Change the hex pitch.""" unitSteps = np.array(HexGrid._getRawUnitSteps(newPitchCm, self.cornersUp)) self._unitSteps = unitSteps[self._stepDims] def locatorInDomain(self, locator, symmetryOverlap: Optional[bool] = False) -> bool: # This will include the "top" 120-degree symmetry lines. This is to support adding of edge # assemblies. if self.symmetry.domain == geometry.DomainType.THIRD_CORE: return self.isInFirstThird(locator, includeTopEdge=symmetryOverlap) else: return True def isInFirstThird(self, locator, includeTopEdge=False) -> bool: """Test if the given locator is in the first 1/3 of the HexGrid. .. impl:: Determine if grid is in first third. :id: I_ARMI_GRID_SYMMETRY_LOC :implements: R_ARMI_GRID_SYMMETRY_LOC This is a simple helper method to determine if a given locator (from an ArmiObject) is in the first 1/3 of the ``HexGrid``. This method does not attempt to check if this grid is full or 1/3-core. It just does the basic math of dividing up a hex-assembly reactor core into thirds and testing if the given location is in the first 1/3 or not. """ ring, pos = self.getRingPos(locator.indices) if ring == 1: return True maxPosTotal = self.getPositionsInRing(ring) maxPos1 = ring + ring // 2 - 1 maxPos2 = maxPosTotal - ring // 2 + 1 if ring % 2: # Odd ring; upper edge assem typically not included. if includeTopEdge: maxPos1 += 1 else: # Even ring; upper edge assem included. 
maxPos2 += 1 return bool(pos <= maxPos1 or pos >= maxPos2) def generateSortedHexLocationList(self, nLocs: int): """ Generate a list IndexLocations, sorted based on their distance from the center. IndexLocation are taken from a full core. Ties between locations with the same distance (e.g. A3001 and A3003) are broken by ring number then position number. """ # first, roughly calculate how many rings need to be created to cover nLocs worth of assemblies nLocs = int(nLocs) # next, generate a list of locations and corresponding distances locList = [] for ring in range(1, hexagon.numRingsToHoldNumCells(nLocs) + 1): positions = self.getPositionsInRing(ring) for position in range(1, positions + 1): i, j = self.getIndicesFromRingAndPos(ring, position) locList.append(self[(i, j, 0)]) # round to avoid differences due to floating point math locList.sort( key=lambda loc: ( round(np.linalg.norm(loc.getGlobalCoordinates()), 6), loc.i, loc.j, ) ) return locList[:nLocs] def rotateIndex(self, loc: IndexLocation, rotations: int) -> IndexLocation: """Find the new location of an index after some number of CCW rotations. Parameters ---------- loc : IndexLocation Starting index rotations : int Number of counter clockwise rotations Returns ------- IndexLocation Index in the grid after rotation Notes ----- Rotation uses a three-dimensional index in what can be known elsewhere by the confusing name of "cubic" coordinate system for a hexagon. Cubic stems from the notion of using three dimensions, ``(q, r, s)`` to describe a point in the hexagonal grid. The conversion from the indexing used in the ARMI framework follows:: q = i r = j # s = - q - r = - (q + r) s = -(i + j) The motivation for the cubic notation is rotation is far simpler: a clockwise rotation by 60 degrees results in a shifting and negating of the coordinates. So the first rotation of ``(q, r, s)`` would produce a new coordinate ``(-r, -s, -q)``. Another rotation would produce ``(s, q, r)``, and so on. 
Raises ------ TypeError If ``loc.grid`` is populated and not consistent with this grid. For example, it doesn't make sense to rotate an index from a Cartesian grid in a hexagonal coordinate system, nor hexagonal grid with different orientation (flats up vs. corners up) """ if self._roughlyEqual(loc.grid) or loc.grid is None: i, j, k = loc[:3] buffer = deque((i, j, -(i + j))) buffer.rotate(-rotations) newI = buffer[0] newJ = buffer[1] if rotations % 2: newI *= -1 newJ *= -1 return IndexLocation(newI, newJ, k, loc.grid) raise TypeError(f"Refusing to rotate an index {loc} from a grid {loc.grid} that is not consistent with {self}") def _roughlyEqual(self, other) -> bool: """Check that two hex grids are nearly identical. Would the same ``(i, j, k)`` index in ``self`` be the same location in ``other``? """ if other is self: return True return ( isinstance(other, HexGrid) and isclose(self.pitch, other.pitch, rel_tol=1e-4) and other.cornersUp == self.cornersUp ) ================================================ FILE: armi/reactor/grids/locations.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import math
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Hashable, Iterator, List, Optional, Tuple, Union

import numpy as np

if TYPE_CHECKING:
    # Avoid some circular imports
    from armi.reactor.grids import Grid

# Type aliases for 2-D and 3-D integer index tuples.
IJType = Tuple[int, int]
IJKType = Tuple[int, int, int]


class LocationBase(ABC):
    """
    A namedtuple-like object for storing location information.

    It's immutable (you can't set things after construction) and has names.
    """

    __slots__ = ("_i", "_j", "_k", "_grid")

    def __init__(self, i: int, j: int, k: int, grid: Optional["Grid"]):
        self._i = i
        self._j = j
        self._k = k
        self._grid = grid

    def __repr__(self) -> str:
        return "<{} @ ({},{:},{})>".format(self.__class__.__name__, self.i, self.j, self.k)

    def __getstate__(self) -> Hashable:
        """Used in pickling and deepcopy, this detaches the grid."""
        return (self._i, self._j, self._k, None)

    def __setstate__(self, state: Hashable):
        """Unpickle a locator, the grid will attach itself if it was also pickled, otherwise this will be detached."""
        self.__init__(*state)

    @property
    def i(self) -> int:
        return self._i

    @property
    def j(self) -> int:
        return self._j

    @property
    def k(self) -> int:
        return self._k

    @property
    def grid(self) -> Optional["Grid"]:
        return self._grid

    def __getitem__(self, index: int) -> Union[int, "Grid"]:
        # Index 0-3 maps to (i, j, k, grid); slices work too, e.g. loc[:3].
        return (self.i, self.j, self.k, self.grid)[index]

    def __hash__(self) -> Hashable:
        """
        Define a hash so we can use these as dict keys w/o having exact object.

        Notes
        -----
        Including the ``grid`` attribute may be more robust; however, using only (i, j, k) allows
        dictionaries to use IndexLocations and (i,j,k) tuples interchangeably.
        """
        return hash((self.i, self.j, self.k))

    def __eq__(self, other: Union[IJKType, "LocationBase"]) -> bool:
        if isinstance(other, tuple):
            return (self.i, self.j, self.k) == other
        if isinstance(other, LocationBase):
            # Note: grid comparison is by identity, not equality.
            return self.i == other.i and self.j == other.j and self.k == other.k and self.grid is other.grid
        return NotImplemented

    def __lt__(self, that: "LocationBase") -> bool:
        """
        A Locationbase is less than another if the pseudo-radius is less, or if equal, in order
        any index is less.

        Examples
        --------
        >>> grid = grids.HexGrid.fromPitch(1.0)
        >>> grid[0, 0, 0] < grid[2, 3, 4]   # the "radius" is less
        True
        >>> grid[2, 3, 4] < grid[2, 3, 4]   # they are equal
        False
        >>> grid[2, 3, 4] < grid[-2, 3, 4]  # 2 is greater than -2
        False
        >>> grid[-2, 3, 4] < grid[2, 3, 4]  # -2 is less than 2
        True
        >>> grid[1, 3, 4] < grid[-2, 3, 4]  # the "radius" is less
        True
        """
        selfIndices = self.indices
        thatIndices = that.indices
        # this is not really r, but it is fast and consistent
        selfR = abs(selfIndices).sum()
        thatR = abs(thatIndices).sum()
        # this cannot be reduced to
        #   return selfR < thatR or (selfIndices < thatIndices).any()
        # because the comparison is not symmetric.
        if selfR < thatR:
            return True
        else:
            # Tie on pseudo-radius: lexicographic comparison of (i, j, k).
            for lt, eq in zip(selfIndices < thatIndices, selfIndices == thatIndices):
                if eq:
                    continue
                return lt
        return False

    def __len__(self) -> int:
        """Returns 3, the number of directions."""
        return 3

    def associate(self, grid: "Grid"):
        """Re-assign locator to another Grid."""
        self._grid = grid

    @property
    @abstractmethod
    def indices(self) -> np.ndarray:
        """Get the non-grid indices (i,j,k) of this locator.

        This strips off the annoying ``grid`` tagalong which is there to ensure proper equality
        (i.e. (0,0,0) in a storage rack is not equal to (0,0,0) in a core).

        It is a numpy array for two reasons:

        1. It can be added and subtracted for the recursive computations through different
           coordinate systems.
        2. It can be written/read from the database.
        """


class IndexLocation(LocationBase):
    """
    An immutable location representing one cell in a grid.

    The locator is intimately tied to a grid and together, they represent a grid cell somewhere in
    the coordinate system of the grid.

    ``grid`` is not in the constructor (must be added after construction ) because the extra
    argument (grid) gives an inconsistency between __init__ and __new__. Unfortunately this
    decision makes whipping up IndexLocations on the fly awkward. But perhaps that's ok because
    they should only be created by their grids.
    """

    __slots__ = ()

    def __add__(self, that: Union[IJKType, "IndexLocation"]) -> "IndexLocation":
        """
        Enable adding with other objects like this and/or 3-tuples.

        Tuples are needed so we can terminate the recursive additions with a (0,0,0) basis.
        """
        # New location is not associated with any particular grid.
        return self.__class__(self[0] + that[0], self[1] + that[1], self[2] + that[2], None)

    def __sub__(self, that: Union[IJKType, "IndexLocation"]) -> "IndexLocation":
        return self.__class__(self[0] - that[0], self[1] - that[1], self[2] - that[2], None)

    def detachedCopy(self) -> "IndexLocation":
        """
        Make a copy of this locator that is not associated with a grid.

        See Also
        --------
        armi.reactor.reactors.detach : uses this
        """
        return self.__class__(self.i, self.j, self.k, None)

    @property
    def parentLocation(self):
        """
        Get the spatialLocator of the ArmiObject that this locator's grid is anchored to.

        For example, if this is one of many spatialLocators in a 2-D grid representing a reactor,
        then the ``parentLocation`` is the spatialLocator of the reactor, which will often be a
        ``CoordinateLocation``.
        """
        grid = self.grid  # performance matters a lot here so we remove a dot
        # check for None rather than __nonzero__ for speed (otherwise it checks the length)
        if grid is not None and grid.armiObject is not None and grid.armiObject.parent is not None:
            return grid.armiObject.spatialLocator
        return None

    @property
    def indices(self) -> np.ndarray:
        """
        Get the non-grid indices (i,j,k) of this locator.

        This strips off the annoying ``grid`` tagalong which is there to ensure proper equality
        (i.e. (0,0,0) in a storage rack is not equal to (0,0,0) in a core).

        It is a numpy array for two reasons:

        1. It can be added and subtracted for the recursive computations through different
           coordinate systems.
        2. It can be written/read from the database.
        """
        return np.array(self[:3])

    def getCompleteIndices(self) -> IJKType:
        """
        Transform the indices of this object up to the top mesh.

        The top mesh is either the one where there's no more parent (true top) or when an axis
        gets added twice. Unlike with coordinates, you can only add each index axis one time. Thus
        a *complete* set of indices is one where an index for each axis has been defined by a set
        of 1, 2, or 3 nested grids.

        This is useful for getting the reactor-level (i,j,k) indices of an object in a
        multi-layered 2-D(assemblies in core)/1-D(blocks in assembly) mesh like the one mapping
        blocks up to reactor in Hex reactors.

        The benefit of that particular mesh over a 3-D one is that different assemblies can have
        different axial meshes, a common situation.

        It will just return local indices for pin-meshes inside of blocks.

        A tuple is returned so that it is easy to compare pairs of indices.
        """
        parentLocation = self.parentLocation  # to avoid evaluating property if's twice
        indices = self.indices
        if parentLocation is not None:
            if parentLocation.grid is not None and addingIsValid(self.grid, parentLocation.grid):
                indices += parentLocation.indices
        return tuple(indices)

    def getLocalCoordinates(self, nativeCoords=False):
        """Return the coordinates of the center of the mesh cell here in cm."""
        if self.grid is None:
            raise ValueError(f"Cannot get local coordinates of {self} because grid is None.")
        return self.grid.getCoordinates(self.indices, nativeCoords=nativeCoords)

    def getGlobalCoordinates(self, nativeCoords=False):
        """Get coordinates in global 3D space of the centroid of this object."""
        parentLocation = self.parentLocation  # to avoid evaluating property if's twice
        if parentLocation:
            # Recurse up the chain of parent locators, summing local offsets.
            return self.getLocalCoordinates(nativeCoords=nativeCoords) + parentLocation.getGlobalCoordinates(
                nativeCoords=nativeCoords
            )
        return self.getLocalCoordinates(nativeCoords=nativeCoords)

    def getGlobalCellBase(self):
        """Return the cell base (i.e. "bottom left"), in global coordinate system."""
        parentLocation = self.parentLocation  # to avoid evaluating property if's twice
        if parentLocation:
            return parentLocation.getGlobalCellBase() + self.grid.getCellBase(self.indices)
        return self.grid.getCellBase(self.indices)

    def getGlobalCellTop(self):
        """Return the cell top (i.e. "top right"), in global coordinate system."""
        parentLocation = self.parentLocation  # to avoid evaluating property if's twice
        if parentLocation:
            return parentLocation.getGlobalCellTop() + self.grid.getCellTop(self.indices)
        return self.grid.getCellTop(self.indices)

    def getRingPos(self):
        """Return ring and position of this locator."""
        return self.grid.getRingPos(self.getCompleteIndices())

    def getSymmetricEquivalents(self):
        """
        Get symmetrically-equivalent locations, based on Grid symmetry.

        See Also
        --------
        Grid.getSymmetricEquivalents
        """
        return self.grid.getSymmetricEquivalents(self.indices)

    def distanceTo(self, other: "IndexLocation") -> float:
        """Return the distance from this locator to another."""
        return math.sqrt(((np.array(self.getGlobalCoordinates()) - np.array(other.getGlobalCoordinates())) ** 2).sum())


class MultiIndexLocation(IndexLocation):
    """
    A collection of index locations that can be used as a spatialLocator.

    This allows components with multiplicity>1 to have location information within a parent grid.
    The implication is that there are multiple discrete components, each one residing in one of
    the actual locators underlying this collection.

    .. impl:: Store components with multiplicity greater than 1
        :id: I_ARMI_GRID_MULT
        :implements: R_ARMI_GRID_MULT

        As not all grids are "full core symmetry", ARMI will sometimes need to track multiple
        positions for a single object: one for each symmetric portion of the reactor. This class
        doesn't calculate those positions in the reactor, it just tracks the multiple positions
        given to it. In practice, this class is mostly just a list of ``IndexLocation`` objects.
    """

    # MIL's cannot be hashed, so we need to scrape off the implementation from LocationBase. This
    # raises some interesting questions of substitutability of the various Location classes, which
    # should be addressed.
    __hash__ = None

    # The underlying list of discrete locations this collection wraps.
    _locations: List[IndexLocation]

    def __init__(self, grid: "Grid"):
        IndexLocation.__init__(self, 0, 0, 0, grid)
        self._locations = []

    def __eq__(self, other):
        """Considered equal if the grids are identical and contained locations are identical.

        Two ``MultiIndexLocation`` objects with the same total collection of locations, but in
        different orders, will not be considered equal.
        """
        if isinstance(other, type(self)):
            return self.grid == other.grid and self._locations == other._locations
        # Different objects -> let other.__eq__(self) handle it
        return NotImplemented

    def __getstate__(self) -> List[IndexLocation]:
        """Used in pickling and deepcopy, this detaches the grid."""
        return self._locations

    def __setstate__(self, state: List[IndexLocation]):
        """Unpickle a locator, the grid will attach itself if it was also pickled, otherwise this will be detached."""
        self.__init__(None)
        self._locations = state

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} with {len(self._locations)} locations>"

    def __getitem__(self, index: int) -> IndexLocation:
        return self._locations[index]

    def __setitem__(self, index: int, obj: IndexLocation):
        self._locations[index] = obj

    def __iter__(self) -> Iterator[IndexLocation]:
        return iter(self._locations)

    def __len__(self) -> int:
        return len(self._locations)

    def detachedCopy(self) -> "MultiIndexLocation":
        loc = MultiIndexLocation(None)
        loc.extend(self._locations)
        return loc

    def associate(self, grid: "Grid"):
        self._grid = grid
        for loc in self._locations:
            loc.associate(grid)

    def getCompleteIndices(self) -> IJKType:
        raise NotImplementedError("Multi locations cannot do this yet.")

    def append(self, location: IndexLocation):
        self._locations.append(location)

    def extend(self, locations: List[IndexLocation]):
        self._locations.extend(locations)

    def pop(self, location: int):
        # NOTE(review): despite the parameter name, ``list.pop`` takes an integer
        # index, not an IndexLocation; the annotation reflects the actual usage.
        self._locations.pop(location)

    @property
    def indices(self) -> List[np.ndarray]:
        """
        Return indices for all locations.

        .. impl:: Return the location of all instances of grid components with multiplicity
            greater than 1.
            :id: I_ARMI_GRID_ELEM_LOC
            :implements: R_ARMI_GRID_ELEM_LOC

            This method returns the indices of all the ``IndexLocation`` objects. To be clear,
            this does not return the ``IndexLocation`` objects themselves. This is designed to be
            consistent with the Grid's ``__getitem__()`` method.
        """
        return [loc.indices for loc in self._locations]


class CoordinateLocation(IndexLocation):
    """
    A triple representing a point in space.

    This is still associated with a grid. The grid defines the continuous coordinate space and
    axes that the location is within. This also links to the composite tree.
    """

    __slots__ = ()

    def __eq__(self, other):
        if isinstance(other, type(self)):
            # Mainly to avoid comparing against MultiIndexLocations. Fuel pins may have a multi
            # index location and the duct may have a coordinate location and we don't want them to
            # be equal.
            return self.grid == other.grid and self.i == other.i and self.j == other.j and self.k == other.k
        return NotImplemented

    def __hash__(self):
        """Hash based on the coordinates but not the grid."""
        return hash((self.i, self.j, self.k))

    def getLocalCoordinates(self, nativeCoords=False):
        """Return x,y,z coordinates in cm within the grid's coordinate system."""
        return self.indices

    def getCompleteIndices(self) -> IJKType:
        """Top of chain. Stop recursion and return basis."""
        return 0, 0, 0

    def getGlobalCellBase(self):
        return self.indices

    def getGlobalCellTop(self):
        return self.indices


def addingIsValid(myGrid: "Grid", parentGrid: "Grid"):
    """
    True if adding a indices from one grid to another is considered valid.

    In ARMI we allow the addition of a 1-D axial grid with a 2-D grid. We do not allow any other
    kind of adding. This enables the 2D/1D grid layout in Assemblies/Blocks but does not allow 2D
    indexing in pins to become inconsistent.
    """
    return myGrid.isAxialOnly and not parentGrid.isAxialOnly



================================================
FILE: armi/reactor/grids/structuredGrid.py
================================================
# Copyright 2023 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
from abc import abstractmethod
from typing import Iterable, List, Optional, Sequence, Tuple, Union

import numpy as np

from armi.reactor.grids.grid import Grid
from armi.reactor.grids.locations import (
    IJKType,
    IndexLocation,
    LocationBase,
    MultiIndexLocation,
)

# data structure for database-serialization of grids
GridParameters = collections.namedtuple(
    "GridParameters",
    ("unitSteps", "bounds", "unitStepLimits", "offset", "geomType", "symmetry"),
)


class StructuredGrid(Grid):
    """
    A connected set of cells characterized by indices mapping to space and vice versa.

    The cells may be characterized by any mixture of regular repeating steps and user-defined
    steps in any direction.

    For example, a 2-D hex lattice has constant, regular steps whereas a 3-D hex mesh may have
    user-defined axial meshes. Similar for Cartesian, RZT, etc.

    Parameters
    ----------
    unitSteps : tuple of tuples, optional
        Describes the grid spatially as a function on indices. Each tuple describes how each
        ``(x,y,or z)`` dimension is influenced by ``(i,j,k)``. In other words, it is::

            (dxi, dxj, jxk), (dyi, dyj, dyk), (dzi, dzj, dzk)

        where ``dmn`` is the distance (in cm) that dimension ``m`` will change as a function of
        index ``n``.

        Unit steps are used as a generic method for defining repetitive grids in a variety of
        geometries, including hexagonal and Cartesian. The tuples are not vectors in the direction
        of the translation, but rather grouped by direction. If the bounds argument is described
        for a direction, the bounds will be used rather than the unit step information.

        The default of (0, 0, 0) makes all dimensions insensitive to indices since the coordinates
        are calculated by the dot product of this and the indices. With this default, any
        dimension that is desired to change with indices should be defined with bounds. RZtheta
        grids are created exclusively with bounds.
    bounds : 3-tuple
        Absolute increasing bounds in cm including endpoints of a non-uniform grid. Each item
        represents the boundaries in the associated direction. Use Nones when unitSteps should be
        applied instead. Most useful for thetaRZ grids or other non-uniform grids.
    unitStepLimits : 3-tuple
        The limit of the steps in all three directions. This constrains step-defined grids to be
        finite so we can populate them with SpatialLocator objects.
    offset : 3-tuple, optional
        Offset in cm for each axis. By default the center of the (0,0,0)-th object is in the
        center of the grid. Offsets can move it so that the (0,0,0)-th object can be fully within
        a quadrant (i.e. in a Cartesian grid).
    armiObject : ArmiObject, optional
        The ArmiObject that this grid describes. For example if it's a 1-D assembly grid, the
        armiObject is the assembly. Note that ``self.armiObject.spatialGrid`` is ``self``.

    Examples
    --------
    A 2D a rectangular grid with width (x) 2 and height (y) 3 would be::

        >>> grid = Grid(unitSteps=((2, 0, 0), (0, 3, 0), (0, 0, 0)))

    A regular hex grid with pitch 1 is::

        >>> grid = Grid(unitSteps= ((sqrt(3)/2, 0.0, 0.0), (0.5, 1.0, 0.0), (0, 0, 0))

    .. note:: For this unit hex the magnitude of the vector constructed using the 0th index of
        each tuple is 1.0.

    Notes
    -----
    Each dimension must either be defined through unitSteps or bounds. The combination of
    unitSteps with bounds was settled upon after some struggle to have one unified definition of a
    grid (i.e. just bounds). A hexagonal grid is somewhat challenging to represent with bounds
    because the axes are not orthogonal, so a unit-direction vector plus bounds would be required.
    And then the bounds would be wasted space because they can be derived simply by unit steps.
    Memory efficiency is important in this object so the compact representation of
    unitSteps-when-possible, bounds-otherwise was settled upon.

    Design considerations include:

    * unitSteps are more intuitive as operations starting from the center of a cell, particularly
      with hexagons and rectangles. Otherwise the 0,0 position of a hexagon in the center of
      1/3-symmetric hexagon is at the phantom bottom left of the hexagon.

    * Users generally prefer to input mesh bounds rather than centers (e.g. starting at 0.5
      instead of 0.0 in a unit mesh is weird).

    * If we store bounds, computing bounds is simple and computing centers takes ~2x the effort.
      If we store centers, it's the opposite.

    * Regardless of how we store things, we'll need a Grid that has the lower-left assembly fully
      inside the problem (i.e. for full core Cartesian) as well as another one that has the
      lower-left assembly half-way or quarter-way sliced off (for 1/2, 1/4, and 1/8 symmetries).
      The ``offset`` parameter handles this.

    * Looking up mesh boundaries (to define a mesh in another code) is generally more common than
      looking up centers (for plotting or measuring distance).

    * A grid can be anchored to the object that it is in with a backreference. This gives it the
      ability to traverse the composite tree and map local to global locations without having to
      duplicate the composite pattern on grids. This remains optional so grids can be used for
      non-reactor-package reasons. It may seem slightly cleaner to set the armiObject to the
      parent's spatialLocator itself but the major disadvantage of this is that when an object
      moves, the armiObject would have to be updated. By anchoring directly to Composite objects,
      the parent is always up to date no matter where or how things get moved.

    * Unit step calculations use dot products and must not be polluted by the bound indices. Thus
      we reduce the size of the unitSteps tuple accordingly.
    """

    def __init__(
        self,
        unitSteps=(0, 0, 0),
        bounds=(None, None, None),
        unitStepLimits=((0, 1), (0, 1), (0, 1)),
        offset=None,
        geomType="",
        symmetry="",
        armiObject=None,
    ):
        super().__init__(geomType, symmetry, armiObject)
        # these lists contain the indices representing which dimensions for which steps are used,
        # or for which bounds are used. index 0 is x direction, etc.
        self._boundDims = []
        self._stepDims = []
        for dimensionIndex, bound in enumerate(bounds):
            if bound is None:
                self._stepDims.append(dimensionIndex)
            else:
                self._boundDims.append(dimensionIndex)

        # numpy prefers tuples like this to do slicing on arrays
        self._boundDims = (tuple(self._boundDims),)
        self._stepDims = (tuple(self._stepDims),)

        unitSteps = _tuplify(unitSteps)

        self._bounds = bounds
        self._unitStepLimits = _tuplify(unitStepLimits)

        # only represent unit steps in dimensions they're being used so as to not
        # pollute the dot product. This may reduce the length of this from 3 to 2 or 1
        self._unitSteps = np.array(unitSteps)[self._stepDims]
        self._offset = np.zeros(3) if offset is None else np.array(offset)

        self._locations = {}
        self._buildLocations()  # locations are owned by a grid, so the grid builds them.

        (_ii, iLen), (_ji, jLen), (_ki, kLen) = self.getIndexBounds()
        # True if only contains k-cells.
        self._isAxialOnly = iLen == jLen == 1 and kLen > 1

    def __len__(self) -> int:
        return len(self._locations)

    @property
    def isAxialOnly(self) -> bool:
        return self._isAxialOnly

    def reduce(self) -> GridParameters:
        """Recreate the parameter necessary to create this grid."""
        offset = None if not self._offset.any() else tuple(self._offset)

        bounds = _tuplify(self._bounds)

        # recreate a constructor-friendly version of `_unitSteps` from live data (may have been
        # reduced from length 3 to length 2 or 1 based on mixing the step-based definition and the
        # bounds-based definition described in Design Considerations above.)
        # We don't just save the original tuple passed in because that may miss transformations
        # that occur between instantiation and reduction.
        unitSteps = []
        compressedSteps = list(self._unitSteps[:])
        for i in range(3):
            # Recall _stepDims are stored as a single-value tuple (for numpy indexing)
            # So this just is grabbing the actual data.
            if i in self._stepDims[0]:
                unitSteps.append(compressedSteps.pop(0))
            else:
                # Add dummy value which will never get used (it gets reduced away)
                unitSteps.append(0)
        unitSteps = _tuplify(unitSteps)

        return GridParameters(
            unitSteps,
            bounds,
            self._unitStepLimits,
            offset,
            self._geomType,
            self._symmetry,
        )

    @property
    def offset(self) -> np.ndarray:
        """Offset in cm for each axis."""
        return self._offset

    @offset.setter
    def offset(self, offset: np.ndarray):
        self._offset = offset

    def __repr__(self) -> str:
        msg = (
            ["<{} -- {}\nBounds:\n".format(self.__class__.__name__, id(self))]
            + ["  {}\n".format(b) for b in self._bounds]
            + ["Steps:\n"]
            + ["  {}\n".format(b) for b in self._unitSteps]
            + [
                "Anchor: {}\n".format(self.armiObject),
                "Offset: {}\n".format(self._offset),
                "Num Locations: {}>".format(len(self)),
            ]
        )
        return "".join(msg)

    def __getitem__(self, ijk: Union[IJKType, List[IJKType]]) -> LocationBase:
        """
        Get a location by (i, j, k) indices. If it does not exist, create a new one and return it.

        Parameters
        ----------
        ijk : tuple of indices or list of the same
            If provided a tuple, an IndexLocation will be created (if necessary) and returned. If
            provided a list, each element will create a new IndexLocation (if necessary), and a
            MultiIndexLocation containing all of the passed indices will be returned.

        Notes
        -----
        The method is defaultdict-like, in that it will create a new location on the fly. However,
        the class itself is not really a dictionary, it is just index-able. For example, there is
        no desire to have a ``__setitem__`` method, because the only way to create a location is
        by retrieving it or through ``buildLocations``.
        """
        try:
            return self._locations[ijk]
        except (KeyError, TypeError):
            # KeyError: location not yet cached; TypeError: ijk is an unhashable list.
            pass

        if isinstance(ijk, tuple):
            i, j, k = ijk
            val = IndexLocation(i, j, k, self)
            self._locations[ijk] = val
        elif isinstance(ijk, list):
            val = MultiIndexLocation(self)
            locators = [self[idx] for idx in ijk]
            val.extend(locators)
        else:
            raise TypeError("Unsupported index type `{}` for `{}`".format(type(ijk), ijk))
        return val

    def items(self) -> Iterable[Tuple[IJKType, IndexLocation]]:
        return self._locations.items()

    def backUp(self):
        """Gather internal info that should be restored within a retainState."""
        self._backup = self._unitSteps, self._bounds, self._offset

    def restoreBackup(self):
        self._unitSteps, self._bounds, self._offset = self._backup

    def getCoordinates(self, indices, nativeCoords=False) -> np.ndarray:
        """Return the coordinates of the center of the mesh cell at the given indices in cm.

        .. impl:: Get the coordinates from a location in a grid.
            :id: I_ARMI_GRID_GLOBAL_POS
            :implements: R_ARMI_GRID_GLOBAL_POS

            Probably the most common request of a structure grid will be to give the grid indices
            and return the physical coordinates of the center of the mesh cell. This is super
            handy in any situation where the coordinates have physical meaning.

            The math for finding the centroid turns out to be very easy, as the mesh is defined on
            the coordinates. So finding the mid-point along one axis is just taking the upper and
            lower bounds and dividing by two. And this is done for all axes. There are no more
            complicated situations where we need to find the centroid of a octagon on a
            rectangular mesh, or the like.
        """
        indices = np.array(indices)
        return self._evaluateMesh(indices, self._centroidBySteps, self._centroidByBounds)

    def getCellBase(self, indices) -> np.ndarray:
        """Get the mesh base (lower left) of this mesh cell in cm."""
        indices = np.array(indices)
        return self._evaluateMesh(indices, self._meshBaseBySteps, self._meshBaseByBounds)

    def getCellTop(self, indices) -> np.ndarray:
        """Get the mesh top (upper right) of this mesh cell in cm."""
        # The top of cell i is the base of cell i+1 along every axis.
        indices = np.array(indices) + 1
        return self._evaluateMesh(indices, self._meshBaseBySteps, self._meshBaseByBounds)

    def _evaluateMesh(self, indices, stepOperator, boundsOperator) -> np.ndarray:
        """
        Evaluate some function of indices on this grid.

        Recall from above that steps are mesh-centered and bounds are mesh-edged.

        Notes
        -----
        This method may be simplifiable. Complications arose from mixtures of bounds-based and
        step-based meshing. These were separate subclasses, but in practice many cases have some
        mix of step-based (hexagons, squares), and bounds based (radial, zeta).
        """
        boundCoords = []
        for ii, bounds in enumerate(self._bounds):
            if bounds is not None:
                boundCoords.append(boundsOperator(indices[ii], bounds))

        # limit step operator to the step dimensions
        stepCoords = stepOperator(np.array(indices)[self._stepDims])

        # now mix/match bounds coords with step coords appropriately.
result = np.zeros(len(indices)) result[self._stepDims] = stepCoords result[self._boundDims] = boundCoords return result + self._offset def _centroidBySteps(self, indices): return np.dot(self._unitSteps, indices) def _meshBaseBySteps(self, indices): return (self._centroidBySteps(indices - 1) + self._centroidBySteps(indices)) / 2.0 @staticmethod def _centroidByBounds(index, bounds): if index < 0: # avoid wrap-around raise IndexError("Bounds-defined indices may not be negative.") return (bounds[index + 1] + bounds[index]) / 2.0 @staticmethod def _meshBaseByBounds(index, bounds): if index < 0: raise IndexError("Bounds-defined indices may not be negative.") return bounds[index] @staticmethod def getNeighboringCellIndices(i, j=0, k=0): """Return the indices of the immediate neighbors of a mesh point in the plane.""" return ((i + 1, j, k), (i, j + 1, k), (i - 1, j, k), (i, j - 1, k)) @staticmethod def getAboveAndBelowCellIndices(indices): i, j, k = indices return ((i, j, k + 1), (i, j, k - 1)) def getIndexBounds(self): """ Get min index and number of indices in this grid. Step-defined grids would be infinite but for the step limits defined in the constructor. Notes ----- This produces output that is intended to be passed to a ``range`` statement. """ indexBounds = [] for minMax, bounds in zip(self._unitStepLimits, self._bounds): if bounds is None: indexBounds.append(minMax) else: indexBounds.append((0, len(bounds))) return tuple(indexBounds) def getBounds( self, ) -> Tuple[Optional[Sequence[float]], Optional[Sequence[float]], Optional[Sequence[float]]]: """Return the grid bounds for each dimension, if present.""" return self._bounds def getLocatorFromRingAndPos(self, ring, pos, k=0): """ Return the location based on ring and position. 
Parameters ---------- ring : int Ring number (1-based indexing) pos : int Position number (1-based indexing) k : int, optional Axial index (0-based indexing) See Also -------- getIndicesFromRingAndPos This implements the transform into i, j indices based on ring and position. """ i, j = self.getIndicesFromRingAndPos(ring, pos) return self[i, j, k] @staticmethod @abstractmethod def getIndicesFromRingAndPos(ring: int, pos: int): """ Return i, j indices given ring and position. Note ---- This should be implemented as a staticmethod, since no Grids currently in existence actually need any instance data to perform this task, and staticmethods provide the convenience of calling the method without an instance of the class in the first place. """ @abstractmethod def getMinimumRings(self, n: int) -> int: """ Return the minimum number of rings needed to fit ``n`` objects. Warning ------- While this is useful and safe for answering the question of "how many rings do I need to hold N things?", is generally not safe to use it to answer "I have N things; within how many rings are they distributed?". This function provides a lower bound, assuming that objects are densely-packed. If they are not actually densely packed, this may be unphysical. """ @abstractmethod def getPositionsInRing(self, ring: int) -> int: """Return the number of positions within a ring.""" def getRingPos(self, indices) -> Tuple[int, int]: """ Get ring and position number in this grid. For non-hex grids this is just i and j. A tuple is returned so that it is easy to compare pairs of indices. """ # Regular grids don't know about ring and position. Check the parent. 
if ( self.armiObject is not None and self.armiObject.parent is not None and self.armiObject.parent.spatialGrid is not None ): return self.armiObject.parent.spatialGrid.getRingPos(indices) raise ValueError("No ring position found, because no spatial grid was found.") def getAllIndices(self): """Get all possible indices in this grid.""" iBounds, jBounds, kBounds = self.getIndexBounds() allIndices = tuple(itertools.product(range(*iBounds), range(*jBounds), range(*kBounds))) return allIndices def _buildLocations(self): """Populate all grid cells with a characteristic SpatialLocator.""" for i, j, k in self.getAllIndices(): loc = IndexLocation(i, j, k, self) self._locations[(i, j, k)] = loc @property @abstractmethod def pitch(self) -> Union[float, Tuple[float, float]]: """Grid pitch. Some implementations may rely on a single pitch, such as axial or hexagonal grids. Cartesian grids may use a single pitch between elements or separate pitches for the x and y dimensions. Returns ------- float or tuple of (float, float) Grid spacing in cm """ def _tuplify(maybeArray) -> tuple: if isinstance(maybeArray, (np.ndarray, list, tuple)): maybeArray = tuple(tuple(row) if isinstance(row, (np.ndarray, list)) else row for row in maybeArray) return maybeArray ================================================ FILE: armi/reactor/grids/tests/__init__.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
================================================
FILE: armi/reactor/grids/tests/test_grids.py
================================================
# Copyright 2023 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grids."""

import math
import pickle
import unittest
from io import BytesIO
from random import randint

import numpy as np
from numpy.testing import assert_allclose, assert_array_equal

from armi.reactor import geometry, grids
from armi.utils import hexagon


class MockLocator(grids.IndexLocation):
    """
    Locator subclass with direct location -> location paternity (to avoid needing blocks,
    assems).
    """

    @property
    def parentLocation(self):
        return self._parent


class MockCoordLocator(grids.CoordinateLocation):
    """Coordinate locator with direct location -> location paternity."""

    @property
    def parentLocation(self):
        return self._parent


class MockArmiObject:
    """Any sort of object that can serve as a grid's armiObject attribute."""

    def __init__(self, parent=None):
        self.parent = parent


class MockStructuredGrid(grids.StructuredGrid):
    """Need a concrete class to test a lot of inherited methods.

    Abstract methods from the parent now raise ``NotImplementedError``
    """


# De-abstract the mock structured grid to test some basic
# properties, but let the abstract methods error
def _throwsNotImplemented(*args, **kwargs):
    raise NotImplementedError


for f in MockStructuredGrid.__abstractmethods__:
    setattr(MockStructuredGrid, f, _throwsNotImplemented)

# Clearing the abstract-method registry lets MockStructuredGrid be instantiated.
MockStructuredGrid.__abstractmethods__ = ()


class TestSpatialLocator(unittest.TestCase):
    def test_add(self):
        """Locations add element-wise on (i, j, k)."""
        loc1 = grids.IndexLocation(1, 2, 0, None)
        loc2 = grids.IndexLocation(2, 2, 0, None)
        self.assertEqual(loc1 + loc2, grids.IndexLocation(3, 4, 0, None))

    def test_multiIndexEq(self):
        """Check multi index locations are only true if they live on the same grid and have the same locations."""
        a = grids.MultiIndexLocation(None)
        a.append(grids.IndexLocation(0, 0, 0, None))
        b = grids.MultiIndexLocation(None)
        b.append(grids.IndexLocation(1, 1, 1, None))
        self.assertNotEqual(a, b)

        c = grids.MultiIndexLocation(None)
        c.append(grids.IndexLocation(0, 0, 0, None))
        self.assertEqual(a, c)

    def test_multiIndexEqWithLocations(self):
        """Two multi index locators on the same grid are equal."""
        grid = MockStructuredGrid()
        a = grids.MultiIndexLocation(grid)
        a.extend((grids.IndexLocation(i, -i, i, grid) for i in range(5)))
        b = grids.MultiIndexLocation(grid)
        b.extend(a)
        self.assertEqual(a, b)

        # If the order differs but all the locations are the same, the locators are considered not equal
        locs = list(a)
        locs.insert(0, locs.pop())
        c = grids.MultiIndexLocation(grid)
        c.extend(locs)
        self.assertNotEqual(c, a)

    def test_coordinateLocationEq(self):
        """Test for equality on the coordinate location object."""
        base = grids.CoordinateLocation(1, -3, 5, MockStructuredGrid())
        self.assertEqual(base, base)
        self.assertEqual(base, grids.CoordinateLocation(base.i, base.j, base.k, base.grid))
        self.assertNotEqual(base, grids.CoordinateLocation(base.i, base.j, base.k, None))

        # Pick some points with different indices in one dimension
        # Offsets are arbitrary
        self.assertNotEqual(base, grids.CoordinateLocation(base.i + 1, base.j, base.k, base.grid))
        self.assertNotEqual(base, grids.CoordinateLocation(base.i, base.j - 2, base.k, base.grid))
        self.assertNotEqual(base, grids.CoordinateLocation(base.i, base.j, base.k + 13, base.grid))

    def test_coordinateLocationHash(self):
        """Ensure we can hash the location based on its position, not the grid."""
        a = grids.CoordinateLocation(5, 9, 1, MockStructuredGrid())
        self.assertEqual(hash(a), hash((a.i, a.j, a.k)))

        b = grids.CoordinateLocation(a.i, a.j, a.k, None)
        self.assertEqual(hash(b), hash(a))

    def test_recursion(self):
        """
        Make sure things work as expected with a chain of locators/grids/locators.

        This makes a Cartesian-like reactor out of unit cubes. The origin is in the center of the
        central cube radially and the bottom axially due to the different way steps and bounds are
        set up.
        """
        core = MockArmiObject()
        assem = MockArmiObject(core)
        block = MockArmiObject(assem)

        # build meshes just like how they're used on a regular system.
        # 2-D grid
        coreGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0, armiObject=core)
        # 1-D z-mesh
        assemblyGrid = grids.AxialGrid.fromNCells(5, armiObject=assem)
        # pins sit in this 2-D grid.
        blockGrid = grids.CartesianGrid.fromRectangle(0.1, 0.1, armiObject=block)

        coreLoc = grids.CoordinateLocation(0.0, 0.0, 0.0, None)
        core.spatialLocator = coreLoc

        assemblyLoc = grids.IndexLocation(2, 3, 0, coreGrid)
        assem.spatialLocator = assemblyLoc

        blockLoc = grids.IndexLocation(0, 0, 3, assemblyGrid)
        block.spatialLocator = blockLoc

        pinIndexLoc = grids.IndexLocation(1, 5, 0, blockGrid)
        pinFree = grids.CoordinateLocation(1.0, 2.0, 3.0, blockGrid)

        assert_allclose(blockLoc.getCompleteIndices(), np.array((2, 3, 3)))
        assert_allclose(blockLoc.getGlobalCoordinates(), (2.0, 3.0, 3.5))
        assert_allclose(blockLoc.getGlobalCellBase(), (1.5, 2.5, 3))
        assert_allclose(blockLoc.getGlobalCellTop(), (2.5, 3.5, 4))

        # check coordinates of pins in block
        assert_allclose(pinFree.getGlobalCoordinates(), (2.0 + 1.0, 3.0 + 2.0, 3.5 + 3.0))  # epic
        assert_allclose(pinIndexLoc.getGlobalCoordinates(), (2.0 + 0.1, 3.0 + 0.5, 3.5))  # wow

        # pin indices should not combine with the parent indices.
        assert_allclose(pinIndexLoc.getCompleteIndices(), (1, 5, 0))

    def test_recursionPin(self):
        """Ensure a pin in the center assem has axial coordinates consistent with a pin in an
        off-center assembly.
        """
        core = MockArmiObject()
        assem = MockArmiObject(core)
        block = MockArmiObject(assem)

        # 2-D grid
        coreGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0, armiObject=core)
        # 1-D z-mesh
        assemblyGrid = grids.AxialGrid.fromNCells(5, armiObject=assem)
        # pins sit in this 2-D grid.
        blockGrid = grids.CartesianGrid.fromRectangle(0.1, 0.1, armiObject=block)

        coreLoc = grids.CoordinateLocation(0.0, 0.0, 0.0, None)
        core.spatialLocator = coreLoc
        assemblyLoc = grids.IndexLocation(0, 0, 0, coreGrid)
        assem.spatialLocator = assemblyLoc
        blockLoc = grids.IndexLocation(0, 0, 3, assemblyGrid)
        block.spatialLocator = blockLoc
        pinIndexLoc = grids.IndexLocation(1, 5, 0, blockGrid)

        assert_allclose(pinIndexLoc.getCompleteIndices(), (1, 5, 0))


class TestGrid(unittest.TestCase):
    def test_basicPosition(self):
        """
        Ensure a basic Cartesian grid works as expected.

        The default stepped grid defines zero at the center of the (0,0,0)th cell. Its centroid is
        (0., 0., 0.). This convention is nicely compatible with 120-degree hex grid. Full core
        Cartesian meshes will want to be shifted to bottom left of 0th cell.
        """
        grid = MockStructuredGrid(unitSteps=((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))
        assert_allclose(grid.getCoordinates((1, 1, 1)), (1, 1, 1))
        assert_allclose(grid.getCoordinates((0, 0, 0)), (0.0, 0.0, 0.0))
        assert_allclose(grid.getCoordinates((0, 0, -1)), (0, 0, -1))
        assert_allclose(grid.getCoordinates((1, 0, 0)), (1, 0, 0))

    def test_neighbors(self):
        """An interior cell of a structured grid has four in-plane neighbors."""
        grid = MockStructuredGrid(unitSteps=((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))
        neighbs = grid.getNeighboringCellIndices(0, 0, 0)
        self.assertEqual(len(neighbs), 4)

    def test_label(self):
        """Labels are zero-padded triplets of the (i, j, k) indices."""
        grid = MockStructuredGrid(unitSteps=((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))
        self.assertEqual(grid.getLabel((1, 1, 2)), "001-001-002")

    def test_isAxialOnly(self):
        """Only 1-D axial grids report isAxialOnly."""
        grid = grids.HexGrid.fromPitch(1.0, numRings=3)
        self.assertAlmostEqual(grid.pitch, 1.0)
        self.assertEqual(grid.isAxialOnly, False)

        grid2 = grids.AxialGrid.fromNCells(10)
        self.assertEqual(grid2.isAxialOnly, True)

    def test_lookupFactory(self):
        """Indexing a grid creates the requested location on demand."""
        grid = grids.HexGrid.fromPitch(1.0, numRings=3)
        self.assertAlmostEqual(grid.pitch, 1.0)
        self.assertEqual(grid[10, 5, 0].i, 10)

    def test_quasiReduce(self):
        """Make sure our DB-friendly version of reduce works."""
        grid = grids.HexGrid.fromPitch(1.0, numRings=3)
        self.assertAlmostEqual(grid.pitch, 1.0)
        reduction = grid.reduce()
        self.assertAlmostEqual(reduction[0][1][1], 1.0)

    def test_generateSortedHexLocationList(self):
        """Sorted hex location lists start at the center and spiral outward deterministically."""
        for pitch in [1.0, 3.14]:
            for rings in [3, 12]:
                grid = grids.HexGrid.fromPitch(pitch, numRings=rings)

                lst = grid.generateSortedHexLocationList(1)
                self.assertEqual(len(lst), 1)
                self.assertEqual(lst[0].indices.tolist(), [0, 0, 0])

                lst = grid.generateSortedHexLocationList(2)
                self.assertEqual(len(lst), 2)
                self.assertEqual(lst[0].indices.tolist(), [0, 0, 0])
                self.assertEqual(lst[1].indices.tolist(), [-1, 0, 0])

                lst = grid.generateSortedHexLocationList(4)
                self.assertEqual(len(lst), 4)
                self.assertEqual(lst[0].indices.tolist(), [0, 0, 0])
                self.assertEqual(lst[1].indices.tolist(), [-1, 0, 0])
                self.assertEqual(lst[2].indices.tolist(), [-1, 1, 0])
                self.assertEqual(lst[3].indices.tolist(), [0, -1, 0])

    def test_getitem(self):
        """
        Test that locations are created on demand, and the multi-index locations are returned when
        necessary.

        .. test:: Return the locations of grid items with multiplicity greater than one.
            :id: T_ARMI_GRID_ELEM_LOC
            :tests: R_ARMI_GRID_ELEM_LOC
        """
        grid = grids.HexGrid.fromPitch(1.0, numRings=0)
        self.assertAlmostEqual(grid.pitch, 1.0)
        self.assertNotIn((0, 0, 0), grid._locations)
        _ = grid[0, 0, 0]
        self.assertIn((0, 0, 0), grid._locations)

        multiLoc = grid[[(0, 0, 0), (1, 0, 0), (0, 1, 0)]]
        self.assertIsInstance(multiLoc, grids.MultiIndexLocation)
        self.assertIn((1, 0, 0), grid._locations)

        i = multiLoc.indices
        i = [ii.tolist() for ii in i]
        self.assertEqual(i, [[0, 0, 0], [1, 0, 0], [0, 1, 0]])

    def test_ringPosFromIndicesIncorrect(self):
        """Test the getRingPos fails if there is no armiObject or parent."""
        grid = MockStructuredGrid(unitSteps=((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))

        grid.armiObject = None
        with self.assertRaises(ValueError):
            grid.getRingPos(((0, 0), (1, 1)))


class TestHexGrid(unittest.TestCase):
    """A set of tests for the Hexagonal Grid."""

    def test_getCoordinatesFlatsUp(self):
        """Test getCoordinates() for flats up hex grids."""
        grid = grids.HexGrid.fromPitch(1.0, cornersUp=False)
        self.assertAlmostEqual(grid.pitch, 1.0)
        side = 1.0 / math.sqrt(3)
        assert_allclose(grid.getCoordinates((0, 0, 0)), (0.0, 0.0, 0.0))
        assert_allclose(grid.getCoordinates((1, 0, 0)), (1.5 * side, 0.5, 0.0))
        assert_allclose(grid.getCoordinates((-1, 0, 0)), (-1.5 * side, -0.5, 0.0))
        assert_allclose(grid.getCoordinates((0, 1, 0)), (0, 1.0, 0.0))
        assert_allclose(grid.getCoordinates((1, -1, 0)), (1.5 * side, -0.5, 0.0))

        unitSteps = grid.reduce()[0]
        iDirection = tuple(direction[0] for direction in unitSteps)
        jDirection = tuple(direction[1] for direction in unitSteps)
        for directionVector in (iDirection, jDirection):
            self.assertAlmostEqual(
                (sum(val**2 for val in directionVector)) ** 0.5,
                1.0,
                msg=f"Direction vector {directionVector} should have magnitude 1 for pitch 1.",
            )
        assert_allclose(grid.getCoordinates((1, 0, 0)), iDirection)
        assert_allclose(grid.getCoordinates((0, 1, 0)), jDirection)

    def test_getCoordinatesCornersUp(self):
        """Test getCoordinates() for corners up hex grids."""
        grid = grids.HexGrid.fromPitch(1.0, cornersUp=True)
        self.assertAlmostEqual(grid.pitch, 1.0)
        side = 1.0 / math.sqrt(3)
        assert_allclose(grid.getCoordinates((0, 0, 0)), (0.0, 0.0, 0.0))
        assert_allclose(grid.getCoordinates((1, 0, 0)), (0.5, 1.5 * side, 0.0))
        assert_allclose(grid.getCoordinates((-1, 0, 0)), (-0.5, -1.5 * side, 0.0))
        assert_allclose(grid.getCoordinates((0, 1, 0)), (-0.5, 1.5 * side, 0.0))
        assert_allclose(grid.getCoordinates((1, -1, 0)), (1, 0.0, 0.0))

        unitSteps = grid.reduce()[0]
        iDirection = tuple(direction[0] for direction in unitSteps)
        jDirection = tuple(direction[1] for direction in unitSteps)
        for directionVector in (iDirection, jDirection):
            self.assertAlmostEqual(
                (sum(val**2 for val in directionVector)) ** 0.5,
                1.0,
                msg=f"Direction vector {directionVector} should have magnitude 1 for pitch 1.",
            )
        assert_allclose(grid.getCoordinates((1, 0, 0)), iDirection)
        assert_allclose(grid.getCoordinates((0, 1, 0)), jDirection)

    def test_getLocalCoordinatesHex(self):
        """Test getLocalCoordinates() is different for corners up vs flats up hex grids."""
        grid0 = grids.HexGrid.fromPitch(1.0, cornersUp=True)
        grid1 = grids.HexGrid.fromPitch(1.0, cornersUp=False)
        for i in range(3):
            for j in range(3):
                if i == 0 and j == 0:
                    continue
                coords0 = grid0[i, j, 0].getLocalCoordinates()
                coords1 = grid1[i, j, 0].getLocalCoordinates()
                self.assertNotEqual(coords0[0], coords1[0], msg=f"X @ ({i}, {j})")
                self.assertNotEqual(coords0[1], coords1[1], msg=f"Y @ ({i}, {j})")
                self.assertEqual(coords0[2], coords1[2], msg=f"Z @ ({i}, {j})")

    def test_getLocalCoordinatesCornersUp(self):
        """Test getLocalCoordinates() for corners up hex grids."""
        # validate the first ring of a corners-up hex grid
        grid = grids.HexGrid.fromPitch(1.0, cornersUp=True)
        vals = []
        for pos in range(grid.getPositionsInRing(2)):
            i, j = grid.getIndicesFromRingAndPos(2, pos + 1)
            vals.append(grid[i, j, 0].getLocalCoordinates())

        # short in Y
        maxY = max(v[1] for v in vals)
        minY = min(v[1] for v in vals)
        val = math.sqrt(3) / 2
        self.assertAlmostEqual(maxY, val, delta=0.0001)
        self.assertAlmostEqual(minY, -val, delta=0.0001)

        # long in X
        maxX = max(v[0] for v in vals)
        minX = min(v[0] for v in vals)
        self.assertAlmostEqual(maxX, 1)
        self.assertAlmostEqual(minX, -1)

    def test_getLocalCoordinatesFlatsUp(self):
        """Test getLocalCoordinates() for flats up hex grids."""
        # validate the first ring of a flats-up hex grid
        grid = grids.HexGrid.fromPitch(1.0, cornersUp=False)
        vals = []
        for pos in range(grid.getPositionsInRing(2)):
            i, j = grid.getIndicesFromRingAndPos(2, pos + 1)
            vals.append(grid[i, j, 0].getLocalCoordinates())

        # long in Y
        maxY = max(v[1] for v in vals)
        minY = min(v[1] for v in vals)
        self.assertAlmostEqual(maxY, 1)
        self.assertAlmostEqual(minY, -1)

        # short in X
        maxX = max(v[0] for v in vals)
        minX = min(v[0] for v in vals)
        val = math.sqrt(3) / 2
        self.assertAlmostEqual(maxX, val, delta=0.0001)
        self.assertAlmostEqual(minX, -val, delta=0.0001)

    def test_neighbors(self):
        """A hex cell has six neighbors."""
        grid = grids.HexGrid.fromPitch(1.0)
        neighbs = grid.getNeighboringCellIndices(0, 0, 0)
        self.assertEqual(len(neighbs), 6)
        self.assertIn((1, -1, 0), neighbs)

    def test_ringPosFromIndices(self):
        """Test conversion from<-->to ring/position based on hand-prepared right answers."""
        grid = grids.HexGrid.fromPitch(1.0)
        for indices, ringPos in [
            ((0, 0), (1, 1)),
            ((1, 0), (2, 1)),
            ((0, 1), (2, 2)),
            ((-1, 1), (2, 3)),
            ((-1, 0), (2, 4)),
            ((0, -1), (2, 5)),
            ((1, -1), (2, 6)),
            ((1, 1), (3, 2)),
            ((11, -7), (12, 60)),
            ((-1, -2), (4, 12)),
            ((-3, 1), (4, 9)),
            ((-2, 3), (4, 6)),
            ((1, 2), (4, 3)),
            ((2, -4), (5, 19)),
        ]:
            self.assertEqual(indices, grid.getIndicesFromRingAndPos(*ringPos))
            self.assertEqual(ringPos, grid.getRingPos(indices))

    def test_label(self):
        """Hex labels encode ring/position (and optionally the axial index)."""
        grid = grids.HexGrid.fromPitch(1.0)
        indices = grid.getIndicesFromRingAndPos(12, 5)
        label1 = grid.getLabel(indices)
        self.assertEqual(label1, "012-005")
        self.assertEqual(grids.locatorLabelToIndices(label1), (12, 5, None))

        label2 = grid.getLabel(indices + (5,))
        self.assertEqual(label2, "012-005-005")
        self.assertEqual(grids.locatorLabelToIndices(label2), (12, 5, 5))

    def test_overlapsWhichSymmetryLine(self):
        """Positions on the 0/60/120-degree lines are detected."""
        grid = grids.HexGrid.fromPitch(1.0)
        self.assertEqual(
            grid.overlapsWhichSymmetryLine(grid.getIndicesFromRingAndPos(5, 3)),
            grids.BOUNDARY_60_DEGREES,
        )
        self.assertEqual(
            grid.overlapsWhichSymmetryLine(grid.getIndicesFromRingAndPos(5, 23)),
            grids.BOUNDARY_0_DEGREES,
        )
        self.assertEqual(
            grid.overlapsWhichSymmetryLine(grid.getIndicesFromRingAndPos(3, 4)),
            grids.BOUNDARY_120_DEGREES,
        )

    def test_getSymmetricIdenticalsThird(self):
        """Retrieve equivalent contents based on third symmetry.

        .. test:: Equivalent contents in third geometry are retrievable.
            :id: T_ARMI_GRID_EQUIVALENTS
            :tests: R_ARMI_GRID_EQUIVALENTS
        """
        g = grids.HexGrid.fromPitch(1.0)
        g.symmetry = str(geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC))
        self.assertEqual(g.getSymmetricEquivalents((3, -2)), [(-1, 3), (-2, -1)])
        self.assertEqual(g.getSymmetricEquivalents((2, 1)), [(-3, 2), (1, -3)])

        symmetrics = g.getSymmetricEquivalents(g.getIndicesFromRingAndPos(5, 3))
        self.assertEqual([(5, 11), (5, 19)], [g.getRingPos(indices) for indices in symmetrics])

    def test_thirdAndFullSymmetry(self):
        """Test that we can construct a full and a 1/3 core grid.

        .. test:: Test 1/3 and full cores have the correct positions and rings.
            :id: T_ARMI_GRID_SYMMETRY
            :tests: R_ARMI_GRID_SYMMETRY
        """
        full = grids.HexGrid.fromPitch(1.0, symmetry="full core")
        third = grids.HexGrid.fromPitch(1.0, symmetry="third core periodic")

        # check full core
        self.assertEqual(full.getMinimumRings(2), 2)
        self.assertEqual(full.getIndicesFromRingAndPos(2, 2), (0, 1))
        self.assertEqual(full.getPositionsInRing(3), 12)
        self.assertEqual(full.getSymmetricEquivalents((3, -2)), [])

        # check 1/3 core
        self.assertEqual(third.getMinimumRings(2), 2)
        self.assertEqual(third.getIndicesFromRingAndPos(2, 2), (0, 1))
        self.assertEqual(third.getPositionsInRing(3), 12)
        self.assertEqual(third.getSymmetricEquivalents((3, -2)), [(-1, 3), (-2, -1)])

    def test_cornersUpFlatsUp(self):
        """Test the cornersUp attribute of the fromPitch method.

        .. test:: Build a points-up and a flats-up hexagonal grids.
            :id: T_ARMI_GRID_HEX_TYPE
            :tests: R_ARMI_GRID_HEX_TYPE
        """
        flatsUp = grids.HexGrid.fromPitch(1.0, cornersUp=False)
        self.assertAlmostEqual(flatsUp._unitSteps[0][0], math.sqrt(3) / 2)
        self.assertAlmostEqual(flatsUp.pitch, 1.0)

        cornersUp = grids.HexGrid.fromPitch(1.0, cornersUp=True)
        self.assertAlmostEqual(cornersUp._unitSteps[0][0], 0.5)
        self.assertAlmostEqual(cornersUp.pitch, 1.0)

    def test_triangleCoords(self):
        """Sub-cell triangle coordinates match hand-computed references."""
        g = grids.HexGrid.fromPitch(8.15)
        indices1 = g.getIndicesFromRingAndPos(5, 3) + (0,)
        indices2 = g.getIndicesFromRingAndPos(5, 23) + (0,)
        indices3 = g.getIndicesFromRingAndPos(3, 4) + (0,)

        cur = g.triangleCoords(indices1)
        ref = [
            (16.468_916_428_634_078, 25.808_333_333_333_337),
            (14.116_214_081_686_351, 27.166_666_666_666_67),
            (11.763_511_734_738_627, 25.808_333_333_333_337),
            (11.763_511_734_738_627, 23.091_666_666_666_67),
            (14.116_214_081_686_351, 21.733_333_333_333_334),
            (16.468_916_428_634_078, 23.091_666_666_666_67),
        ]
        assert_allclose(cur, ref)

        cur = grids.HexGrid.fromPitch(2.5).triangleCoords(indices2)
        ref = [
            (9.381_941_874_331_42, 0.416_666_666_666_666_7),
            (8.660_254_037_844_387, 0.833_333_333_333_333_4),
            (7.938_566_201_357_355_5, 0.416_666_666_666_666_7),
            (7.938_566_201_357_355_5, -0.416_666_666_666_666_7),
            (8.660_254_037_844_387, -0.833_333_333_333_333_4),
            (9.381_941_874_331_42, -0.416_666_666_666_666_7),
        ]
        assert_allclose(cur, ref)

        cur = grids.HexGrid.fromPitch(3.14).triangleCoords(indices3)
        ref = [
            (-1.812_879_845_255_425, 5.233_333_333_333_333),
            (-2.719_319_767_883_137_6, 5.756_666_666_666_667),
            (-3.625_759_690_510_850_2, 5.233_333_333_333_333),
            (-3.625_759_690_510_850_2, 4.186_666_666_666_666_5),
            (-2.719_319_767_883_137_6, 3.663_333_333_333_333),
            (-1.812_879_845_255_425, 4.186_666_666_666_666_5),
        ]
        assert_allclose(cur, ref)

    def test_getIndexBounds(self):
        """Index bounds of a hex grid reflect the requested number of rings."""
        numRings = 5
        g = grids.HexGrid.fromPitch(1.0, numRings=numRings)
        boundsIJK = g.getIndexBounds()
        self.assertEqual(boundsIJK, ((-numRings, numRings), (-numRings, numRings), (0, 1)))

    def test_getAllIndices(self):
        grid = grids.HexGrid.fromPitch(1.0, numRings=3)
        indices = grid.getAllIndices()
        self.assertIn((1, 2, 0), indices)

    def test_buildLocations(self):
        grid = grids.HexGrid.fromPitch(1.0, numRings=3)
        loc1 = grid[1, 2, 0]
        self.assertEqual(loc1.i, 1)
        self.assertEqual(loc1.j, 2)

    def test_is_pickleable(self):
        """Locations survive a pickle round-trip at every protocol."""
        grid = grids.HexGrid.fromPitch(1.0, numRings=3)
        loc = grid[1, 1, 0]
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            buf = BytesIO()
            pickle.dump(loc, buf, protocol=protocol)
            buf.seek(0)
            newLoc = pickle.load(buf)
            assert_allclose(loc.indices, newLoc.indices)

    def test_adjustPitchFlatsUp(self):
        """Adjust the pitch of a hexagonal lattice, for a flats up grid.

        .. test:: Construct a hexagonal lattice with three rings.
            :id: T_ARMI_GRID_HEX0
            :tests: R_ARMI_GRID_HEX

        .. test:: Return the grid coordinates of different locations.
            :id: T_ARMI_GRID_GLOBAL_POS0
            :tests: R_ARMI_GRID_GLOBAL_POS
        """
        # run this test for a grid with no offset, and then a few random offset values
        for offset in [0, 1, 1.123, 3.14]:
            # build a hex grid with pitch=1, 3 rings, and the above offset
            grid = grids.HexGrid(
                unitSteps=((1.5 / math.sqrt(3), 0.0, 0.0), (0.5, 1, 0.0), (0, 0, 0)),
                unitStepLimits=((-3, 3), (-3, 3), (0, 1)),
                offset=np.array([offset, offset, offset]),
            )

            # test number of rings before converting pitch
            self.assertEqual(grid._unitStepLimits[0][1], 3)

            # test that we CAN change the pitch, and it scales the grid (but not the offset)
            v1 = grid.getCoordinates((1, 0, 0))
            grid.changePitch(2.0)
            self.assertAlmostEqual(grid.pitch, 2.0)
            v2 = grid.getCoordinates((1, 0, 0))
            assert_allclose(2 * v1 - offset, v2)

            # basic sanity: test number of rings has not changed
            self.assertEqual(grid._unitStepLimits[0][1], 3)

            # basic sanity: check the offset exists and is correct
            for i in range(3):
                self.assertEqual(grid.offset[i], offset)

    def test_adjustPitchCornersUp(self):
        """Adjust the pitch of a hexagonal lattice, for a "corners up" grid.

        .. test:: Construct a hexagonal lattice with three rings.
            :id: T_ARMI_GRID_HEX1
            :tests: R_ARMI_GRID_HEX

        .. test:: Return the grid coordinates of different locations.
            :id: T_ARMI_GRID_GLOBAL_POS1
            :tests: R_ARMI_GRID_GLOBAL_POS
        """
        # run this test for a grid with no offset, and then a few random offset values
        for offset in [0, 1, 1.123, 3.14]:
            offsets = [offset, 0, 0]
            # build a hex grid with pitch=1, 3 rings, and the above offset
            grid = grids.HexGrid(
                unitSteps=(
                    (0.5, -0.5, 0),
                    (1.5 / math.sqrt(3), 1.5 / math.sqrt(3), 0),
                    (0, 0, 0),
                ),
                unitStepLimits=((-3, 3), (-3, 3), (0, 1)),
                offset=np.array(offsets),
            )

            # test number of rings before converting pitch
            self.assertEqual(grid._unitStepLimits[0][1], 3)

            # test that we CAN change the pitch, and it scales the grid (but not the offset)
            v1 = grid.getCoordinates((1, 0, 0))
            grid.changePitch(2.0)
            self.assertAlmostEqual(grid.pitch, 2.0, delta=1e-9)
            v2 = grid.getCoordinates((1, 0, 0))
            correction = np.array([0.5, math.sqrt(3) / 2, 0])
            assert_allclose(v1 + correction, v2)

            # basic sanity: test number of rings has not changed
            self.assertEqual(grid._unitStepLimits[0][1], 3)

            # basic sanity: check the offset exists and is correct
            for i, off in enumerate(offsets):
                self.assertEqual(grid.offset[i], off)

    def test_badIndices(self):
        """Step-defined dims accept any index; bounds-defined dims reject negatives."""
        grid = grids.HexGrid.fromPitch(1.0, numRings=3)

        # this is actually ok because step-defined grids are infinite
        self.assertEqual(grid.getCoordinates((-100, 2000, 5))[2], 0.0)

        grid = grids.AxialGrid.fromNCells(10)
        with self.assertRaises(IndexError):
            grid.getCoordinates((0, 5, -1))

    def test_isInFirstThird(self):
        """Determine if grid is in the first third.

        .. test:: Determine if grid is in the first third.
:id: T_ARMI_GRID_SYMMETRY_LOC :tests: R_ARMI_GRID_SYMMETRY_LOC """ grid = grids.HexGrid.fromPitch(1.0, numRings=10) self.assertTrue(grid.isInFirstThird(grid[0, 0, 0])) self.assertTrue(grid.isInFirstThird(grid[1, 0, 0])) self.assertTrue(grid.isInFirstThird(grid[3, -1, 0])) self.assertFalse(grid.isInFirstThird(grid[1, -1, 0])) self.assertFalse(grid.isInFirstThird(grid[-1, -1, 0])) self.assertFalse(grid.isInFirstThird(grid[3, -2, 0])) def test_indicesAndEdgeFromRingAndPos(self): i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(0, 0) self.assertEqual(i, 0) self.assertEqual(j, -1) self.assertEqual(edge, 1) i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(1, 1) self.assertEqual(i, 0) self.assertEqual(j, 0) self.assertEqual(edge, 0) i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 11) self.assertEqual(i, 2) self.assertEqual(j, -2) self.assertEqual(edge, 5) i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 9) self.assertEqual(i, 0) self.assertEqual(j, -2) self.assertEqual(edge, 4) i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 7) self.assertEqual(i, -2) self.assertEqual(j, 0) self.assertEqual(edge, 3) i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 5) self.assertEqual(i, -2) self.assertEqual(j, 2) self.assertEqual(edge, 2) i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 3) self.assertEqual(i, 0) self.assertEqual(j, 2) self.assertEqual(edge, 1) i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(7, 3) self.assertEqual(i, 4) self.assertEqual(j, 2) self.assertEqual(edge, 0) with self.assertRaises(ValueError): _ = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 13) with self.assertRaises(ValueError): _ = grids.HexGrid._indicesAndEdgeFromRingAndPos(1, 3) def test_rotatedIndices(self): """Test that a hex grid can produce a rotated cell location.""" g = grids.HexGrid.fromPitch(1.0, numRings=3) center: grids.IndexLocation = g[(0, 0, 0)] notRotated = self._rotateAndCheckAngle(g, center, 0) 
self.assertEqual(notRotated, center) # One rotation for a trivial check northEast: grids.IndexLocation = g[(1, 0, 0)] dueNorth: grids.IndexLocation = g[(0, 1, 0)] northWest: grids.IndexLocation = g[(-1, 1, 0)] actual = self._rotateAndCheckAngle(g, northEast, 1) self.assertEqual(actual, dueNorth) np.testing.assert_allclose(dueNorth.getLocalCoordinates(), [0.0, 1.0, 0.0]) actual = self._rotateAndCheckAngle(g, dueNorth, 1) self.assertEqual(actual, northWest) np.testing.assert_allclose(northWest.getLocalCoordinates(), [-hexagon.SQRT3 / 2, 0.5, 0]) # Two rotations from the "first" object in the first full ring actual = self._rotateAndCheckAngle(g, northEast, 2) self.assertEqual(actual, northWest) # Fuzzy rotation: if we rotate an location, and then rotate it back, we get the same location for _ in range(10): startI = randint(-10, 10) startJ = randint(-10, 10) start = g[(startI, startJ, 0)] rotations = randint(-10, 10) postRotate = self._rotateAndCheckAngle(g, start, rotations) if startI == 0 and startJ == 0: self.assertEqual(postRotate, start) continue if rotations % 6: self.assertNotEqual(postRotate, start, msg=rotations) else: self.assertEqual(postRotate, start, msg=rotations) reversed = self._rotateAndCheckAngle(g, postRotate, -rotations) self.assertEqual(reversed, start) def _rotateAndCheckAngle(self, g: grids.HexGrid, start: grids.IndexLocation, rotations: int) -> grids.IndexLocation: """Rotate a location and verify it lands where we expected.""" finish = g.rotateIndex(start, rotations) self._checkAngle(start, finish, rotations) return finish def _checkAngle(self, start: grids.IndexLocation, finish: grids.IndexLocation, rotations: int): """Compare two locations that should be some number of 60 degree CCW rotations apart.""" startXY = start.getLocalCoordinates()[:2] theta = math.pi / 3 * rotations rotationMatrix = np.array( [ [math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)], ] ) expected = rotationMatrix.dot(startXY) finishXY = 
    def test_inconsistentRotationGrids(self):
        """Test that only locations in consistent grids are rotatable."""
        base = grids.HexGrid.fromPitch(1, cornersUp=False)

        # a grid with a different pitch is not rotation-compatible
        larger = grids.HexGrid.fromPitch(base.pitch * 2, cornersUp=base.cornersUp)
        fromLarger = larger[1, 0, 0]
        with self.assertRaises(TypeError):
            base.rotateIndex(fromLarger)

        # neither is a grid with the opposite corners-up orientation
        differentOrientation = grids.HexGrid.fromPitch(base.pitch, cornersUp=not base.cornersUp)
        fromDiffOrientation = differentOrientation[0, 1, 0]
        with self.assertRaises(TypeError):
            base.rotateIndex(fromDiffOrientation)

        # nor a non-hex grid entirely
        axialGrid = grids.AxialGrid.fromNCells(5)
        fromAxial = axialGrid[2, 0, 0]
        with self.assertRaises(TypeError):
            base.rotateIndex(fromAxial)

    def test_rotatedIndexGridAssignment(self):
        """Test that the grid of the rotated index is identical through rotation."""
        base = grids.HexGrid.fromPitch(1)
        other = grids.HexGrid.fromPitch(base.pitch, cornersUp=base.cornersUp)
        for i, j in ((0, 0), (1, 1), (2, 1), (-1, 3)):
            loc = grids.IndexLocation(i, j, k=0, grid=other)
            postRotate = base.rotateIndex(loc, rotations=2)
            # the result must keep the location's original grid, not `base`
            self.assertIs(postRotate.grid, loc.grid)

    def test_rotatedIndexRoughEqualPitch(self):
        """Test indices can be rotated in close but not exactly equal grids."""
        base = grids.HexGrid.fromPitch(1.345)
        # pitches differ by a relative 1e-5; rotation should tolerate that
        other = grids.HexGrid.fromPitch(base.pitch * 1.00001)
        for i, j in ((0, 0), (1, 1), (2, 1), (-1, 3)):
            loc = grids.IndexLocation(i, j, k=0, grid=base)
            fromBase = base.rotateIndex(loc, rotations=2)
            fromOther = other.rotateIndex(loc, rotations=2)
            self.assertEqual((fromBase.i, fromBase.j), (fromOther.i, fromOther.j))


class TestBoundsDefinedGrid(unittest.TestCase):
    """Tests for structured grids defined by explicit mesh bounds."""

    def test_positions(self):
        grid = MockStructuredGrid(bounds=([0, 1, 2, 3, 4], [0, 10, 20, 50], [0, 20, 60, 90]))
        # coordinates are cell centers: midpoints of the bounding mesh
        assert_allclose(grid.getCoordinates((1, 1, 1)), (1.5, 15.0, 40.0))

    def test_base(self):
        grid = MockStructuredGrid(bounds=([0, 1, 2, 3, 4], [0, 10, 20, 50], [0, 20, 60, 90]))
        # the cell base is the lower bound of the cell, not its center
        assert_allclose(grid.getCellBase((1, 1, 1)), (1.0, 10.0, 20.0))

    def test_positionsMixedDefinition(self):
        # first two axes are step-defined, third is bounds-defined
        grid = MockStructuredGrid(unitSteps=((1.0, 0.0), (0.0, 1.0)), bounds=(None, None, [0, 20, 60, 90]))
        assert_allclose(grid.getCoordinates((1, 1, 1)), (1, 1, 40.0))

    def test_getIndexBounds(self):
        grid = MockStructuredGrid(bounds=([0, 1, 2, 3, 4], [0, 10, 20, 50], [0, 20, 60, 90]))
        boundsIJK = grid.getIndexBounds()
        # index bounds are one larger than the cell counts (fencepost)
        self.assertEqual(boundsIJK, ((0, 5), (0, 4), (0, 4)))


class TestThetaRZGrid(unittest.TestCase):
    """A set of tests for the RZTheta Grid."""

    def test_positions(self):
        grid = grids.ThetaRZGrid(bounds=(np.linspace(0, 2 * math.pi, 13), [0, 2, 2.5, 3], [0, 10, 20, 30]))
        # theta index 1 on a 12-sector mesh centers at 45 degrees; r centers at 1, z at 15
        assert_allclose(grid.getCoordinates((1, 0, 1)), (math.sqrt(2) / 2, math.sqrt(2) / 2, 15.0))

        # test round trip ring position
        ringPos = (1, 1)
        indices = grid.getIndicesFromRingAndPos(*ringPos)
        ringPosFromIndices = grid.getRingPos(indices)
        self.assertEqual(ringPos, ringPosFromIndices)


class TestCartesianGrid(unittest.TestCase):
    """A set of tests for the Cartesian Grid."""

    def test_ringPosNoSplit(self):
        grid = grids.CartesianGrid.fromRectangle(1.0, 1.0, isOffset=True)

        # expected ring numbers, laid out visually with +j upward
        expectedRing = [
            [3, 3, 3, 3, 3, 3],
            [3, 2, 2, 2, 2, 3],
            [3, 2, 1, 1, 2, 3],
            [3, 2, 1, 1, 2, 3],
            [3, 2, 2, 2, 2, 3],
            [3, 3, 3, 3, 3, 3],
        ]

        expectedPos = [
            [6, 5, 4, 3, 2, 1],
            [7, 4, 3, 2, 1, 20],
            [8, 5, 2, 1, 12, 19],
            [9, 6, 3, 4, 11, 18],
            [10, 7, 8, 9, 10, 17],
            [11, 12, 13, 14, 15, 16],
        ]
        # reverse so row index matches j counted upward from -3
        expectedPos.reverse()

        for j in range(-3, 3):
            for i in range(-3, 3):
                ring, pos = grid.getRingPos((i, j))
                self.assertEqual(ring, expectedRing[j + 3][i + 3])
                self.assertEqual(pos, expectedPos[j + 3][i + 3])

        # Bonus test of getMinimumRings() using the above grid
        self.assertEqual(grid.getMinimumRings(7), 2)
        self.assertEqual(grid.getMinimumRings(17), 3)
    def test_ringPosSplit(self):
        # no isOffset: the grid has a single cell straddling the origin
        grid = grids.CartesianGrid.fromRectangle(1.0, 1.0)

        expectedRing = [
            [4, 4, 4, 4, 4, 4, 4],
            [4, 3, 3, 3, 3, 3, 4],
            [4, 3, 2, 2, 2, 3, 4],
            [4, 3, 2, 1, 2, 3, 4],
            [4, 3, 2, 2, 2, 3, 4],
            [4, 3, 3, 3, 3, 3, 4],
            [4, 4, 4, 4, 4, 4, 4],
        ]

        expectedPos = [
            [7, 6, 5, 4, 3, 2, 1],
            [8, 5, 4, 3, 2, 1, 24],
            [9, 6, 3, 2, 1, 16, 23],
            [10, 7, 4, 1, 8, 15, 22],
            [11, 8, 5, 6, 7, 14, 21],
            [12, 9, 10, 11, 12, 13, 20],
            [13, 14, 15, 16, 17, 18, 19],
        ]
        # reverse so row index matches j counted upward from -3
        expectedPos.reverse()

        for j in range(-3, 4):
            for i in range(-3, 4):
                ring, pos = grid.getRingPos((i, j))
                self.assertEqual(ring, expectedRing[j + 3][i + 3])
                self.assertEqual(pos, expectedPos[j + 3][i + 3])

    def test_symmetry(self):
        """Check symmetric-equivalent lookup for the supported symmetry options."""
        # PERIODIC, no split
        grid = grids.CartesianGrid.fromRectangle(
            1.0,
            1.0,
            symmetry=str(geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.PERIODIC)),
        )

        expected = {
            (0, 0): {(-1, 0), (-1, -1), (0, -1)},
            (1, 0): {(-1, 1), (-2, -1), (0, -2)},
            (2, 1): {(-2, 2), (-3, -2), (1, -3)},
            (2, 2): {(-3, 2), (-3, -3), (2, -3)},
            (0, 1): {(-2, 0), (-1, -2), (1, -1)},
            (-2, 2): {(-3, -2), (1, -3), (2, 1)},
        }

        for idx, expectedEq in expected.items():
            equivalents = {i for i in grid.getSymmetricEquivalents(idx)}
            self.assertEqual(expectedEq, equivalents)

        # PERIODIC, split (through-center assembly has no equivalents)
        grid = grids.CartesianGrid.fromRectangle(
            1.0,
            1.0,
            symmetry=geometry.SymmetryType(
                geometry.DomainType.QUARTER_CORE,
                geometry.BoundaryType.PERIODIC,
                throughCenterAssembly=True,
            ),
        )

        expected = {
            (0, 0): set(),
            (1, 0): {(0, 1), (-1, 0), (0, -1)},
            (2, 2): {(-2, 2), (-2, -2), (2, -2)},
            (2, 1): {(-1, 2), (-2, -1), (1, -2)},
            (-1, 3): {(-3, -1), (1, -3), (3, 1)},
            (0, 2): {(-2, 0), (0, -2), (2, 0)},
        }

        for idx, expectedEq in expected.items():
            equivalents = {i for i in grid.getSymmetricEquivalents(idx)}
            self.assertEqual(expectedEq, equivalents)

        # REFLECTIVE, no split
        grid = grids.CartesianGrid.fromRectangle(
            1.0,
            1.0,
            symmetry=geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.REFLECTIVE),
        )

        expected = {
            (0, 0): {(-1, 0), (-1, -1), (0, -1)},
            (1, 0): {(-2, 0), (-2, -1), (1, -1)},
            (-2, 2): {(-2, -3), (1, -3), (1, 2)},
        }

        for idx, expectedEq in expected.items():
            equivalents = {i for i in grid.getSymmetricEquivalents(idx)}
            self.assertEqual(expectedEq, equivalents)

        # REFLECTIVE, split: locations on a symmetry axis have fewer equivalents
        grid = grids.CartesianGrid.fromRectangle(
            1.0,
            1.0,
            symmetry=geometry.SymmetryType(
                geometry.DomainType.QUARTER_CORE,
                geometry.BoundaryType.REFLECTIVE,
                throughCenterAssembly=True,
            ),
        )

        expected = {
            (0, 0): set(),
            (1, 0): {(-1, 0)},
            (-1, 2): {(-1, -2), (1, -2), (1, 2)},
            (-2, 0): {(2, 0)},
            (0, -2): {(0, 2)},
        }

        for idx, expectedEq in expected.items():
            equivalents = {i for i in grid.getSymmetricEquivalents(idx)}
            self.assertEqual(expectedEq, equivalents)

        # Full core: nothing is folded, so there are never any equivalents
        grid = grids.CartesianGrid.fromRectangle(
            1.0,
            1.0,
            symmetry=geometry.FULL_CORE,
        )
        self.assertEqual(grid.getSymmetricEquivalents((5, 6)), [])

        # 1/8 core not supported yet
        grid = grids.CartesianGrid.fromRectangle(
            1.0,
            1.0,
            symmetry=geometry.SymmetryType(
                geometry.DomainType.EIGHTH_CORE,
                geometry.BoundaryType.REFLECTIVE,
            ),
        )
        with self.assertRaises(NotImplementedError):
            grid.getSymmetricEquivalents((5, 6))


class TestAxialGrid(unittest.TestCase):
    """Tests for the 1-D axial grid."""

    def test_simpleBounds(self):
        N_CELLS = 5
        g = grids.AxialGrid.fromNCells(N_CELLS)
        _x, _y, z = g.getBounds()
        # N cells => N + 1 bounding planes, at unit spacing starting from zero
        self.assertEqual(len(z), N_CELLS + 1)
        assert_array_equal(z, [0, 1, 2, 3, 4, 5])
        self.assertTrue(g.isAxialOnly)

    def test_getLocations(self):
        N_CELLS = 10
        g = grids.AxialGrid.fromNCells(N_CELLS)
        for count in range(N_CELLS):
            index = g[(0, 0, count)]
            x, y, z = index.getLocalCoordinates()
            # cell centers sit halfway between unit-spaced bounds
            self.assertEqual(x, 0.0)
            self.assertEqual(y, 0.0)
            self.assertEqual(z, count + 0.5)
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import NoReturn

import numpy as np

from armi.reactor.grids.locations import IJKType, IJType
from armi.reactor.grids.structuredGrid import StructuredGrid

# A full circle in radians; every theta in the mesh must fall inside [0, TAU].
TAU = math.tau


class ThetaRZGrid(StructuredGrid):
    """
    A grid characterized by azimuthal, radial, and zeta indices.

    The angular meshes are limited to 0 to 2pi radians. R and Zeta are as in
    other meshes.

    See Figure 2.2 in Derstine 1984, ANL. [DIF3D]_.
    """

    def getSymmetricEquivalents(self, indices: IJType) -> NoReturn:
        raise NotImplementedError(f"{self.__class__.__name__} does not support symmetric equivalents")

    def getRingPos(self, indices):
        """Convert 0-based (theta, r) mesh indices into 1-based (ring, position)."""
        thetaIndex = indices[0]
        radialIndex = indices[1]
        return (radialIndex + 1, thetaIndex + 1)

    @staticmethod
    def getIndicesFromRingAndPos(ring: int, pos: int) -> IJType:
        """Inverse of :meth:`getRingPos`: 1-based (ring, pos) to 0-based (theta, r)."""
        return (pos - 1, ring - 1)

    def getCoordinates(self, indices, nativeCoords=False) -> np.ndarray:
        """
        Return coordinates for the given indices.

        When ``nativeCoords`` is true, the native cylindrical (theta, r, z) values are
        returned directly; otherwise they are projected onto Cartesian (x, y, z).
        """
        meshCoords = super().getCoordinates(indices, nativeCoords=nativeCoords)
        theta, r, z = meshCoords
        if not 0 <= theta <= TAU:
            raise ValueError("Invalid theta value: {}. Check mesh.".format(theta))

        if nativeCoords:
            # return Theta, R, Z values directly.
            return meshCoords

        # return x, y, z from the cylindrical point
        x = r * math.cos(theta)
        y = r * math.sin(theta)
        return np.array((x, y, z))

    def indicesOfBounds(
        self,
        rad0: float,
        rad1: float,
        theta0: float,
        theta1: float,
        sigma: float = 1e-4,
    ) -> IJKType:
        """
        Return indices corresponding to upper and lower radial and theta bounds.

        Parameters
        ----------
        rad0 : float
            inner radius of control volume
        rad1 : float
            outer radius of control volume
        theta0 : float
            inner azimuthal location of control volume in radians
        theta1 : float
            inner azimuthal of control volume in radians
        sigma: float
            acceptable relative error (i.e. if one of the positions in the mesh are
            within this error it'll act the same if it matches a position in the mesh)

        Returns
        -------
        tuple : i, j, k of given bounds
        """
        # NOTE(review): only theta0/rad0 participate; rad1, theta1, and sigma are
        # currently unused -- this is a nearest-mesh-point lookup only.
        thetaIndex = int(np.abs(self._bounds[0] - theta0).argmin())
        radialIndex = int(np.abs(self._bounds[1] - rad0).argmin())
        return (thetaIndex, radialIndex, 0)

    @staticmethod
    def locatorInDomain(*args, **kwargs) -> bool:
        """
        ThetaRZGrids do not check for bounds, though they could if that becomes a
        problem.
        """
        return True

    @staticmethod
    def getMinimumRings(n: int) -> NoReturn:
        raise NotImplementedError

    @staticmethod
    def getPositionsInRing(ring: int) -> NoReturn:
        raise NotImplementedError

    @staticmethod
    def overlapsWhichSymmetryLine(indices: IJType) -> None:
        return None

    @staticmethod
    def pitch() -> NoReturn:
        raise NotImplementedError()
list-table:: Example Parameters :widths: 50 50 :header-rows: 1 * - Object - Parameters * - :py:class:`~armi.reactor.reactors.Reactor` - :py:mod:`Reactor Parameters <armi.reactor.reactorParameters>` * - :py:class:`~armi.reactor.assemblies.Assembly` - :py:mod:`Assembly Parameters <armi.reactor.assemblyParameters>` * - :py:class:`~armi.reactor.blocks.Block` - :py:mod:`Block Parameters <armi.reactor.blockParameters>` * - :py:class:`~armi.reactor.components.Component` - :py:mod:`Component Parameters <armi.reactor.components.componentParameters>` Basic Usage =========== Given an ARMI reactor model object such as ``r``, one may set or get a parameter just like any other instance attribute on ``r.p``:: >>> r.p.cycleLength 350.0 Alternatively, dictionary-like access is supported:: >>> r.p["cycleLength"] 350.0 .. note:: The data themselves are stored in special hidden fields, which are typically accessed through the ``Parameter`` definition that describes them. The name for such a parameter field looks like ``"_p_" + paramName``. For example, to get ``cycleLength`` one could do:: >>> r.core.p._p_cycleLength 350.0 However, it is not recommended to access parameters in this way, as it circumvents the setters and getters that may have been implemented for a given parameter. One should always use the style from the first two examples to access parameter values. Furthermore, ``ParameterCollection`` classes have some extra controls to make sure that someone doesn't try to set random extra attributes on them. Only parameters that were defined before a particular ``ParameterCollection`` class is instantiated may be accessed.The rationale behind this is documented in the Design Considerations section below. Most parameters in ARMI are block parameters. These include flux, power, temperatures, number densities, etc. Parameters can be any basic type (float, int, str), or an array of any such types. The type within a given array should be homogeneous. 
Examples:: >>> b.p.flux = 2.5e13 >>> b.p.fuelTemp = numpy.array(range(217), dtype=float) >>> b.p.fuelTemp[58] = 600 The parameter attributes can be access via the ``paramDefs`` property. Perhaps a user is curious about the units of a block parameter: >>> defs = b.p.paramDefs >>> defs["heightBOL"] <ParamDef name:heightBOL collectionType:BlockParameterCollection units:cm assigned:29> # Or, more simply: >>> defs["heightBOL"].units 'cm' .. note:: There have been many discussions on what the specific name of this module/system should be. After great deliberation, the definition of parameter seemed very suitable: One of a set of measurable factors, such as temperature and pressure, that define a system and determine its behavior and are varied in an experiment ~ `thefreedictionary`_ any of a set of physical properties whose values determine the characteristics or behavior of something <parameters of the atmosphere such as temperature, pressure, and density> ~ `Meriam-Webster`_ The parameters system is composed of several classes: :py:class:`~armi.reactor.parameters.parameterDefinitions.Parameter` : These store metadata about each parameter including the name, description, its units, etc. :py:class:`Parameters <parameterDefinitions.Parameter>` also define some behaviors such as setters/getters, and what to do when retrieving a value that has not been set, and whether or not to store the parameter in the database. The :py:class:`parameterDefinitions.Parameter` object implement the Python descriptor protocol (the magic behind ``@property``), and are stored on corresponding :py:class:`parameterCollections.ParameterCollection` classes to access their underlying values. :py:class:`~armi.reactor.parameters.parameterDefinitions.ParameterDefinitionCollection` : As the name suggests, these represent a collection of parameter definitions. 
Each :py:class:`ParameterCollection` gets a :py:class:`ParameterDefinitionCollection`, and there are also module-global collections, such as ``ALL_DEFINITIONS`` (containing all defined parameters over all ``ArmiObject`` classes), and others which break parameters down by their categories, associated composite types, etc. :py:class:`~armi.reactor.parameters.parameterDefinitions.ParameterBuilder` : These are used to aid in the creation of :py:class:`Parameter` instances, and store default arguments to the :py:class:`Parameter` constructor. :py:class:`~armi.reactor.parameters.parameterCollections.ParameterCollection` : These are used to store parameter values for a specific instance of an item in the ARMI composite structure, and have features for accessing those parameters and their definitions. The actual parameter values are stored in secret `"_p_"+paramName` fields, and accessed through the Parameter definition, which functions as a descriptor. Parameter definitions are stored as class attributes so that they can be shared amongst instances. All parameter fields are filled with an initial value in their ``__init__()`` to benefit from the split-key dictionaries introduced in PEP-412. This and protections to prevent setting any other attributes form a sort of "``__slots__`` lite". :py:class:`~armi.reactor.parameters.resolveCollections.ResolveParametersMeta` : This metaclass is used by the base ``ArmiObject`` class to aid in the creation of a hierarchy of ``ParameterCollection`` classes that appropriately represent a specific ``ArmiObject`` subclass's parameters. In short, it looks at the class attributes of an ``ArmiObject`` subclass to see if there is a ``pDefs`` attribute (which should be an instance of ``ParameterDefinitionCollection``). 
If the ``pDefs`` attribute exists, the class will get its own ``ParameterCollection`` class, which will itself be a subclass of the parameter collection class associated with the most immediate ancestor that also had its own ``pDefs``. If an ``ArmiObject`` subclass has not ``pDefs`` attribute of its own, it will simply be associated with the parameter collection class of its parent. This rather roundabout approach is used to address many of the design considerations laid out below. Namely that pains be taken to minimize memory consumption, properties be used to control data access, and that it be relatively difficult to introduce programming errors related to improperly-defined or colliding parameters. Design Considerations ===================== .. list-table:: Design considerations :header-rows: 1 * - Issue - Resolution/Consequences * - Metadata about parameters is necessary for determining whether a parameter should be stored in the database, and to allow the user to toggle this switch. - Parameters must uniquely named within a ``Composite`` subclass. Also, we need to have :py:class:`Parameter` classes to store this metadata. * - There should not be any naming restrictions between different ``Composite`` subclasses. - Parameters must be defined or associated with a specific ``ParameterCollection`` subclass. * - PyLint cannot find programming errors related to incorrect strings. - We would like to use methods/functions for controlling state information. This also eliminated the possibility of using resource files to define the properties, otherwise we would be mapping names between some resource file and the associated parameter/property definition. * - Creating getters and setters for every parameter would be overwhelming and unsustainable. - We will use Python descriptors, which have *most* of the functionality used in getters and setters. 
:py:class:`ParameterCollection` knows how to generate descriptors for itself, based on a :py:class:`ParameterDefinitionCollection`. * - The majority of memory consumption occurs in parameters, strings and dictionaries. Minimizing the storage requirements of the parameters is desirable. - Python ``__slots__`` are a language feature which eliminates the need for each class instance to have a ``__dict__``. This saves memory when there are many instances of a class. Slot access can sometimes be faster as well. In the past, ``__slots__`` were used to store parameter values. This became rather onerous when we wanted to support parameter definitions from plugins. We now use the traditional ``__dict__``, but take pains to make sure that we can get the memory savings from the key-sharing dicts provided by PEP-412. Namely, all attributes from the parameter definitions and other state are initialized to __something__ within the ``__init__()`` routine. * - Parameters are just fancy properties with meta data. - Implementing the descriptor interface on a :py:class:`Parameter` removes the need to construct a :py:class:`Parameter` without a name, then come back through with the ``applyParameters()`` method to apply the :py:class:`Parameter` as a descriptor. .. _thefreedictionary: http://www.thefreedictionary.com/parameter .. 
_Meriam-Webster: http://www.merriam-webster.com/dictionary/parameter
"""

# ruff: noqa: F401
from armi.reactor.parameters.exceptions import (
    ParameterDefinitionError,
    ParameterError,
    UnknownParameterError,
)
from armi.reactor.parameters.parameterCollections import (
    ParameterCollection,
    applyAllParameters,
    collectPluginParameters,
)
from armi.reactor.parameters.parameterDefinitions import (
    ALL_DEFINITIONS,
    NEVER,
    SINCE_ANYTHING,
    SINCE_BACKUP,
    SINCE_INITIALIZATION,
    SINCE_LAST_DISTRIBUTE_STATE,
    SINCE_LAST_GEOMETRY_TRANSFORMATION,
    Category,
    NoDefault,
    Parameter,
    ParameterDefinitionCollection,
    ParamLocation,
    Serializer,
)

# Package-level convenience aliases for the most common queries against the
# global parameter definition collection.
forType = ALL_DEFINITIONS.forType
inCategory = ALL_DEFINITIONS.inCategory
byNameAndType = ALL_DEFINITIONS.byNameAndType
resetAssignmentFlag = ALL_DEFINITIONS.resetAssignmentFlag
since = ALL_DEFINITIONS.since


def reset():
    """Reset the status of all parameter definitions.

    This may become necessary when the state of the global parameter definitions
    becomes invalid. Typically this happens when running multiple cases for the same
    import of this module, e.g. in unit tests. In this case things like the assigned
    flags will persist across test cases, leading to strange and incorrect behavior.
    """
    # Return every definition to its never-assigned state.
    for pd in ALL_DEFINITIONS:
        pd.assigned = NEVER
class ParameterDefinitionError(Exception):
    """Exception raised due to a programming error.

    Programming errors include:

    * Attempting to create two parameters with the same name.
    * Attempting to create a parameter outside of a :py:class:`ParameterFactory` ``with`` statement.
    """

    def __init__(self, message):
        # Prefix every message so the reader immediately knows this indicates a
        # developer mistake rather than bad user input.
        preamble = "This is a programming error, and needs to be addressed by the developer encountering it:\n"
        super().__init__(preamble + message)


class ParameterError(Exception):
    """Exception raised due to a usage error.

    Usage errors include:

    * Attempting to get the value of a parameter that has not been defined a value, and has no default.
    * Attempting to set the value of a parameter that cannot be set through ``setParam``.
    """


class UnknownParameterError(ParameterError):
    """Exception raised due to a usage error.

    Usage errors include:

    * Attempting to set the value of a parameter that has no definition and no rename
    """
import copy
import pickle
import sys
from typing import Any, Callable, Iterator, List, Optional, Set

import numpy as np

from armi import runLog
from armi.reactor.parameters import exceptions, parameterDefinitions
from armi.reactor.parameters.parameterDefinitions import (
    NEVER,
    SINCE_ANYTHING,
    SINCE_BACKUP,
    SINCE_LAST_DISTRIBUTE_STATE,
)
from armi.utils import units

GLOBAL_SERIAL_NUM = -1
"""
The serial number for all ParameterCollections

This is a counter of the number of instances of all types. They are useful for tracking
items through the history of a database.

Warning
-------
This is not MPI safe. We also have not done anything to make it thread safe, except
that the GIL exists.
"""


def _getBaseParameterDefinitions():
    """Build the parameter definitions shared by every ParameterCollection (serialNum)."""
    pDefs = parameterDefinitions.ParameterDefinitionCollection()
    pDefs.add(
        parameterDefinitions.Parameter(
            "serialNum",
            units=units.UNITLESS,
            description=(
                "Unique serial integer for all objects in the ARMI Composite Tree. "
                "The numbers are only unique for a simulation, on an MPI rank."
            ),
            location=None,
            saveToDB=True,
            default=parameterDefinitions.NoDefault,
            setter=parameterDefinitions.NoDefault,
            categories=set(),
        )
    )

    return pDefs


class _ParameterCollectionType(type):
    """
    Simple metaclass to make sure that expected class attributes are present.

    These attributes shouldn't be shared among different subclasses, so this makes sure
    that each subclass gets its own.
    """

    def __new__(mcl, name, bases, attrs):
        # Each new subclass gets its own pDefs/_ArmiObject/_allFields rather than
        # inheriting (and therefore sharing) its parent's.
        attrs["pDefs"] = attrs.get("pDefs") or None
        attrs["_ArmiObject"] = None
        attrs["_allFields"] = []

        return type.__new__(mcl, name, bases, attrs)


class ParameterCollection(metaclass=_ParameterCollectionType):
    """An empty class for holding state information in the ARMI data structure.

    A parameter collection stores one or more formally-defined values ("parameters").

    Until a given ParameterCollection subclass has been instantiated, new parameters
    may be added to its parameter definitions (e.g., from plugins). Upon first
    instantiation, ``applyParameters()`` will be called, binding the parameter
    definitions to the Collection class as descriptors.

    It is illegal to redefine a parameter with the same name in the same class, or its
    subclasses, and attempting to do so should result in exceptions in
    ``applyParameters()``.

    Attributes
    ----------
    _backup : str
        A pickle dump of the __getstate__, or None.

    _hist : dict
        Keys are ``(paramName, timeStep)``.

    assigned : int
        Flag indicates the synchronization state of the parameter collection. This is
        used to reduce the amount of information that is transmitted during database,
        and MPI operations as well as determine the collection's state when exiting a
        ``Composite.retainState``.

        This attribute when used with the ``Parameter.assigned`` attribute allows us
        to efficiently perform many operations.

    See Also
    --------
    armi.reactors.parameters
    """

    pDefs: parameterDefinitions.ParameterDefinitionCollection = _getBaseParameterDefinitions()
    _allFields: List[str] = []

    _ArmiObject = None
    """The ArmiObject class that this ParameterCollection belongs to.

    Crucially **not** the instance that owns this collection. For any ``ArmiObject``,
    the following are true::

        >>> self.p._ArmiObject is not self
        >>> isinstance(self, self.p._ArmiObject)
    """

    # A set of all instance attributes that are settable on an instance. This prevents inadvertent
    # setting of values that aren't proper parameters. Named _slots, as it is used to emulate some
    # of the behaviors of __slots__.
    _slots: Set[str] = set()

    def __init__(self, _state: Optional[List[Any]] = None):
        """
        Create a new ParameterCollection instance.

        Parameters
        ----------
        _state:
            Optional list of parameter values, ordered by _allFields. Passed values
            should come from a call to __getstate__(). This should only be used
            internally to this model.
        """
        # add a hook to make this readOnly
        self._slots.add("readOnly")
        self.readOnly = False

        if self.pDefs is None or not self.pDefs.locked:
            type(self).applyParameters()
        assert self.pDefs.locked, (
            "It looks like parameter definitions haven't been "
            "set up yet for {}; be sure that applyAllParameters() is being called "
            "somewhere.".format(type(self))
        )

        self._backup = None
        # used by the history tracker when a parameter key is a tuple (name, timestep)
        self._hist = {}

        # Initialize all parameter values to **something**. This is crucial to getting
        # the split-key dictionary memory savings in lieu of using __slots__!
        if _state is None:
            for pDef in self.paramDefs:
                setattr(self, pDef.fieldName, pDef.default)
        else:
            for key, val in zip(self._allFields, _state):
                self.__dict__[key] = val

        self.assigned = NEVER
        global GLOBAL_SERIAL_NUM
        self.serialNum = GLOBAL_SERIAL_NUM = GLOBAL_SERIAL_NUM + 1
        if self.serialNum > sys.maxsize:
            runLog.warning("Created serial number larger than an integer. Current serial: {}".format(GLOBAL_SERIAL_NUM))

    @classmethod
    def applyParameters(cls):
        """
        Apply the definitions from a ParameterDefinitionCollection as properties.

        This places the parameter definitions in the associated
        ParameterDefinitionCollection onto this ParameterCollection class as class
        attributes. In the process it recursively calls the same method on base
        classes, and adds their parameter definitions as well. Since each instance of
        Parameter implements the descriptor protocol, these are effectively behaving
        as ``@property``-style accessors.

        This function must act on each ParameterCollection subclass before the first
        instance is created. Subsequent calls will short-circuit.

        Before calling this method, it is possible to add more Parameters to the
        associated ParameterDefinitionCollection, ``cls.pDefs``. After calling this
        method, the ParameterDefinitionCollection will be locked, preventing any
        further additions.

        This method is called in the ``__init__()`` method, but can also be called
        proactively to compile the parameter definitions earlier, if desired.

        See Also
        --------
        armi.reactor.parameters.parameterDefinitions.ParameterDefinitionCollection
        """
        if bool(cls._allFields):
            # Short-circuit if this has already been done
            return

        # Ensure that we have at least something to start with
        cls.pDefs = cls.pDefs or parameterDefinitions.ParameterDefinitionCollection()

        # Collect definitions from base ParameterCollection classes. E.g.,
        # HelixParameterCollection also gets parameter definitions from
        # ComponentParameterCollection.
        if not cls.pDefs.locked:
            basePDefs = parameterDefinitions.ParameterDefinitionCollection()
            for base in [b for b in cls.__bases__ if issubclass(b, ParameterCollection)]:
                base.applyParameters()
                if base.pDefs is not None:
                    basePDefs.extend(base.pDefs)

            # Check for duplicate parameter definitions
            seen = set()
            duplicates = set()
            for name in cls.pDefs.names:
                if name in seen:
                    duplicates.add(name)
                seen.add(name)
            if duplicates:
                raise exceptions.ParameterDefinitionError(
                    "The following parameters were multiply-defined:\n {}".format(duplicates)
                )

            overriddenParameters = set(cls.pDefs.names).intersection(set(basePDefs.names))
            if overriddenParameters:
                raise exceptions.ParameterDefinitionError(
                    "The following parameters "
                    "have been redefined in a subclass: {}\n"
                    "current type: {}\n"
                    "bases: {}".format(overriddenParameters, cls, cls.__bases__)
                )

            # Bind the parameter definitions as descriptors to the collection
            for pd in cls.pDefs:
                pd.collectionType = cls
                setattr(cls, pd.name, pd)
                parameterDefinitions.ALL_DEFINITIONS.add(pd)

            cls.pDefs.extend(basePDefs)

            # prevent the addition of new parameter definitions. This will lead to
            # errors early, rather than mysterious attribute access errors later.
            cls.pDefs.lock()

        cls._allFields = list(sorted(["_backup", "_hist", "assigned"] + [pd.fieldName for pd in cls.pDefs]))
        cls._slots = set(cls._allFields).union({pd.name for pd in cls.pDefs})
        cls._slots.add("readOnly")

    def __repr__(self):
        return "<{} assigned:{}>".format(self.__class__.__name__, self.assigned)

    def __setattr__(self, key, value):
        assert key in self._slots, "Trying to set undefined attribute `{}` on a ParameterCollection!".format(key)
        if getattr(self, "readOnly", False):
            if key == "readOnly":
                raise RuntimeError("A read-only Parameter Collection cannot be made writeable.")
            else:
                raise RuntimeError(f"Cannot set a read-only parameter {key}.")
        object.__setattr__(self, key, value)

    def __deepcopy__(self, memo):
        """
        Returns a new instance of ParameterCollection with a new ``serialNum``.

        Notes
        -----
        This operates under the assumption that ``__deepcopy__`` is used when needing
        a new instance, which should get its own serial number. This follows from the
        assumption that parameter collections are typically copied when copying an
        ArmiObject to which it may belong. In this case, serialNum needs to be
        incremented so that the objects are unique. serialNum is special.
        """
        # Grabbing state first and passing it into __init__() as a performance
        # optimization. This avoids the extra work in __init__() of defaulting all of
        # the parameters, only to set them in __setstate__(). Instead we pass them in,
        # so that __init__() can set them.
        state = copy.deepcopy(self.__getstate__(), memo)
        memo[id(self)] = newPC = self.__class__(_state=state)
        return newPC

    def __reduce__(self):
        """
        Implement pickle __reduce__ protocol.

        We need to do this because most subclasses of ParameterCollection are created
        from a metaclass, and are therefore not top-level objects and not trivially
        picklable. This implementation works by asking the ArmiObject itself to give
        an instance of its associated ParameterCollection class, then setting its
        state.
        """
        assert type(self)._ArmiObject is not None, (
            "Cannot reduce {}, since it does not have an associated ArmiObject, and is "
            "therefore not tied to the world of the living.".format(type(self))
        )
        return type(self)._ArmiObject.getParameterCollection, (), self.__getstate__()

    def __getstate__(self):
        # reduce data to one giant list, ordered by _allFields (sorted). Use NoDefault
        # when a value is missing
        data = [getattr(self, fieldName, parameterDefinitions.NoDefault) for fieldName in self._allFields]
        return data

    def __setstate__(self, state):
        # does the reverse of __getstate__
        for key, val in zip(self._allFields, state):
            setattr(self, key, val)

    def __getitem__(self, name):
        try:
            return getattr(self, name)
        except TypeError:
            # allows for history parameter tuples (name, timeStep)
            return self._hist[name]
        except AttributeError:
            raise exceptions.UnknownParameterError("Parameter {} is not defined for {}".format(name, type(self)))

    def __setitem__(self, name, value):
        try:
            setattr(self, name, value)
        except TypeError:
            # allows for history parameter tuples (name, timeStep)
            if isinstance(name, tuple):
                self._hist[name] = value
            else:
                raise
        except AttributeError:
            # for clarity
            raise exceptions.UnknownParameterError(
                "Cannot locate definition for parameter {} in {}".format(name, type(self))
            )

    def __delitem__(self, name):
        if isinstance(name, str):
            pd = self.paramDefs[name]
            if hasattr(self, pd.fieldName):
                # deleting a value counts as an assignment for sync purposes
                pd.assigned = SINCE_ANYTHING
                delattr(self, pd.fieldName)
        else:
            del self._hist[name]

    def __contains__(self, name):
        if isinstance(name, str):
            # parameter values are stored in hidden "_p_"-prefixed fields
            return hasattr(self, "_p_" + name)
        else:
            return name in self._hist

    def __eq__(self, other: "ParameterCollection"):
        """Two collections are equal when they hold the same parameter values.

        A parameter that is set on one collection but unset on the other makes the
        collections unequal; parameters unset on both are ignored.
        """
        if not isinstance(other, self.__class__):
            return False

        for pd in self.paramDefs:
            fieldName = pd.fieldName
            haveValue = (hasattr(self, fieldName), hasattr(other, fieldName))
            if all(haveValue):
                # BUGFIX: compare self's value against other's value; this previously
                # compared self to itself, so differing values never made collections
                # unequal.
                if getattr(self, fieldName) != getattr(other, fieldName):
                    return False
            elif any(haveValue):
                return False

        return True
assigned parameters define on this collection.""" return ( pd.name for pd in self.paramDefs if pd.assigned != NEVER and getattr(self, pd.fieldName) is not parameterDefinitions.NoDefault ) def items(self): keys = list(iter(self)) return zip(keys, (getattr(self, key) for key in keys)) def get(self, key, default=None): """Return a requested parameter value, if possible. This functions similarly to the same method on a dict or similar. If there is a value present for the requested parameter on this parameter collection, return it. Otherwise, return the supplied default. The main reason for using this is for safely attempting to access a parameter that doesn't have a default value, and may not have been set. Other methods for accessing parameters would raise an exception. """ try: return self[key] except exceptions.ParameterError: return default def keys(self): return list(iter(self)) + list(self._hist.keys()) def values(self): paramVals = list(getattr(self, pd.fieldName) for pd in self.paramDefs if hasattr(self, pd.fieldName)) return paramVals + list(self._hist.values()) def update(self, someDict): for k, val in someDict.items(): self[k] = val @property def paramDefs(self) -> parameterDefinitions.ParameterDefinitionCollection: r""" Get the :py:class:`ParameterDefinitionCollection` associated with this instance. This serves as both an alias for the pDefs class attribute, and as a read-only accessor for them. Most non-parameter-system related interactions with an object's ``ParameterCollection`` should go through this. In the future, it probably makes sense to make the ``pDefs`` that the ``applyDefinitions`` and ``ResolveParametersMeta`` things are sensitive to more hidden from outside the parameter system. """ return type(self).pDefs def getSyncData(self): """ Get all changed parameters SINCE_LAST_DISTRIBUTE_STATE (or ``syncMpiState``). 
If this ParmaterCollection (proxy for a ``Composite``) has been modified ``SINCE_LAST_DISTRIBUTE_STATE``, this will return a dictionary of parameter name keys and values, otherwise ``None``. """ if self.assigned & SINCE_LAST_DISTRIBUTE_STATE: syncData = { paramDef.name: getattr(self, paramDef.fieldName) for paramDef in self.paramDefs if paramDef.assigned & SINCE_LAST_DISTRIBUTE_STATE and paramDef.name in self } return syncData return None def backUp(self): """Back up the state in a Pickle.""" try: self._backup = pickle.dumps(self.__getstate__()) # this reads as assigned & everything_but(SINCE_BACKUP) self.assigned &= ~SINCE_BACKUP except: runLog.error("Attempted to pickle {}.".format(self)) raise def restoreBackup(self, paramsToApply): """Restore the backed up the state in a from a pickle. Parameters ---------- paramsToApply : list of ParmeterDefinitions restores the state of all parameters not in `paramsToApply` """ currentData = dict() if self.assigned & SINCE_BACKUP: compParams = (pd for pd in paramsToApply.intersection(set(self.paramDefs))) currentData = {pd: getattr(self, pd.fieldName) for pd in compParams if hasattr(self, pd.fieldName)} self.__setstate__(pickle.loads(self._backup)) for pd, currentValue in currentData.items(): # correct for global paramDef.assigned assumption retainedValue = getattr(self, pd.fieldName) if isinstance(retainedValue, np.ndarray) or isinstance(currentValue, np.ndarray): if (retainedValue != currentValue).any(): setattr(self, pd.fieldName, currentValue) pd.assigned = SINCE_ANYTHING self.assigned = SINCE_ANYTHING elif retainedValue != currentValue: setattr(self, pd.fieldName, currentValue) pd.assigned = SINCE_ANYTHING self.assigned = SINCE_ANYTHING def where(self, f: Callable[[parameterDefinitions.Parameter], bool]) -> Iterator[parameterDefinitions.Parameter]: """Produce an iterator over parameters that meet some criteria. 
Parameters ---------- f : callable function f(parameter) -> bool Function to check if a parameter should be fetched during the iteration. Returns ------- iterator of :class:`armi.reactor.parameters.Parameter` Iterator, **not** list or tuple, that produces each parameter that meets ``f(parameter) == True``. Examples -------- >>> block = r.core[0][0] >>> pdef = block.p.paramDefs >>> for param in pdef.where(lambda pd: pd.atLocation(ParamLocation.EDGES)): ... print(param.name, block.p[param.name]) """ return filter(f, self.paramDefs) def collectPluginParameters(pm): """Apply parameters from plugins to their respective object classes.""" for pluginParamDefnCollections in pm.hook.defineParameters(): for klass, pDefs in pluginParamDefnCollections.items(): klass.pDefs.extend(pDefs) def applyAllParameters(klass=None): klass = klass or ParameterCollection klass.applyParameters() for derived in klass.__subclasses__(): applyAllParameters(derived) ================================================ FILE: armi/reactor/parameters/parameterDefinitions.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" This module contains the code necessary to represent parameter definitions. ``ParameterDefinition``\ s are the metadata that describe specific parameters, and aid in enforcing certain rules upon the parameters themselves and the parameter collections that contain them. 
This module also describes the ``ParameterDefinitionCollection`` class, which serves as
a specialized container to manage related parameter definitions.

See Also
--------
armi.reactor.parameters
"""

import enum
import functools
import re
from typing import Any, Dict, Optional, Sequence, Tuple, Type

import numpy as np

from armi.reactor.flags import Flags
from armi.reactor.parameters.exceptions import ParameterDefinitionError, ParameterError

# bitwise masks for high-speed operations on the `assigned` attribute
# see: https://web.archive.org/web/20120225043338/http://www.vipan.com/htdocs/bitwisehelp.html
# Note that the various operations are responsible for clearing the flags on the events.
# These should be interpreted as:
#     The Parameter or ParameterCollection has been modified SINCE_<time-description>
# In order for that to happen, the flags need to be cleared when the <time-description>
# begins.
# NOTE(review): bit value 2 is unused here — presumably a retired flag kept out of the
# mask values for backwards compatibility; confirm before reusing it.
SINCE_INITIALIZATION = 1
SINCE_LAST_DISTRIBUTE_STATE = 4
SINCE_LAST_GEOMETRY_TRANSFORMATION = 8
SINCE_BACKUP = 16
SINCE_ANYTHING = SINCE_LAST_DISTRIBUTE_STATE | SINCE_INITIALIZATION | SINCE_LAST_GEOMETRY_TRANSFORMATION | SINCE_BACKUP
NEVER = 32


class Category:
    """
    A "namespace" for storing parameter categories.

    Notes
    -----
    * `cumulative` parameters are accumulated over many time steps
    * `pinQuantities` parameters are defined on the pin level within a block
    * `multiGroupQuantities` parameters have group dependence (often a 1D numpy array)
    * `fluxQuantities` parameters are related to neutron or gamma flux
    * `neutronics` parameters are calculated in a neutronics global flux solve
    * `gamma` parameters are calculated in a fixed-source gamma solve
    * `detailedAxialExpansion` parameters are marked as such so that they are mapped from the
      uniform mesh back to the non-uniform mesh
    * `reactivity coefficients` parameters are related to reactivity coefficient or
      kinetics parameters for kinetics solutions
    * `thermal hydraulics` parameters come from a thermal hydraulics physics plugin
      (e.g., flow rates, temperatures, etc.)
    """

    depletion = "depletion"
    cumulative = "cumulative"
    cumulativeOverCycle = "cumulative over cycle"
    assignInBlueprints = "assign in blueprints"
    retainOnReplacement = "retain on replacement"
    pinQuantities = "pinQuantities"
    fluxQuantities = "fluxQuantities"
    multiGroupQuantities = "multi-group quantities"
    neutronics = "neutronics"
    gamma = "gamma"
    detailedAxialExpansion = "detailedAxialExpansion"
    reactivityCoefficients = "reactivity coefficients"
    thermalHydraulics = "thermal hydraulics"


class ParamLocation(enum.Flag):
    """Represents the point on which a parameter is physically meaningful."""

    TOP = 1
    CENTROID = 2
    BOTTOM = 4
    AVERAGE = 10  # 2 + 8
    MAX = 16
    CORNERS = 32
    EDGES = 64
    VOLUME_INTEGRATED = 128
    CHILDREN = 256  # on some child of a composite, like a pin
    NA = 512  # no location


class NoDefault:
    """Class used to allow distinction between not setting a default and setting a default of
    ``None``.
""" def __init__(self): raise NotImplementedError("You cannot create an instance of NoDefault") class _Undefined: """Class used to identify a parameter property as being in the undefined state.""" def __init__(self): raise NotImplementedError("You cannot create an instance of _Undefined.") class Serializer: r""" Abstract class describing serialize/deserialize operations for Parameter data. Parameters need to be stored to and read from database files. This currently requires that the Parameter data be converted to a numpy array of a datatype supported by the ``h5py`` package. Some parameters may contain data that are not trivially representable in numpy/HDF5, and need special treatment. Subclassing ``Serializer`` and setting it as a ``Parameter``\ s ``serializer`` allows for special operations to be performed on the parameter values as they are stored to the database or read back in. The ``Database`` already knows how to handle certain cases where the data are not straightforward to get into a numpy array, such as when: - There are ``None``\ s. - The dimensions of the values stored on each object are inconsistent (e.g., "jagged" arrays) So, in these cases, a Serializer is not needed. Serializers are necessary for when the actual data need to be converted to a native data type (e.g., int, float, etc). For example, we use a Serializer to handle writing ``Flags`` to the Database, as they tend to be too big to fit into a system-native integer. .. important:: Defining a Serializer for a Parameter in part defines the underlying representation of the data within a database file; the data stored in a database are sensitive to the code that wrote them. Changing the method that a Serializer uses to pack or unpack data may break compatibility with old database files. Therefore, Serializers should be diligent about signaling changes by updating their version. 
It is also good practice, whenever possible, to support reading old versions so that database files written by old versions can still be read. .. impl:: Users can define custom parameter serializers. :id: I_ARMI_PARAM_SERIALIZE :implements: R_ARMI_PARAM_SERIALIZE Important physical parameters are stored in every ARMI object. These parameters represent the plant's state during execution of the model. Currently, this requires that the parameters be serializable to a numpy array of a datatype supported by the ``h5py`` package so that the data can be written to, and subsequently read from, an HDF5 file. This class allows for these parameters to be serialized in a custom manner by providing interfaces for packing and unpacking parameter data. The user or downstream plugin is able to specify how data is serialized if that data is not naturally serializable. See Also -------- armi.bookkeeping.db.database.packSpecialData armi.bookkeeping.db.database.unpackSpecialData armi.reactor.flags.FlagSerializer """ # This will accompany the packed data as an attribute when written, and will be provided to the # unpack() method when reading. If the underlying format of the data changes, make sure to # change this. version: Optional[str] = None @staticmethod def pack(data: Sequence[any]) -> Tuple[np.ndarray, Dict[str, any]]: """ Given unpacked data, return packed data and a dictionary of attributes needed to unpack it. This should perform the fundamental packing operation, returning the packed data and any metadata ("attributes") that would be necessary to unpack the data. The class's version is always stored, so no need to provide it as an attribute. 
See Also -------- armi.reactor.flags.FlagSerializer.pack """ raise NotImplementedError() @classmethod def unpack(cls, data: np.ndarray, version: Any, attrs: Dict[str, any]) -> Sequence[any]: """Given packed data and attributes, return the unpacked data.""" raise NotImplementedError() def isNumpyArray(paramStr): """Helper meta-function to create a method that sets a Parameter value to a NumPy array. Parameters ---------- paramStr : str Name of the Parameter we want to set. Returns ------- function A setter method on the Parameter class to force the value to be a NumPy array. """ def setParameter(selfObj, value): if value is None or isinstance(value, np.ndarray): setattr(selfObj, "_p_" + paramStr, value) else: setattr(selfObj, "_p_" + paramStr, np.array(value)) return setParameter def isNumpyF32Array(paramStr: str): """Helper meta-function to create a method that sets a Parameter value to a 32 bit float NumPy array. Parameters ---------- paramStr Name of the Parameter we want to set. Returns ------- function A setter method on the Parameter class to force the value to be a 32 bit NumPy array. """ def setParameter(selfObj, value): if value is None: # allow default of None to exist setattr(selfObj, "_p_" + paramStr, value) else: # force to 32 bit setattr(selfObj, "_p_" + paramStr, np.array(value, dtype=np.float32)) return setParameter @functools.total_ordering class Parameter: """Metadata about a specific parameter.""" _validName = re.compile("^[a-zA-Z0-9_]+$") # Using slots because Parameters are pretty static and mostly POD. 
__slots__ make this official, # and offer some performance benefits in memory (not too important; there aren't that many # instances of Parameter to begin with) and attribute access time (more important, since we need # to go through Parameter objects to get to a specific parameter's value in a # ParameterCollection) __slots__ = ( "name", "fieldName", "collectionType", "location", "saveToDB", "serializer", "units", "default", "_getter", "_setter", "description", "categories", "assigned", "_backup", ) def __init__( self, name, units, description, location, saveToDB, default, setter, categories, serializer: Optional[Type[Serializer]] = None, ): # nonsensical to have a serializer with no intention of saving to DB assert not (serializer is not None and not saveToDB) assert serializer is None or saveToDB assert self._validName.match(name), "{} is not a valid param name".format(name) assert len(description), f"Parameter {name} defined without description." self.collectionType = _Undefined self.name = name self.fieldName = "_p_" + name self.location = location self.saveToDB = saveToDB self.serializer = serializer self.description = description self.units = units self.default = default self.categories = categories self.assigned = NEVER self._backup = None if self.default is not NoDefault: def paramGetter(p_self): return getattr(p_self, self.fieldName, self.default) else: def paramGetter(p_self): value = getattr(p_self, self.fieldName) if value is NoDefault: raise ParameterError( "Cannot get value for parameter `{}` in `{}` as no default has been " "defined, and no value has been assigned.".format(self.name, type(p_self)) ) return value self._getter = paramGetter self._setter = None # actually, it gets assigned with this: self.setter(setter) def __repr__(self): return "<ParamDef name:{} collectionType:{} units:{} assigned:{}>".format( self.name, self.collectionType.__name__, self.units, self.assigned ) def __eq__(self, other): """Name defines equality.""" return self.name 
== other.name def __ne__(self, other): return not (self == other) def __lt__(self, other): """Sort alphabetically by name.""" return self.name < other.name def __hash__(self): return hash(self.name) + id(self) def __setstate__(self, state): self._backup = state[0] # a tuple of 1 element. def __set__(self, obj, val): """This is a property setter, see Python documentation for "descriptor".""" self._setter(obj, val) def __get__(self, obj, cls=None): """This is a property getter, see Python documentation for "descriptor". Notes ----- We do not check to see if ``cls != None``. This is an optimization choice, that someone may deem unnecessary. As a result, unlike Python's ``property`` class, a subclass cannot override the getter method. """ return self._getter(obj) def setter(self, setter): """Decorator method for assigning setter. .. impl:: Provide a way to signal if a parameter needs updating across processes. :id: I_ARMI_PARAM_PARALLEL :implements: R_ARMI_PARAM_PARALLEL Parameters need to be handled properly during parallel code execution. This includes notifying processes if a parameter has been updated by another process. This method allows for setting a parameter's value as well as an attribute that signals whether this parameter has been updated. Future processes will be able to query this attribute so that the parameter's status is properly communicated. Notes ----- Unlike the traditional Python ``property`` class, this does not return a new instance of a ``Parameter``; therefore it cannot be reassigned in the same way that a Python ``property`` can be. Examples -------- >>> class MyParameterCollection(parameters.ParameterCollection): ... mass = parameters.Parameter(...) ... ... @mass.setter ... def mass(self, value): ... if value < 0: ... raise ValueError("Negative mass is not possible, consider a diet.") ... 
self._p_speed = value """ if setter is NoDefault: def paramSetter(p_self, value): self.assigned = SINCE_ANYTHING p_self.assigned = SINCE_ANYTHING setattr(p_self, self.fieldName, value) elif setter is None: def paramSetter(p_self, value): raise ParameterError( "Cannot set value for parameter `{}` on {} to `{}`, it has a restricted setter.".format( self.name, p_self, value ) ) elif callable(setter): def paramSetter(p_self, value): self.assigned = SINCE_ANYTHING p_self.assigned = SINCE_ANYTHING setter(p_self, value) else: raise ParameterDefinitionError( "The setter for parameter `{}` must be callable. Setter attribute: {}".format(self.name, setter) ) self._setter = paramSetter return self def backUp(self): """Back up the assigned state.""" self._backup = (self._backup, self.assigned) def restoreBackup(self, paramsToApply): """Restore the backed up state.""" if self in paramsToApply: # retain self.assigned if self in a category self._backup, _assigned = self._backup else: self._backup, self.assigned = self._backup def atLocation(self, loc): """True if parameter is defined at location.""" return self.location and self.location & loc def hasCategory(self, category: str) -> bool: """True if a parameter has a specific category.""" return category in self.categories class ParameterDefinitionCollection: """ A very specialized container for managing parameter definitions. Notes ----- ``_representedTypes`` is used to detect if this ``ParameterDefinitionCollection`` contains definitions for only one type. If the collection only exists for 1 type, the lookup (``__getitem__``) can short circuit O(n) logic for O(1) dictionary lookup. """ # Slots are not being used here as an attempt at optimization. Rather, they serve to add some # needed rigidity to the parameter system. 
__slots__ = ("_paramDefs", "_paramDefDict", "_representedTypes", "_locked") def __init__(self): self._paramDefs = list() self._paramDefDict = dict() self._representedTypes = set() self._locked = False def __iter__(self): return iter(self._paramDefs) def __len__(self): return len(self._paramDefs) def __getitem__(self, name): """Get a parameter by name. Notes ----- This method might break if the collection is for multiple composite types, and there exists a parameter with the same name in multiple types. """ # O(1) lookup if there is only 1 type, could still raise a KeyError if len(self._representedTypes) == 1: return self._paramDefDict[name, next(iter(self._representedTypes))] # "matches" only checks for the same name, while the add method checks both name and # collectionType matches = [pd for pd in self if pd.name == name] if len(matches) != 1: raise KeyError( "Too {} parameters with the name `{}`. Matches:\n{}".format( "many" if len(matches) > 1 else "few", name, "\n".join(str(pd) for pd in matches), ) ) return matches[0] def add(self, paramDef): """Add a :py:class:`Parameter` to this collection.""" assert not self._locked, "This ParameterDefinitionCollection has been locked." self._paramDefs.append(paramDef) self._paramDefDict[paramDef.name, paramDef.collectionType] = paramDef self._representedTypes.add(paramDef.collectionType) def _filter(self, filterFunc): pdc = ParameterDefinitionCollection() pdc.extend(filter(filterFunc, self._paramDefs)) return pdc def items(self): return self._paramDefDict.items() def extend(self, other): """Grow a parameter definition collection by another parameter definition collection.""" assert not self._locked, "This ParameterDefinitionCollection ({}) has been locked.".format( self._representedTypes ) assert self is not other if other is None: raise ValueError( f"Cannot extend {self} with `None`. Ensure return value of parameter definitions returns something." 
) for pd in other: self.add(pd) def inCategory(self, categoryName): """ Create a :py:class:`ParameterDefinitionCollection` that contains definitions that are in a specific category. """ return self._filter(lambda pd: categoryName in pd.categories) def atLocation(self, paramLoc): """ Make a param definition collection with all defs defined at a specific location. Parameters can be defined at various locations within their container based on :py:class:`ParamLocation`. This allows selection by those values. """ return self._filter(lambda pd: pd.atLocation(paramLoc)) def since(self, mask): """ Create a :py:class:`ParameterDefinitionCollection` that contains definitions that have been modified since a specific set of actions. """ return self._filter(lambda pd: pd.assigned & mask) def unchanged_since(self, mask): """ Create a :py:class:`ParameterDefinitionCollection` that contains definitions that have not been modified since a specific set of actions. This is the complementary set of the collection returned by `since`. """ return self._filter(lambda pd: not (pd.assigned & mask)) def forType(self, compositeType): """ Create a :py:class:`ParameterDefinitionCollection` that contains definitions for a specific composite type. """ return self._filter(lambda pd: issubclass(compositeType.paramCollectionType, pd.collectionType)) def resetAssignmentFlag(self, mask): """ Clear the `assigned` flag for a certain operation on all parameters. These flags will get set by the param definition setters if they get changed again. 
Notes ----- See http://www.vipan.com/htdocs/bitwisehelp.html to understand the bitwise operations """ for pd in self._paramDefs: pd.assigned &= ~mask def setAssignmentFlag(self, mask): for pd in self._paramDefs: pd.assigned |= mask def byNameAndType(self, name, compositeType): """Get a :py:class:`Parameter` by compositeType and name.""" return self._paramDefDict[name, compositeType.paramCollectionType] def byNameAndCollectionType(self, name, collectionType): """Get a :py:class:`Parameter` by collectionType and name.""" return self._paramDefDict[name, collectionType] @property def categories(self): """Get the categories of all the :py:class:`~Parameter` instances within this collection.""" categories = set() for paramDef in self: categories |= paramDef.categories return categories @property def names(self): return [pd.name for pd in self] def lock(self): self._locked = True @property def locked(self): return self._locked def toWriteToDB(self, assignedMask: Optional[int] = None): """ Get a list of acceptable parameters to store to the database for a level of the data model. .. impl:: Filter parameters to write to DB. :id: I_ARMI_PARAM_DB :implements: R_ARMI_PARAM_DB This method is called when writing the parameters to the database file. It queries the parameter's ``saveToDB`` attribute to ensure that this parameter is desired for saving to the database file. It returns a list of parameters that should be included in the database write operation. Parameters ---------- assignedMask : int A bitmask to down-filter which params to use based on how "stale" they are. """ mask = assignedMask or SINCE_ANYTHING return [p for p in self if p.saveToDB and p.assigned & mask] def createBuilder(self, *args, **kwargs): """ Create an associated object that can create definitions into this collection. Using the returned ParameterBuilder will add all defined parameters to this ParameterDefinitionCollection, using the passed arguments as defaults. 
Arguments should be valid arguments to ``ParameterBuilder.__init__()`` """ paramBuilder = ParameterBuilder(*args, **kwargs) paramBuilder.associateParameterDefinitionCollection(self) return paramBuilder class ParameterBuilder: """Factory for creating Parameter and parameter properties.""" def __init__( self, location=ParamLocation.AVERAGE, default=NoDefault, categories=None, saveToDB=True, ): """Create a :py:class:`ParameterBuilder`.""" self._entered = False self._defaultLocation = location self._defaultCategories = set(categories or []) # make sure it is always a set self._defaultValue = default self._assertDefaultIsProperType(default) self._saveToDB = saveToDB self._paramDefs = None def __enter__(self): self._entered = True return self def __exit__(self, exc_type, exc_value, tracebac): if exc_type is not None: # allow exceptions to be raised normally, to prevent confusing stack traces return self._entered = False @staticmethod def _assertDefaultIsProperType(default): if default in (NoDefault, None) or isinstance(default, (int, str, float, bool, Flags)): return raise AssertionError( "Cannot specify a default mutable type ({}) value to a parameter; all instances would " "share the same list.".format(type(default)) ) def associateParameterDefinitionCollection(self, paramDefs): """ Associate this parameter factory with a specific ParameterDefinitionCollection. Subsequent calls to defParam will automatically add the created ParameterDefinitions to this ParameterDefinitionCollection. This results in a cleaner syntax when defining many ParameterDefinitions. """ self._paramDefs = paramDefs def defParam( self, name, units, description, location=None, saveToDB=NoDefault, default=NoDefault, setter=NoDefault, categories=None, serializer: Optional[Type[Serializer]] = None, ): r"""Create a parameter as a property (with get/set) on a class. 
Parameters ---------- name: str the official name of the parameter units: str string representation of the units description: str a brief, but precise-as-possible description of what the parameter is used for. location: str string representation of the location the attribute is applicable to, such as average, max, etc. saveToDB: bool indicator as to whether the parameter should be written to the database. The actual default is defined by the :py:class:`ParameterBuilder`, and is :code:`True`. default: immutable type a default value for this parameter which must be an immutable type. If the type is mutable, e.g. a list, dict, an exception should be raised, or unknown behavior. setter: None or callable If ``None``, there is no direct way to set the parameter. If some other callable method, (which may have the same name as the property!) then the setter method is used instead. categories: List of str A list of categories to which this Parameter should belong. Categories are typically used to engage special treatment for certain Parameters. serializer: Optional subclass of Serializer A class describing how the parameter data should be stored to the database. This is usually only needed in exceptional cases where it is difficult to store a parameter in a numpy array. Notes ----- It is not possible to initialize the parameter on the class this method would be used on, because there is no instance (i.e. self) when this method is run. However, this method could access a globally available set of definitions, if one existed. 
""" self._assertDefaultIsProperType(default) if location is None and self._defaultLocation is None: raise ParameterDefinitionError( "The default location is not specified for {}; a parameter-specific location is required.".format(self) ) paramDef = Parameter( name, units=units, description=description, location=location or self._defaultLocation, saveToDB=saveToDB if saveToDB is not NoDefault else self._saveToDB, default=default if default is not NoDefault else self._defaultValue, setter=setter, categories=set(categories or []).union(self._defaultCategories), serializer=serializer, ) if self._paramDefs is not None: self._paramDefs.add(paramDef) return paramDef # Container for all parameter definition collections that have been bound to an ArmiObject or # subclass. These are added from the applyParameters() method on the ParameterCollection class. ALL_DEFINITIONS = ParameterDefinitionCollection() ================================================ FILE: armi/reactor/parameters/resolveCollections.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains the magic that makes the Parameter system and ARMI composite model play nicely together. The contained metaclass is useful for maintaining a hierarchy of ``ParameterCollection`` classes, which mimic the hierarchy of ``ArmiObject`` s to which they apply. 
Some ``ArmiObject`` subclasses define their own parameters, while others do not, so we do not want
to blindly create a ``ParameterCollectionClass`` for each ``ArmiObject`` subclass. Instead, we want
to be able to skip generations when no additional parameters were requested for that level. For
instance if we have a hierarchy like: ``ArmiObject`` <- ``A`` <- ``B``, where ``ArmiObject`` and
``B`` define parameters, while ``A`` does not define any parameters of its own, we want to have a
``ArmiObjectParameterCollection`` and a ``BParameterCollection`` (with ``BParameterCollection``
being a subclass of ``ArmiObjectParameterCollection``). ``ArmiObject`` and ``A`` will both *share*
the ``ArmiObjectParameterCollection``, while ``B`` will use ``BParameterCollection``.
``BParameterCollection`` will contain all of the parameters defined in
``ArmiObjectParameterCollection``, plus whatever additional parameters were defined on ``B``.

The above scenario should behave rather intuitively for someone used to classes and inheritance,
but maintaining this hierarchy by hand would be onerous and error-prone. What if one day we decide
to add some parameters to ``A``? We need to remember to add a new class for its parameters, and
make sure to make ``BParameterCollection`` a subclass of that new ``ParameterCollection`` class.
With the below metaclass, we needn't worry ourselves with any of that; it is taken care of
automatically.

If you want to know how the sausage is made, the ``ResolveParametersMeta`` metaclass is responsible
for forming a hierarchy of ``ParameterCollection`` classes that correspond to the related hierarchy
of classes inheriting the root ``ArmiObject`` class.

It should be rare for an ARMI developer not engaged directly with Framework development to need to
know exactly how this works, but a proficient ARMI developer must keep in mind the following rules
about how this system behaves in practice:

* When defining subclasses of ``ArmiObject``, defining a class attribute called ``pDefs`` of the
  ``ParameterDefinitionCollection`` type signals to the system that this is a *Parameter Class*.
* When defining a *Parameter Class*, it will trigger the creation of a new ``ParameterCollection``
  class, which will be derived from the ``ParameterCollection`` class of the most immediate
  *Parameter Class* ancestor the new class's inheritance tree.
* All classes derived from ``ArmiObject`` will receive an associated subclass of
  ``ParameterCollection``, which will ultimately include all of the relevant Parameters for that
  class. The specific class is the ``ParameterCollection`` subclass defined for the most immediate
  *Parameter Class* in the classes inheritance tree.
* Parameter definitions can be added to a *Parameter Class*'s ``pDefs`` until Parameters have been
  "compiled" for it. After compiling parameters, the ``pDefs`` are locked, and any attempts at
  defining additional parameters will cause an error.
* ``ArmiObject`` s cannot be instantiated until after parameters have been compiled.
"""

from armi.reactor.parameters.parameterCollections import ParameterCollection
from armi.reactor.parameters.parameterDefinitions import ParameterDefinitionCollection


class ResolveParametersMeta(type):
    """Metaclass for automatically defining associated ParameterCollection classes.

    Any class invoking this metaclass will automatically create an associated sub-class of the
    ``ParameterCollection`` type, if it has a class attribute called ``pDefs`` that is an instance
    of ``ParameterDefinitionCollection``. This new class will itself be a subclass of the
    ``ParameterCollection`` class that is associated with the invoking class's parent.

    If no ``pDefs`` class attribute is present, the invoking class will adopt the
    ``ParameterCollection`` class associated with it's parent, or ``None`` if it cannot find one.

    The associated ``ParameterCollection`` will be stored on the new class's
    ``paramCollectionType`` attribute.

    For example, when this metaclass is applied to the ``Block`` class it will create a new class
    named ``BlockParameterCollection``, and add it as a class attribute called
    ``Block.paramCollectionType``. The ``BlockParameterCollection`` class will itself be a subclass
    of ``ArmiObjectParameterCollection``, which it would have found from the ``Composite`` class
    from which the ``Block`` class inherits. The ``Composite`` class, on the other hand, would have
    obtained the ``ArmiObjectParameterCollection`` from it's parent (``ArmiObject``), since it does
    not have a ``pDefs`` attribute of its own.
    """

    def __new__(mcl, name, bases, attrs):
        # Refuse to silently overwrite a paramCollectionType set by hand or by a prior pass.
        assert attrs.get("paramCollectionType") is None, "{} already has parameter collection".format(name)

        baseCollections = [b.paramCollectionType for b in bases if hasattr(b, "paramCollectionType")]
        # Make sure that these are what we expect them to be
        assert all([issubclass(c, ParameterCollection) for c in baseCollections if c is not None])
        # Pull out the one element of the list if it exists
        inferredBaseCollection = next(iter(baseCollections), None)

        # pDefs can be defined in the class definition; if it is, this is a Parameter Class!
        pDefs = attrs.get("pDefs")
        makeNewPC = pDefs is not None

        if makeNewPC:
            # We may have our own parameters, so we need to spin up a new
            # XParameterCollection class to store them.
            assert isinstance(pDefs, ParameterDefinitionCollection)
            collectionName = name + "ParameterCollection"
            collectionBase = inferredBaseCollection or ParameterCollection
            # Note that we also give a reference to the pDefs to the parameter collection.
            # This is so that the ParameterCollection hierarchy can do all of the parameter
            # definitions work, while plugins can associate definitions with the ArmiObjects
            paramCollectionType = type(
                collectionName,
                (collectionBase,),
                {
                    "pDefs": pDefs,
                },
            )
        else:
            # We will not be defining our own parameters, so we will defer to those
            # of our parent classes if they have any
            paramCollectionType = inferredBaseCollection

        attrs["paramCollectionType"] = paramCollectionType
        nt = type.__new__(mcl, name, bases, attrs)

        if makeNewPC:
            # Back-reference: let the new collection class know which ArmiObject subclass owns it.
            paramCollectionType._ArmiObject = nt

        return nt


================================================ FILE: armi/reactor/reactorParameters.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reactor parameter definitions.""" from armi.reactor import parameters from armi.reactor.parameters import ParamLocation from armi.utils import units def defineReactorParameters(): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb: pb.defParam( "cycle", units=units.UNITLESS, description="Current cycle of the simulation (integer)", default=0, ) pb.defParam( "cycleLength", units=units.DAYS, description="Length of the cycle, including outage time described by availabilityFactor", ) pb.defParam("stepLength", units=units.DAYS, description="Length of current step") pb.defParam( "availabilityFactor", units=units.UNITLESS, description="Availability factor of the plant. This is the fraction of the time that " "the plant is operating.", default=1.0, ) pb.defParam( "capacityFactor", units=units.UNITLESS, description="The fraction of power produced by the plant this cycle over the " "full-power, 100% uptime potential of the plant.", default=1.0, ) pb.defParam( "time", units=units.YEARS, description="Time of reactor life from BOL to current time node", categories=["depletion"], ) pb.defParam("timeNode", units=units.UNITLESS, description="Integer timeNode", default=0) pb.defParam( "maxAssemNum", units=units.UNITLESS, description="Max number of assemblies created so far in the Reactor (integer)", default=0, ) return pDefs def defineCoreParameters(): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb: pb.defParam( "orientation", units=units.DEGREES, description=( "Triple representing rotations counterclockwise around each spatial axis. 
For " "example, a hex assembly rotated by 1/6th has orientation (0,0,60.0)" ), default=None, ) with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb: pb.defParam( "maxAssemNum", units=units.UNITLESS, description="Maximum assembly number", default=0, ) pb.defParam("numMoves", units=units.UNITLESS, description="numMoves", default=0) with pDefs.createBuilder(location=ParamLocation.NA, categories=["control rods"]) as pb: pb.defParam( "crMostValuablePrimaryRodLocation", default="", units=units.UNITLESS, saveToDB=True, description=("Core assembly location for the most valuable primary control rod."), ) pb.defParam( "crMostValuableSecondaryRodLocation", default="", units=units.UNITLESS, saveToDB=True, description=("Core assembly location for the most valuable secondary control rod."), ) pb.defParam( "crTransientOverpowerWorth", default=0.0, units=units.PCM, saveToDB=True, description=( "Reactivity worth introduced by removal of the highest worth primary control rod " "from the core, starting from its critical position" ), ) with pDefs.createBuilder() as pb: pb.defParam( "axialMesh", units=units.CM, description="Global axial mesh of the reactor core from bottom to top.", default=None, location=ParamLocation.TOP, ) with pDefs.createBuilder(default=0.0, location=ParamLocation.NA) as pb: pb.defParam( "referenceBlockAxialMesh", units=units.CM, description=("The axial block boundaries that assemblies should conform to in a uniform mesh case."), default=None, ) pb.defParam("fissileMass", units=units.GRAMS, description="Fissile mass of the reactor") pb.defParam( "heavyMetalMass", units=units.GRAMS, description="Heavy Metal mass of the reactor", ) pb.defParam( "keffUnc", units=units.UNITLESS, saveToDB=True, default=0.0, description="Uncontrolled k-effective for the reactor core (with control rods fully removed).", ) pb.defParam( "maxDPA", units=units.DPA, description="Maximum DPA based on pin-level max if it exists, block level max otherwise", ) 
pb.defParam("maxGridDpa", units=units.DPA, description="Grid plate max dpa") pb.defParam( "maxProcessMemoryInMB", units=units.MB, description="Maximum memory used by an ARMI process", ) pb.defParam( "minProcessMemoryInMB", units=units.MB, description="Minimum memory used by an ARMI process", ) pb.defParam( "minutesSinceStart", units=units.MINUTES, description="Run time since the beginning of the calculation", ) pb.defParam( "peakGridDpaAt60Years", units=units.DPA, description="Grid plate peak dpa after 60 years irradiation", ) with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0, categories=["neutronics"]) as pb: pb.defParam( "power", units=units.WATTS, description="Thermal power of the reactor core. Corresponds to the nuclear power generated in the core.", ) pb.defParam( "powerDensity", units=f"{units.WATTS}/{units.GRAMS}", description="BOL Power density of the reactor core, in units of Watts per" "grams of Heavy Metal Mass. After the BOL, the power parameter will be set, " "and this will entirely overridden by that.", ) pb.defParam( "maxdetailedDpaPeak", units=units.DPA, description="Highest peak dpa of any block in the problem", ) pb.defParam( "maxFlux", units=f"n/{units.CM}^2/{units.SECONDS}", description="Max neutron flux in the core", ) pb.defParam( "maxDetailedDpaThisCycle", units=units.DPA, description="Max increase in dpa this cycle (only defined at EOC)", ) pb.defParam( "dpaFullWidthHalfMax", units=units.CM, description="Full width at half max of the detailedDpa distribution", ) pb.defParam( "elevationOfACLP3Cycles", units=units.CM, description="minimum axial location of the ACLP for 3 cycles at peak dose", ) pb.defParam( "elevationOfACLP7Cycles", units=units.CM, description="minimum axial location of the ACLP for 7 cycles at peak dose", ) pb.defParam( "maxpercentBu", units=units.PERCENT_FIMA, description="Max percent burnup on any block in the problem", ) pb.defParam("rxSwing", units=units.PCM, description="Reactivity swing") pb.defParam( 
"maxBuF", units=units.PERCENT, description="Maximum burnup seen in any feed assemblies", ) pb.defParam( "maxBuI", units=units.PERCENT, description="Maximum burnup seen in any igniter assemblies", ) pb.defParam("keff", units=units.UNITLESS, description="Global multiplication factor") pb.defParam( "peakKeff", units=units.UNITLESS, description="Maximum keff in the simulation", ) pb.defParam( "fastFluxFrAvg", units=units.UNITLESS, description="Fast flux fraction average", ) pb.defParam( "maxpdens", units=f"{units.WATTS}/{units.CM}^3", description="Maximum avg. volumetric power density of all blocks", ) pb.defParam( "maxPD", units=f"{units.MW}/{units.METERS}^2", description="Maximum areal power density of all assemblies", ) pb.defParam( "jumpRing", units=units.UNITLESS, description=( "Radial ring number where bred-up fuel assemblies shuffle jump from the low power " "to the high power region." ), ) with pDefs.createBuilder( default=0.0, location=ParamLocation.AVERAGE, categories=["reactivity coefficients", "kinetics"], ) as pb: pb.defParam( "beta", units=units.UNITLESS, description="Effective delayed neutron fraction", default=None, ) pb.defParam( "betaComponents", units=units.UNITLESS, description="Group-wise delayed neutron fractions", default=None, ) pb.defParam( "betaDecayConstants", units=f"1/{units.SECONDS}", description="Group-wise precursor decay constants", default=None, ) with pDefs.createBuilder( default=0.0, location=ParamLocation.AVERAGE, categories=["reactivity coefficients", "core wide"], ) as pb: # CORE WIDE REACTIVITY COEFFICIENTS pb.defParam( "rxFuelAxialExpansionCoeffPerTemp", units=f"{units.REACTIVITY}/{units.DEGK}", description="Fuel Axial Expansion Coefficient", ) pb.defParam( "rxGridPlateRadialExpansionCoeffPerTemp", units=f"{units.REACTIVITY}/{units.DEGK}", description="Grid Plate Radial Expansion Coefficient", ) with pDefs.createBuilder(location=ParamLocation.AVERAGE, categories=["equilibrium"]) as pb: pb.defParam( "cyclics", 
units=units.UNITLESS, description=("The integer number of cyclic mode equilibrium-cycle iterations that have occurred so far"), default=0, ) with pDefs.createBuilder(location=ParamLocation.AVERAGE, categories=["equilibrium"]) as pb: pb.defParam( "axialExpansionPercent", units=units.PERCENT, description="Percent of axial growth of fuel blocks", default=0.0, ) pb.defParam( "coupledIteration", units=units.UNITLESS, description="Pre-defined number of tightly coupled iterations.", default=0, ) return pDefs def makeParametersReadOnly(r): """Convert all the parameters in a Reactor to read-only. This method is pretty simple. It goes through all the children of a Reactor object, recursively, and converts the parameters to read-only mode. This will affect the Core, but also any Spent Fuel Pools or other high-level reactor systems. Parameters ---------- r : Reactor Full reactor object, to be modified. Notes ----- Once you make one Reactor read-only, you cannot make it writeable again. """ r.p.readOnly = True for child in r.iterChildren(deep=True): child.p.readOnly = True ================================================ FILE: armi/reactor/reactors.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Reactor objects represent the highest level in the hierarchy of structures that compose the system to be modeled.""" import copy from armi import getPluginManagerOrFail, runLog from armi.reactor import composites, reactorParameters from armi.reactor.cores import Core from armi.reactor.excoreStructure import ExcoreCollection, ExcoreStructure from armi.settings.fwSettings.globalSettings import CONF_SORT_REACTOR from armi.utils import directoryChangers class Reactor(composites.Composite): """ Top level of the composite structure, potentially representing all components in a reactor. This class contains the core and any ex-core structures that are to be represented in the ARMI model. Historically, the ``Reactor`` contained only the core. To support better representation of ex-core structures, the old ``Reactor`` functionality was moved to the newer `Core` class, which has a ``Reactor`` parent. .. impl:: The user-specified reactor. :id: I_ARMI_R :implements: R_ARMI_R The :py:class:`Reactor <armi.reactor.reactors.Reactor>` is the top level of the composite structure, which can represent all components within a reactor core. The reactor contains a :py:class:`Core <armi.reactor.reactors.Core>`, which contains a collection of :py:class:`Assembly <armi.reactor.assemblies.Assembly>` objects arranged in a hexagonal or Cartesian grid. Each Assembly consists of a stack of :py:class:`Block <armi.reactor.blocks.Block>` objects, which are each composed of one or more :py:class:`Component <armi.reactor.components.component.Component>` objects. Each :py:class:`Interface <armi.interfaces.Interface>` is able to interact with the reactor and its child :py:class:`Composites <armi.reactor.composites.Composite>` by retrieving data from it or writing new data to it. This is the main medium through which input information and the output of physics calculations is exchanged between interfaces and written to an ARMI database. 
""" pDefs = reactorParameters.defineReactorParameters() def __init__(self, name, blueprints): composites.Composite.__init__(self, "R-{}".format(name)) self.o = None self.spatialGrid = None self.spatialLocator = None self.p.maxAssemNum = 0 self.p.cycle = 0 self.core = None self.excore = ExcoreCollection() self.blueprints = blueprints def __getstate__(self): """Applies a settings and parent to the reactor and components.""" state = composites.Composite.__getstate__(self) state["o"] = None return state def __setstate__(self, state): composites.Composite.__setstate__(self, state) def __deepcopy__(self, memo): memo[id(self)] = newR = self.__class__.__new__(self.__class__) newR.__setstate__(copy.deepcopy(self.__getstate__(), memo)) newR.name = f"{self.name}-copy" return newR def __repr__(self): return f"<{self.__class__.__name__}: {self.name} id:{id(self)}>" @property def nuclideBases(self): from armi.nucDirectory import nuclideBases if nuclideBases.nuclideBases is None: nuclideBases.factory() return nuclideBases.nuclideBases def add(self, container): composites.Composite.add(self, container) cores = [c for c in self.getChildren(deep=True) if isinstance(c, Core)] if cores: if len(cores) != 1: raise ValueError( f"Only 1 core may be specified at this time. Please adjust input. {len(cores)} cores found." ) self.core = cores[0] if isinstance(container, ExcoreStructure): nomen = container.name.replace(" ", "").lower() if nomen == "spentfuelpool": nomen = "sfp" self.excore[nomen] = container def incrementAssemNum(self): """ Increase the max assembly number by one and returns the current value. Notes ----- The "max assembly number" is not currently used in the Reactor. So the idea is that we return the current number, then iterate it for the next assembly. Obviously, this method will be unused for non-assembly-based reactors. Returns ------- int The new max Assembly number. 
""" val = int(self.p.maxAssemNum) self.p.maxAssemNum += 1 return val def normalizeNames(self): """ Renumber and rename all the Assemblies and Blocks. This method normalizes the names in the Core then the SFP. Returns ------- int The new max Assembly number. """ self.p.maxAssemNum = 0 ind = self.core.normalizeNames(self.p.maxAssemNum) self.p.maxAssemNum = ind if self.excore.sfp is not None: ind = self.excore.sfp.normalizeNames(self.p.maxAssemNum) self.p.maxAssemNum = ind return ind def loadFromCs(cs) -> Reactor: """ Load a Reactor based on the input settings. Parameters ---------- cs: Settings A relevant settings object Returns ------- Reactor Reactor loaded from settings file """ from armi.reactor import blueprints bp = blueprints.loadFromCs(cs) return factory(cs, bp) def factory(cs, bp) -> Reactor: """Build a reactor from input settings and blueprints.""" runLog.header("=========== Constructing Reactor and Verifying Inputs ===========") getPluginManagerOrFail().hook.beforeReactorConstruction(cs=cs) r = Reactor(cs.caseTitle, bp) # For now, ARMI will create a default Spent Fuel Pool and add it to every reactor. if not any(structure.typ == "sfp" for structure in bp.systemDesigns.values()): bp.addDefaultSFP() with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False): # always construct the core first (for assembly serial number purposes) if not bp.systemDesigns: raise ValueError("The input must define a `core` system, but does not. 
Update inputs") for structure in bp.systemDesigns: structure.construct(cs, bp, r) runLog.debug(f"Reactor: {r}") # return a Reactor object if cs[CONF_SORT_REACTOR]: r.sort() else: runLog.info(f"Due to the setting {CONF_SORT_REACTOR}, this Reactor is unsorted.") return r ================================================ FILE: armi/reactor/spentFuelPool.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A nuclear reactor frequently has storage pools (or 'ponds') for spent fuel. This file implements a simple/default representation of such as an ARMI "system". ARMI systems, like the core are grids filled with ArmiObjects. This module also includes some helper tools to aid transferring spent fuel assemblies from the core to the SFP. """ import itertools from armi.reactor.excoreStructure import ExcoreStructure class SpentFuelPool(ExcoreStructure): """The Spent Fuel Pool (SFP) is a place to store discharged assemblies. This class is a core-like system object, so it has a spatial grid that Assemblies can fit in. .. impl:: The user-specified spent fuel pool. :id: I_ARMI_SFP :implements: R_ARMI_SFP The SpentFuelPool is a composite structure meant to represent storage ponds for used fuel assemblies. As a data structure, it is little more than a container for ``Assembly`` objects. It should be able to easily support adding or removing ``Assembly`` objects. 
        And at every time node the current state of the SFP will be written to the database.
    """

    def __init__(self, name, parent=None):
        ExcoreStructure.__init__(self, name)
        self.parent = parent
        self.spatialGrid = None
        # number of columns in the pool grid; computed lazily on the first add()
        self.numColumns = None

    def add(self, assem, loc=None):
        """
        Add an Assembly to the list.

        Parameters
        ----------
        assem : Assembly
            The Assembly to add to the spent fuel pool
        loc : LocationBase, optional
            If provided, the assembly is inserted at this location. If it is not provided, the
            locator on the Assembly object will be used. If the Assembly's loc belongs to
            ``self.spatialGrid``, it will not be used.
        """
        if loc is not None and loc.grid is not self.spatialGrid:
            raise ValueError(f"An assembly cannot be added to {self} using a spatial locator from another grid.")

        if self.numColumns is None:
            self._updateNumberOfColumns()

        # If the assembly added has a negative ID, that is a placeholder, fix it.
        if assem.p.assemNum < 0:
            newNum = self.r.incrementAssemNum()
            assem.renumber(newNum)

        # Make sure the location of the new assembly is valid
        locProvided = loc is not None or (
            assem.spatialLocator is not None and assem.spatialLocator.grid is self.spatialGrid
        )
        if locProvided:
            loc = loc or assem.spatialLocator
        else:
            # no usable location given; take the first open grid cell
            loc = self._getNextLocation()

        # orient the blocks to match this grid
        assem.orientBlocks(parentSpatialGrid=self.spatialGrid)
        super().add(assem, loc)

    def getAssembly(self, name):
        """Get a specific assembly by name, or ``None`` if it is not in the pool."""
        for a in self:
            if a.getName() == name:
                return a
        return None

    def _updateNumberOfColumns(self):
        """Determine the number of columns in the spatial grid."""
        locs = self.spatialGrid.items()
        # count distinct i-indices across all grid locations
        self.numColumns = len(set([ll[0][0] for ll in locs]))

    def _getNextLocation(self):
        """Helper method to allow each discharged assembly to be easily dropped into the SFP.

        The logic here is that we assume that the SFP is a rectangular-ish grid, with a set number
        of columns per row. So when you add an Assembly here, if you don't provide a location, the
        grid is filled in a col/row order with whatever grid cell is found open first.
        """
        filledLocations = {a.spatialLocator for a in self}
        grid = self.spatialGrid

        for idx in itertools.count():
            # walk cells column-by-column within each row until an open one is found
            j = idx // self.numColumns
            i = idx % self.numColumns
            loc = grid[i, j, 0]
            if loc not in filledLocations:
                return loc

        return None

    def normalizeNames(self, startIndex=0):
        """
        Renumber and rename all the Assemblies and Blocks.

        Parameters
        ----------
        startIndex : int, optional
            The default is to start counting at zero. But if you are renumbering assemblies
            across the entire Reactor, you may want to start at a different number.

        Returns
        -------
        int
            The new max Assembly number.
        """
        ind = startIndex
        for a in self:
            oldName = a.getName()
            newName = a.makeNameFromAssemNum(ind)
            if oldName == newName:
                # already correctly named; just advance the counter
                ind += 1
                continue

            a.p.assemNum = ind
            a.setName(newName)

            for b in a:
                # preserve each block's axial index while renaming for the new assembly number
                axialIndex = int(b.name.split("-")[-1])
                b.name = b.makeName(ind, axialIndex)

            ind += 1

        return ind


================================================ FILE: armi/reactor/tests/__init__.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================ FILE: armi/reactor/tests/test_assemblies.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests assemblies.py."""

import math
import pathlib
import random
import unittest
from unittest.mock import patch

import numpy as np
from numpy.testing import assert_allclose

from armi import settings, tests
from armi.physics.neutronics.settings import CONF_LOADING_FILE, CONF_XS_KERNEL
from armi.reactor import assemblies, blocks, blueprints, components, geometry, parameters, reactors
from armi.reactor.assemblies import Flags, HexAssembly, copy, grids, runLog
from armi.reactor.parameters import ParamLocation
from armi.reactor.tests import test_reactors
from armi.tests import TEST_ROOT, mockRunLogs
from armi.utils import directoryChangers, textProcessors

# number of blocks stacked into the shared test assembly
NUM_BLOCKS = 3


def buildTestAssemblies():
    """
    Build some assembly objects that will be used in testing.

    This builds 2 HexBlocks:
        * One with half UZr pins and half UTh pins
        * One with all UZr pins
    """
    settings.Settings()

    temperature = 273.0
    fuelID = 0.0
    fuelOD = 1.0
    cladOD = 1.1
    # generate a reactor with assemblies
    # generate components with materials
    nPins = 100

    fuelDims = {
        "Tinput": temperature,
        "Thot": temperature,
        "od": fuelOD,
        "id": fuelID,
        "mult": nPins,
    }

    fuelUZr = components.Circle("fuel", "UZr", **fuelDims)
    fuelUTh = components.Circle("fuel UTh", "ThU", **fuelDims)

    fuelDims2nPins = {
        "Tinput": temperature,
        "Thot": temperature,
        "od": fuelOD,
        "id": fuelID,
        "mult": 2 * nPins,
    }

    fuelUZrB = components.Circle("fuel B", "UZr", **fuelDims2nPins)

    cladDims = {
        "Tinput": temperature,
        "Thot": temperature,
        "od": cladOD,
        "id": fuelOD,
        "mult": 2 * nPins,
    }

    clad = components.Circle("clad", "HT9", **cladDims)

    interDims = {
        "Tinput": temperature,
        "Thot": temperature,
        "op": 16.8,
        "ip": 16.0,
        "mult": 1.0,
    }

    interSodium = components.Hexagon("interCoolant", "Sodium", **interDims)

    block = blocks.HexBlock("fuel")
    block2 = blocks.HexBlock("fuel")

    # first block: mixed UZr/UTh pins
    block.setType("fuel")
    block.setHeight(10.0)
    block.add(fuelUZr)
    block.add(fuelUTh)
    block.add(clad)
    block.add(interSodium)
    block.p.axMesh = 1
    block.p.molesHmBOL = 1.0
    block.p.molesHmNow = 1.0

    # second block: all-UZr pins (double multiplicity)
    block2.setType("fuel")
    block2.setHeight(10.0)
    block2.add(fuelUZrB)
    block2.add(clad)
    block2.add(interSodium)
    block2.p.axMesh = 1
    block2.p.molesHmBOL = 2
    block2.p.molesHmNow = 1.0

    assemblieObjs = []
    for numBlocks, blockTemplate in zip([1, 1, 5, 4], [block, block2, block, block]):
        assembly = assemblies.HexAssembly("testAssemblyType")
        assembly.spatialGrid = grids.AxialGrid.fromNCells(numBlocks)
        assembly.spatialGrid.armiObject = assembly
        for _i in range(numBlocks):
            # deep copy so each assembly owns independent block state
            newBlock = copy.deepcopy(blockTemplate)
            assembly.add(newBlock)
        assembly.calculateZCoords()
        assembly.reestablishBlockOrder()
        assemblieObjs.append(assembly)

    return assemblieObjs


class MaterialInAssembly_TestCase(unittest.TestCase):
    """Tests exercising the materials inside the shared test assemblies."""

    @classmethod
    def setUpClass(cls):
        cls.assembly, cls.assembly2, cls.assembly3, cls.assembly4 = buildTestAssemblies()

    def test_sortNoLocator(self):
        """Assemblies without locators compare as unordered; with locators they sort by position."""
        self.assembly.spatialLocator = None
        self.assembly2.spatialLocator = None
        self.assertFalse(self.assembly < self.assembly2)
        self.assertFalse(self.assembly2 < self.assembly)

        grid = grids.HexGrid()
        self.assembly.spatialLocator = grid[0, 0, 0]
        self.assembly2.spatialLocator = grid[0, 1, 0]
        self.assertTrue(self.assembly < self.assembly2)
        self.assertFalse(self.assembly2 < self.assembly)

    def test_UThZrMaterial(self):
        """Test the ternary UZr material."""
        b2 = self.assembly2[0]
        uZrFuel = b2.getComponent(Flags.FUEL | Flags.B)
        mat = uZrFuel.getProperties()
        mat.applyInputParams(0.1, 0.0)
        self.assertAlmostEqual(uZrFuel.getMass("U235") / (uZrFuel.getMass("U238") + uZrFuel.getMass("U235")), 0.1)


def makeTestAssembly(numBlocks, assemNum, spatialGrid=grids.HexGrid.fromPitch(1.0), r=None):
    """Build a bare HexAssembly placed at (2, 2, 0) on the given (or the reactor's) grid.

    NOTE(review): the ``spatialGrid`` default is evaluated once at import time, so all callers
    that omit both ``spatialGrid`` and ``r`` share the same HexGrid instance — confirm intended.
    """
    coreGrid = r.core.spatialGrid if r is not None else spatialGrid
    a = HexAssembly("TestAssem", assemNum=assemNum)
    a.spatialGrid = grids.AxialGrid.fromNCells(numBlocks)
    a.spatialGrid.armiObject = a
    a.spatialLocator = coreGrid[2, 2, 0]
    return a


class AssemblyReadOnlyTests(unittest.TestCase):
    """These tests of Assemblies do not modify the test assembly, which can be created in a setUpClass method."""

    @classmethod
    def setUpClass(cls):
        cls.name = "A0015"
        cls.assemNum = 15
        cls.height = 10
        cls.cs = settings.Settings()
        # Print nothing to the screen that would normally go to the log.
runLog.setVerbosity("error") cls.r = tests.getEmptyHexReactor() cls.r.core.symmetry = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC) cls.assembly = makeTestAssembly(NUM_BLOCKS, cls.assemNum, r=cls.r) # Use these if they are needed cls.blockParams = { "height": cls.height, "bondRemoved": 0.0, "envGroupNum": 0, "buLimit": 35, "buRate": 0.0, "eqRegion": -1, "id": 212.0, "pdens": 10.0, "percentBu": 25.3, "power": 100000.0, "residence": 4.0, "smearDensity": 0.6996721711791459, "timeToLimit": 2.7e5, "xsTypeNum": 65, "zbottom": 97.3521, "ztop": 111.80279999999999, } # add some blocks with a component cls.blockList = [] for i in range(NUM_BLOCKS): b = blocks.HexBlock("TestHexBlock") b.setHeight(cls.height) cls.hexDims = { "Tinput": 273.0, "Thot": 273.0, "op": 0.76, "ip": 0.0, "mult": 1.0, } h = components.Hexagon("fuel", "UZr", **cls.hexDims) # non-flaggy name important for testing b.setType("igniter fuel unitst") b.add(h) b.parent = cls.assembly b.setName(b.makeName(cls.assembly.getNum(), i)) cls.assembly.add(b) cls.blockList.append(b) cls.r.core.add(cls.assembly) cls.assembly.calculateZCoords() def test_isOnWhichSymmetryLine(self): line = self.assembly.isOnWhichSymmetryLine() self.assertEqual(line, 2) def test_iter(self): cur = [] for block in self.assembly: cur.append(block) ref = self.blockList self.assertEqual(cur, ref) def test_len(self): cur = len(self.assembly) ref = len(self.blockList) self.assertEqual(cur, ref) def test_getName(self): cur = self.assembly.getName() ref = self.name self.assertEqual(cur, ref) def test_getNum(self): cur = self.assembly.getNum() ref = self.assemNum self.assertEqual(cur, ref) def test_getLocation(self): """ Test for getting string location of assembly. .. test:: Assembly location is retrievable. 
:id: T_ARMI_ASSEM_POSI0 :tests: R_ARMI_ASSEM_POSI """ cur = self.assembly.getLocation() ref = str("005-003") self.assertEqual(cur, ref) def test_getArea(self): """Tests area calculation for hex assembly.""" # Default case: for assemblies with no blocks a = HexAssembly("TestAssem", assemNum=10) self.assertIsNone(a.getArea()) # more realistic case: a hex block/assembly cur = self.assembly.getArea() ref = math.sqrt(3) / 2.0 * self.hexDims["op"] ** 2 self.assertAlmostEqual(cur, ref, places=6) def test_getVolume(self): """Tests volume calculation for hex assembly.""" cur = self.assembly.getVolume() ref = math.sqrt(3) / 2.0 * self.hexDims["op"] ** 2 * self.height * NUM_BLOCKS places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getAxialMesh(self): cur = self.assembly.getAxialMesh() ref = [i * self.height + self.height for i in range(NUM_BLOCKS)] self.assertEqual(cur, ref) def test_calculateZCoords(self): self.assembly.calculateZCoords() places = 6 bottom = 0.0 for b in self.assembly: top = bottom + self.height cur = b.p.z ref = bottom + (top - bottom) / 2.0 self.assertAlmostEqual(cur, ref, places=places) cur = b.p.zbottom ref = bottom self.assertAlmostEqual(cur, ref, places=places) cur = b.p.ztop ref = top self.assertAlmostEqual(cur, ref, places=places) bottom = top def test_getTotalHeight(self): cur = self.assembly.getTotalHeight() ref = self.height * NUM_BLOCKS places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getHeight(self): """Test height of assembly calculation.""" cur = self.assembly.getHeight() ref = self.height * NUM_BLOCKS places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getReactiveHeight(self): self.assembly[2].getComponent(Flags.FUEL).adjustMassEnrichment(0.01) self.assembly[2].setNumberDensity("PU239", 0.0) bottomElevation, reactiveHeight = self.assembly.getReactiveHeight(enrichThresh=0.02) self.assertEqual(bottomElevation, 0.0) self.assertEqual(reactiveHeight, 20.0) def test_hasFlags(self): 
self.assembly.setType("fuel") cur = self.assembly.hasFlags(Flags.FUEL) self.assertTrue(cur) def test_getBlocks(self): cur = self.assembly.getBlocks() ref = self.blockList self.assertEqual(cur, ref) def test_getFirstBlock(self): cur = self.assembly.getFirstBlock() ref = self.blockList[0] self.assertAlmostEqual(cur, ref) def test_getFirstBlockByType(self): b = self.assembly.getFirstBlockByType("igniter fuel unitst") self.assertEqual(b.getType(), "igniter fuel unitst") b = self.assembly.getFirstBlockByType("i do not exist") self.assertIsNone(b) def test_getDim(self): """Tests dimensions are retrievable.""" # quick test, if there are no blocks a = HexAssembly("TestAssem", assemNum=10) self.assertIsNone(a.getDim(Flags.FUEL, "op")) # more interesting test, with blocks cur = self.assembly.getDim(Flags.FUEL, "op") ref = self.hexDims["op"] places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getDominantMaterial(self): cur = self.assembly.getDominantMaterial(Flags.FUEL).getName() ref = "UZr" self.assertEqual(cur, ref) self.assertEqual(self.assembly.getDominantMaterial().getName(), ref) def test_countBlocksOfType(self): cur = self.assembly.countBlocksWithFlags(Flags.IGNITER | Flags.FUEL) self.assertEqual(cur, 3) def test_iteration(self): """Tests the ability to doubly-loop over assemblies (under development).""" a = self.assembly for bi, b in enumerate(a): if bi == 2: h = 0.0 for bi2, b2 in enumerate(a): if bi2 == 0: self.assertEqual( b2, a[0], msg="First block in new iteration is not the first block of assembly", ) h += b2.getHeight() # make sure the loop continues with the right counter self.assertEqual( b, a[bi], msg="The {0}th block in the loop ({1}) is not equal to the {0}th block in the assembly {2}".format( bi, b, "dummy" ), ) def test_getBlocksAndZ(self): blocksAndCenters = self.assembly.getBlocksAndZ() lastZ = -1.0 for b, c in blocksAndCenters: self.assertIn(b, self.assembly.getBlocks()) self.assertGreater(c, lastZ) lastZ = c 
self.assertRaises(TypeError, self.assembly.getBlocksAndZ, 1.0) def test_getBlocksBetweenElevations(self): # assembly should have 3 blocks of 10 cm in it blocksAndHeights = self.assembly.getBlocksBetweenElevations(0, 10) self.assertEqual(blocksAndHeights[0], (self.assembly[0], 10.0)) blocksAndHeights = self.assembly.getBlocksBetweenElevations(0, 5.0) self.assertEqual(blocksAndHeights[0], (self.assembly[0], 5.0)) blocksAndHeights = self.assembly.getBlocksBetweenElevations(1.0, 5.0) self.assertEqual(blocksAndHeights[0], (self.assembly[0], 4.0)) blocksAndHeights = self.assembly.getBlocksBetweenElevations(9.0, 21.0) self.assertEqual(blocksAndHeights[0], (self.assembly[0], 1.0)) self.assertEqual(blocksAndHeights[1], (self.assembly[1], 10.0)) self.assertEqual(blocksAndHeights[2], (self.assembly[2], 1.0)) blocksAndHeights = self.assembly.getBlocksBetweenElevations(-10, 1000.0) self.assertEqual(len(blocksAndHeights), len(self.assembly)) self.assertAlmostEqual(sum([height for _b, height in blocksAndHeights]), self.assembly.getHeight()) def test_hasContinuousCoolantChannel(self): self.assertFalse(self.assembly.hasContinuousCoolantChannel()) modifiedAssem = self.assembly coolantDims = {"Tinput": 273.0, "Thot": 273.0} h = components.DerivedShape("coolant", "Sodium", **coolantDims) for b in modifiedAssem: b.add(h) self.assertTrue(modifiedAssem.hasContinuousCoolantChannel()) def test_carestianCoordinates(self): """Check the coordinates of the assembly within the core with a CarestianGrid. .. test:: Cartesian coordinates are retrievable. 
:id: T_ARMI_ASSEM_POSI1 :tests: R_ARMI_ASSEM_POSI """ a = makeTestAssembly( numBlocks=1, assemNum=1, spatialGrid=grids.CartesianGrid.fromRectangle(1.0, 1.0), ) self.assertEqual(a.coords(), (2.0, 2.0)) def test_assem_hex_type(self): """Test that all children of a hex assembly are hexagons.""" for b in self.assembly: # For a hex assem, confirm they are of type "Hexagon" pitch_comp_type = b.PITCH_COMPONENT_TYPE[0] self.assertEqual(pitch_comp_type.__name__, "Hexagon") def test_getElevationBoundariesByBlockType(self): elevations = self.assembly.getElevationBoundariesByBlockType() self.assertEqual(elevations, [0.0, 10.0, 10.0, 20.0, 20.0, 30.0]) class AssemblyTests(unittest.TestCase): """These tests of Assemblies modify the test assembly, so each unit tests needs a fresh test assembly.""" def setUp(self): self.name = "A0015" self.assemNum = 15 self.height = 10 self.cs = settings.Settings() # Print nothing to the screen that would normally go to the log. runLog.setVerbosity("error") self.r = tests.getEmptyHexReactor() self.r.core.symmetry = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC) self.assembly = makeTestAssembly(NUM_BLOCKS, self.assemNum, r=self.r) # Use these if they are needed self.blockParams = { "height": self.height, "bondRemoved": 0.0, "envGroupNum": 0, "buLimit": 35, "buRate": 0.0, "eqRegion": -1, "id": 212.0, "pdens": 10.0, "percentBu": 25.3, "power": 100000.0, "residence": 4.0, "smearDensity": 0.6996721711791459, "timeToLimit": 2.7e5, "xsTypeNum": 65, "zbottom": 97.3521, "ztop": 111.80279999999999, } # add some blocks with a component self.blockList = [] for i in range(NUM_BLOCKS): b = blocks.HexBlock("TestHexBlock") b.setHeight(self.height) self.hexDims = { "Tinput": 273.0, "Thot": 273.0, "op": 0.76, "ip": 0.0, "mult": 1.0, } h = components.Hexagon("fuel", "UZr", **self.hexDims) # non-flaggy name important for testing b.setType("igniter fuel unitst") b.add(h) b.parent = self.assembly 
b.setName(b.makeName(self.assembly.getNum(), i)) self.assembly.add(b) self.blockList.append(b) self.r.core.add(self.assembly) self.assembly.calculateZCoords() def test_notesParameter(self): self.assertEqual(self.assembly.p.notes, "") with self.assertRaises(ValueError): # try to assign a non-string self.assembly.p.notes = 1 note = "This is a short, acceptable not about the assembly" self.assembly.p.notes = note self.assertEqual(self.assembly.p.notes, note) tooLongNote = "a" * 1001 self.assembly.p.notes = tooLongNote self.assertEqual(self.assembly.p.notes, tooLongNote[0:1000]) def test_append(self): b = blocks.HexBlock("TestBlock") self.blockList.append(b) self.assembly.append(b) cur = self.assembly.getBlocks() ref = self.blockList self.assertEqual(cur, ref) def test_extend(self): blockList = [] for _ in range(2): b = blocks.HexBlock("TestBlock") self.blockList.append(b) blockList.append(b) self.assembly.extend(blockList) cur = self.assembly.getBlocks() ref = self.blockList self.assertEqual(cur, ref) for c in self.assembly: self.assertIs(c.parent, self.assembly) def test_add(self): a = makeTestAssembly(1, 1) # successfully add some Blocks to an Assembly for n in range(3): self.assertEqual(len(a), n) b = blocks.HexBlock("TestBlock") a.add(b) self.assertIn(b, a) self.assertEqual(b.parent, a) self.assertEqual(len(a), n + 1) with self.assertRaises(TypeError): a.add(blocks.CartesianBlock("Test Cart Block")) def test_moveTo(self): ref = self.r.core.spatialGrid.getLocatorFromRingAndPos(3, 10) i, j = grids.HexGrid.getIndicesFromRingAndPos(3, 10) locator = self.r.core.spatialGrid[i, j, 0] self.assembly.moveTo(locator) cur = self.assembly.spatialLocator self.assertEqual(cur, ref) def test_scaleParamsWhenMoved(self): """Volume integrated parameters must be scaled when an assembly is placed on a core boundary.""" with patch.object(self.assembly.p.paramDefs["chargeFis"], "location", ParamLocation.VOLUME_INTEGRATED): # patch makes chargeFis look volume integrated assemblyParams = 
{"chargeFis": 6.0, "chargeTime": 2} blockParams = { # volume integrated parameters "massHmBOL": 9.0, "molesHmBOL": np.array([[1, 2, 3], [4, 5, 6]]), # ndarray for testing "adjMgFlux": [1, 2, 3], # Should normally be an ndarray, list for testing "lastMgFlux": "foo", # Should normally be an ndarray, str for testing } self.assembly.p.update(assemblyParams) for b in self.assembly.iterBlocks(Flags.FUEL): b.p.update(blockParams) i, j = grids.HexGrid.getIndicesFromRingAndPos(1, 1) locator = self.r.core.spatialGrid[i, j, 0] self.assertEqual(self.assembly.getSymmetryFactor(), 1) self.assembly.moveTo(locator) self.assertEqual(self.assembly.getSymmetryFactor(), 3) for b in self.assembly.iterBlocks(Flags.FUEL): # float assert_allclose(b.p["massHmBOL"] / blockParams["massHmBOL"], 1 / 3) # np.ndarray assert_allclose(b.p["molesHmBOL"] / blockParams["molesHmBOL"], 1 / 3) # list assert_allclose(np.array(b.p["adjMgFlux"]) / np.array(blockParams["adjMgFlux"]), 1 / 3) # string self.assertEqual(b.p["lastMgFlux"], blockParams["lastMgFlux"]) self.assertEqual(self.assembly.p["chargeFis"] / assemblyParams["chargeFis"], 1 / 3) self.assertEqual(self.assembly.p["chargeTime"] / assemblyParams["chargeTime"], 1) def test_adjustResolution(self): # Make a second assembly with 4 times the resolution assemNum2 = self.assemNum * 4 height2 = self.height / 4.0 assembly2 = makeTestAssembly(assemNum2, assemNum2) # add some blocks with a component for _ in range(assemNum2): b = blocks.HexBlock("TestBlock") b.setHeight(height2) assembly2.add(b) self.assembly.adjustResolution(assembly2) cur = len(self.assembly.getBlocks()) ref = 4.0 * len(self.blockList) self.assertEqual(cur, ref) cur = self.assembly.getBlocks()[0].getHeight() ref = self.height / 4.0 places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getFissileMass(self): for b in self.assembly: b.p.massHmBOL = b.getHMMass() b.p.enrichmentBOL = b.getFissileMassEnrich() cur = self.assembly.getFissileMass() ref = sum(bi.getMass(["U235", 
"PU239"]) for bi in self.assembly) self.assertAlmostEqual(cur, ref) def test_getMass(self): mass0 = self.assembly.getMass("U235") mass1 = sum(bi.getMass("U235") for bi in self.assembly) self.assertAlmostEqual(mass0, mass1) fuelBlock = next(self.assembly.iterBlocks(Flags.FUEL)) blockU35Mass = fuelBlock.getMass("U235") fuelBlock.setMass("U235", 2 * blockU35Mass) self.assertAlmostEqual(fuelBlock.getMass("U235"), blockU35Mass * 2) self.assertAlmostEqual(self.assembly.getMass("U235"), mass0 + blockU35Mass) fuelBlock.setMass("U238", 0.0) self.assertAlmostEqual(blockU35Mass * 2, fuelBlock.getMass("U235")) def test_getAge(self): res = 5.0 for b in self.assembly: b.p.residence = res cur = self.assembly.getAge() ref = res places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_makeAxialSnapList(self): # Make a second assembly with 4 times the resolution assemNum2 = self.assemNum * 4 height2 = self.height / 4.0 assembly2 = makeTestAssembly(assemNum2, assemNum2) # add some blocks with a component for _i in range(assemNum2): self.hexDims = { "Tinput": 273.0, "Thot": 273.0, "op": 0.76, "ip": 0.0, "mult": 1.0, } h = components.Hexagon("fuel", "UZr", **self.hexDims) b = blocks.HexBlock("fuel") b.setType("igniter fuel") b.add(h) b.setHeight(height2) assembly2.add(b) self.assembly.makeAxialSnapList(assembly2) cur = [] for b in self.assembly: cur.append(b.p.topIndex) ref = [3, 7, 11] self.assertEqual(cur, ref) def test_snapAxialMeshToReference(self): ref = [11, 22, 33] for b, i in zip(self.assembly, range(self.assemNum)): b.p.topIndex = i self.assembly.setBlockMesh(ref) cur = [] for b in self.assembly: cur.append(b.p.ztop) self.assertEqual(cur, ref) def test_updateFromAssembly(self): assembly2 = makeTestAssembly(self.assemNum, self.assemNum) params = {} params["maxPercentBu"] = 30.0 params["numMoves"] = 5.0 params["maxPercentBu"] = 0 params["timeToLimit"] = 2.7e5 params["arealPd"] = 110.0 params["maxDpaPeak"] = 14.0 params["kInf"] = 60.0 for key, param in params.items(): 
assembly2.p[key] = param self.assembly.updateParamsFrom(assembly2) for key, param in params.items(): cur = self.assembly.p[key] ref = param self.assertEqual(cur, ref) def _setup_blueprints(self, filename="refSmallReactor.yaml"): # need this for the getAllNuclides call with directoryChangers.DirectoryChanger(TEST_ROOT): newSettings = {CONF_LOADING_FILE: filename} self.cs = self.cs.modified(newSettings=newSettings) with open(self.cs[CONF_LOADING_FILE], "r") as y: y = textProcessors.resolveMarkupInclusions(y, pathlib.Path(self.cs.inputDirectory)) self.r.blueprints = blueprints.Blueprints.load(y) self.r.blueprints._prepConstruction(self.cs) def test_duplicate(self): self._setup_blueprints() # Perform the copy assembly2 = copy.deepcopy(self.assembly) for refBlock, curBlock in zip(self.assembly, assembly2): numNucs = 0 for nuc in self.assembly.getAncestor( lambda c: isinstance(c, reactors.Reactor) ).blueprints.allNuclidesInProblem: numNucs += 1 # Block level density ref = refBlock.getNumberDensity(nuc) cur = curBlock.getNumberDensity(nuc) self.assertEqual(cur, ref) self.assertGreater(numNucs, 5) refFracs = refBlock.getVolumeFractions() curFracs = curBlock.getVolumeFractions() # Block level area fractions for ref, cur in zip(refFracs, curFracs): ref = ref[1] cur = cur[1] places = 6 self.assertAlmostEqual(cur, ref, places=places) # Block level params for refParam in refBlock.p: if refParam == "serialNum": continue ref = refBlock.p[refParam] cur = curBlock.p[refParam] if isinstance(cur, np.ndarray): self.assertTrue((cur == ref).all()) else: if refParam == "location": ref = str(ref) cur = str(cur) self.assertEqual( cur, ref, msg="The {} param differs: {} vs. 
{}".format(refParam, cur, ref), ) # Block level height for b, b2 in zip(self.assembly, assembly2): ref = b.getHeight() cur = b2.getHeight() self.assertEqual(cur, ref) assert_allclose(b.spatialLocator.indices, b2.spatialLocator.indices) # Assembly level params for param in self.assembly.p: if param == "serialNum": continue ref = self.assembly.p[param] cur = assembly2.p[param] if isinstance(cur, np.ndarray): assert_allclose(cur, ref) else: self.assertEqual(cur, ref) # Block level core and parent for b in assembly2: self.assertEqual(b.core, None) self.assertEqual(b.parent, assembly2) def test_pinPlenumVolume(self): """Test the volume of a pin in the assembly's plenum.""" pinPlenumVolume = 5.951978e-05 self._setup_blueprints("refSmallReactorBase.yaml") assembly = self.r.blueprints.assemblies.get("igniter fuel") self.assertAlmostEqual(pinPlenumVolume, assembly.getPinPlenumVolumeInCubicMeters()) def test_renameBlocksAccordingToAssemblyNum(self): self.assembly.p.assemNum = 55 self.assembly.renameBlocksAccordingToAssemblyNum() self.assertIn("{0:04d}".format(self.assembly.getNum()), self.assembly[1].getName()) def test_getBlockData(self): paramDict = { "timeToLimit": 40.0, "power": 10000.0, "envGroup": 4, "residence": 3.145, "eqRegion": -1, "id": 299.0, "bondRemoved": 0.337, "buRate": 42.0, } # Set some params for b in self.assembly: for param, paramVal in paramDict.items(): b.p[param] = paramVal for param in paramDict: cur = list(self.assembly.getChildParamValues(param)) ref = [] for i, b in enumerate(self.blockList): ref.append(self.blockList[i].p[param]) self.assertAlmostEqual(cur, ref, places=6) def test_getMaxParam(self): for bi, b in enumerate(self.assembly): b.p.power = bi self.assertAlmostEqual(self.assembly.getMaxParam("power"), len(self.assembly) - 1) def test_getElevationsMatchingParamValue(self): self.assembly[0].p.power = 0.0 self.assembly[1].p.power = 20.0 self.assembly[2].p.power = 10.0 heights = self.assembly.getElevationsMatchingParamValue("power", 15.0) 
self.assertListEqual(heights, [12.5, 20.0]) def test_calcAvgParam(self): nums = [] for b in self.assembly: nums.append(random.random()) b.p.power = nums[-1] self.assertGreater(len(nums), 2) self.assertAlmostEqual(self.assembly.calcAvgParam("power"), sum(nums) / len(nums)) def test_calcTotalParam(self): # Remake original assembly self.assembly = makeTestAssembly(self.assemNum, self.assemNum) # add some blocks with a component for i in range(self.assemNum): b = blocks.HexBlock("TestBlock") # Set the 1st block to have higher params than the rest. self.blockParamsTemp = {} for key, val in self.blockParams.items(): # Iterate with i in self.assemNum, so higher assemNums get the high values. if key != "xsTypeNum": # must keep valid b.p[key] = self.blockParamsTemp[key] = val * i b.setHeight(self.height) b.setType("fuel") self.hexDims = {"Tinput": 273.0, "Thot": 273.0, "op": 0.76, "ip": 0.0, "mult": 1.0} h = components.Hexagon("intercoolant", "Sodium", **self.hexDims) b.add(h) self.assembly.add(b) for param in self.blockParamsTemp: tot = 0.0 for b in self.assembly: try: tot += b.p[param] except TypeError: pass ref = tot try: cur = self.assembly.calcTotalParam(param) places = 6 self.assertAlmostEqual(cur, ref, places=places) except TypeError: pass def test_reattach(self): # Remake original assembly self.assembly = makeTestAssembly(self.assemNum, self.assemNum) self.assertEqual(0, len(self.assembly)) # add some blocks with a component for i in range(self.assemNum): b = blocks.HexBlock("TestBlock") # Set the 1st block to have higher params than the rest. self.blockParamsTemp = {} for key, val in self.blockParams.items(): # Iterate with i in self.assemNum, so higher assemNums get the high values. 
b.p[key] = self.blockParamsTemp[key] = val * (i + 1) b.setHeight(self.height) b.setType("fuel") self.hexDims = { "Tinput": 273.0, "Thot": 273.0, "op": 0.76, "ip": 0.0, "mult": 1.0, } h = components.Hexagon("intercoolant", "Sodium", **self.hexDims) b.add(h) self.assembly.add(b) self.assertEqual(self.assemNum, len(self.assembly)) for b in self.assembly: self.assertEqual("fuel", b.getType()) def test_reestablishBlockOrder(self): self.assertEqual(self.assembly.spatialLocator.indices[0], 2) self.assertEqual(self.assembly[0].spatialLocator.getRingPos(), (5, 3)) self.assertEqual(self.assembly[0].spatialLocator.indices[2], 0) axialIndices = [2, 1, 0] for ai, b in zip(axialIndices, self.assembly): b.spatialLocator = self.assembly.spatialGrid[0, 0, ai] self.assembly.reestablishBlockOrder() cur = [] for b in self.assembly: cur.append(b.getLocation()) ref = ["005-003-000", "005-003-001", "005-003-002"] self.assertEqual(cur, ref) def test_getParamValuesAtZ(self): # single value param for b, temp in zip(self.assembly, [80, 85, 90]): b.p.percentBu = temp percentBuDef = b.p.paramDefs["percentBu"] originalLoc = percentBuDef.location try: self.assertAlmostEqual(87.5, self.assembly.getParamValuesAtZ("percentBu", 20.0)) percentBuDef.location = parameters.ParamLocation.BOTTOM self.assertAlmostEqual( 82.5, self.assembly.getParamValuesAtZ("percentBu", 5.0, fillValue="extend"), ) percentBuDef.location = parameters.ParamLocation.TOP self.assertAlmostEqual(82.5, self.assembly.getParamValuesAtZ("percentBu", 15.0)) for b in self.assembly: b.p.percentBu = None self.assertTrue(np.isnan(self.assembly.getParamValuesAtZ("percentBu", 25.0))) # multiDimensional param for b, flux in zip(self.assembly, [[1, 10], [2, 8], [3, 6]]): b.p.mgFlux = flux self.assertTrue(np.allclose([2.5, 7.0], self.assembly.getParamValuesAtZ("mgFlux", 20.0))) self.assertTrue(np.allclose([1.5, 9.0], self.assembly.getParamValuesAtZ("mgFlux", 10.0))) for b in self.assembly: b.p.mgFlux = [0.0] * 2 
self.assertTrue(np.allclose([0.0, 0.0], self.assembly.getParamValuesAtZ("mgFlux", 10.0))) # single value param at corner for b, temp in zip(self.assembly, [100, 200, 300]): b.p.THcornTemp = [temp + iCorner for iCorner in range(6)] value = self.assembly.getParamValuesAtZ("THcornTemp", 20.0) self.assertTrue(np.allclose([300, 301, 302, 303, 304, 305], value)) finally: percentBuDef.location = originalLoc def test_averagePlenumTemperature(self): """Test an assembly's average plenum temperature with a single block outlet.""" averagePlenumTemp = 42.0 plenumBlock = makeTestAssembly(1, 2, grids.CartesianGrid.fromRectangle(1.0, 1.0)) plenumBlock.setType("plenum", Flags.PLENUM) plenumBlock.p.THcoolantOutletT = averagePlenumTemp self.assembly.setBlockMesh([10.0, 20.0, 30.0], conserveMassFlag="auto") self.assembly.append(plenumBlock) self.assertEqual(averagePlenumTemp, self.assembly.getAveragePlenumTemperature()) def test_rotate(self): """Test rotation of an assembly spatial objects. .. test:: An assembly can be rotated about its z-axis. 
:id: T_ARMI_ROTATE_HEX_ASSEM :tests: R_ARMI_ROTATE_HEX """ a = makeTestAssembly(1, 1) b = blocks.HexBlock("TestBlock") b.p.THcornTemp = [400, 450, 500, 550, 600, 650] rotTemp = [600, 650, 400, 450, 500, 550] b.p.displacementX = 0 b.p.displacementY = 1 rotX = -math.sqrt(3) / 2 rotY = -0.5 a.add(b) a.rotate(math.radians(120)) # test list rotation b = a[0] self.assertEqual(b.p.THcornTemp, rotTemp) self.assertAlmostEqual(b.p.displacementX, rotX) self.assertAlmostEqual(b.p.displacementY, rotY) b.p.THcornTemp = np.array([400, 450, 500, 550, 600, 650]) rotTemp = np.array([600, 650, 400, 450, 500, 550]) a.rotate(math.radians(120)) # test np.ndarray rotation for i in range(len(b.p.THcornTemp)): self.assertEqual(b.p.THcornTemp[i], rotTemp[i]) # test that floats and ints are left alone b.p.THcornTemp = 3 a.rotate(math.radians(120)) self.assertEqual(b.p.THcornTemp, 3) b.p.THcornTemp = 4.0 a.rotate(math.radians(120)) self.assertEqual(b.p.THcornTemp, 4.0) # check that TypeError is raised for unexpected data type b.p.THcornTemp = "bad data" with self.assertRaises(TypeError): a.rotate(math.radians(120)) # check that list of len != 6 ends up in runlog warning # list len=5 b.p.THcornTemp = [400, 450, 500, 550, 600] with mockRunLogs.BufferLog() as mock: self.assertEqual("", mock.getStdout()) a.rotate(math.radians(120)) self.assertIn("No rotation method defined", mock.getStdout()) # np.ndarray len=5 b.p.THcornTemp = np.array([400, 450, 500, 550, 600]) with mockRunLogs.BufferLog() as mock: self.assertEqual("", mock.getStdout()) a.rotate(math.radians(120)) self.assertIn("No rotation method defined", mock.getStdout()) # list len=7 b.p.THcornTemp = [400, 450, 500, 550, 600, 650, 700] with mockRunLogs.BufferLog() as mock: self.assertEqual("", mock.getStdout()) a.rotate(math.radians(120)) self.assertIn("No rotation method defined", mock.getStdout()) # np.ndarray len=7 b.p.THcornTemp = np.array([400, 450, 500, 550, 600, 650, 700]) with mockRunLogs.BufferLog() as mock: self.assertEqual("", 
mock.getStdout()) a.rotate(math.radians(120)) self.assertIn("No rotation method defined", mock.getStdout()) with self.assertRaisesRegex(ValueError, expected_regex="60 degree"): a.rotate(math.radians(40)) def test_assemBlockTypes(self): """Test that all children of an assembly are blocks, ordered from top to bottom. .. test:: Validate child types of assembly are blocks, ordered from top to bottom. :id: T_ARMI_ASSEM_BLOCKS :tests: R_ARMI_ASSEM_BLOCKS """ coords = [] for b in self.assembly.iterBlocks(): # Confirm children are blocks self.assertIsInstance(b, blocks.Block) # get coords from the child blocks coords.append(b.getLocation()) # get the Z-coords for each block zCoords = [int(c.split("-")[-1]) for c in coords] # verify the blocks are ordered top-to-bottom, vertically for i in range(1, len(zCoords)): self.assertGreater(zCoords[i], zCoords[i - 1]) def test_getBIndexFromZIndex(self): # make sure the axMesh parameters are set in our test block for b in self.assembly: b.p.axMesh = 1 for zIndex in range(6): bIndex = self.assembly.getBIndexFromZIndex(zIndex * 0.5) self.assertEqual(bIndex, math.ceil(zIndex / 2) if zIndex < 5 else -1) class AssemblyInReactor_TestCase(unittest.TestCase): def setUp(self): self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT) def test_snapAxialMesViaBlockIgn(self): """Snap axial mesh to a reference mesh should conserve mass based on Block igniter fuel.""" originalMesh = [25.0, 50.0, 75.0, 100.0, 175.0] refMesh = [26.0, 52.0, 79.0, 108.0, 175.0] grid = self.r.core.spatialGrid # 1. 
examine mass change in igniterFuel igniterFuel = self.r.core.childrenByLocator[grid[0, 0, 0]] # gridplate, fuel, fuel, fuel, plenum for b in igniterFuel.iterBlocks(Flags.FUEL): fuelComp = b.getComponent(Flags.FUEL) # add isotopes from clad and coolant to fuel component to test mass conservation # mass should only be conserved within fuel component, not over the whole block fuelComp.setNumberDensity("FE56", 1e-10) fuelComp.setNumberDensity("NA23", 1e-10) b = igniterFuel[0] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) igniterMassGrid = b.getMass() - coolMass igniterMassGridTotal = b.getMass() b = igniterFuel[1] igniterHMMass1 = b.getHMMass() igniterZircMass1 = b.getMass("ZR") igniterFuelBlockMass = b.getMass() igniterDuctMass = b.getComponent(Flags.DUCT).getMass() igniterCoolMass = b.getComponent(Flags.COOLANT).getMass() coolMass = 0 b = igniterFuel[4] for nuc in coolantNucs: coolMass += b.getMass(nuc) igniterPlenumMass = b.getMass() - coolMass # expand the core to the new reference mesh for a in self.r.core: a.setBlockMesh(refMesh, conserveMassFlag="auto") # 2. 
check igniter mass after expansion # gridplate, fuel, fuel, fuel, plenum b = igniterFuel[0] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) igniterMassGridAfterExpand = b.getMass() - coolMass b = igniterFuel[1] igniterHMMass1AfterExpand = b.getHMMass() igniterZircMass1AfterExpand = b.getMass("ZR") igniterDuctMassAfterExpand = b.getComponent(Flags.DUCT).getMass() igniterCoolMassAfterExpand = b.getComponent(Flags.COOLANT).getMass() coolMass = 0 b = igniterFuel[4] for nuc in coolantNucs: coolMass += b.getMass(nuc) igniterPlenumMassAfterExpand = b.getMass() - coolMass self.assertAlmostEqual(igniterMassGrid, igniterMassGridAfterExpand, 7) self.assertAlmostEqual(igniterHMMass1, igniterHMMass1AfterExpand, 7) self.assertAlmostEqual(igniterZircMass1, igniterZircMass1AfterExpand, 7) # demonstrate that the duct and coolant mass are not conserved. # number density stays constant, mass is scaled by ratio of new to old height self.assertAlmostEqual(igniterDuctMass, igniterDuctMassAfterExpand * 25.0 / 26.0, 7) self.assertAlmostEqual(igniterCoolMass, igniterCoolMassAfterExpand * 25.0 / 26.0, 7) # Note the masses are linearly different by the amount that the plenum shrunk self.assertAlmostEqual(igniterPlenumMass, igniterPlenumMassAfterExpand * 75 / 67.0, 7) # Shrink the core back to the original mesh size to see if mass is conserved for a in self.r.core: a.setBlockMesh(originalMesh, conserveMassFlag="auto") # 3. 
check igniter mass after shrink to original # gridplate, fuel, fuel, fuel, plenum b = igniterFuel[0] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) igniterMassGridAfterShrink = b.getMass() - coolMass igniterMassGridTotalAfterShrink = b.getMass() b = igniterFuel[1] igniterHMMass1AfterShrink = b.getHMMass() igniterZircMass1AfterShrink = b.getMass("ZR") igniterFuelBlockMassAfterShrink = b.getMass() igniterDuctMassAfterShrink = b.getComponent(Flags.DUCT).getMass() igniterCoolMassAfterShrink = b.getComponent(Flags.COOLANT).getMass() coolMass = 0 b = igniterFuel[4] for nuc in coolantNucs: coolMass += b.getMass(nuc) igniterPlenumMassAfterShrink = b.getMass() - coolMass self.assertAlmostEqual(igniterMassGrid, igniterMassGridAfterShrink, 7) self.assertAlmostEqual(igniterMassGridTotal, igniterMassGridTotalAfterShrink, 7) self.assertAlmostEqual(igniterHMMass1, igniterHMMass1AfterShrink, 7) self.assertAlmostEqual(igniterZircMass1, igniterZircMass1AfterShrink, 7) self.assertAlmostEqual(igniterFuelBlockMass, igniterFuelBlockMassAfterShrink, 7) self.assertAlmostEqual(igniterDuctMass, igniterDuctMassAfterShrink, 7) self.assertAlmostEqual(igniterCoolMass, igniterCoolMassAfterShrink, 7) self.assertAlmostEqual(igniterPlenumMass, igniterPlenumMassAfterShrink, 7) def test_snapAxialMeshViaBlockShield(self): """Snap axial mesh to a reference mesh should conserve mass based on Block shield.""" originalMesh = [25.0, 50.0, 75.0, 100.0, 175.0] refMesh = [26.0, 52.0, 79.0, 108.0, 175.0] # access the shield in ring 9, pos 2 grid = self.r.core.spatialGrid i, j = grid.getIndicesFromRingAndPos(9, 2) # 1. 
examine mass change in radial shield a = self.r.core.childrenByLocator[grid[i, j, 0]] # gridplate, axial shield, axial shield, axial shield, plenum b = a[0] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldMassGrid = b.getMass() - coolMass b = a[1] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldShieldMass = b.getMass() - coolMass b = a[4] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldPlenumMass = b.getMass() - coolMass # expand the core to the new reference mesh for a in self.r.core: a.setBlockMesh(refMesh, conserveMassFlag="auto") # 2. examine mass change in radial shield after expansion # gridplate, axial shield, axial shield, axial shield, plenum b = a[0] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldMassGridAfterExpand = b.getMass() - coolMass b = a[1] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldShieldMassAfterExpand = b.getMass() - coolMass b = a[4] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldPlenumMassAfterExpand = b.getMass() - coolMass # non mass conserving expansions self.assertAlmostEqual(shieldMassGrid * 26.0 / 25.0, shieldMassGridAfterExpand, 7) self.assertAlmostEqual(shieldShieldMass * 26.0 / 25.0, shieldShieldMassAfterExpand, 7) self.assertAlmostEqual(shieldPlenumMass, shieldPlenumMassAfterExpand * 75.0 / 67.0, 7) # Shrink the core back to the original mesh size to see if mass is conserved for a in self.r.core: a.setBlockMesh(originalMesh, conserveMassFlag="auto") # 3. 
examine mass change in radial shield after shrink to original # gridplate, axial shield, axial shield, axial shield, plenum b = a[0] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldMassGridAfterShrink = b.getMass() - coolMass b = a[1] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldShieldMassAfterShrink = b.getMass() - coolMass b = a[4] coolantNucs = b.getComponent(Flags.COOLANT).getNuclides() coolMass = 0 for nuc in coolantNucs: coolMass += b.getMass(nuc) shieldPlenumMassAfterShrink = b.getMass() - coolMass # non mass conserving expansions self.assertAlmostEqual(shieldMassGrid, shieldMassGridAfterShrink, 7) self.assertAlmostEqual(shieldShieldMass, shieldShieldMassAfterShrink, 7) self.assertAlmostEqual(shieldPlenumMass, shieldPlenumMassAfterShrink, 7) class AnnularFuelTestCase(unittest.TestCase): """Test fuel with a whole in the center.""" def setUp(self): self.cs = settings.Settings() newSettings = {CONF_XS_KERNEL: "MC2v2"} # don't try to expand elementals self.cs = self.cs.modified(newSettings=newSettings) bp = blueprints.Blueprints() self.r = reactors.Reactor("test", bp) self.r.add(reactors.Core("Core")) inputStr = """blocks: ann fuel: &block_ann_fuel gap: shape: Circle material: Void Tinput: 20.0 Thot: 435.0 id: 0.0 mult: fuel.mult od: fuel.id fuel: shape: Circle material: UZr Tinput: 20.0 Thot: 600.0 id: 0.1 mult: 127 od: 0.8 gap1: shape: Circle material: Void Tinput: 20.0 Thot: 435.0 id: fuel.od mult: fuel.mult od: clad.id clad: shape: Circle material: HT9 Tinput: 20.0 Thot: 435.0 id: .85 mult: fuel.mult od: .95 duct: &component_type2_fuel_duct shape: Hexagon material: HT9 Tinput: 20.0 Thot: 435.0 ip: 13.00 op: 13.9 mult: 1 intercoolant: &component_type2_fuel_intercoolant shape: Hexagon material: Sodium Tinput: 435.0 Thot: 435.0 ip: duct.op mult: 1 op: 16 coolant: &component_type2_fuel_coolant shape: 
DerivedShape material: Sodium Tinput: 435.0 Thot: 435.0 assemblies: heights: &standard_heights [30.0] axial mesh points: &standard_axial_mesh_points [2] ann fuel: specifier: FA blocks: &inner_igniter_fuel_blocks [*block_ann_fuel] height: *standard_heights axial mesh points: *standard_axial_mesh_points hotChannelFactors: TWRPclad xs types: &inner_igniter_fuel_xs_types [D] """ self.blueprints = blueprints.Blueprints.load(inputStr) self.blueprints._prepConstruction(self.cs) def test_areaCheck(self): assembly = list(self.blueprints.assemblies.values())[0] fuelBlock = assembly.getFirstBlock(Flags.FUEL) intercoolant = fuelBlock.getComponent(Flags.INTERCOOLANT) bpAssemblyArea = assembly.getArea() actualAssemblyArea = math.sqrt(3) / 2.0 * intercoolant.p.op**2 self.assertAlmostEqual(bpAssemblyArea, actualAssemblyArea) ================================================ FILE: armi/reactor/tests/test_blocks.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests blocks.py.""" import copy import io import logging import math import os import shutil import unittest from glob import glob from unittest.mock import MagicMock, patch import numpy as np from numpy.testing import assert_allclose, assert_array_equal from armi import materials, runLog, settings, tests from armi.nucDirectory import nucDir from armi.nucDirectory.nuclideBases import NuclideBases from armi.nuclearDataIO import xsCollections from armi.nuclearDataIO.cccc import isotxs from armi.physics.neutronics import GAMMA, NEUTRON from armi.physics.neutronics.settings import ( CONF_LOADING_FILE, CONF_XS_KERNEL, ) from armi.reactor import blocks, blueprints, components, geometry, grids from armi.reactor.components import basicShapes, complexShapes from armi.reactor.flags import Flags from armi.reactor.grids.cartesian import CartesianGrid from armi.reactor.tests.test_assemblies import makeTestAssembly from armi.testing import getEmptyCartesianReactor, loadTestReactor from armi.testing.singleMixedAssembly import buildMixedPinAssembly from armi.tests import ISOAA_PATH, TEST_ROOT, mockRunLogs from armi.utils import densityTools, hexagon, units from armi.utils.directoryChangers import TemporaryDirectoryChanger from armi.utils.units import ( ASCII_LETTER_A, ASCII_LETTER_Z, MOLES_PER_CC_TO_ATOMS_PER_BARN_CM, ASCII_LETTER_a, ) NUM_PINS_IN_TEST_BLOCK = 217 def buildSimpleFuelBlock(): """Return a simple hex block containing fuel, clad, duct, and coolant.""" b = blocks.HexBlock("fuel", height=10.0) fuelDims = {"Tinput": 25.0, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 127.0} cladDims = {"Tinput": 25.0, "Thot": 450, "od": 0.80, "id": 0.77, "mult": 127.0} ductDims = {"Tinput": 25.0, "Thot": 400, "op": 16, "ip": 15.3, "mult": 1.0} intercoolantDims = { "Tinput": 400, "Thot": 400, "op": 17.0, "ip": ductDims["op"], "mult": 1.0, } coolDims = {"Tinput": 25.0, "Thot": 400} fuel = components.Circle("fuel", "UZr", **fuelDims) clad = components.Circle("clad", "HT9", **cladDims) 
duct = components.Hexagon("duct", "HT9", **ductDims) coolant = components.DerivedShape("coolant", "Sodium", **coolDims) intercoolant = components.Hexagon("intercoolant", "Sodium", **intercoolantDims) b.add(fuel) b.add(clad) b.add(duct) b.add(coolant) b.add(intercoolant) return b def buildLinkedFuelBlock(): """Return a simple hex block containing linked bond.""" b = blocks.HexBlock("fuel", height=10.0) fuelDims = {"Tinput": 25.0, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 127.0} bondDims = { "Tinput": 25.0, "Thot": 450, "od": "clad.id", "id": "fuel.od", "mult": 127.0, } cladDims = {"Tinput": 25.0, "Thot": 450, "od": 0.80, "id": 0.77, "mult": 127.0} ductDims = {"Tinput": 25.0, "Thot": 400, "op": 16, "ip": 15.3, "mult": 1.0} intercoolantDims = { "Tinput": 400, "Thot": 400, "op": 17.0, "ip": ductDims["op"], "mult": 1.0, } coolDims = {"Tinput": 25.0, "Thot": 400} fuel = components.Circle("fuel", "UZr", **fuelDims) clad = components.Circle("clad", "HT9", **cladDims) bondDims["components"] = {"clad": clad, "fuel": fuel} bond = components.Circle("bond", "HT9", **bondDims) duct = components.Hexagon("duct", "HT9", **ductDims) coolant = components.DerivedShape("coolant", "Sodium", **coolDims) intercoolant = components.Hexagon("intercoolant", "Sodium", **intercoolantDims) b.add(fuel) b.add(bond) b.add(clad) b.add(duct) b.add(coolant) b.add(intercoolant) return b def loadTestBlock(cold=True, depletable=False) -> blocks.HexBlock: """Build an annular test block for evaluating unit tests.""" caseSetting = settings.Settings() caseSetting[CONF_XS_KERNEL] = "MC2v2" runLog.setVerbosity("error") caseSetting["nCycles"] = 1 r = tests.getEmptyHexReactor() assemNum = 3 block = blocks.HexBlock("TestHexBlock") block.setType("defaultType") block.p.nPins = NUM_PINS_IN_TEST_BLOCK assembly = makeTestAssembly(assemNum, 1, r=r) # NOTE: temperatures are supposed to be in C coldTemp = 25.0 hotTempCoolant = 430.0 hotTempStructure = 25.0 if cold else hotTempCoolant hotTempFuel = 25.0 if cold else 
600.0 fuelDims = { "Tinput": coldTemp, "Thot": hotTempFuel, "od": 0.84, "id": 0.6, "mult": NUM_PINS_IN_TEST_BLOCK, } fuel = components.Circle("fuel", "UZr", **fuelDims) if depletable: fuel.p.flags = Flags.fromString("fuel depletable") bondDims = { "Tinput": coldTemp, "Thot": hotTempCoolant, "od": "fuel.id", "id": 0.3, "mult": NUM_PINS_IN_TEST_BLOCK, } bondDims["components"] = {"fuel": fuel} bond = components.Circle("bond", "Sodium", **bondDims) annularVoidDims = { "Tinput": hotTempStructure, "Thot": hotTempStructure, "od": "bond.id", "id": 0.0, "mult": NUM_PINS_IN_TEST_BLOCK, } annularVoidDims["components"] = {"bond": bond} annularVoid = components.Circle("annular void", "Void", **annularVoidDims) innerLinerDims = { "Tinput": coldTemp, "Thot": hotTempStructure, "od": 0.90, "id": 0.85, "mult": NUM_PINS_IN_TEST_BLOCK, } innerLiner = components.Circle("inner liner", "Graphite", **innerLinerDims) fuelLinerGapDims = { "Tinput": hotTempStructure, "Thot": hotTempStructure, "od": "inner liner.id", "id": "fuel.od", "mult": NUM_PINS_IN_TEST_BLOCK, } fuelLinerGapDims["components"] = {"inner liner": innerLiner, "fuel": fuel} fuelLinerGap = components.Circle("gap1", "Void", **fuelLinerGapDims) outerLinerDims = { "Tinput": coldTemp, "Thot": hotTempStructure, "od": 0.95, "id": 0.90, "mult": NUM_PINS_IN_TEST_BLOCK, } outerLiner = components.Circle("outer liner", "HT9", **outerLinerDims) linerLinerGapDims = { "Tinput": hotTempStructure, "Thot": hotTempStructure, "od": "outer liner.id", "id": "inner liner.od", "mult": NUM_PINS_IN_TEST_BLOCK, } linerLinerGapDims["components"] = { "outer liner": outerLiner, "inner liner": innerLiner, } linerLinerGap = components.Circle("gap2", "Void", **linerLinerGapDims) claddingDims = { "Tinput": coldTemp, "Thot": hotTempStructure, "od": 1.05, "id": 0.95, "mult": NUM_PINS_IN_TEST_BLOCK, } cladding = components.Circle("clad", "HT9", **claddingDims) if depletable: cladding.p.flags = Flags.fromString("clad depletable") linerCladGapDims = { "Tinput": 
hotTempStructure, "Thot": hotTempStructure, "od": "clad.id", "id": "outer liner.od", "mult": NUM_PINS_IN_TEST_BLOCK, } linerCladGapDims["components"] = {"clad": cladding, "outer liner": outerLiner} linerCladGap = components.Circle("gap3", "Void", **linerCladGapDims) wireDims = { "Tinput": coldTemp, "Thot": hotTempStructure, "od": 0.1, "id": 0.0, "axialPitch": 30.0, "helixDiameter": 1.1, "mult": NUM_PINS_IN_TEST_BLOCK, } wire = components.Helix("wire", "HT9", **wireDims) if depletable: wire.p.flags = Flags.fromString("wire depletable") coolantDims = {"Tinput": hotTempCoolant, "Thot": hotTempCoolant} coolant = components.DerivedShape("coolant", "Sodium", **coolantDims) ductDims = { "Tinput": coldTemp, "Thot": hotTempStructure, "ip": 16.6, "op": 17.3, "mult": 1, } duct = components.Hexagon("duct", "HT9", **ductDims) if depletable: duct.p.flags = Flags.fromString("duct depletable") interDims = { "Tinput": hotTempCoolant, "Thot": hotTempCoolant, "op": 17.8, "ip": "duct.op", "mult": 1, } interDims["components"] = {"duct": duct} interSodium = components.Hexagon("interCoolant", "Sodium", **interDims) block.add(annularVoid) block.add(bond) block.add(fuel) block.add(fuelLinerGap) block.add(innerLiner) block.add(linerLinerGap) block.add(outerLiner) block.add(linerCladGap) block.add(cladding) block.add(wire) block.add(coolant) block.add(duct) block.add(interSodium) block.setHeight(16.0) block.autoCreateSpatialGrids(r.core.spatialGrid) assembly.add(block) r.core.add(assembly) return block def applyDummyData(block): """Add some dummy data to a block for physics-like tests.""" # typical SFR-ish flux in 1/cm^2/s flux = [ 161720716762.12997, 2288219224332.647, 11068159130271.139, 26473095948525.742, 45590249703180.945, 78780459664094.23, 143729928505629.06, 224219073208464.06, 229677567456769.22, 267303906113313.16, 220996878365852.22, 169895433093246.28, 126750484612975.31, 143215138794766.53, 74813432842005.5, 32130372366225.85, 21556243034771.582, 6297567411518.368, 
22365198294698.45, 12211256796917.86, 5236367197121.363, 1490736020048.7847, 1369603135573.731, 285579041041.55945, 73955783965.98692, 55003146502.73623, 18564831886.20426, 4955747691.052108, 3584030491.076041, 884015567.3986057, 4298964991.043116, 1348809158.0353086, 601494405.293505, ] xslib = isotxs.readBinary(ISOAA_PATH) # Slight hack here because the test block was created by hand rather than via blueprints and so # elemental expansion of isotopics did not occur. But, the ISOTXS library being used did go # through an isotopic expansion, so we map nuclides here. xslib._nuclides["NAAA"] = xslib._nuclides["NA23AA"] xslib._nuclides["WAA"] = xslib._nuclides["W184AA"] xslib._nuclides["MNAA"] = xslib._nuclides["MN55AA"] block.p.mgFlux = flux block.core.lib = xslib def getComponentData(component): density = 0.0 for nuc in component.getNuclides(): density += ( component.getNumberDensity(nuc) * nucDir.getAtomicWeight(nuc) / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM ) volume = component.getVolume() mass = component.getMass() return component, density, volume, mass class TestDetailedNDensUpdate(unittest.TestCase): def test_updateDetailedNdens(self): from armi.reactor.blueprints.tests.test_blockBlueprints import FULL_BP cs = settings.Settings() with io.StringIO(FULL_BP) as stream: bps = blueprints.Blueprints.load(stream) bps._prepConstruction(cs) self.r = tests.getEmptyHexReactor() self.r.blueprints = bps a = makeTestAssembly(numBlocks=1, assemNum=0) a.add(buildSimpleFuelBlock()) self.r.core.add(a) # get first block in assembly with 'fuel' key block = self.r.core[0][0] # get nuclides in first component in block adjList = block[0].getNuclides() block.p.detailedNDens = np.array([1.0]) block.p.pdensDecay = 1.0 block._updateDetailedNdens(frac=0.5, adjustList=adjList) self.assertEqual(block.p.pdensDecay, 0.5) self.assertEqual(block.p.detailedNDens, np.array([0.5])) class TestValidateSFPSpatialGrids(unittest.TestCase): def test_noSFPExists(self): """Validate the spatial grid for a 
new SFP is None if it was not provided.""" # copy the inputs, so we can modify them with TemporaryDirectoryChanger() as newDir: oldDir = os.path.join(TEST_ROOT, "smallestTestReactor") newDir2 = os.path.join(newDir.destination, "smallestTestReactor") shutil.copytree(oldDir, newDir2) # cut out the SFP grid in the input file testFile = os.path.join(newDir2, "refSmallestReactor.yaml") txt = open(testFile, "r").read() txt = txt.split("symmetry: full")[0] open(testFile, "w").write(txt) # verify there is no spatial grid defined _o, r = loadTestReactor(newDir2, inputFileName="armiRunSmallest.yaml") self.assertIsNone(r.excore.sfp.spatialGrid) def test_SFPSpatialGridExists(self): """Validate the spatial grid for a new SFP is not None if it was provided.""" _o, r = loadTestReactor(os.path.join(TEST_ROOT, "smallestTestReactor"), inputFileName="armiRunSmallest.yaml") self.assertIsNotNone(r.excore.sfp.spatialGrid) def test_orientationBOL(self): _o, r = loadTestReactor(os.path.join(TEST_ROOT, "smallestTestReactor"), inputFileName="armiRunSmallest.yaml") # Test the null-case; these should all be zero. 
for a in r.core.iterChildren(): self.assertEqual(a.p.orientation[0], 0.0) self.assertEqual(a.p.orientation[1], 0.0) self.assertEqual(a.p.orientation[2], 0.0) class Block_TestCase(unittest.TestCase): def setUp(self): self.block = loadTestBlock() self._hotBlock = loadTestBlock(cold=False) self._deplBlock = loadTestBlock(depletable=True) def test_getSmearDensity(self): cur = self.block.getSmearDensity() ref = (self.block.getDim(Flags.FUEL, "od") ** 2 - self.block.getDim(Flags.FUEL, "id") ** 2) / self.block.getDim( Flags.LINER, "id" ) ** 2 places = 10 self.assertAlmostEqual(cur, ref, places=places) # test with liner instead of clad ref = (self.block.getDim(Flags.FUEL, "od") ** 2 - self.block.getDim(Flags.FUEL, "id") ** 2) / self.block.getDim( Flags.LINER, "id" ) ** 2 cur = self.block.getSmearDensity() self.assertAlmostEqual( cur, ref, places=places, msg="Incorrect getSmearDensity with liner. Got {0}. Should be {1}".format(cur, ref), ) # test with annular fuel. fuelDims = { "Tinput": 273.0, "Thot": 273.0, "od": 0.87, "id": 0.2, "mult": 271.0, } self.fuelComponent = components.Circle("fuel", "UZr", **fuelDims) ref = (self.block.getDim(Flags.FUEL, "od") ** 2 - self.block.getDim(Flags.FUEL, "id") ** 2) / self.block.getDim( Flags.LINER, "id" ) ** 2 cur = self.block.getSmearDensity() self.assertAlmostEqual( cur, ref, places=places, msg="Incorrect getSmearDensity with annular fuel. Got {0}. 
Should be {1}".format(cur, ref), ) def test_getSmearDensityMultipleClads(self): # add clad of different size clad = self.block.getComponent(Flags.CLAD) self.block.remove(clad) cladDims = { "Tinput": 273.0, "Thot": 273.0, "od": clad.getDimension("od") + 0.02, "id": clad.getDimension("id"), "mult": 117.0, } self.block.add(components.Circle("clad test", "HT9", **cladDims)) # add clad of different size cladDims = { "Tinput": 273.0, "Thot": 273.0, "od": clad.getDimension("od"), "id": clad.getDimension("id") + 0.02, "mult": 100.0, } self.block.add(components.Circle("clad", "HT9", **cladDims)) cur = self.block.getSmearDensity() fuel = self.block.getComponent(Flags.FUEL, exact=True) liner = self.block.getComponent(Flags.LINER | Flags.INNER) clads = self.block.getComponents(Flags.CLAD) ref = (fuel.getDimension("od", cold=True) ** 2 - fuel.getDimension("id", cold=True) ** 2) / liner.getDimension( "id", cold=True ) ** 2 fuelArea = fuel.getArea(cold=True) innerArea = 0.0 for clad in clads: innerArea += math.pi / 4.0 * clad.getDimension("id", cold=True) ** 2 * clad.getDimension("mult") for liner in self.block.getComponents(Flags.LINER): innerArea -= liner.getArea(cold=True) ref = fuelArea / innerArea self.assertAlmostEqual(cur, ref, places=10) def test_getSmearDensityMixedPin(self): fuel = self.block.getComponent(Flags.FUEL) self.block.remove(fuel) fuelDims = { "Tinput": 273.0, "Thot": 273.0, "od": fuel.getDimension("od"), "id": fuel.getDimension("id"), "mult": 117.0, } self.block.add(components.Circle("fuel annular", "UZr", **fuelDims)) # add non-annular fuel fuelDims = { "Tinput": 273.0, "Thot": 273.0, "od": 0.75, "id": 0.0, "mult": 100.0, } self.block.add(components.Circle("fuel", "UZr", **fuelDims)) # add clad of different size clad = self.block.getComponent(Flags.CLAD) self.block.remove(clad) cladDims = { "Tinput": 273.0, "Thot": 273.0, "od": clad.getDimension("od") + 0.02, "id": clad.getDimension("id"), "mult": 117.0, } self.block.add(components.Circle("clad test", "HT9", 
**cladDims)) # add clad of different size cladDims = { "Tinput": 273.0, "Thot": 273.0, "od": clad.getDimension("od"), "id": clad.getDimension("id") + 0.02, "mult": 100.0, } self.block.add(components.Circle("clad", "HT9", **cladDims)) # calculate reference smear density fuel = self.block.getComponent(Flags.FUEL, exact=True) annularFuel = self.block.getComponent(Flags.FUEL | Flags.ANNULAR) liner = self.block.getComponent(Flags.LINER | Flags.INNER) clads = self.block.getComponents(Flags.CLAD) fuelArea = math.pi / 4.0 * fuel.getDimension("od", cold=True) ** 2 * fuel.getDimension("mult") fuelArea += ( math.pi / 4.0 * (annularFuel.getDimension("od", cold=True) ** 2 - annularFuel.getDimension("id", cold=True) ** 2) * annularFuel.getDimension("mult") ) innerArea = 0.0 for clad in clads: innerArea += math.pi / 4.0 * clad.getDimension("id", cold=True) ** 2 * clad.getDimension("mult") for liner in self.block.getComponents(Flags.LINER): innerArea -= liner.getArea(cold=True) ref = fuelArea / innerArea cur = self.block.getSmearDensity() self.assertAlmostEqual(cur, ref, places=10) def test_getSmearDensityMultipleLiner(self): numLiners = sum(1 for c in self.block if "liner" in c.name and "gap" not in c.name) self.assertEqual( numLiners, 2, "self.block needs at least 2 liners for this test to be functional.", ) cur = self.block.getSmearDensity() ref = (self.block.getDim(Flags.FUEL, "od") ** 2 - self.block.getDim(Flags.FUEL, "id") ** 2) / self.block.getDim( Flags.INNER | Flags.LINER, "id" ) ** 2 self.assertAlmostEqual(cur, ref, places=10) def test_getSmearDensityEdgeCases(self): # show smear density is not computed for non-fuel blocks b0 = blocks.Block("DummyReflectorBlock") self.assertEqual(b0.getSmearDensity(), 0.0) # show smear density is only defined for pinned fuel blocks b1 = blocks.HexBlock("TestFuelHexBlock") b1.setType("fuel") b1.p.nPins = 0 fuel = components.Circle("fuel", "UZr", Tinput=25.0, Thot=25.0, od=0.84, id=0.6, mult=0) b1.add(fuel) 
self.assertEqual(b1.getSmearDensity(), 0.0) def test_computeSmearDensity(self): # test the null case smearDensity = blocks.Block.computeSmearDensity(123.4, [], True) self.assertEqual(smearDensity, 0.0) smearDensity = blocks.Block.computeSmearDensity(123.4, [], False) self.assertEqual(smearDensity, 0.0) # test one circle component circles = self.block.getComponentsOfShape(components.Circle) smearDensity = blocks.Block.computeSmearDensity(123.4, [circles[0]], True) self.assertEqual(smearDensity, 0.0) # use the test block clads = set(self.block.getComponents(Flags.CLAD)).intersection(set(circles)) cladID = np.mean([clad.getDimension("id", cold=True) for clad in clads]) sortedCircles = self.block.getSortedComponentsInsideOfComponent(circles.pop()) fuelCompArea = sum(f.getArea(cold=True) for f in self.block.getComponents(Flags.FUEL)) innerCladdingArea = math.pi * (cladID**2) / 4.0 * self.block.getNumComponents(Flags.FUEL) unmovableCompArea = sum( c.getArea(cold=True) for c in sortedCircles if not c.isFuel() and not c.hasFlags([Flags.SLUG, Flags.DUMMY]) and c.containsSolidMaterial() ) refSmearDensity = fuelCompArea / (innerCladdingArea - unmovableCompArea) smearDensity = blocks.Block.computeSmearDensity(153.81433981516477, sortedCircles, True) self.assertAlmostEqual(smearDensity, refSmearDensity, places=10) def test_timeNodeParams(self): self.block.p["buRate", 3] = 0.1 self.assertEqual(0.1, self.block.p[("buRate", 3)]) def test_getType(self): ref = "plenum pin" self.block.setType(ref) cur = self.block.getType() self.assertEqual(cur, ref) self.assertTrue(self.block.hasFlags(Flags.PLENUM)) self.assertTrue(self.block.hasFlags(Flags.PLENUM | Flags.PIN)) self.assertTrue(self.block.hasFlags(Flags.PLENUM | Flags.PIN, exact=True)) self.assertFalse(self.block.hasFlags(Flags.PLENUM, exact=True)) def test_hasFlags(self): self.block.setType("feed fuel") cur = self.block.hasFlags(Flags.FEED | Flags.FUEL) self.assertTrue(cur) cur = self.block.hasFlags(Flags.PLENUM) 
self.assertFalse(cur) def test_setType(self): self.block.setType("igniter fuel") self.assertEqual("igniter fuel", self.block.getType()) self.assertTrue(self.block.hasFlags(Flags.IGNITER | Flags.FUEL)) self.block.adjustUEnrich(0.0001) self.block.setType("feed fuel") self.assertTrue(self.block.hasFlags(Flags.FEED | Flags.FUEL)) self.assertTrue(self.block.hasFlags(Flags.FUEL)) self.assertFalse(self.block.hasFlags(Flags.IGNITER | Flags.FUEL)) def test_duplicate(self): Block2 = blocks.Block.createHomogenizedCopy(self.block) originalComponents = self.block.getComponents() newComponents = Block2.getComponents() for c1, c2 in zip(originalComponents, newComponents): self.assertEqual(c1.getName(), c2.getName()) a1, a2 = c1.getArea(), c2.getArea() self.assertIsNot(c1, c2) self.assertAlmostEqual( a1, a2, msg="The area of {0}={1} but the area of {2} in the copy={3}".format(c1, a1, c2, a2), ) for key in c2.DIMENSION_NAMES: dim = c2.p[key] if isinstance(dim, tuple): self.assertNotIn(dim[0], originalComponents) self.assertIn(dim[0], newComponents) ref = self.block.getMass() cur = Block2.getMass() places = 6 self.assertAlmostEqual(ref, cur, places=places) ref = self.block.getArea() cur = Block2.getArea() places = 6 self.assertAlmostEqual(ref, cur, places=places) ref = self.block.getHeight() cur = Block2.getHeight() places = 6 self.assertAlmostEqual(ref, cur, places=places) self.assertEqual(self.block.p.flags, Block2.p.flags) def test_homogenizedMixture(self): """ Confirms homogenized blocks have correct properties. .. test:: Homogenize the compositions of a block. 
:id: T_ARMI_BLOCK_HOMOG :tests: R_ARMI_BLOCK_HOMOG """ args = [False, True] # pinSpatialLocator argument expectedShapes = [ [basicShapes.Hexagon], [basicShapes.Hexagon, basicShapes.Circle], ] for arg, shapes in zip(args, expectedShapes): homogBlock = self.block.createHomogenizedCopy(pinSpatialLocators=arg) for shapeType in shapes: for c in homogBlock.getComponents(): if isinstance(c, shapeType): break else: # didn't find the homogenized hex in the block copy self.assertTrue(False, f"{self.block} does not have a {shapeType} component!") if arg: # check that homogenized block has correct pin coordinates self.assertEqual(self.block.getNumPins(), homogBlock.getNumPins()) self.assertEqual(self.block.p.nPins, homogBlock.p.nPins) pinCoords = self.block.getPinCoordinates() homogPinCoords = homogBlock.getPinCoordinates() for refXYZ, homogXYZ in zip(list(pinCoords), list(homogPinCoords)): self.assertListEqual(list(refXYZ), list(homogXYZ)) cur = homogBlock.getMass() self.assertAlmostEqual(self.block.getMass(), homogBlock.getMass()) self.assertEqual(homogBlock.getType(), self.block.getType()) self.assertEqual(homogBlock.p.flags, self.block.p.flags) self.assertEqual(homogBlock.macros, self.block.macros) self.assertEqual(homogBlock._lumpedFissionProducts, self.block._lumpedFissionProducts) ref = self.block.getArea() cur = homogBlock.getArea() places = 6 self.assertAlmostEqual(ref, cur, places=places) ref = self.block.getHeight() cur = homogBlock.getHeight() places = 6 self.assertAlmostEqual(ref, cur, places=places) def test_getXsType(self): self.cs = settings.Settings() newSettings = {CONF_LOADING_FILE: os.path.join(TEST_ROOT, "refSmallReactor.yaml")} self.cs = self.cs.modified(newSettings=newSettings) self.block.p.xsType = "B" cur = self.block.p.xsType ref = "B" self.assertEqual(cur, ref) _oldBuGroups = self.cs["buGroups"] newSettings = {"buGroups": [100]} self.cs = self.cs.modified(newSettings=newSettings) self.block.p.xsType = "BB" cur = self.block.p.xsType ref = "BB" 
self.assertEqual(cur, ref) def test_27b_setEnvGroup(self): type_ = "A" self.block.p.envGroup = type_ cur = self.block.p.envGroupNum ref = ord(type_) - ASCII_LETTER_A self.assertEqual(cur, ref) typeNumber = 25 # this is Z due to 0 based numbers self.block.p.envGroupNum = typeNumber cur = self.block.p.envGroup ref = chr(typeNumber + ASCII_LETTER_A) self.assertEqual(cur, ref) self.assertEqual(cur, "Z") before_a = ASCII_LETTER_a - 1 type_ = "a" self.block.p.envGroup = type_ cur = self.block.p.envGroupNum ref = ord(type_) - (before_a) + (ASCII_LETTER_Z - ASCII_LETTER_A) self.assertEqual(cur, ref) typeNumber = 26 # this is a due to 0 based numbers self.block.p.envGroupNum = typeNumber cur = self.block.p.envGroup self.assertEqual(cur, "a") type_ = "z" self.block.p.envGroup = type_ cur = self.block.p.envGroupNum ref = ord(type_) - before_a + (ASCII_LETTER_Z - ASCII_LETTER_A) self.assertEqual(cur, ref) typeNumber = 26 * 2 - 1 # 2x letters in alpha with 0 based index self.block.p.envGroupNum = typeNumber cur = self.block.p.envGroup ref = chr((typeNumber - 26) + ASCII_LETTER_a) self.assertEqual(cur, ref) self.assertEqual(cur, "z") def test_setZeroHeight(self): """Test that demonstrates that a block's height can be set to zero.""" b = buildSimpleFuelBlock() # Check for a DerivedShape component self.assertEqual(len([c for c in b if c.__class__ is components.DerivedShape]), 1) m1 = b.getMass() v1 = b.getVolume() a1 = b.getArea() nd1 = copy.deepcopy(b.getNumberDensities()) h1 = b.getHeight() self.assertNotEqual(h1, 0.0) # Set height to 0.0 b.setHeight(0.0) m2 = b.getMass() v2 = b.getVolume() a2 = b.getArea() nd2 = copy.deepcopy(b.getNumberDensities()) h2 = b.getHeight() self.assertEqual(m2, 0.0) self.assertEqual(v2, 0.0) self.assertEqual(h2, 0.0) self.assertAlmostEqual(a2, a1) for nuc, ndens in nd2.items(): self.assertEqual(ndens, 0.0, msg=(f"Number density of {nuc} is expected to be zero.")) # Set height back to the original height b.setHeight(h1) m3 = b.getMass() v3 = 
b.getVolume() a3 = b.getArea() nd3 = copy.deepcopy(b.getNumberDensities()) h3 = b.getHeight() self.assertAlmostEqual(m3, m1) self.assertAlmostEqual(v3, v1) self.assertAlmostEqual(a3, a1) self.assertEqual(h3, h1) for nuc in nd3.keys(): self.assertAlmostEqual(nd3[nuc], nd1[nuc]) def test_getVolumeFractionsWithZeroHeight(self): """Tests that the component fractions are the same with a zero height block.""" b = buildSimpleFuelBlock() h1 = b.getHeight() originalVolFracs = b.getVolumeFractions() for _c, vf in originalVolFracs: self.assertNotEqual(vf, 0.0) b.setHeight(0.0) volFracs = b.getVolumeFractions() for (_c, vf1), (_c, vf2) in zip(volFracs, originalVolFracs): self.assertAlmostEqual(vf1, vf2) b.setHeight(h1) volFracs = b.getVolumeFractions() for (_c, vf1), (_c, vf2) in zip(volFracs, originalVolFracs): self.assertAlmostEqual(vf1, vf2) def test_getVolumeFractionWithoutParent(self): """Tests that the volume fraction of a block with no parent is zero.""" b = buildSimpleFuelBlock() self.assertIsNone(b.parent) with self.assertRaises(ValueError): b.getVolumeFraction() def test_clearDensity(self): self.block.clearNumberDensities() for nuc in self.block.getNuclides(): cur = self.block.getNumberDensity(nuc) ref = 0.0 places = 5 self.assertAlmostEqual(cur, ref, places=places) def test_getNumberDensity(self): refDict = { "U235": 0.00275173784234, "U238": 0.0217358415457, "W182": 1.09115150103e-05, "W183": 5.89214392093e-06, "W184": 1.26159558164e-05, "W186": 1.17057432664e-05, "ZR": 0.00709003962772, } self.block.setNumberDensities(refDict) for nucKey, nucItem in refDict.items(): cur = self.block.getNumberDensity(nucKey) ref = nucItem places = 6 self.assertAlmostEqual(ref, cur, places=places) def test_getMasses(self): masses = sorted(self.block.getMasses()) self.assertEqual(len(masses), 13) self.assertEqual(masses[0], "C") def test_removeMass(self): mass0 = self.block.getMass("U238") self.assertGreater(mass0, 0.1) self.block.removeMass("U238", 0.1) mass1 = 
self.block.getMass("U238") self.assertGreater(mass1, 0) self.assertGreater(mass0, mass1) def test_setNumberDensity(self): ref = 0.05 self.block.setNumberDensity("U235", ref) cur = self.block.getNumberDensity("U235") places = 5 self.assertAlmostEqual(cur, ref, places=places) def test_setNumberDensities(self): """Make sure we can set multiple number densities at once.""" b = self.block b.setNumberDensity("NA", 0.5) refDict = { "U235": 0.00275173784234, "U238": 0.0217358415457, "W": 1.09115150103e-05, "ZR": 0.00709003962772, } b.setNumberDensities(refDict) for nucKey, nucItem in refDict.items(): cur = self.block.getNumberDensity(nucKey) ref = nucItem places = 6 self.assertAlmostEqual(cur, ref, places=places) # make sure U235 stayed fully contained in the fuel component fuelC = b.getComponent(Flags.FUEL) self.assertAlmostEqual( b.getNumberDensity("U235"), fuelC.getNumberDensity("U235") * fuelC.getVolumeFraction(), ) # make sure other vals were zeroed out self.assertAlmostEqual(b.getNumberDensity("NA23"), 0.0) def test_getMass(self): self.block.setHeight(100.0) nucName = "U235" d = self.block.getNumberDensity(nucName) v = self.block.getVolume() A = nucDir.getAtomicWeight(nucName) ref = d * v * A / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM cur = self.block.getMass(nucName) places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_setMass(self): self.block.setHeight(100.0) mass = 100.0 nuc = "U238" self.block.setMass(nuc, mass) cur = self.block.getMass(nuc) ref = mass places = 6 self.assertAlmostEqual(cur, ref, places=places) cur = self.block.getNumberDensity(nuc) v = self.block.getVolume() A = nucDir.getAtomicWeight(nuc) ref = MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * mass / (v * A) places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getTotalMass(self): self.block.setHeight(100.0) self.block.clearNumberDensities() refDict = { "U235": 0.00275173784234, "U238": 0.0217358415457, "W182": 1.09115150103e-05, "W183": 5.89214392093e-06, "W184": 1.26159558164e-05, 
"W186": 1.17057432664e-05, "ZR": 0.00709003962772, } self.block.setNumberDensities(refDict) cur = self.block.getMass() tot = 0.0 for nucName, nucItem in refDict.items(): d = nucItem A = nucDir.getAtomicWeight(nucName) tot += d * A v = self.block.getVolume() ref = tot * v / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM places = 9 self.assertAlmostEqual(cur, ref, places=places) def test_replaceBlockWithBlock(self): """Tests conservation of mass flag in replaceBlockWithBlock.""" block = self.block ductBlock = block.__class__("duct") ductBlock.add(block.getComponent(Flags.COOLANT, exact=True)) ductBlock.add(block.getComponent(Flags.DUCT, exact=True)) ductBlock.add(block.getComponent(Flags.INTERCOOLANT, exact=True)) # get reference data refLoc = block.spatialLocator refName = block.name refHeight = block.p.height ductBlock.p.height = 99 * block.p.height self.assertGreater(len(block), 3) block.replaceBlockWithBlock(ductBlock) self.assertEqual(block.spatialLocator, refLoc) self.assertEqual(refName, block.name) self.assertEqual(3, len(block)) self.assertEqual(block.p.height, refHeight) def test_getWettedPerimeterDepletable(self): # calculate the reference value wire = self._deplBlock.getComponent(Flags.WIRE) correctionFactor = np.hypot( 1.0, math.pi * wire.getDimension("helixDiameter") / wire.getDimension("axialPitch"), ) wireDiam = wire.getDimension("od") * correctionFactor ipDim = self.block.getDim(Flags.DUCT, "ip") odDim = self.block.getDim(Flags.CLAD, "od") mult = self.block.getDim(Flags.CLAD, "mult") ref = math.pi * (odDim + wireDiam) * mult + 6 * ipDim / math.sqrt(3) # test getWettedPerimeter cur = self._deplBlock.getWettedPerimeter() self.assertAlmostEqual(cur, ref) def test_getWettedPerimeter(self): # calculate the reference value wire = self.block.getComponent(Flags.WIRE) correctionFactor = np.hypot( 1.0, math.pi * wire.getDimension("helixDiameter") / wire.getDimension("axialPitch"), ) wireDiam = wire.getDimension("od") * correctionFactor ipDim = self.block.getDim(Flags.DUCT, 
"ip") odDim = self.block.getDim(Flags.CLAD, "od") mult = self.block.getDim(Flags.CLAD, "mult") ref = math.pi * (odDim + wireDiam) * mult + 6 * ipDim / math.sqrt(3) # test getWettedPerimeter cur = self.block.getWettedPerimeter() self.assertAlmostEqual(cur, ref) def test_getWettedPerimeterCircularInnerDuct(self): """Calculate the wetted perimeter for a HexBlock with circular inner duct.""" # build a test block with a Hex inner duct fuelDims = {"Tinput": 400, "Thot": 400, "od": 0.76, "id": 0.00, "mult": 127.0} cladDims = {"Tinput": 400, "Thot": 400, "od": 0.80, "id": 0.77, "mult": 127.0} ductDims = {"Tinput": 400, "Thot": 400, "od": 16, "id": 15.3, "mult": 1.0} intercoolantDims = { "Tinput": 400, "Thot": 400, "od": 17.0, "id": ductDims["od"], "mult": 1.0, } fuel = components.Circle("fuel", "UZr", **fuelDims) clad = components.Circle("clad", "HT9", **cladDims) duct = components.Circle("inner duct", "HT9", **ductDims) intercoolant = components.Circle("intercoolant", "Sodium", **intercoolantDims) b = blocks.HexBlock("fuel", height=10.0) b.add(fuel) b.add(clad) b.add(duct) b.add(intercoolant) # calculate the reference value ref = (ductDims["id"] + ductDims["od"]) * math.pi ref += b.getNumPins() * cladDims["od"] * math.pi # test getWettedPerimeter cur = b.getWettedPerimeter() self.assertAlmostEqual(cur, ref) def test_getWettedPerimeterHexInnerDuct(self): """Calculate the wetted perimeter for a HexBlock with hexagonal inner duct.""" # build a test block with a Hex inner duct fuelDims = {"Tinput": 400, "Thot": 400, "od": 0.76, "id": 0.00, "mult": 127.0} cladDims = {"Tinput": 400, "Thot": 400, "od": 0.80, "id": 0.77, "mult": 127.0} ductDims = {"Tinput": 400, "Thot": 400, "op": 16, "ip": 15.3, "mult": 1.0} intercoolantDims = { "Tinput": 400, "Thot": 400, "op": 17.0, "ip": ductDims["op"], "mult": 1.0, } fuel = components.Circle("fuel", "UZr", **fuelDims) clad = components.Circle("clad", "HT9", **cladDims) duct = components.Hexagon("inner duct", "HT9", **ductDims) intercoolant = 
components.Hexagon("intercoolant", "Sodium", **intercoolantDims) b = blocks.HexBlock("fuel", height=10.0) b.add(fuel) b.add(clad) b.add(duct) b.add(intercoolant) # calculate the reference value ref = 6 * (ductDims["ip"] + ductDims["op"]) / math.sqrt(3) ref += b.getNumPins() * cladDims["od"] * math.pi # test getWettedPerimeter cur = b.getWettedPerimeter() self.assertAlmostEqual(cur, ref) def test_getWettedPerimeterMultiPins(self): assembly = buildMixedPinAssembly() block = assembly.getFirstBlock(Flags.FUEL) # calculate the reference value wires = block.getComponents(Flags.WIRE) clads = block.getComponents(Flags.CLAD) ref = 0 for wire in wires: mult = wire.getDimension("mult") correctionFactor = np.hypot( 1.0, math.pi * wire.getDimension("helixDiameter") / wire.getDimension("axialPitch"), ) wireDiam = wire.getDimension("od") * correctionFactor ref += math.pi * wireDiam * mult ref += sum(math.pi * clad.getDimension("od") * clad.getDimension("mult") for clad in clads) ipDim = block.getDim(Flags.DUCT, "ip") ref += 6 * ipDim / math.sqrt(3) # test getWettedPerimeter cur = block.getWettedPerimeter() self.assertAlmostEqual(cur, ref) def test_getFlowAreaPerPin(self): area = self.block.getComponent(Flags.COOLANT).getArea() nPins = self.block.getNumPins() cur = self.block.getFlowAreaPerPin() ref = area / nPins self.assertAlmostEqual(cur, ref) def test_getFlowArea(self): """Test Block.getFlowArea() for a Block with just coolant.""" ref = self.block.getComponent(Flags.COOLANT).getArea() cur = self.block.getFlowArea() self.assertAlmostEqual(cur, ref) def test_getFlowAreaInterDuctCoolant(self): """Test Block.getFlowArea() for a Block with coolant and interductcoolant.""" # build a test block with a Hex inter duct collant fuelDims = {"Tinput": 400, "Thot": 400, "od": 0.76, "id": 0.00, "mult": 127.0} ductDims = {"Tinput": 400, "Thot": 400, "op": 16, "ip": 15.3, "mult": 1.0} coolDims = {"Tinput": 400, "Thot": 400} iCoolantDims = {"Tinput": 400, "Thot": 400, "op": 17.0, "ip": 16, 
"mult": 1.0} fuel = components.Circle("fuel", "UZr", **fuelDims) duct = components.Hexagon("inner duct", "HT9", **ductDims) coolant = components.DerivedShape("coolant", "Sodium", **coolDims) iCoolant = components.Hexagon("interductcoolant", "Sodium", **iCoolantDims) b = blocks.HexBlock("fuel", height=10.0) b.add(fuel) b.add(coolant) b.add(duct) b.add(iCoolant) ref = b.getComponent(Flags.COOLANT).getArea() ref += b.getComponent(Flags.INTERDUCTCOOLANT).getArea() cur = b.getFlowArea() self.assertAlmostEqual(cur, ref) def test_getHydraulicDiameter(self): cur = self.block.getHydraulicDiameter() ref = 4.0 * self.block.getFlowArea() / self.block.getWettedPerimeter() self.assertAlmostEqual(cur, ref) def test_adjustUEnrich(self): self.block.setHeight(100.0) ref = 0.25 self.block.adjustUEnrich(ref) cur = self.block.getComponent(Flags.FUEL).getEnrichment() places = 5 self.assertAlmostEqual(cur, ref, places=places) def test_setLocation(self): """ Retrieve a blocks location. .. test:: Location of a block is retrievable. :id: T_ARMI_BLOCK_POSI0 :tests: R_ARMI_BLOCK_POSI """ b = self.block # a bit obvious, but location is a property now... 
i, j = grids.HexGrid.getIndicesFromRingAndPos(2, 3) b.spatialLocator = b.core.spatialGrid[i, j, 0] self.assertEqual(b.getLocation(), "002-003-000") self.assertEqual(0, b.spatialLocator.k) self.assertEqual(b.getSymmetryFactor(), 1.0) # now if we don't specify axial, it will move to the new xy, location and have original z index i, j = grids.HexGrid.getIndicesFromRingAndPos(4, 4) b.spatialLocator = b.core.spatialGrid[i, j, 0] self.assertEqual(0, b.spatialLocator.k) self.assertEqual(b.getSymmetryFactor(), 1.0) # center blocks have a different symmetry factor for 1/3rd core for symmetry, powerMult in ( (geometry.FULL_CORE, 1), ( geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC), 3, ), ): self.block.core.symmetry = geometry.SymmetryType.fromAny(symmetry) i, j = grids.HexGrid.getIndicesFromRingAndPos(1, 1) b.spatialLocator = b.core.spatialGrid[i, j, 0] self.assertEqual(0, b.spatialLocator.k) self.assertEqual(b.getSymmetryFactor(), powerMult) def test_setBuLimitInfo(self): self.block.adjustUEnrich(0.1) self.block.setType("igniter fuel") self.block.setBuLimitInfo() cur = self.block.p.buLimit ref = 0.0 self.assertEqual(cur, ref) def test_getTotalNDens(self): self.block.setType("fuel") self.block.clearNumberDensities() refDict = { "U235": 0.00275173784234, "U238": 0.0217358415457, "W182": 1.09115150103e-05, "W183": 5.89214392093e-06, "W184": 1.26159558164e-05, "W186": 1.17057432664e-05, "ZR": 0.00709003962772, } self.block.setNumberDensities(refDict) cur = self.block.getTotalNDens() tot = 0.0 for nucName in refDict.keys(): ndens = self.block.getNumberDensity(nucName) tot += ndens ref = tot places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getHMDens(self): self.block.setType("fuel") self.block.clearNumberDensities() refDict = { "U235": 0.00275173784234, "U238": 0.0217358415457, "W182": 1.09115150103e-05, "W183": 5.89214392093e-06, "W184": 1.26159558164e-05, "W186": 1.17057432664e-05, "ZR": 0.00709003962772, } 
self.block.setNumberDensities(refDict) cur = self.block.getHMDens() hmDens = 0.0 for nuclide in refDict.keys(): if nucDir.isHeavyMetal(nuclide): # then nuclide is a HM hmDens += self.block.getNumberDensity(nuclide) ref = hmDens places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getFissileMassEnrich(self): fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 0.76, "id": 0.0, "mult": 1.0} self.fuelComponent = components.Circle("fuel", "UZr", **fuelDims) self.block.add(self.fuelComponent) self.block.setHeight(100.0) self.block.clearNumberDensities() refDict = { "U235": 0.00275173784234, "U238": 0.0217358415457, "W182": 1.09115150103e-05, "W183": 5.89214392093e-06, "W184": 1.26159558164e-05, "W186": 1.17057432664e-05, "ZR": 0.00709003962772, } self.block.setNumberDensities(refDict) cur = self.block.getFissileMassEnrich() ref = self.block.getFissileMass() / self.block.getHMMass() places = 4 self.assertAlmostEqual(cur, ref, places=places) self.block.remove(self.fuelComponent) def test_getMicroSuffix(self): self.assertEqual(self.block.getMicroSuffix(), "AA") self.block.p.xsType = "Z" self.assertEqual(self.block.getMicroSuffix(), "ZA") self.block.p.xsType = "RS" self.assertEqual(self.block.getMicroSuffix(), "RS") self.block.p.envGroup = "X" self.block.p.xsType = "AB" with self.assertRaises(ValueError): self.block.getMicroSuffix() def test_getUraniumMassEnrich(self): fuel = self.block.getComponent(Flags.FUEL) fuel.setNumberDensity("U234", 1.0e-4) self.block.adjustUEnrich(0.25) ref = 0.25 self.block.adjustUEnrich(ref) cur = self.block.getUraniumMassEnrich() places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getUraniumNumEnrich(self): fuel = self.block.getComponent(Flags.FUEL) fuel.setNumberDensity("U234", 1.0e-4) self.block.adjustUEnrich(0.25) cur = self.block.getUraniumNumEnrich() u8 = self.block.getNumberDensity("U238") u5 = self.block.getNumberDensity("U235") u4 = self.block.getNumberDensity("U234") ref = u5 / (u8 + u5 + u4) 
self.assertAlmostEqual(cur, ref, places=6) # test the zero edge case self.block.adjustUEnrich(0) cur = self.block.getUraniumNumEnrich() self.assertEqual(cur, 0.0) self.block.setNumberDensity("U238", 0.0) cur = self.block.getUraniumNumEnrich() self.assertEqual(cur, 0.0) def test_getUraniumNumEnrichWith233(self): fuel = self.block.getComponent(Flags.FUEL) u5 = fuel.getNumberDensity("U235") fuel.setNumberDensity("U233", 0.005) self.block.adjustUEnrich(0.25) cur = self.block.getUraniumNumEnrich() u3 = self.block.getNumberDensity("U233") u5 = self.block.getNumberDensity("U235") u8 = self.block.getNumberDensity("U238") ref = (u3 + u5) / (u3 + u5 + u8) self.assertAlmostEqual(cur, ref, places=6) def test_getNumberOfAtoms(self): self.block.clearNumberDensities() refDict = { "U235": 0.00275173784234, "U238": 0.0217358415457, "W182": 1.09115150103e-05, "W183": 5.89214392093e-06, "W184": 1.26159558164e-05, "W186": 1.17057432664e-05, "ZR": 0.00709003962772, } self.block.setNumberDensities(refDict) nucName = "U238" moles = self.block.getNumberOfAtoms(nucName) / units.AVOGADROS_NUMBER # about 158 moles refMoles = refDict["U238"] * self.block.getVolume() / (units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM) self.assertAlmostEqual(moles, refMoles) def test_getPu(self): fuel = self.block.getComponent(Flags.FUEL) vFrac = fuel.getVolumeFraction() refDict = { "AM241": 2.695633500634074e-05, "U238": 0.015278429635341755, "O16": 0.04829586365251901, "U235": 0.004619446966056436, "PU239": 0.0032640382635406515, "PU238": 4.266845903720035e-06, "PU240": 0.000813669265183342, "PU241": 0.00011209296581262849, "PU242": 2.3078961257395204e-05, } fuel.setNumberDensities({nuc: v / vFrac for nuc, v in refDict.items()}) # test moles cur = self.block.getPuMoles() ndens = 0.0 for nucName in refDict.keys(): if nucName in ["PU238", "PU239", "PU240", "PU241", "PU242"]: ndens += self.block.getNumberDensity(nucName) ref = ndens / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * self.block.getVolume() * 
self.block.getSymmetryFactor() self.assertAlmostEqual(cur, ref, places=6) def test_adjustDensity(self): u235Dens = 0.003 u238Dens = 0.010 self.block.setNumberDensity("U235", u235Dens) self.block.setNumberDensity("U238", u238Dens) mass1 = self.block.getMass(["U235", "U238"]) densAdj = 0.9 nucList = ["U235", "U238"] massDiff = self.block.adjustDensity(densAdj, nucList, returnMass=True) mass2 = self.block.getMass(["U235", "U238"]) cur = self.block.getNumberDensity("U235") ref = densAdj * u235Dens self.assertAlmostEqual(cur, ref, places=9) cur = self.block.getNumberDensity("U238") ref = densAdj * u238Dens self.assertAlmostEqual(cur, ref, places=9) self.assertAlmostEqual(mass2 - mass1, massDiff) @patch.object(blocks.HexBlock, "getSymmetryFactor") def test_getMgFlux(self, mock_sf): # calculate Mg Flux with a Symmetry Factor of 3 mock_sf.return_value = 3 neutronFlux = 1.0 gammaFlux = 2.0 self.block.p.mgFlux = np.full(5, neutronFlux) self.block.p.mgFluxGamma = np.full(4, gammaFlux) fuel = self.block.getComponent(Flags.FUEL) blockVol = self.block.getVolume() fuelVol = fuel.getVolume() # compute volume fraction of component; need symmetry factor volFrac = fuelVol / blockVol / self.block.getSymmetryFactor() neutronFluxInt = fuel.getIntegratedMgFlux() gammaFluxInt = fuel.getIntegratedMgFlux(gamma=True) # getIntegratedMgFlux should be scaled by the component volume fraction np.testing.assert_almost_equal(neutronFluxInt, np.full(5, neutronFlux * volFrac)) np.testing.assert_almost_equal(gammaFluxInt, np.full(4, gammaFlux * volFrac)) # getMgFlux should return regular, non-integrated flux neutronMgFlux = fuel.getMgFlux() gammaMgFlux = fuel.getMgFlux(gamma=True) np.testing.assert_almost_equal(neutronMgFlux, np.full(5, neutronFlux / blockVol)) np.testing.assert_almost_equal(gammaMgFlux, np.full(4, gammaFlux / blockVol)) # calculate Mg Flux with a Symmetry Factor of 1 mock_sf.return_value = 1 self.block.p.mgFlux = np.full(5, neutronFlux) self.block.p.mgFluxGamma = np.full(4, 
gammaFlux) fuel = self.block.getComponent(Flags.FUEL) blockVol = self.block.getVolume() fuelVol = fuel.getVolume() volFrac = fuelVol / blockVol / self.block.getSymmetryFactor() neutronFluxInt = fuel.getIntegratedMgFlux() gammaFluxInt = fuel.getIntegratedMgFlux(gamma=True) # getIntegratedMgFlux should be scaled by the component volume fraction np.testing.assert_almost_equal(neutronFluxInt, np.full(5, neutronFlux * volFrac)) np.testing.assert_almost_equal(gammaFluxInt, np.full(4, gammaFlux * volFrac)) # getMgFlux should return regular, non-integrated flux neutronMgFlux = fuel.getMgFlux() gammaMgFlux = fuel.getMgFlux(gamma=True) np.testing.assert_almost_equal(neutronMgFlux, np.full(5, neutronFlux / blockVol)) np.testing.assert_almost_equal(gammaMgFlux, np.full(4, gammaFlux / blockVol)) @patch.object(blocks.HexBlock, "getSymmetryFactor") def test_completeInitialLoading(self, mock_sf): """Ensure that some BOL block and component params are populated properly. Notes ----- - When checking component-level BOL params, puFrac is skipped due to 1) there's no Pu in the block, and 2) getPuMoles is functionally identical to getHMMoles (just limits nuclides from heavy metal to just Pu). - getSymmetryFactor is mocked to return 3. This indicates that the block is in the center-most assembly. Providing this mock ensures that symmetry factors are tested as well (otherwise it's just a factor of 1 and it is a less robust test). 
""" mock_sf.return_value = 3 area = self.block.getArea() height = 2.0 self.block.setHeight(height) self.block.clearNumberDensities() self.block.setNumberDensities( { "U238": 0.018518936996911595, "ZR": 0.006040713762820692, "U235": 0.0023444806416701184, "NA23": 0.009810163826158255, } ) self.block.completeInitialLoading() sf = self.block.getSymmetryFactor() cur = self.block.p.molesHmBOL ref = self.block.getHMDens() / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * height * area self.assertAlmostEqual(cur, ref, places=12) totalHMMass = 0.0 for c in self.block: nucs = c.getNuclides() hmNucs = [nuc for nuc in nucs if nucDir.isHeavyMetal(nuc)] hmNDens = {hmNuc: c.getNumberDensity(hmNuc) for hmNuc in hmNucs} # use sf to account for only a 1/sf portion of the component being in the block hmMass = densityTools.calculateMassDensity(hmNDens) * c.getVolume() / sf totalHMMass += hmMass if hmMass: self.assertAlmostEqual(c.p.massHmBOL, hmMass, places=12) self.assertAlmostEqual( c.p.molesHmBOL, sum(ndens for ndens in hmNDens.values()) / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * c.getVolume(), places=12, ) self.assertAlmostEqual(c.p.enrichmentBOL, c.getFissileMassEnrich(), places=12) else: self.assertEqual(c.p.massHmBOL, 0.0) self.assertEqual(c.p.molesHmBOL, 0.0) self.assertEqual(c.p.enrichmentBOL, 0.0) self.assertAlmostEqual(self.block.p.massHmBOL, totalHMMass) self.assertAlmostEqual(self.block.p.enrichmentBOL, self.block.getFissileMassEnrich(), places=12) def test_add(self): numComps = len(self.block.getComponents()) fuelDims = {"Tinput": 25.0, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 127.0} newComp = components.Circle("fuel", "UZr", **fuelDims) self.block.add(newComp) self.assertEqual(numComps + 1, len(self.block.getComponents())) self.assertIn(newComp, self.block.getComponents()) self.block.remove(newComp) def test_extend(self): # generate a list of composites to extend onto this block comps = [] nunComps = 3 for i in range(nunComps): fuelDims = { "Tinput": 25.0 * i, "Thot": 600, 
"od": 0.76,
                "id": 0.00,
                "mult": 127.0,
            }
            comps.append(components.Circle("fuel", "UZr", **fuelDims))

        # show the composites have no parents
        for c in comps:
            self.assertIsNone(c.parent)

        # add the composites to the block
        lenBlock = len(self.block)
        self.block.extend(comps)
        self.assertEqual(len(self.block), lenBlock + nunComps)

        # show all the composites in the block have the block as the parent
        for c in self.block:
            self.assertIs(c.parent, self.block)

    def test_hasComponents(self):
        # hasComponents accepts either a single flag or a list of flags;
        # with a list, ALL of the flags must be present on the block
        self.assertTrue(self.block.hasComponents([Flags.FUEL, Flags.CLAD]))
        self.assertTrue(self.block.hasComponents(Flags.FUEL))
        self.assertFalse(self.block.hasComponents([Flags.FUEL, Flags.CLAD, Flags.DUMMY]))

    def test_getComponentNames(self):
        # the reference set matches the components built by the test-block fixture
        cur = self.block.getComponentNames()
        ref = set(
            [
                "annular void",
                "bond",
                "fuel",
                "gap1",
                "inner liner",
                "gap2",
                "outer liner",
                "gap3",
                "clad",
                "wire",
                "coolant",
                "duct",
                "interCoolant",
            ]
        )
        self.assertEqual(cur, ref)

    def test_getComponents(self):
        cur = self.block.getComponents(Flags.FUEL)
        self.assertEqual(len(cur), 1)

        comps = self.block.getComponents(Flags.FUEL) + self.block.getComponents(Flags.CLAD)
        self.assertEqual(len(comps), 2)

        inter = self.block.getComponents(Flags.INTERCOOLANT)
        self.assertEqual(len(inter), 1)

        inter = self.block.getComponents(Flags.INTERCOOLANT, exact=True)  # case insensitive
        self.assertEqual(inter, [self.block.getComponent(Flags.INTERCOOLANT)])

        cool = self.block.getComponents(Flags.COOLANT, exact=True)
        self.assertEqual(len(cool), 1)

    def test_getComponent(self):
        cur = self.block.getComponent(Flags.FUEL)
        self.assertIsInstance(cur, components.Component)

        inter = self.block.getComponent(Flags.INTERCOOLANT)
        self.assertIsInstance(inter, components.Component)

        with self.assertRaises(KeyError):
            # this really isn't the responsibility of block, more of Flags, but until this refactor
            # is over...
inter = self.block.getComponent(Flags.fromString("intercoolantlala"), exact=True) cool = self.block.getComponent(Flags.COOLANT, exact=True) self.assertIsInstance(cool, components.Component) def test_getComponentsOfShape(self): ref = [ "annular void", "bond", "fuel", "gap1", "inner liner", "gap2", "outer liner", "gap3", "clad", ] cur = [c.name for c in self.block.getComponentsOfShape(components.Circle)] self.assertEqual(sorted(ref), sorted(cur)) def test_getComponentsOfMaterial(self): cur = self.block.getComponentsOfMaterial(materials.UZr()) ref = self.block.getComponent(Flags.FUEL) self.assertEqual(cur[0], ref) self.assertEqual( self.block.getComponentsOfMaterial(materials.HT9()), [ self.block.getComponent(Flags.OUTER | Flags.LINER), self.block.getComponent(Flags.CLAD), self.block.getComponent(Flags.WIRE), self.block.getComponent(Flags.DUCT), ], ) # test edge case cur = self.block.getComponentsOfMaterial(None, "UZr") self.assertEqual(cur[0], ref) def test_getComponentByName(self): """Test children by name.""" self.assertIsNone(self.block.getComponentByName("not the droid you are looking for")) self.assertIsNotNone(self.block.getComponentByName("annular void")) def test_getSortedCompsInClad(self): """Test that components can be sorted within a block and returned in the correct order. For an arbitrary example: a clad component. """ expected = [ self.block.getComponentByName(c) for c in [ "annular void", "bond", "fuel", "gap1", "inner liner", "gap2", "outer liner", "gap3", ] ] clad = self.block.getComponent(Flags.CLAD) actual = self.block.getSortedComponentsInsideOfComponent(clad) self.assertListEqual(actual, expected) def test_getSortedCompsInDuct(self): """Test that components can be sorted within a block and returned in the correct order. For an arbitrary example: a duct. 
""" expected = [ self.block.getComponentByName(c) for c in [ "annular void", "bond", "fuel", "gap1", "inner liner", "gap2", "outer liner", "gap3", "clad", "wire", "coolant", ] ] clad = self.block.getComponent(Flags.DUCT) actual = self.block.getSortedComponentsInsideOfComponent(clad) self.assertListEqual(actual, expected) def test_getNumComponents(self): cur = self.block.getNumComponents(Flags.FUEL) ref = self.block.getDim(Flags.FUEL, "mult") self.assertEqual(cur, ref) self.assertEqual(ref, self.block.getNumComponents(Flags.CLAD)) self.assertEqual(1, self.block.getNumComponents(Flags.DUCT)) def test_getNumPins(self): """Test that we can get the number of pins from various blocks. .. test:: Retrieve the number of pins from various blocks. :id: T_ARMI_BLOCK_NPINS :tests: R_ARMI_BLOCK_NPINS """ cur = self.block.getNumPins() ref = self.block.getDim(Flags.FUEL, "mult") self.assertEqual(cur, ref) emptyBlock = blocks.HexBlock("empty") self.assertEqual(emptyBlock.getNumPins(), 0) holedRectangle = complexShapes.HoledRectangle("holedRectangle", "HT9", 100, 100, 0.5, 1.0, 1.0) holedRectangle.setType("component", flags=Flags.CONTROL) emptyBlock.add(holedRectangle) self.assertEqual(emptyBlock.getNumPins(), 0) hexagon = basicShapes.Hexagon("hexagon", "HT9", 100, 100, 1) hexagon.setType("component", flags=Flags.SHIELD) emptyBlock.add(hexagon) self.assertEqual(emptyBlock.getNumPins(), 0) pins = basicShapes.Circle("circle", "HT9", 100, 100, 1, 0, 8) pins.setType("component", flags=Flags.PLENUM) emptyBlock.add(pins) self.assertEqual(emptyBlock.getNumPins(), 8) def test_setLinPowByPin(self): numPins = self.block.getNumPins() neutronPower = [10.0 * i for i in range(numPins)] gammaPower = [1.0 * i for i in range(numPins)] totalPower = [x + y for x, y in zip(neutronPower, gammaPower)] totalPowerKey = "linPowByPin" neutronPowerKey = f"linPowByPin{NEUTRON}" gammaPowerKey = f"linPowByPin{GAMMA}" # Try setting gamma power too early and then reset with self.assertRaises(UnboundLocalError) as 
context:
            self.block.setPinPowers(
                gammaPower,
                powerKeySuffix=GAMMA,
            )
        errorMsg = f"Neutron power has not been set yet. Cannot set total power for {self.block}."
        self.assertTrue(errorMsg in str(context.exception))
        self.block.p[gammaPowerKey] = None

        # Test with no powerKeySuffix
        self.block.setPinPowers(neutronPower)
        assert_allclose(self.block.p[totalPowerKey], np.array(neutronPower))
        self.assertIsNone(self.block.p[neutronPowerKey])
        self.assertIsNone(self.block.p[gammaPowerKey])

        # Test with neutron powers
        self.block.setPinPowers(
            neutronPower,
            powerKeySuffix=NEUTRON,
        )
        assert_allclose(self.block.p[totalPowerKey], np.array(neutronPower))
        assert_allclose(self.block.p[neutronPowerKey], np.array(neutronPower))
        self.assertIsNone(self.block.p[gammaPowerKey])

        # Test with gamma powers: total should now be neutron + gamma
        self.block.setPinPowers(
            gammaPower,
            powerKeySuffix=GAMMA,
        )
        assert_allclose(self.block.p[totalPowerKey], np.array(totalPower))
        assert_allclose(self.block.p[neutronPowerKey], np.array(neutronPower))
        assert_allclose(self.block.p[gammaPowerKey], np.array(gammaPower))

    def test_getComponentAreaFrac(self):
        def calcFracManually(names):
            # sum volume fractions of exactly the named components
            # (closes over ``fracs``, which is assigned below before the first call)
            tFrac = 0.0
            for n in names:
                for c, frac in fracs:
                    if c.getName() == n:
                        tFrac += frac
            return tFrac

        self.block.setHeight(2.0)

        refList = [Flags.BOND, Flags.COOLANT]
        cur = self.block.getComponentAreaFrac(refList)
        fracs = self.block.getVolumeFractions()

        ref = calcFracManually(("bond", "coolant"))
        places = 6
        self.assertAlmostEqual(cur, ref, places=places)

        # allow inexact for things like fuel1, fuel2 or clad vs.
cladding
        val = self.block.getComponentAreaFrac([Flags.COOLANT, Flags.INTERCOOLANT])
        ref = calcFracManually(["coolant", "interCoolant"])
        refWrong = calcFracManually(
            ["coolant", "interCoolant", "clad"]
        )  # can't use 'clad' b/c ``calcFracManually`` is exact only
        self.assertAlmostEqual(ref, val)
        self.assertNotAlmostEqual(refWrong, val)

    def test_100_getPinPitch(self):
        # pin pitch for a wire-wrapped pin is clad OD plus wire OD
        cur = self.block.getPinPitch()
        ref = self.block.getDim(Flags.CLAD, "od") + self.block.getDim(Flags.WIRE, "od")
        places = 6
        self.assertAlmostEqual(cur, ref, places=places)

    def test_101_getPitch(self):
        # with returnComp=True, getPitch returns (pitch, defining component);
        # the intercoolant is the outermost (pitch-defining) component here
        cur = self.block.getPitch(returnComp=True)
        ref = (
            self.block.getDim(Flags.INTERCOOLANT, "op"),
            self.block.getComponent(Flags.INTERCOOLANT),
        )
        self.assertEqual(cur, ref)

        # a deep copy must yield the same pitch but a distinct defining component
        newb = copy.deepcopy(self.block)
        p1, c1 = self.block.getPitch(returnComp=True)
        p2, c2 = newb.getPitch(returnComp=True)

        self.assertNotEqual(c1, c2)
        self.assertEqual(newb.getLargestComponent("op"), c2)
        self.assertEqual(p1, p2)

    def test_102_setPitch(self):
        # setting the pitch should update the outer dimension of the intercoolant
        pitch = 17.5
        self.block.setPitch(pitch)
        cur = self.block.getPitch()
        self.assertEqual(cur, pitch)
        self.assertEqual(self.block.getComponent(Flags.INTERCOOLANT).getDimension("op"), pitch)

    def test_106_getAreaFractions(self):
        # compare getVolumeFractions against fractions computed by hand from component areas
        cur = self.block.getVolumeFractions()
        tot = 0.0
        areas = []
        for c in self.block.iterComponents():
            a = c.getArea()
            tot += a
            areas.append((c, a))

        fracs = {}
        for c, a in areas:
            fracs[c.getName()] = a / tot

        places = 6
        for c, a in cur:
            self.assertAlmostEqual(a, fracs[c.getName()], places=places)

        self.assertAlmostEqual(sum(fracs.values()), sum([a for c, a in cur]))

    def test_expandElementalToIsotopics(self):
        """Tests the expand to elementals capability."""
        initialN = {}
        initialM = {}
        byName = NuclideBases().byName
        elementals = [byName[nn] for nn in ["FE", "CR", "SI", "V", "MO"]]
        for elemental in elementals:
            initialN[elemental] = self.block.getNumberDensity(elemental.name)  # homogenized
            initialM[elemental] = self.block.getMass(elemental.name)

        for elemental in elementals:
self.block.expandElementalToIsotopics(elemental) newDens = 0.0 newMass = 0.0 for natNuc in elemental.getNaturalIsotopics(): newDens += self.block.getNumberDensity(natNuc.name) newMass += self.block.getMass(natNuc.name) self.assertAlmostEqual( initialN[elemental], newDens, msg="Isotopic {2} ndens does not add up to {0}. It adds to {1}".format( initialN[elemental], newDens, elemental ), ) self.assertAlmostEqual( initialM[elemental], newMass, msg="Isotopic {2} mass does not add up to {0} g. It adds to {1}".format( initialM[elemental], newMass, elemental ), ) def test_expandAllElementalsToIsotopics(self): """Tests the expand all elementals simlutaneously capability.""" initialN = {} initialM = {} byName = NuclideBases().byName elementals = [byName[nn] for nn in ["FE", "CR", "SI", "V", "MO"]] for elemental in elementals: initialN[elemental] = self.block.getNumberDensity(elemental.name) # homogenized initialM[elemental] = self.block.getMass(elemental.name) self.block.expandAllElementalsToIsotopics() for elemental in elementals: newDens = 0.0 newMass = 0.0 for natNuc in elemental.getNaturalIsotopics(): newDens += self.block.getNumberDensity(natNuc.name) newMass += self.block.getMass(natNuc.name) self.assertAlmostEqual( initialN[elemental], newDens, msg="Isotopic {2} ndens does not add up to {0}. It adds to {1}".format( initialN[elemental], newDens, elemental ), ) self.assertAlmostEqual( initialM[elemental], newMass, msg="Isotopic {2} mass does not add up to {0} g. It adds to {1}".format( initialM[elemental], newMass, elemental ), ) def test_setPitch(self): """ Checks consistency after adjusting pitch. Needed to verify fix to Issue #165. 
""" b = self.block moles1 = b.p.molesHmBOL b.setPitch(17.5) moles2 = b.p.molesHmBOL self.assertAlmostEqual(moles1, moles2) b.setPitch(20.0) moles3 = b.p.molesHmBOL self.assertAlmostEqual(moles2, moles3) def test_setImportantParams(self): """Confirm that important block parameters can be set and get.""" # Test ability to set and get flux applyDummyData(self.block) self.assertEqual(self.block.p.mgFlux[0], 161720716762.12997) self.assertEqual(self.block.p.mgFlux[-1], 601494405.293505) # Test ability to set and get number density fuel = self.block.getComponent(Flags.FUEL) u235_dens = fuel.getNumberDensity("U235") self.assertEqual(u235_dens, 0.003695461770836022) fuel.setNumberDensity("U235", 0.5) u235_dens = fuel.getNumberDensity("U235") self.assertEqual(u235_dens, 0.5) # TH parameter test self.assertEqual(0, self.block.p.THmassFlowRate) self.block.p.THmassFlowRate = 10 self.assertEqual(10, self.block.p.THmassFlowRate) def test_getMfp(self): """Test mean free path.""" applyDummyData(self.block) # These are unverified numbers, just the result of this calculation. mfp, mfpAbs, diffusionLength = self.block.getMfp() # no point testing these number to high accuracy. assert_allclose(3.9, mfp, rtol=0.1) assert_allclose(235.0, mfpAbs, rtol=0.1) assert_allclose(17.0, diffusionLength, rtol=0.1) def test_consistentMassDensVolCold(self): """Consistent mass density and volume betwen cold block and component.""" block = self.block expectedData = [] actualData = [] for c in block: expectedData.append(getComponentData(c)) actualData.append((c, c.density(), c.getVolume(), c.density() * c.getVolume())) for expected, actual in zip(expectedData, actualData): msg = ( "Data (component, density, volume, mass) for component {} does not match. 
" "Expected: {}, Actual: {}".format(expected[0], expected, actual) ) for expectedVal, actualVal in zip(expected, actual): self.assertAlmostEqual(expectedVal, actualVal, msg=msg) def test_consistentMassDensVolHot(self): """Consistent mass density and volume betwen hot block and component.""" block = self._hotBlock expectedData = [] actualData = [] for c in block: expectedData.append(getComponentData(c)) actualData.append((c, c.density(), c.getVolume(), c.density() * c.getVolume())) for expected, actual in zip(expectedData, actualData): msg = ( "Data (component, density, volume, mass) for component {} does not match. " "Expected: {}, Actual: {}".format(expected[0], expected, actual) ) for expectedVal, actualVal in zip(expected, actual): self.assertAlmostEqual(expectedVal, actualVal, msg=msg) def test_consistentAreaWithOverlappingComp(self): """ Test that negative gap areas correctly account for area overlapping upon thermal expansion. Notes ----- This test calculates a reference coolant area by subtracting the areas of the intercoolant, duct, wire wrap, and pins from the total hex block area. The area of the pins is calculated using only the outer radius of the clad. This avoids the use of negative areas as implemented in Block.getVolumeFractions. Na-23 mass will not be conserved as when duct/clad expands sodium is evacuated. 
See Also -------- armi.reactor.blocks.Block.getVolumeFractions """ numFE56 = self.block.getNumberOfAtoms("FE56") numU235 = self.block.getNumberOfAtoms("U235") for c in self.block: c.setTemperature(700) hasNegativeArea = any(c.getArea() < 0 for c in self.block) self.assertTrue(hasNegativeArea) self.block.getVolumeFractions() # sets coolant area self._testDimensionsAreLinked() # linked dimensions are needed for this test to work blockPitch = self.block.getPitch() self.assertAlmostEqual(blockPitch, self.block.getComponent(Flags.INTERCOOLANT).getDimension("op")) totalHexArea = blockPitch**2 * math.sqrt(3) / 2.0 clad = self.block.getComponent(Flags.CLAD) pinArea = math.pi / 4.0 * clad.getDimension("od") ** 2 * clad.getDimension("mult") ref = ( totalHexArea - self.block.getComponent(Flags.INTERCOOLANT).getArea() - self.block.getComponent(Flags.DUCT).getArea() - self.block.getComponent(Flags.WIRE).getArea() - pinArea ) self.assertAlmostEqual(totalHexArea, self.block.getArea()) self.assertAlmostEqual(ref, self.block.getComponent(Flags.COOLANT).getArea()) self.assertTrue(np.allclose(numFE56, self.block.getNumberOfAtoms("FE56"))) self.assertTrue(np.allclose(numU235, self.block.getNumberOfAtoms("U235"))) def _testDimensionsAreLinked(self): prevC = None for c in self.block.getComponentsOfShape(components.Circle): if prevC: self.assertAlmostEqual(prevC.getDimension("od"), c.getDimension("id")) prevC = c self.assertAlmostEqual( self.block.getComponent(Flags.DUCT).getDimension("op"), self.block.getComponent(Flags.INTERCOOLANT).getDimension("ip"), ) def test_pinMgFluxes(self): """Test setting/getting of pin-wise multigroup fluxes.""" self.assertIsNone(self.block.p.pinMgFluxes) self.assertIsNone(self.block.p.pinMgFluxesAdj) self.assertIsNone(self.block.p.pinMgFluxesGamma) nFlux = np.random.rand(10, 33) aFlux = np.random.random(nFlux.shape) gFlux = np.random.random(nFlux.shape) self.block.setPinMgFluxes(nFlux) assert_array_equal(self.block.p.pinMgFluxes, nFlux) 
self.assertIsNone(self.block.p.pinMgFluxesAdj) self.assertIsNone(self.block.p.pinMgFluxesGamma) self.block.setPinMgFluxes(aFlux, adjoint=True) assert_array_equal(self.block.p.pinMgFluxesAdj, aFlux) # Make sure we didn't modify anything else assert_array_equal(self.block.p.pinMgFluxes, nFlux) self.assertIsNone(self.block.p.pinMgFluxesGamma) self.block.setPinMgFluxes(gFlux, gamma=True) assert_array_equal(self.block.p.pinMgFluxesGamma, gFlux) assert_array_equal(self.block.p.pinMgFluxesAdj, aFlux) assert_array_equal(self.block.p.pinMgFluxes, nFlux) def test_getComponentsInLinkedOrder(self): comps = self.block.getComponentsInLinkedOrder() self.assertEqual(len(comps), len(self.block)) comps.pop(0) with self.assertRaises(RuntimeError): _ = self.block.getComponentsInLinkedOrder(comps) def test_mergeWithBlock(self): fuel1 = self.block.getComponent(Flags.FUEL) fuel1.setNumberDensity("CM246", 0.0) block2 = loadTestBlock() fuel2 = block2.getComponent(Flags.FUEL) fuel2.setNumberDensity("CM246", 0.02) self.assertEqual(self.block.getNumberDensity("CM246"), 0.0) self.block.mergeWithBlock(block2, 0.1) self.assertGreater(self.block.getNumberDensity("CM246"), 0.0) self.assertLess(self.block.getNumberDensity("CM246"), 0.02) def test_getDimensions(self): dims = self.block.getDimensions("od") self.assertIn(self.block.getComponent(Flags.FUEL).p.od, dims) def test_getPlenumPin(self): pin = self.block.getPlenumPin() self.assertIsNone(pin) b = copy.deepcopy(self.block) b.p.flags = Flags.fromString("plenum aclp") pinDims = { "Tinput": 25, "Thot": 250, "od": 1.0, "id": 0, "mult": 1, } pin = components.Circle("plenum pin", "HT9", **pinDims) pin.p.flags = Flags.fromString("gap") b.add(pin) pin = b.getPlenumPin() self.assertTrue(pin) def test_pinPitches(self): self.assertTrue(self.block.hasPinPitch()) self.assertAlmostEqual(self.block.getPinPitch(cold=True), 1.15) self.assertAlmostEqual(self.block.getPinPitch(cold=False), 1.15) def test_getReactionRates(self): block = blocks.HexBlock("HexBlock") 
block.setType("defaultType") comp = basicShapes.Hexagon("hexagon", "MOX", 1, 1, 1) block.add(comp) block.setHeight(1) block.p.xsType = "A" r = tests.getEmptyHexReactor() assembly = makeTestAssembly(1, 1, r=r) assembly.add(block) r.core.add(assembly) r.core.lib = isotxs.readBinary(ISOAA_PATH) block.p.mgFlux = 1 self.assertAlmostEqual( block.getReactionRates("PU239")["nG"], block.getNumberDensity("PU239") * sum(r.core.lib["PU39AA"].micros.nGamma), ) # the key is invalid, so should get back all zeros self.assertEqual( block.getReactionRates("PU39"), {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0, "n3n": 0}, ) def test_getComponentsThatAreLinkedTo(self): c = self.block.getFirstComponent(Flags.FUEL) linked = self.block.getComponentsThatAreLinkedTo(c, "id") self.assertEqual(linked[0][1], "od") c = self.block.getFirstComponent(Flags.CLAD) linked = self.block.getComponentsThatAreLinkedTo(c, "id") self.assertEqual(linked[0][1], "od") c = self.block.getFirstComponent(Flags.DUCT) linked = self.block.getComponentsThatAreLinkedTo(c, "ip") self.assertEqual(len(linked), 0) class BlockInputHeightsTests(unittest.TestCase): def test_foundReactor(self): """Test the input height is pullable from blueprints.""" r = loadTestReactor()[1] msg = "Input height from blueprints differs. Did a blueprint get updated and not this test?" # Grab a block from an assembly, so long as we have the height assem = r.core.getFirstAssembly(Flags.IGNITER | Flags.FUEL) lowerB = assem[0] self.assertEqual( lowerB.getInputHeight(), 25, msg=msg, ) # Grab another block just for good measure midBlock = assem[2] self.assertEqual( midBlock.getInputHeight(), 25, msg=msg, ) # Top block has a different height. 
Make sure we don't just # return 25 all the time topBlock = assem[4] self.assertEqual(topBlock.getInputHeight(), 75, msg=msg) def test_noBlueprints(self): """Verify an error is raised if there are no blueprints.""" b = buildSimpleFuelBlock() with self.assertRaisesRegex(AttributeError, "No ancestor.*blueprints"): b.getInputHeight() class BlockEnergyDepositionConstants(unittest.TestCase): """Tests the energy deposition methods. MagicMocks xsCollections.compute*Constants() -- we're not testing those methods specifically so just make sure they're hit """ @classmethod def setUpClass(cls): cls.block = loadTestBlock() def setUp(self): self.block.core.lib = MagicMock() @patch.object(xsCollections, "computeFissionEnergyGenerationConstants") @patch.object(xsCollections, "computeCaptureEnergyGenerationConstants") def test_getTotalEnergyGenerationConstants(self, mock_capture, mock_fission): """Mock both xsCollections methods so you get complete coverage.""" _x = self.block.getTotalEnergyGenerationConstants() self.assertEqual(mock_fission.call_count, 1) self.assertEqual(mock_capture.call_count, 1) @patch.object(xsCollections, "computeFissionEnergyGenerationConstants") def test_getFissionEnergyDepositionConstants(self, mock_method): """Test RuntimeError and that it gets to the deposition constant call.""" # make sure xsCollections.compute* gets hit _x = self.block.getFissionEnergyGenerationConstants() self.assertEqual(mock_method.call_count, 1) # set core.lib to None and get RuntimeError self.block.core.lib = None with self.assertRaises(RuntimeError): # fails because this test reactor does not have a cross-section library _x = self.block.getFissionEnergyGenerationConstants() @patch.object(xsCollections, "computeCaptureEnergyGenerationConstants") def test_getCaptureEnergyGenerationConstants(self, mock_method): """Test RuntimeError and that it gets to the deposition constant call.""" # make sure xsCollections.compute* gets hit _x = self.block.getCaptureEnergyGenerationConstants() 
self.assertEqual(mock_method.call_count, 1) # set core.lib to None and get RuntimeError self.block.core.lib = None with self.assertRaises(RuntimeError): # fails because this test reactor does not have a cross-section library _x = self.block.getCaptureEnergyGenerationConstants() @patch.object(xsCollections, "computeNeutronEnergyDepositionConstants") def test_getNeutronEnergyDepositionConstants(self, mock_method): """Test RuntimeError and that it gets to the deposition constant call.""" # make sure xsCollections.compute* gets hit _x = self.block.getNeutronEnergyDepositionConstants() self.assertEqual(mock_method.call_count, 1) # set core.lib to None and get RuntimeError self.block.core.lib = None with self.assertRaises(RuntimeError): _x = self.block.getNeutronEnergyDepositionConstants() @patch.object(xsCollections, "computeGammaEnergyDepositionConstants") def test_getGammaEnergyDepositionConstants(self, mock_method): """Test RuntimeError and that it gets to the deposition constant call.""" # make sure xsCollections.compute* gets hit _x = self.block.getGammaEnergyDepositionConstants() self.assertEqual(mock_method.call_count, 1) # set core.lib to None and get RuntimeError self.block.core.lib = None with self.assertRaises(RuntimeError): # fails because this test reactor does not have a cross-section library _x = self.block.getGammaEnergyDepositionConstants() class TestNegativeVolume(unittest.TestCase): def test_negativeVolume(self): """Build a Block with WAY too many fuel pins & show that the derived volume is negative.""" block = blocks.HexBlock("TestHexBlock") coldTemp = 20 hotTemp = 200 fuelDims = { "Tinput": coldTemp, "Thot": hotTemp, "od": 0.84, "id": 0.6, "mult": 1000.0, # pack in too many fuels } fuel = components.Circle("fuel", "UZr", **fuelDims) coolantDims = {"Tinput": hotTemp, "Thot": hotTemp} coolant = components.DerivedShape("coolant", "Sodium", **coolantDims) interDims = { "Tinput": hotTemp, "Thot": hotTemp, "op": 17.8, "ip": 17.3, "mult": 1.0, } 
interSodium = components.Hexagon("interCoolant", "Sodium", **interDims) block.add(fuel) block.add(coolant) block.add(interSodium) block.setHeight(16.0) with self.assertRaises(ValueError): block.getVolumeFractions() class TestHexBlock(unittest.TestCase): def setUp(self): self.hexBlock = blocks.HexBlock("TestHexBlock") hexDims = {"Tinput": 273.0, "Thot": 273.0, "op": 70.6, "ip": 70.0, "mult": 1.0} self.hexComponent = components.Hexagon("duct", "UZr", **hexDims) self.hexBlock.add(self.hexComponent) self.hexBlock.add(components.Circle("clad", "HT9", Tinput=273.0, Thot=273.0, od=0.1, mult=169.0)) self.hexBlock.add(components.Circle("wire", "HT9", Tinput=273.0, Thot=273.0, od=0.01, mult=169.0)) self.hexBlock.add(components.DerivedShape("coolant", "Sodium", Tinput=273.0, Thot=273.0)) self.r = tests.getEmptyHexReactor() self.hexBlock.autoCreateSpatialGrids(self.r.core.spatialGrid) a = makeTestAssembly(1, 1) a.add(self.hexBlock) loc1 = self.r.core.spatialGrid[0, 1, 0] self.r.core.add(a, loc1) def test_getArea(self): """Test that we can correctly calculate the area of a hexagonal block. .. test:: Users can create blocks that have the correct hexagonal area. :id: T_ARMI_BLOCK_HEX0 :tests: R_ARMI_BLOCK_HEX """ # Test for various outer and inner pitches for HexBlocks with hex holes for op in (20.0, 20.4, 20.1234, 25.001): for ip in (0.0, 5.0001, 7.123, 10.0): # generate a block with a different outer pitch hBlock = blocks.HexBlock("TestAreaHexBlock") hexDims = { "Tinput": 273.0, "Thot": 273.0, "op": op, "ip": ip, "mult": 1.0, } hComponent = components.Hexagon("duct", "UZr", **hexDims) hBlock.add(hComponent) # verify the area of the hexagon (with a hex hole) is correct cur = hBlock.getArea() ref = math.sqrt(3) / 2.0 * op**2 ref -= math.sqrt(3) / 2.0 * ip**2 self.assertAlmostEqual(cur, ref, places=6, msg=str(op)) def test_component_type(self): """ Test that a hex block has the proper "hexagon" __name__. .. test:: Users can create blocks with a hexagonal shape. 
:id: T_ARMI_BLOCK_HEX1 :tests: R_ARMI_BLOCK_HEX """ pitch_comp_type = self.hexBlock.PITCH_COMPONENT_TYPE[0] self.assertEqual(pitch_comp_type.__name__, "Hexagon") def test_coords(self): """ Test that coordinates are retrievable from a block. .. test:: Coordinates of a block are queryable. :id: T_ARMI_BLOCK_POSI1 :tests: R_ARMI_BLOCK_POSI """ core = self.hexBlock.core a = self.hexBlock.parent loc1 = core.spatialGrid[0, 1, 0] a.spatialLocator = loc1 x0, y0 = self.hexBlock.coords() a.spatialLocator = core.spatialGrid[0, -1, 0] # symmetric x2, y2 = self.hexBlock.coords() a.spatialLocator = loc1 self.hexBlock.p.displacementX = 0.01 self.hexBlock.p.displacementY = 0.02 x1, y1 = self.hexBlock.coords() # make sure displacements are working self.assertAlmostEqual(x1 - x0, 1.0) self.assertAlmostEqual(y1 - y0, 2.0) # make sure location symmetry is working self.assertAlmostEqual(x0, -x2) self.assertAlmostEqual(y0, -y2) def test_getNumPins(self): self.assertEqual(self.hexBlock.getNumPins(), 169) def test_block_dims(self): """Tests that the block class can provide basic dimensionality information about itself.""" self.assertAlmostEqual(4316.582, self.hexBlock.getVolume(), 3) self.assertAlmostEqual(70.6, self.hexBlock.getPitch(), 1) self.assertAlmostEqual(4316.582, self.hexBlock.getMaxArea(), 3) self.assertEqual(70, self.hexBlock.getDuctIP()) self.assertEqual(70.6, self.hexBlock.getDuctOP()) self.assertAlmostEqual(34.273, self.hexBlock.getPinToDuctGap(), 3) self.assertEqual(0.11, self.hexBlock.getPinPitch()) self.assertAlmostEqual(300.889, self.hexBlock.getWettedPerimeter(), 3) self.assertAlmostEqual(4242.184, self.hexBlock.getFlowArea(), 3) self.assertAlmostEqual(56.395, self.hexBlock.getHydraulicDiameter(), 3) def test_symmetryFactor(self): # full hex self.hexBlock.spatialLocator = self.hexBlock.core.spatialGrid[2, 0, 0] self.hexBlock.clearCache() self.assertEqual(1.0, self.hexBlock.getSymmetryFactor()) a0 = self.hexBlock.getArea() v0 = self.hexBlock.getVolume() m0 = 
self.hexBlock.getMass() # 1/3 symmetric self.hexBlock.spatialLocator = self.hexBlock.core.spatialGrid[0, 0, 0] self.hexBlock.clearCache() self.assertEqual(3.0, self.hexBlock.getSymmetryFactor()) self.assertEqual(a0 / 3.0, self.hexBlock.getArea()) self.assertEqual(v0 / 3.0, self.hexBlock.getVolume()) self.assertAlmostEqual(m0 / 3.0, self.hexBlock.getMass()) def test_retainState(self): """Ensure retainState restores params and spatialGrids.""" self.hexBlock.spatialGrid = grids.HexGrid.fromPitch(1.0) self.hexBlock.setType("intercoolant") with self.hexBlock.retainState(): self.hexBlock.setType("fuel") self.hexBlock.spatialGrid.changePitch(2.0) self.assertAlmostEqual(self.hexBlock.spatialGrid.pitch, 1.0) self.assertTrue(self.hexBlock.hasFlags(Flags.INTERCOOLANT)) def test_getPinLocations(self): """Test pin locations can be obtained.""" locs = set(self.hexBlock.getPinLocations()) nPins = self.hexBlock.getNumPins() self.assertEqual(len(locs), nPins) for l in locs: self.assertIs(l.grid, self.hexBlock.spatialGrid) # Check all clad components are represented for c in self.hexBlock.getChildrenWithFlags(Flags.CLAD): if isinstance(c.spatialLocator, grids.MultiIndexLocation): for l in c.spatialLocator: locs.remove(l) else: locs.remove(c.spatialLocator) self.assertFalse( locs, msg="Some clad locations were not found but returned by getPinLocations", ) def test_getPinCoordsAndLocsAgree(self): """Ensure consistency in ordering of pin locations and coordinates.""" locs = self.hexBlock.getPinLocations() coords = self.hexBlock.getPinCoordinates() self.assertEqual(len(locs), len(coords)) for loc, coord in zip(locs, coords): convertedCoords = loc.getLocalCoordinates() np.testing.assert_array_equal(coord, convertedCoords, err_msg=f"{loc=}") def test_getPinCoords(self): blockPitch = self.hexBlock.getPitch() pinPitch = self.hexBlock.getPinPitch() nPins = self.hexBlock.getNumPins() side = hexagon.side(blockPitch) xyz = self.hexBlock.getPinCoordinates() x, y, z = xyz.T # these two pins 
should be side by side self.assertTrue(self.hexBlock.spatialGrid.cornersUp) self.assertAlmostEqual(y[1], y[2]) self.assertAlmostEqual(x[1], -x[2]) self.assertEqual(len(xyz), self.hexBlock.getNumPins()) # ensure all pins are within the proper bounds of a # flats-up oriented hex block self.assertLess(max(y), blockPitch / 2.0) self.assertGreater(min(y), -blockPitch / 2.0) self.assertLess(max(x), side) self.assertGreater(min(x), -side) # center pin should be at 0 mags = x * x + y * y minIndex = mags.argmin() cx = x[minIndex] cy = y[minIndex] self.assertAlmostEqual(cx, 0.0) self.assertAlmostEqual(cy, 0.0) # extreme pin should be at proper radius cornerMag = mags.max() nRings = hexagon.numRingsToHoldNumCells(nPins) - 1 self.assertAlmostEqual(math.sqrt(cornerMag), nRings * pinPitch) # all z coords equal to zero np.testing.assert_equal(z, 0) def test_getPitchHomogeneousBlock(self): """ Demonstrate how to communicate pitch on a hex block with unshaped components. Notes ----- This assumes there are 3 materials in the homogeneous block, one with half the area fraction, and 2 with 1/4 each. """ desiredPitch = 14.0 hexTotalArea = hexagon.area(desiredPitch) compArgs = {"Tinput": 273.0, "Thot": 273.0} areaFractions = [0.5, 0.25, 0.25] materials = ["HT9", "UZr", "Sodium"] # There are 2 ways to do this, the first is to pick a component to be the pitch defining # component, and given it the shape of a hexagon to define the pitch. The hexagon outer # pitch (op) is defined by the pitch of the block/assembly. The ip is defined by whatever # thickness is necessary to have the desired area fraction. The second way is shown in the # second half of this test. hexBlock = blocks.HexBlock("TestHexBlock") hexComponentArea = areaFractions[0] * hexTotalArea # Picking 1st material to use for the hex component here, but really the choice is # arbitrary. 
area grows quadratically with op ipNeededForCorrectArea = desiredPitch * areaFractions[0] ** 0.5 self.assertEqual(hexComponentArea, hexTotalArea - hexagon.area(ipNeededForCorrectArea)) hexArgs = {"op": desiredPitch, "ip": ipNeededForCorrectArea, "mult": 1.0} hexArgs.update(compArgs) pitchDefiningComponent = components.Hexagon("pitchComp", materials[0], **hexArgs) hexBlock.add(pitchDefiningComponent) # hex component is added, now add the rest as unshaped. for aFrac, material in zip(areaFractions[1:], materials[1:]): unshapedArgs = {"area": hexTotalArea * aFrac} unshapedArgs.update(compArgs) name = f"unshaped {material}" comp = components.UnshapedComponent(name, material, **unshapedArgs) hexBlock.add(comp) self.assertEqual(desiredPitch, hexBlock.getPitch()) self.assertAlmostEqual(hexTotalArea, hexBlock.getMaxArea()) self.assertAlmostEqual(sum(c.getArea() for c in hexBlock), hexTotalArea) # For this second way, we will simply define the 3 components as unshaped, with the desired # area fractions, and make a 4th component that is an infinitely thin hexagon with the the # desired pitch. The downside of this method is that now the block has a fourth component # with no volume. hexBlock = blocks.HexBlock("TestHexBlock") for aFrac, material in zip(areaFractions, materials): unshapedArgs = {"area": hexTotalArea * aFrac} unshapedArgs.update(compArgs) name = f"unshaped {material}" comp = components.UnshapedComponent(name, material, **unshapedArgs) hexBlock.add(comp) # We haven't set a pitch defining component this time so set it now with 0 area. 
pitchDefiningComponent = components.Hexagon( "pitchComp", "Void", op=desiredPitch, ip=desiredPitch, mult=1, **compArgs ) hexBlock.add(pitchDefiningComponent) self.assertEqual(desiredPitch, hexBlock.getPitch()) self.assertAlmostEqual(hexTotalArea, hexBlock.getMaxArea()) self.assertAlmostEqual(sum(c.getArea() for c in hexBlock), hexTotalArea) def test_getDuctPitch(self): ductIP = self.hexBlock.getDuctIP() self.assertAlmostEqual(70.0, ductIP) ductOP = self.hexBlock.getDuctOP() self.assertAlmostEqual(70.6, ductOP) def test_getPinCenterFlatToFlat(self): nRings = hexagon.numRingsToHoldNumCells(self.hexBlock.getNumPins()) pinPitch = self.hexBlock.getPinPitch() pinCenterCornerToCorner = 2 * (nRings - 1) * pinPitch pinCenterFlatToFlat = math.sqrt(3.0) / 2.0 * pinCenterCornerToCorner f2f = self.hexBlock.getPinCenterFlatToFlat() self.assertAlmostEqual(pinCenterFlatToFlat, f2f) def test_gridCreation(self): """Create a grid for a block, and show that it can handle components with multiplicity > 1. .. test:: Grids can handle components with multiplicity > 1. 
:id: T_ARMI_GRID_MULT :tests: R_ARMI_GRID_MULT """ b = self.hexBlock # The block should have a spatial grid at construction, # since it has mults = 1 or 169 from setup b.autoCreateSpatialGrids(self.r.core.spatialGrid) self.assertIsNotNone(b.spatialGrid) for c in b: if c.getDimension("mult", cold=True) == 169: # Then it's spatialLocator must be of size 169 locations = c.spatialLocator self.assertEqual(type(locations), grids.MultiIndexLocation) mult = 0 uniqueLocations = set() for loc in locations: mult = mult + 1 # test for the uniqueness of the locations (since mult > 1) if loc not in uniqueLocations: uniqueLocations.add(loc) else: self.assertTrue(False, msg="Duplicate location found!") self.assertEqual(mult, 169) def test_gridNumPinsAndLocations(self): b = blocks.HexBlock("fuel", height=10.0) fuelDims = {"Tinput": 25.0, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 168.0} cladDims = {"Tinput": 25.0, "Thot": 450, "od": 0.80, "id": 0.77, "mult": 168.0} ductDims = {"Tinput": 25.0, "Thot": 400, "op": 16, "ip": 15.3, "mult": 1.0} wireDims = { "Tinput": 25.0, "Thot": 600, "od": 0.1, "id": 0.0, "axialPitch": 30.0, "helixDiameter": 0.9, "mult": 168.0, } wire = components.Helix("wire", "HT9", **wireDims) fuel = components.Circle("fuel", "UZr", **fuelDims) clad = components.Circle("clad", "HT9", **cladDims) duct = components.Hexagon("duct", "HT9", **ductDims) b.add(fuel) b.add(clad) b.add(duct) b.add(wire) with self.assertRaises(ValueError): b.autoCreateSpatialGrids(self.r.core.spatialGrid) self.assertIsNone(b.spatialGrid) def test_gridNotCreatedMultipleMultiplicities(self): wireDims = { "Tinput": 200, "Thot": 200, "od": 0.1, "id": 0.0, "axialPitch": 30.0, "helixDiameter": 1.1, "mult": 21.0, } # add a wire only some places in the block, so grid should not be created. 
wire = components.Helix("wire", "HT9", **wireDims) self.hexBlock.add(wire) self.hexBlock.spatialGrid = None # clear existing self.hexBlock.autoCreateSpatialGrids(self.r.core.spatialGrid) self.assertIsNone(self.hexBlock.spatialGrid) def test_assignPinIndicesToFullGrid(self): """Ensure we can assign pin indices to fuel if it occupies the entire spatial grid.""" b = blocks.HexBlock("fuel") fuel = components.Circle( "fuel", "UZr", Tinput=25.0, Thot=600.0, od=0.76, mult=169, ) b.add(fuel) clad = components.Circle( "clad", "HT9", Tinput=25.0, Thot=450.0, id=0.77, od=0.80, mult=169, ) b.add(clad) wire = components.Helix( "wire", "HT9", Tinput=25.0, Thot=600, id=0, od=0.1, axialPitch=30, helixDiameter=0.9, mult=169, ) b.add(wire) duct = components.Hexagon("duct", "HT9", Tinput=25.0, Thot=400, ip=15.3, op=16, mult=1) b.add(duct) b.autoCreateSpatialGrids(self.r.core.spatialGrid) self.assertIsNotNone(b.spatialGrid) b.assignPinIndices() self.assertIsNotNone(fuel.p.pinIndices) indices = fuel.getPinIndices() self.assertIsNotNone(indices) np.testing.assert_allclose(indices, np.arange(169, dtype=int)) def test_pinPitches(self): self.assertTrue(self.hexBlock.hasPinPitch()) self.assertAlmostEqual(self.hexBlock.getPinPitch(cold=True), 0.11) self.assertAlmostEqual(self.hexBlock.getPinPitch(cold=False), 0.11) def test_hasPinPitch(self): # A HexBlock with no components inside should return False b = blocks.HexBlock("EmptyHexBlock") self.assertFalse(b.hasPinPitch()) # A HexBlock with only a clad or a wire component, but not both, should return False b.add(components.Circle("clad", "HT9", Tinput=273.0, Thot=273.0, od=0.1, mult=169.0)) self.assertFalse(b.hasPinPitch()) # A HexBlock with a clad and a wire component should return True b.add(components.Circle("wire", "HT9", Tinput=273.0, Thot=273.0, od=0.01, mult=169.0)) self.assertTrue(b.hasPinPitch()) def test_getBlocks(self): self.assertEqual(len(self.hexBlock.getBlocks()), 1) def test_getBoronMassEnrich(self): 
self.assertAlmostEqual(self.hexBlock.getBoronMassEnrich(), 0.0) def test_rotationNumbers(self): self.assertEqual(self.hexBlock.getRotationNum(), 0.0) self.hexBlock.setRotationNum(1) self.assertEqual(self.hexBlock.getRotationNum(), 1.0) self.hexBlock.setRotationNum(2) self.assertEqual(self.hexBlock.getRotationNum(), 2.0) class MultiPinIndicesTests(unittest.TestCase): BP_STR = """ blocks: fuel: &fuel_block grid name: fuel grid fuel 1: &fuel_def shape: Circle # Use void material because we don't need nuclides, just components with flags material: Void od: 0.68 Tinput: 25 Thot: 600 latticeIDs: [1] flags: primary fuel clad 1: &clad_def shape: Circle material: Void id: 0.7 od: 0.71 Tinput: 600 Thot: 450 latticeIDs: [1] # Smaller pin so it gets placed earlier in the sorting fuel 2: <<: *fuel_def id: 0.6 latticeIDs: [2] flags: secondary fuel clad 2: <<: *clad_def id: 0.62 od: 0.65 latticeIDs: [2] duct: shape: Hexagon material: Void Tinput: 25 Thot: 450 ip: 15.3 op: 16 assemblies: fuel: specifier: F blocks: [*fuel_block] height: [10] axial mesh points: [1] xs types: [A] grids: fuel grid: geom: hex_corners_up symmetry: full # Kind of a convoluted map but helps test a lot of edge conditions lattice map: | - - - 1 1 1 1 - - 1 1 1 1 1 - 1 1 2 2 1 1 1 1 2 1 2 1 1 1 1 2 2 1 1 1 1 1 1 1 1 2 1 1 nuclide flags: """ @classmethod def setUpClass(cls): cs = settings.Settings() bp: blueprints.Blueprints = blueprints.Blueprints.load(cls.BP_STR) bp._prepConstruction(cs) cls._originalBlock: blocks.HexBlock = bp.blockDesigns["fuel"].construct(cs, bp, 0, 2, 10, "A", {}) def setUp(self): self.block = copy.deepcopy(self._originalBlock) self.block.assignPinIndices() self.allLocations = self.block.getPinLocations() self.fuelPins = self.block.getComponents(Flags.FUEL) def test_nonOverlappingIndices(self): """Test pin indices are complete and non-overlapping.""" foundIndices: set[int] = set() for fp in self.fuelPins: actualIndices = fp.getPinIndices() self.assertIsNotNone(actualIndices, fp) overlap 
= foundIndices.intersection(actualIndices) self.assertFalse(overlap, msg="Found overlapping indices on unique fuel pin") foundIndices.update(actualIndices) # Make sure we have all the indices covered for i in range(len(self.allLocations)): self.assertIn(i, foundIndices) def test_consistentPinOrdering(self): """Test values of pin indices on a component align with pin locations of that component within the block.""" for fp in self.fuelPins: locations: list[grids.IndexLocation] = list(fp.spatialLocator) indices = fp.getPinIndices() self.assertEqual(len(locations), len(indices), msg=fp) for loc, ix in zip(locations, indices): indexInBlock = self.allLocations.index(loc) self.assertEqual(ix, indexInBlock, msg=f"{loc=} in {fp}") def test_noPinIndicesForHexes(self): """Test we never get pin indices for hexagons.""" duct = self.block.getComponent(Flags.DUCT) self.assertIsNone(duct.p.pinIndices) with self.assertRaisesRegex(ValueError, "no pin indices"): duct.getPinIndices() def test_recoverCladIndicesFromFuel(self): """Show the same indices for cladding are found for fuel that it wraps.""" clad = self.block.getComponents(Flags.CLAD)[0] cladIndices = clad.getPinIndices() fuel = self.block.getComponents(Flags.FUEL)[0] fuelIndices = fuel.getPinIndices() # Show not only are they equal, we get literally the same object # through the dimension linking. This only works if the fuel pin # is not at all the lattice sites, or else they'd both be equal # equivalent to np.arange(0, N - 1) but different instances of the same data self.assertIs(cladIndices, fuelIndices) def test_locations(self): """Ensure we have locations consistent with the lattice map.""" primary: components.Circle = self.block.getComponent(Flags.PRIMARY) # Count the number of primary pins in the blueprint above nPrimary = 30 expectedPrimaryRingPos = { (1, 1), } # 12 and 18 pins in one-indexed rings three and four. 
# remember that range is exclusive of the stop expectedPrimaryRingPos.update((3, i) for i in range(1, 13)) expectedPrimaryRingPos.update((4, i) for i in range(1, 19)) # special pin designed to poke some edge cases # remember ARMI hex positions start at 1 in the north east corner and go counterclockwise trickyPin = (4, 11) # drop the tricky pin in the fourth ring expectedPrimaryRingPos.remove(trickyPin) self._checkPinLocationsAndIndices(primary, nPrimary, expectedPrimaryRingPos) secondary: components.Circle = self.block.getComponent(Flags.SECONDARY) nSecondary = 7 # six pins in one-indexed ring two expectedSecondaryRingPos = {(2, i) for i in range(1, 7)} expectedSecondaryRingPos.add(trickyPin) self._checkPinLocationsAndIndices(secondary, nSecondary, expectedSecondaryRingPos) def _checkPinLocationsAndIndices( self, pin: components.Circle, expectedNumPins: int, expectedRingPos: set[tuple[int, int]], ): self.assertEqual( len(expectedRingPos), expectedNumPins, msg="Expected pins and locations differ. 
Your test inputs are not setup correct.", ) self.assertEqual(pin.getDimension("mult"), expectedNumPins) self.assertEqual(len(pin.spatialLocator), expectedNumPins) primaryIndices = pin.getPinIndices() self.assertIsNotNone(primaryIndices) self.assertEqual(primaryIndices.size, expectedNumPins) allLocations = self.block.getPinLocations() for ix in primaryIndices: loc = allLocations[ix] ringPos = loc.getRingPos() self.assertIn(ringPos, expectedRingPos, msg=f"{ix=} : {loc=}") def test_nonFueledBlock(self): """If we have no fuel, but we have clad, we should still have pin indices.""" nonFuel = copy.deepcopy(self._originalBlock) # strip out fuel flags for c in nonFuel.iterComponents(Flags.FUEL): c.p.flags &= ~Flags.FUEL nonFuel.assignPinIndices() # Should still have what ARMI considers pins self.assertTrue(nonFuel.getPinLocations()) for c in nonFuel.iterComponents(Flags.CLAD): self.assertIsNotNone(c.getPinIndices()) def test_assignmentChangesPreviousPinIndices(self): """Show successive calls to assignPinIndices clear out previous state.""" # assign pin indices to something that maybe doesn't need it firstFuel = self.block.getFirstComponent(Flags.FUEL) firstClad = self.block.getFirstComponent(Flags.CLAD) self.assertIsNone(firstClad.p.pinIndices) self.assertIsNotNone(firstFuel.p.pinIndices) firstClad.p.pinIndices = firstFuel.p.pinIndices self.block.assignPinIndices() self.assertIsNone(firstClad.p.pinIndices) def test_fuelAndNonFuel(self): """If you have fuel and non-fuel pins in the block, all pins should have indices still.""" firstBefore = self.fuelPins[0].getPinIndices() secondBefore = self.fuelPins[1].getPinIndices() for c in self.block: c.p.pinIndices = None self.fuelPins[1].p.flags &= ~Flags.FUEL self.block.assignPinIndices() firstAfter = self.fuelPins[0].getPinIndices() assert_array_equal(firstAfter, firstBefore) secondAfter = self.fuelPins[1].getPinIndices() assert_array_equal(secondAfter, secondBefore) def test_reassignOnSort(self): """Show the pin indices are 
class TestHexBlockOrientation(unittest.TestCase):
    """Verify that block grids are rotated 30 degrees relative to the core grid orientation."""

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    @staticmethod
    def getLocalCoordinatesBlockBounds(b: blocks.HexBlock):
        """Call getLocalCoordinates() for every Component in the Block and find the X/Y bounds.

        Parameters
        ----------
        b : blocks.HexBlock
            Block whose component centroid coordinates are scanned.

        Returns
        -------
        tuple of float
            (minX, maxX, minY, maxY) over all component locations in the block.
        """
        maxX = -111
        minX = 999
        maxY = -111
        minY = 999
        for comp in b:
            locs = comp.spatialLocator
            if not isinstance(locs, grids.MultiIndexLocation):
                # single-pin components carry a plain locator; wrap for uniform looping
                locs = [locs]
            for loc in locs:
                x, y, _ = loc.getLocalCoordinates()
                # FIX: these were previously ``if``/``elif`` pairs, so an iteration that
                # updated the max could never update the min. For a monotonically
                # increasing coordinate sequence the min would remain at its sentinel
                # (999), corrupting the returned bounds. Independent checks are correct
                # regardless of visit order.
                if x > maxX:
                    maxX = x
                if x < minX:
                    minX = x
                if y > maxY:
                    maxY = y
                if y < minY:
                    minY = y
        return minX, maxX, minY, maxY

    def test_validateReactorCornersUp(self):
        """Validate the spatial grid for a corners up HexBlock and its children."""
        # load a corners up reactor
        _o, r = loadTestReactor(
            os.path.join(TEST_ROOT, "smallestTestReactor"),
            inputFileName="armiRunSmallest.yaml",
        )

        # grab a pinned fuel block, and verify it is flats up
        b = r.core.getFirstBlock(Flags.FUEL)
        self.assertTrue(r.core.spatialGrid.cornersUp)
        self.assertFalse(b.spatialGrid.cornersUp)
        self.assertNotEqual(r.core.spatialGrid.cornersUp, b.spatialGrid.cornersUp)

        # for a flats up block-grid, the hex centroids should stretch more in Y than X
        minX, maxX, minY, maxY = self.getLocalCoordinatesBlockBounds(b)
        ratio = (maxY - minY) / (maxX - minX)
        self.assertAlmostEqual(ratio, 2 / math.sqrt(3), delta=0.0001)

    def test_validateReactorFlatsUp(self):
        """Validate the spatial grid for a flats up HexBlock and its children."""
        # copy the files over
        inDir = os.path.join(TEST_ROOT, "smallestTestReactor")
        for filePath in glob(os.path.join(inDir, "*.yaml")):
            outPath = os.path.join(self.td.destination, os.path.basename(filePath))
            shutil.copyfile(filePath, outPath)

        # modify the reactor to make it flats up
        testFile = os.path.join(self.td.destination, "refSmallestReactor.yaml")
        txt = open(testFile, "r").read()
        txt = txt.replace("geom: hex_corners_up", "geom: hex")
        open(testFile, "w").write(txt)

        # load a flats up reactor
        _o, r = loadTestReactor(self.td.destination, inputFileName="armiRunSmallest.yaml")

        # grab a pinned fuel block, and verify it is corners up
        b = r.core.getFirstBlock(Flags.FUEL)
        self.assertFalse(r.core.spatialGrid.cornersUp)
        self.assertTrue(b.spatialGrid.cornersUp)
        self.assertNotEqual(r.core.spatialGrid.cornersUp, b.spatialGrid.cornersUp)

        # for a corners up block-grid, the hex centroids should stretch more in X than Y
        minX, maxX, minY, maxY = self.getLocalCoordinatesBlockBounds(b)
        ratio = (maxX - minX) / (maxY - minY)
        self.assertAlmostEqual(ratio, 2 / math.sqrt(3), delta=0.0001)
self.ThRZBlock.add( components.DifferentialRadialSegment( "wire", "HT9", Tinput=273.0, Thot=273.0, inner_radius=57.0, radius_differential=3.0, inner_theta=0.0, azimuthal_differential=1.5 * math.pi, inner_axial=5.0, height=10.0, mult=1.0, ) ) # random 1/4 chunk taken out to exercise Theta-RZ block capabilities self.ThRZBlock.add( components.DifferentialRadialSegment( "chunk", "Sodium", Tinput=273.0, Thot=273.0, inner_radius=0.0, radius_differential=60.0, inner_theta=1.5 * math.pi, azimuthal_differential=0.5 * math.pi, inner_axial=5.0, height=10.0, mult=1.0, ) ) def test_radii(self): radialInner = self.ThRZBlock.radialInner() self.assertEqual(0.0, radialInner) radialOuter = self.ThRZBlock.radialOuter() self.assertEqual(60.0, radialOuter) def test_theta(self): thetaInner = self.ThRZBlock.thetaInner() self.assertEqual(0.0, thetaInner) thetaOuter = self.ThRZBlock.thetaOuter() self.assertEqual(2.0 * math.pi, thetaOuter) def test_axial(self): axialInner = self.ThRZBlock.axialInner() self.assertEqual({5.0}, axialInner) axialOuter = self.ThRZBlock.axialOuter() self.assertEqual({15.0}, axialOuter) def test_verifyBlockDims(self): with mockRunLogs.BufferLog() as mock: # we should start with a clean slate, before debug logging self.assertEqual("", mock.getStdout()) runLog.LOG.setVerbosity(logging.WARNING) runLog.LOG.startLog("test_updateComponentDims") # the verify method throws a ton of warnings or raises errors when there are problems self.ThRZBlock.verifyBlockDims() self.assertEqual("", mock.getStdout()) def test_getThetaRZGrid(self): """Since not applicable to ThetaRZ Grids.""" b = self.ThRZBlock self.assertIsNone(b.spatialGrid) b.autoCreateSpatialGrids("FakeSpatilGrid") self.assertIsNotNone(b.spatialGrid) def test_getWettedPerimeter(self): with self.assertRaises(NotImplementedError): _ = self.ThRZBlock.getWettedPerimeter() def test_getHydraulicDiameter(self): with self.assertRaises(NotImplementedError): _ = self.ThRZBlock.getHydraulicDiameter() def test_pinPitches(self): 
class CartesianBlockTests(unittest.TestCase):
    """Tests for blocks with rectangular/square outer shape."""

    # outer width (cm) of the square duct; also the expected block pitch
    PITCH = 70

    def setUp(self):
        self.cartesianBlock = blocks.CartesianBlock("TestCartesianBlock")

        self.cartesianComponent = components.HoledSquare(
            "duct",
            "UZr",
            Tinput=273.0,
            Thot=273.0,
            holeOD=68.0,
            widthOuter=self.PITCH,
            mult=1.0,
        )
        self.cartesianBlock.add(self.cartesianComponent)
        self.cartesianBlock.add(components.Circle("clad", "HT9", Tinput=273.0, Thot=273.0, od=68.0, mult=169.0))

        # two empty Cartesian reactors with the two possible origin conventions;
        # several tests below re-parent the block between them
        self.rCenter = getEmptyCartesianReactor(throughCenterAssembly=True)
        self.rBorder = getEmptyCartesianReactor(throughCenterAssembly=False)
        self.cartesianBlock.parent = self.rCenter.core
        self.cartesianBlock.autoCreateSpatialGrids(self.rCenter.core.spatialGrid)

    def test_getPitchSquare(self):
        # pitch comes from the HoledSquare duct; square blocks report (x, y)
        self.assertEqual(self.cartesianBlock.getPitch(), (self.PITCH, self.PITCH))

    def test_getPitchHomogeneousBlock(self):
        """
        Demonstrate how to communicate pitch on a hex block with unshaped components.

        Notes
        -----
        This assumes there are 3 materials in the homogeneous block, one with half the area
        fraction, and 2 with 1/4 each.
        """
        desiredPitch = (10.0, 12.0)
        rectTotalArea = desiredPitch[0] * desiredPitch[1]

        compArgs = {"Tinput": 273.0, "Thot": 273.0}
        areaFractions = [0.5, 0.25, 0.25]
        materials = ["HT9", "UZr", "Sodium"]

        # There are 2 ways to do this, the first is to pick a component to be the pitch defining component, and given it
        # the shape of a rectangle to define the pitch. The rectangle outer dimensions is defined by the pitch of the
        # block/assembly. The inner dimensions is defined by whatever thickness is necessary to have the desired area
        # fraction. The second way is to define all physical material components as unshaped, and add an additional
        # infinitely thin Void component (no area) that defines pitch. See second part of
        # HexBlock_TestCase.test_getPitchHomogeneousBlock for demonstration.
        cartBlock = blocks.CartesianBlock("TestCartBlock")

        hexComponentArea = areaFractions[0] * rectTotalArea

        # Picking 1st material to use for the hex component here, but really the choice is arbitrary.
        # area grows quadratically with outer dimensions.
        # Note there are infinitely many inner dims that would preserve area, this is just one.
        innerDims = [dim * areaFractions[0] ** 0.5 for dim in desiredPitch]
        self.assertAlmostEqual(hexComponentArea, rectTotalArea - innerDims[0] * innerDims[1])

        rectArgs = {
            "lengthOuter": desiredPitch[0],
            "lengthInner": innerDims[0],
            "widthOuter": desiredPitch[1],
            "widthInner": innerDims[1],
            "mult": 1.0,
        }
        rectArgs.update(compArgs)
        pitchDefiningComponent = components.Rectangle("pitchComp", materials[0], **rectArgs)
        cartBlock.add(pitchDefiningComponent)

        # Rectangle component is added, now add the rest as unshaped.
        for aFrac, material in zip(areaFractions[1:], materials[1:]):
            unshapedArgs = {"area": rectTotalArea * aFrac}
            unshapedArgs.update(compArgs)
            name = f"unshaped {material}"
            comp = components.UnshapedComponent(name, material, **unshapedArgs)
            cartBlock.add(comp)

        self.assertEqual(desiredPitch, cartBlock.getPitch())
        self.assertAlmostEqual(rectTotalArea, cartBlock.getMaxArea())
        self.assertAlmostEqual(sum(c.getArea() for c in cartBlock), rectTotalArea)

    def test_getCartesianGrid(self):
        """Since not applicable to Cartesian Grids."""
        b = self.cartesianBlock
        # the grid was already created in setUp; a second call keeps a CartesianGrid
        self.assertIsNotNone(b.spatialGrid)
        b.autoCreateSpatialGrids("FakeSpatialGrid")
        self.assertIsInstance(b.spatialGrid, CartesianGrid)

    def test_getWettedPerimeter(self):
        # not defined for Cartesian blocks
        with self.assertRaises(NotImplementedError):
            _ = self.cartesianBlock.getWettedPerimeter()

    def test_getHydraulicDiameter(self):
        # not defined for Cartesian blocks
        with self.assertRaises(NotImplementedError):
            _ = self.cartesianBlock.getHydraulicDiameter()

    def test_pinPitches(self):
        self.assertTrue(self.cartesianBlock.hasPinPitch())
        # pin pitch should be temperature-independent here (Tinput == Thot in setUp)
        pinPitch = self.cartesianBlock.getPinPitch(cold=True)
        self.assertAlmostEqual(pinPitch[0], 10.0)
        self.assertAlmostEqual(pinPitch[1], 16.0)
        pinPitch = self.cartesianBlock.getPinPitch(cold=False)
        self.assertAlmostEqual(pinPitch[0], 10.0)
        self.assertAlmostEqual(pinPitch[1], 16.0)

    def test_getBoronMassEnrich(self):
        # no boron in UZr/HT9 components
        self.assertAlmostEqual(self.cartesianBlock.getBoronMassEnrich(), 0.0)

    def test_getPinCenterFlatToFlat(self):
        # test with isThroughCenterAssembly=True
        self.cartesianBlock.parent = self.rCenter.core
        self.assertAlmostEqual(self.cartesianBlock.getPinCenterFlatToFlat(), 226.4155471693585, delta=1e-6)
        # test with isThroughCenterAssembly=False
        self.cartesianBlock.parent = self.rBorder.core
        self.assertAlmostEqual(self.cartesianBlock.getPinCenterFlatToFlat(), 245.2835094334717, delta=1e-6)

    def test_getNumCellsGivenRings(self):
        """
        Testing CartesianBlock.getNumCellsGivenRings, in the two different origin locations.

        There are some diagrams in the docstrings for Cartesian Grids and docs explaining this, but
        the number of cells in a ring on a Cartesian grid changes depending on if the origin is at
        the center of a grid cell, or at the boundary between 4 grid cells.
        """
        # test with isThroughCenterAssembly=True: odd squares 1, 9, 25, 49 ((2n-1)^2)
        self.cartesianBlock.parent = self.rCenter.core
        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(1), 1)
        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(2), 9)
        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(3), 25)
        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(4), 49)

        # test with isThroughCenterAssembly=False: even squares 4, 16, 36, 64 ((2n)^2)
        self.cartesianBlock.parent = self.rBorder.core
        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(1), 4)
        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(2), 16)
        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(3), 36)
        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(4), 64)

    def test_numRingsToHoldNumCells(self):
        """
        Testing CartesianBlock.numRingsToHoldNumCells, in the two different origin locations.

        There are some diagrams in the docstrings for Cartesian Grids and docs explaining this, but
        the number of cells in a ring on a Cartesian grid changes depending on if the origin is at
        the center of a grid cell, or at the boundary between 4 grid cells.
        """
        # test with isThroughCenterAssembly=True
        self.cartesianBlock.parent = self.rCenter.core
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(1), 1)
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(9), 2)
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(24), 3)
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(26), 4)
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(50), 5)

        # test with isThroughCenterAssembly=False
        self.cartesianBlock.parent = self.rBorder.core
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(3), 1)
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(16), 2)
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(36), 3)
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(64), 4)
        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(65), 5)
fuel = self.b.getComponent(Flags.FUEL) height = self.b.getHeight() Thot = fuel.temperatureInC Tcold = fuel.inputTemperatureInC dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold) hotFuelHeight = height * (1 + dllHot) self.b.setHeight(hotFuelHeight) hotFuelU238 = self.b.getNumberDensity("U238") hotFuelIRON = self.b.getNumberDensity("FE") # look at clad clad = self.b.getComponent(Flags.CLAD) Thot = clad.temperatureInC Tcold = clad.inputTemperatureInC dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold) hotCladHeight = height * (1 + dllHot) self.b.setHeight(hotCladHeight) hotCladU238 = self.b.getNumberDensity("U238") hotCladIRON = self.b.getNumberDensity("FE") self.assertAlmostEqual( hotFuelU238, hotCladU238, 10, "Number Density of fuel in one height ({0}) != number density of fuel at another " "height {1}. Number density conservation violated during thermal " "expansion".format(hotFuelU238, hotCladU238), ) self.assertAlmostEqual( hotFuelIRON, hotCladIRON, 10, "Number Density of clad in one height ({0}) != number density of clad at another " "height {1}. Number density conservation violated during thermal " "expansion".format(hotFuelIRON, hotCladIRON), ) def test_massFuelHeatup(self): fuel = self.b.getComponent(Flags.FUEL) massCold = fuel.getMass() fuel.setTemperature(100) massHot = fuel.getMass() self.assertAlmostEqual( massCold, massHot, 10, "Cold mass of fuel ({0}) != hot mass {1}. Mass conservation violated during thermal expansion".format( massCold, massHot ), ) def test_massCladHeatup(self): cladding = self.b.getComponent(Flags.CLAD) massCold = cladding.getMass() cladding.setTemperature(100) massHot = cladding.getMass() self.assertAlmostEqual( massCold, massHot, 10, "Cold mass of clad ({0}) != hot mass {1}. 
Mass conservation violated during thermal expansion".format( massCold, massHot ), ) def test_massDuctHeatup(self): duct = self.b.getComponent(Flags.DUCT) massCold = duct.getMass() duct.setTemperature(100) massHot = duct.getMass() self.assertAlmostEqual( massCold, massHot, 10, "Cold mass of duct ({0}) != hot mass {1}. Mass conservation violated during thermal expansion".format( massCold, massHot ), ) def test_massCoolHeatup(self): """Make sure mass of coolant goes down when it heats up.""" coolant = self.b.getComponent(Flags.COOLANT) massCold = coolant.getMass() coolant.setTemperature(coolant.temperatureInC + 100) massHot = coolant.getMass() self.assertGreater( massCold, massHot, "Cold mass of coolant ({0}) <= hot mass {1}. Mass conservation not violated during " "thermal expansion of coolant".format(massCold, massHot), ) def test_dimensionDuctHeatup(self): duct = self.b.getComponent(Flags.DUCT) pitchCold = duct.getDimension("op", cold=True) duct.setTemperature(100) pitchHot = duct.getDimension("op") dLL = duct.getProperties().linearExpansionFactor(100, 25) correctHot = pitchCold * (1 + dLL) self.assertAlmostEqual( correctHot, pitchHot, 10, "Theoretical pitch of duct ({0}) != hot pitch {1}. Linear expansion violated during " "heatup. \nTc={tc} Tref={tref} dLL={dLL} cold={pcold}".format( correctHot, pitchHot, tc=duct.temperatureInC, tref=duct.inputTemperatureInC, dLL=dLL, pcold=pitchCold, ), ) def test_coldMass(self): """ Verify that the cold mass is what it should be, even though the hot height is input. At the cold temperature (but with hot height), the mass should be the same as at hot temperature and hot height. """ fuel = self.b.getComponent(Flags.FUEL) # set ref (input/cold) temperature. 
Thot = fuel.temperatureInC Tcold = fuel.inputTemperatureInC # change temp to cold fuel.setTemperature(Tcold) massCold = fuel.getMass() fuelArea = fuel.getArea() # we are at cold temp so cold and hot area are equal self.assertAlmostEqual(fuel.getArea(cold=True), fuel.getArea()) height = self.b.getHeight() # hot height. rho = fuel.getProperties().density(Tc=Tcold) # can't use getThermalExpansionFactor since hot=cold so it would be 0 dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold) coldHeight = height / (1 + dllHot) theoreticalMass = fuelArea * coldHeight * rho self.assertAlmostEqual( massCold, theoreticalMass, 7, msg="Cold mass of fuel ({0}) != theoretical mass {1}. Check calculation of cold mass".format( massCold, theoreticalMass ), ) def test_massConsistency(self): """Verify that the sum of the component masses equals the total mass.""" tMass = 0.0 for child in self.b: tMass += child.getMass() bMass = self.b.getMass() self.assertAlmostEqual( tMass, bMass, 10, "Sum of component mass {0} != total block mass {1}. ".format(tMass, bMass), ) ================================================ FILE: armi/reactor/tests/test_components.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests functionalities of components within ARMI.""" import copy import math import random import unittest import numpy as np from numpy.testing import assert_allclose, assert_equal from armi.materials import air, alloy200 from armi.materials.material import Material from armi.reactor import components, flags from armi.reactor.blocks import Block from armi.reactor.components import ( Circle, Component, ComponentType, Cube, DerivedShape, DifferentialRadialSegment, FilletedHexagon, Helix, Hexagon, HexHoledCircle, HoledHexagon, HoledRectangle, HoledSquare, NullComponent, RadialSegment, Rectangle, SolidRectangle, Sphere, Square, Triangle, UnshapedComponent, UnshapedVolumetricComponent, materials, ) from armi.reactor.reactors import Reactor from armi.testing import loadTestReactor from armi.utils.units import getTc class MockCompositionDependentExpander(materials.Material): """Dummy material that has a composition-dependent thermal expansion coefficient.""" def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float: """ Composition-dependent linear expansion coefficient. Parameters ---------- Tk : float, optional Temperature in Kelvin. Tc : float, optional Temperature in Celsius. """ alpha = 1.0e-5 beta = 1.0e-5 * self.parent.getMassFrac("C") refTemp = 20 return (alpha + beta) * getTc(Tc=Tc, Tk=Tk) * (Tc - refTemp) class TestComponentFactory(unittest.TestCase): def getCircleVoidDict(self): return dict( shape="circle", name="gap", Tinput=25, Thot=600, od=2.1, id=0.0, mult=7, material="Void", isotopics="", ) def getCircleFuelDict(self): return dict( shape="circle", name="fuel", Tinput=25, Thot=600, od=2.1, id=0.0, mult=7, material="UZr", isotopics="", ) def test_factory(self): """Creating and verifying void and fuel components. .. test:: Example void and fuel components are initialized. 
class TestGeneralComponents(unittest.TestCase):
    """Base test for all individual component tests."""

    # subclasses override these three attributes to select the shape/material under test
    componentCls = Component
    componentMaterial = "HT9"
    componentDims = {"Tinput": 25.0, "Thot": 25.0}

    def setUp(self, component=None):
        """
        Most of the time nothing will be passed as `component` and the result will be stored in
        self, but you can also pass a component object as `component`, in which case the object
        will be returned with the `parent` attribute assigned.
        """

        class _Parent:
            # minimal stand-in for a parent composite: just enough interface for
            # the component methods exercised by these tests

            def getSymmetryFactor(self):
                return 1.0

            def getHeight(self):
                return 1.0

            def clearCache(self):
                pass

            def __iter__(self):
                """Act like an iterator but don't actually iterate."""
                return iter(())

            derivedMustUpdate = False

        if component is None:
            # default mode: build the component from the class-level attributes
            self.component = self.componentCls("TestComponent", self.componentMaterial, **self.componentDims)
            self.component.parent = _Parent()
        else:
            # helper mode: only attach the stub parent and hand the object back
            component.parent = _Parent()
            return component
:id: T_ARMI_COMP_NUCLIDE_FRACS1 :tests: R_ARMI_COMP_NUCLIDE_FRACS """ component = self.component self.assertAlmostEqual(component.getNumberDensity("MN"), 0.000426, 6) component.setNumberDensities({"C": 1, "MN": 0.58}) self.assertEqual(component.getNumberDensity("C"), 1.0) self.assertEqual(component.getNumberDensity("MN"), 0.58) def test_setNumberDensitiesWithExpansion(self): expansionMaterial = MockCompositionDependentExpander() expansionMaterial.parent = self.component self.component.material = expansionMaterial component = self.component initialVolume = component.getVolume() component.temperatureInC = 50 self.assertAlmostEqual(component.getNumberDensity("MN"), 0.000426, 6) component.setNumberDensities({"C": 1, "MN": 0.58}) newVolume = component.getVolume() expansionFactor = initialVolume / newVolume self.assertEqual(component.getNumberDensity("C"), 1.0 * expansionFactor) self.assertEqual(component.getNumberDensity("MN"), 0.58 * expansionFactor) def test_changeNDensByFactor(self): """Test the ability to change just the component number densities.""" referenceDensity = self.component.getNumberDensities() self.component.p.detailedNDens = None self.component.p.pinNDens = None scalingFactor = random.uniform(0, 10) self.component.changeNDensByFactor(scalingFactor) for nuc, refDens in referenceDensity.items(): actual = self.component.getNumberDensity(nuc) self.assertEqual(actual, refDens * scalingFactor, msg=nuc) self.assertIsNone(self.component.p.detailedNDens) self.assertIsNone(self.component.p.pinNDens) def test_changeNDensByFactorWithExtraParams(self): """Test scaling other parameters when component number density is scaled.""" referenceDensity = self.component.getNumberDensities() refDetailedNDens = np.random.random(100) # Use copy to avoid spoiling the reference data with in-place multiplication self.component.p.detailedNDens = refDetailedNDens.copy() # Array of number densities per pin refPinDens = np.random.random(size=(50, 10)) self.component.p.pinNDens = 
class TestComponent(TestGeneralComponents):
    """Test the base component."""

    componentCls = Component

    def test_initializeComponentMaterial(self):
        """Creating component with single material.

        .. test:: Components are made of one material.
            :id: T_ARMI_COMP_1MAT0
            :tests: R_ARMI_COMP_1MAT
        """
        # name and material come straight from the TestGeneralComponents fixture
        expectedName = "TestComponent"
        actualName = self.component.getName()
        expectedMaterialName = "HT9"
        actualMaterialName = self.component.material.getName()
        self.assertEqual(expectedName, actualName)
        self.assertEqual(expectedMaterialName, actualMaterialName)

    def test_solid_material(self):
        """Determine if material is solid.

        .. test:: Components have material properties.
            :id: T_ARMI_COMP_MAT
            :tests: R_ARMI_COMP_MAT
        """
        # fixture starts with HT9
        self.assertTrue(isinstance(self.component.getProperties(), Material))
        self.assertTrue(hasattr(self.component.material, "density"))
        self.assertIn("HT9", str(self.component.getProperties()))

        # swap in a gas: the component no longer contains a solid material
        self.component.material = air.Air()
        self.assertFalse(self.component.containsSolidMaterial())

        # swap in a metal: solid again, and properties reflect the new material
        self.component.material = alloy200.Alloy200()
        self.assertTrue(self.component.containsSolidMaterial())
        self.assertTrue(isinstance(self.component.getProperties(), Material))
        self.assertTrue(hasattr(self.component.material, "density"))
        self.assertIn("Alloy200", str(self.component.getProperties()))
class TestUnshapedComponent(TestGeneralComponents):
    # component defined directly by its area (pi cm^2 cold), with a real hot/cold delta
    componentCls = UnshapedComponent
    componentMaterial = "HT9"
    componentDims = {"Tinput": 25.0, "Thot": 430.0, "area": math.pi}

    def test_getComponentArea(self):
        # a case without thermal expansion
        self.assertEqual(self.component.getComponentArea(cold=True), math.pi)

        # a case with thermal expansion
        self.assertEqual(
            self.component.getComponentArea(cold=False),
            math.pi * self.component.getThermalExpansionFactor(self.component.temperatureInC) ** 2,
        )

        # Passing temperature directly
        self.assertEqual(
            self.component.getComponentArea(cold=False),
            self.component.getComponentArea(Tc=self.component.temperatureInC),
        )

        # show that area expansion is consistent with the density change in the material
        hotDensity = self.component.density()
        hotArea = self.component.getArea()
        thermalExpansionFactor = self.component.getThermalExpansionFactor(self.component.temperatureInC)

        # build an otherwise-identical component held at the input temperature
        # (setUp in helper mode attaches the stub parent and returns the object)
        coldComponent = self.setUp(
            UnshapedComponent(
                name="coldComponent",
                material=self.componentMaterial,
                Tinput=self.component.inputTemperatureInC,
                Thot=self.component.inputTemperatureInC,
                area=math.pi,
            )
        )
        coldDensity = coldComponent.density()
        coldArea = coldComponent.getArea()

        self.assertGreater(thermalExpansionFactor, 1)

        # thermalExpansionFactor accounts for density being 3D while area is 2D
        self.assertAlmostEqual(
            (coldDensity * coldArea),
            (thermalExpansionFactor * hotDensity * hotArea),
        )

    def test_getBoundingCircleOuterDiameter(self):
        # a case without thermal expansion: area pi => diameter 2
        self.assertEqual(self.component.getBoundingCircleOuterDiameter(cold=True), 2.0)

        # a case with thermal expansion
        self.assertEqual(
            self.component.getBoundingCircleOuterDiameter(cold=False),
            2.0 * self.component.getThermalExpansionFactor(self.component.temperatureInC),
        )

    def test_component_less_than(self):
        """Ensure that comparisons between components properly reference bounding circle outer diameter.

        .. test:: Order components by their outermost diameter
            :id: T_ARMI_COMP_ORDER
            :tests: R_ARMI_COMP_ORDER
        """
        componentCls = UnshapedComponent
        componentMaterial = "HT9"

        # self.component has area 1.0 * pi; build smaller, equal, and bigger peers
        smallDims = {"Tinput": 25.0, "Thot": 430.0, "area": 0.5 * math.pi}
        sameDims = {"Tinput": 25.0, "Thot": 430.0, "area": 1.0 * math.pi}
        bigDims = {"Tinput": 25.0, "Thot": 430.0, "area": 2.0 * math.pi}

        smallComponent = componentCls("TestComponent", componentMaterial, **smallDims)
        sameComponent = componentCls("TestComponent", componentMaterial, **sameDims)
        bigComponent = componentCls("TestComponent", componentMaterial, **bigDims)

        self.assertTrue(smallComponent < self.component)
        self.assertFalse(bigComponent < self.component)
        # strict less-than: an equal diameter is not "less"
        self.assertFalse(sameComponent < self.component)

    def test_fromComponent(self):
        # an UnshapedComponent built from a shaped one preserves its area
        circle = components.Circle("testCircle", "HT9", 25, 500, 1.0)
        unshaped = components.UnshapedComponent.fromComponent(circle)
        self.assertEqual(circle.getComponentArea(), unshaped.getComponentArea())
" "Original Mass: {}, Thermally Expanded Mass: {}\n{}" "".format(self.component, ht, masses[0], mass, report), ) def test_volumeAfterClearCache(self): """ Test volume after cache has been cleared. .. test:: Clear cache after a dimensions updated. :id: T_ARMI_COMP_VOL0 :tests: R_ARMI_COMP_VOL """ c = UnshapedVolumetricComponent("testComponent", "Custom", 0, 0, volume=1) self.assertAlmostEqual(c.getVolume(), 1, 6) c.clearCache() self.assertAlmostEqual(c.getVolume(), 1, 6) def test_densityConsistent(self): """Testing the Component matches quick hand calc.""" c = self.component # no volume defined if isinstance(c, (DerivedShape, UnshapedVolumetricComponent)): return elif isinstance(c, Component): return # basic density sanity test self.assertAlmostEqual(c.density(), c.getMass() / c.getVolume()) # test 2D expanding density if c.temperatureInC == c.inputTemperatureInC: self.assertAlmostEqual(c.density(), c.material.pseudoDensity(Tc=c.temperatureInC), delta=0.001) if not c.is3D: self.assertAlmostEqual( c.getArea() * c.parent.getHeight() * c.density(), self.component.getMass(), ) def test_density(self): """Testing the Component density gets the correct 3D material density.""" class StrangeMaterial(Material): """material designed to make the test easier to understand.""" def pseduoDensity(self, Tk=None, Tc=None): return 1.0 def density(self, Tk=None, Tc=None): return 3.0 c = Sphere( name="strangeBall", material=StrangeMaterial(), Tinput=200, Thot=500, od=1, id=0, mult=1, ) # we expect to see the 3D material density here self.assertEqual(c.density(), 3.0) class TestDerivedShape(TestShapedComponent): componentCls = DerivedShape componentMaterial = "Sodium" componentDims = {"Tinput": 25.0, "Thot": 400.0, "area": 1.0} def test_getBoundingCircleOuterDiameter(self): self.assertGreater(self.component.getBoundingCircleOuterDiameter(cold=True), 0.0) def test_computeVolume(self): """Test the computeVolume method on a number of components in a block. .. 
test:: Compute the volume of a DerivedShape inside solid shapes. :id: T_ARMI_COMP_FLUID :tests: R_ARMI_COMP_FLUID """ from armi.reactor.tests.test_blocks import buildSimpleFuelBlock # Calculate the total volume of the block b = buildSimpleFuelBlock() totalVolume = b.getVolume() # calculate the total volume by adding up all the components c = b.getComponent(flags.Flags.COOLANT) totalByParts = 0 for co in b.getComponents(): totalByParts += co.computeVolume() self.assertAlmostEqual(totalByParts, totalVolume) # test the computeVolume method on the one DerivedShape in this block self.assertAlmostEqual(c.computeVolume(), 1386.5232044586771) class TestDerivedShapeGetArea(unittest.TestCase): def test_getAreaColdTrue(self): """Prove that the DerivedShape.getArea() works at cold=True.""" # load one-block test reactor _o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") b = r.core[0][0] # ensure there is a DerivedShape in this Block shapes = set([type(c) for c in b]) self.assertIn(Circle, shapes) self.assertIn(DerivedShape, shapes) self.assertIn(Helix, shapes) self.assertIn(Hexagon, shapes) # prove that getArea works on the block level self.assertAlmostEqual(b.getArea(cold=True), b.getArea(cold=False), delta=1e-10) # prove that getArea preserves the sum of all the areas, even if there is a DerivedShape totalAreaCold = sum([c.getArea(cold=True) for c in b]) totalAreaHot = sum([c.getArea(cold=False) for c in b]) self.assertAlmostEqual(totalAreaCold, totalAreaHot, delta=1e-10) def test_getAreaTemp(self): """Prove that the DerivedShape.getArea() works for an arbitrary temperature.""" # load one-block test reactor _o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") b = r.core[0][0] b.clearCache() # ensure there is a DerivedShape in this Block shapes = set([type(c) for c in b]) self.assertIn(Circle, shapes) self.assertIn(DerivedShape, shapes) self.assertIn(Helix, shapes) self.assertIn(Hexagon, shapes) blockArea = 
b.getMaxArea() compArea = sum([c.getArea(Tc=300) for c in b if not isinstance(c, DerivedShape)]) comp = [c for c in b if isinstance(c, DerivedShape)][0] self.assertAlmostEqual(blockArea - compArea, comp.getComponentArea(Tc=300)) class TestComponentSort(unittest.TestCase): def setUp(self): self.components = [] pinComp = components.Circle("pin", "UZr", Tinput=273.0, Thot=273.0, od=0.08, mult=169.0) gapComp = components.Circle("gap", "Sodium", Tinput=273.0, Thot=273.0, id=0.08, od=0.08, mult=169.0) ductComp = components.Hexagon("duct", "HT9", Tinput=273.0, Thot=273.0, op=2.6, ip=2.0, mult=1.0) cladComp = components.Circle("clad", "HT9", Tinput=273.0, Thot=273.0, id=0.08, od=0.1, mult=169.0) wireComp = components.Helix( "wire", "HT9", Tinput=273.0, Thot=273.0, axialPitch=10.0, helixDiameter=0.11, od=0.01, mult=169.0, ) self.components = [ wireComp, cladComp, ductComp, pinComp, gapComp, ] def test_sorting(self): """Test that components are sorted as expected.""" sortedComps = sorted(self.components) currentMaxOd = 0.0 for c in sortedComps: self.assertGreaterEqual(c.getBoundingCircleOuterDiameter(cold=True), currentMaxOd) currentMaxOd = c.getBoundingCircleOuterDiameter(cold=True) self.assertEqual(sortedComps[1].name, "gap") self.assertEqual(sortedComps[2].name, "clad") class TestCircle(TestShapedComponent): """Test circle shaped component.""" componentCls = Circle _id = 5.0 _od = 10 _coldTemp = 25.0 componentDims = { "Tinput": _coldTemp, "Thot": 25.0, "od": _od, "id": _id, "mult": 1.5, } def test_copy(self): circle2 = copy.copy(self.component) self.assertIsNot(circle2, self.component) self.assertAlmostEqual(circle2.getDimension("id"), self.component.getDimension("id")) self.assertAlmostEqual(circle2.getDimension("od"), self.component.getDimension("od")) self.assertAlmostEqual(circle2.getDimension("mult"), self.component.getDimension("mult")) def test_circleExpansionWorks(self): """Test that when ARMI thermally expands a circle, mass is conserved. .. 
test:: Calculate thermal expansion. :id: T_ARMI_COMP_EXPANSION0 :tests: R_ARMI_COMP_EXPANSION """ hotTemp = 700.0 dLL = self.component.material.linearExpansionFactor(Tc=hotTemp, T0=self._coldTemp) ref = 1.0 + dLL cur = self.component.getThermalExpansionFactor(Tc=hotTemp) self.assertAlmostEqual(cur, ref) def test_getDimension(self): """Test getting component dimension at specific temperature. .. test:: Retrieve a dimension at a temperature. :id: T_ARMI_COMP_DIMS1 :tests: R_ARMI_COMP_DIMS .. test:: Calculate thermal expansion. :id: T_ARMI_COMP_EXPANSION1 :tests: R_ARMI_COMP_EXPANSION """ for hotTemp in range(200, 400, 25): ref = self._od * self.component.getThermalExpansionFactor(Tc=hotTemp) cur = self.component.getDimension("od", Tc=hotTemp) self.assertAlmostEqual(cur, ref) def test_thermallyExpands(self): """Test that ARMI can thermally expands a circle.""" self.assertTrue(self.component.THERMAL_EXPANSION_DIMS) def test_getBoundingCircleOuterDiam(self): ref = self._od cur = self.component.getBoundingCircleOuterDiameter(cold=True) self.assertAlmostEqual(ref, cur) def test_getCircleInnerDiameter(self): cur = self.component.getCircleInnerDiameter(cold=True) self.assertAlmostEqual(self._id, cur) def test_dimensionThermallyExpands(self): expandedDims = ["od", "id", "mult"] ref = [True, True, False] for i, d in enumerate(expandedDims): cur = d in self.component.THERMAL_EXPANSION_DIMS self.assertEqual(cur, ref[i]) def test_getArea(self): """Calculate area of circle. .. test:: Calculate area of circle. 
:id: T_ARMI_COMP_VOL1 :tests: R_ARMI_COMP_VOL """ # show we can calculate the area once od = self.component.getDimension("od") idd = self.component.getDimension("id") mult = self.component.getDimension("mult") ref = math.pi * ((od / 2) ** 2 - (idd / 2) ** 2) * mult cur = self.component.getArea() self.assertAlmostEqual(cur, ref) # show we can clear the cache, change the temp, and correctly re-calc the area for newTemp in range(500, 690, 19): self.component.clearCache() # re-calc area self.component.temperatureInC = newTemp od = self.component.getDimension("od", Tc=newTemp) idd = self.component.getDimension("id", Tc=newTemp) ref = math.pi * ((od / 2) ** 2 - (idd / 2) ** 2) * mult cur = self.component.getArea() self.assertAlmostEqual(cur, ref) def test_compInteractionsLinkingByDims(self): """Tests linking of Components by dimensions. The component ``gap``, representing the fuel-clad gap filled with Void, is defined with dimensions that depend on the fuel outer diameter and clad inner diameter. The :py:meth:`~armi.reactor.components.component.Component.resolveLinkedDims` method links the gap dimensions appropriately when the Component is constructed, and the test shows the area of the gap is calculated correctly based on the thermally-expanded dimensions of the fuel and clad Components. .. test:: Show the dimensions of a liquid Component can be defined to depend on the solid Components that bound it. 
:id: T_ARMI_COMP_FLUID1 :tests: R_ARMI_COMP_FLUID """ nPins = 217 fuelDims = {"Tinput": 25.0, "Thot": 430.0, "od": 0.9, "id": 0.0, "mult": nPins} cladDims = {"Tinput": 25.0, "Thot": 430.0, "od": 1.1, "id": 1.0, "mult": nPins} fuel = Circle("fuel", "UZr", **fuelDims) clad = Circle("clad", "HT9", **cladDims) gapDims = { "Tinput": 25.0, "Thot": 430.0, "od": "clad.id", "id": "fuel.od", "mult": nPins, } gapDims["components"] = {"clad": clad, "fuel": fuel} gap = Circle("gap", "Void", **gapDims) mult = gap.getDimension("mult") od = gap.getDimension("od") idd = gap.getDimension("id") ref = mult * math.pi * ((od / 2.0) ** 2 - (idd / 2.0) ** 2) cur = gap.getArea() self.assertAlmostEqual(cur, ref) def test_badComponentName(self): """This shows that resolveLinkedDims cannot support names with periods in them.""" nPins = 12 fuelDims = {"Tinput": 25.0, "Thot": 430.0, "od": 0.9, "id": 0.0, "mult": nPins} cladDims = {"Tinput": 25.0, "Thot": 430.0, "od": 1.1, "id": 1.0, "mult": nPins} fuel = Circle("fuel", "UZr", **fuelDims) clad = Circle("clad_4.2.3", "HT9", **cladDims) gapDims = { "Tinput": 25.0, "Thot": 430.0, "od": "clad_4.2.3.id", "id": "fuel.od", "mult": nPins, } gapDims["components"] = {"clad_4.2.3": clad, "fuel": fuel} with self.assertRaises(ValueError): _gap = Circle("gap", "Void", **gapDims) def test_compInteractionsLinkingBySubt(self): """Tests linking of components by subtraction.""" nPins = 217 gapDims = {"Tinput": 25.0, "Thot": 430.0, "od": 1.0, "id": 0.9, "mult": nPins} gap = Circle("gap", "Void", **gapDims) fuelDims = { "Tinput": 25.0, "Thot": 430.0, "od": 0.9, "id": 0.0, "mult": nPins, "modArea": "gap.sub", } fuel = Circle("fuel", "UZr", components={"gap": gap}, **fuelDims) gapArea = ( gap.getDimension("mult") * math.pi * ((gap.getDimension("od") / 2.0) ** 2 - (gap.getDimension("id") / 2.0) ** 2) ) fuelArea = ( fuel.getDimension("mult") * math.pi * ((fuel.getDimension("od") / 2.0) ** 2 - (fuel.getDimension("id") / 2.0) ** 2) ) ref = fuelArea - gapArea cur = 
fuel.getArea() self.assertAlmostEqual(cur, ref) def test_getNumberDensities(self): """Test that demonstrates that number densities can be retrieved on from component.""" self.component.p.numberDensities = np.ones(1, dtype=np.float64) self.component.p.nuclides = np.array(["NA23"], dtype="S6") self.assertEqual(self.component.getNumberDensity("NA23"), 1.0) def test_changeNumberDensities(self): """Test that demonstrates that the number densities on a component can be modified.""" self.component.p.numberDensities = np.ones(1, dtype=np.float64) self.component.p.nuclides = np.array(["NA23"], dtype="S6") self.component.p.detailedNDens = [1.0] self.component.p.pinNDens = [1.0] self.assertEqual(self.component.getNumberDensity("NA23"), 1.0) self.component.changeNDensByFactor(3.0) self.assertEqual(self.component.getNumberDensity("NA23"), 3.0) self.assertEqual(self.component.p.detailedNDens[0], 3.0) self.assertEqual(self.component.p.pinNDens[0], 3.0) def test_fuelMass(self): nominalMass = self.component.getMass() self.component.p.flags = flags.Flags.FUEL self.assertEqual(self.component.getFuelMass(), nominalMass) self.component.p.flags = flags.Flags.MODERATOR self.assertEqual(self.component.getFuelMass(), 0.0) def test_theoreticalDensitySetter(self): """Ensure only fraction theoretical densities are supported.""" self.assertEqual(self.component.p.theoreticalDensityFrac, 1) with self.assertRaises(ValueError): self.component.p.theoreticalDensityFrac = 2.0 self.assertEqual(self.component.p.theoreticalDensityFrac, 1) self.component.p.theoreticalDensityFrac = 0.2 self.assertEqual(self.component.p.theoreticalDensityFrac, 0.2) with self.assertRaises(ValueError): self.component.p.theoreticalDensityFrac = -1.0 self.assertEqual(self.component.p.theoreticalDensityFrac, 0.2) self.component.p.theoreticalDensityFrac = 1.0 self.assertEqual(self.component.p.theoreticalDensityFrac, 1) self.component.p.theoreticalDensityFrac = 0.0 self.assertEqual(self.component.p.theoreticalDensityFrac, 0) 
class TestComponentExpansion(unittest.TestCase):
    """Walk-through tests of thermal expansion bookkeeping on Circle components."""

    # shared temperatures (C) and the cold outer diameter used by every sub-test
    tCold = 25
    tWarm = 50
    tHot = 500
    coldOuterDiameter = 1.0

    def test_HT9Expansion(self):
        """Run the expansion test battery for HT9 steel."""
        self.runExpansionTests(mat="HT9", isotope="FE")

    def test_UZrExpansion(self):
        """Run the expansion test battery for U-Zr fuel."""
        self.runExpansionTests(mat="UZr", isotope="U235")

    def test_B4CExpansion(self):
        """Run the expansion test battery for B4C absorber."""
        self.runExpansionTests(mat="B4C", isotope="B10")

    def runExpansionTests(self, mat: str, isotope: str):
        """Drive all three conservation checks for one material/isotope pair."""
        self.componentMassIndependentOfInputTemp(mat)
        self.expansionConservationHotHeightDefined(mat, isotope)
        self.expansionConservationColdHeightDefined(mat)

    def componentMassIndependentOfInputTemp(self, mat: str):
        """Two components with equivalent hot states must match regardless of Tinput."""
        circle1 = Circle("circle", mat, self.tCold, self.tHot, self.coldOuterDiameter)
        # pick the input dimension to get the same hot component
        hotterDim = self.coldOuterDiameter * (1 + circle1.material.linearExpansionFactor(self.tCold + 200, self.tCold))
        circle2 = Circle("circle", mat, self.tCold + 200, self.tHot, hotterDim)
        self.assertAlmostEqual(circle1.getDimension("od"), circle2.getDimension("od"))
        self.assertAlmostEqual(circle1.getArea(), circle2.getArea())
        self.assertAlmostEqual(circle1.density(), circle2.density())

    def expansionConservationHotHeightDefined(self, mat: str, isotope: str):
        """
        Demonstrate tutorial for how to expand and relationships conserved at during expansion.

        Notes
        -----
        - height taken as hot height and show how quantity is conserved with
          inputHeightsConsideredHot = True (the default)
        """
        hotHeight = 1.0

        circle1 = Circle("circle", mat, self.tCold, self.tWarm, self.coldOuterDiameter)
        circle2 = Circle("circle", mat, self.tCold, self.tHot, self.coldOuterDiameter)

        # mass density is proportional to Fe number density and derived from
        # all the number densities and atomic masses
        self.assertAlmostEqual(
            circle1.getNumberDensity(isotope) / circle2.getNumberDensity(isotope),
            circle1.density() / circle2.density(),
        )

        # the colder one has more because it is the same cold outer diameter but it would be taller
        # at the same temperature
        mass1 = circle1.density() * circle1.getArea() * hotHeight
        mass2 = circle2.density() * circle2.getArea() * hotHeight
        self.assertGreater(mass1, mass2)

        # they are off by factor of thermal exp
        self.assertAlmostEqual(
            mass1 * circle1.getThermalExpansionFactor(),
            mass2 * circle2.getThermalExpansionFactor(),
        )

        # material.pseudoDensity is the 2D density of a material
        # material.density is true density and not equal in this case
        for circle in [circle1, circle2]:
            # 2D density is not equal after application of coldMatAxialExpansionFactor
            # which happens during construction
            self.assertNotAlmostEqual(
                circle.density(),
                circle.material.pseudoDensity(Tc=circle.temperatureInC),
            )
            # 2D density is off by the material thermal exp factor
            percent = circle.material.linearExpansionPercent(Tc=circle.temperatureInC)
            thermalExpansionFactorFromColdMatTemp = 1 + percent / 100
            self.assertAlmostEqual(
                circle.density() * thermalExpansionFactorFromColdMatTemp,
                circle.material.pseudoDensity(Tc=circle.temperatureInC),
            )
            self.assertAlmostEqual(
                circle.density(),
                circle.material.density(Tc=circle.temperatureInC),
            )

        # brief 2D expansion with set temp to show mass is conserved hot height would come from
        # block value
        warmMass = circle1.density() * circle1.getArea() * hotHeight
        circle1.setTemperature(self.tHot)
        hotMass = circle1.density() * circle1.getArea() * hotHeight
        self.assertAlmostEqual(warmMass, hotMass)
        circle1.setTemperature(self.tWarm)

        # Change temp to circle 2 temp to show equal to circle2 and then change back to show
        # recoverable to original values
        oldArea = circle1.getArea()
        initialDens = circle1.density()

        # when block.setHeight is called (which effectively changes component height)
        # component.setNumberDensity is called (for solid isotopes) to adjust the number density so
        # that now the 2D expansion will be approximated/expanded around the hot temp which is akin
        # to these adjustments
        heightFactor = circle1.getHeightFactor(self.tHot)
        circle1.adjustDensityForHeightExpansion(self.tHot)
        # apply temp at new height
        circle1.setTemperature(self.tHot)

        # now its density is same as hot component
        self.assertAlmostEqual(circle1.density(), circle2.density())

        # show that mass is conserved after expansion
        circle1NewHotHeight = hotHeight * heightFactor
        self.assertAlmostEqual(mass1, circle1.density() * circle1.getArea() * circle1NewHotHeight)

        self.assertAlmostEqual(
            circle1.density(),
            circle1.material.density(Tc=circle1.temperatureInC),
        )
        # change back to old temp
        circle1.adjustDensityForHeightExpansion(self.tWarm)
        circle1.setTemperature(self.tWarm)

        # check for consistency
        self.assertAlmostEqual(initialDens, circle1.density())
        self.assertAlmostEqual(oldArea, circle1.getArea())
        self.assertAlmostEqual(mass1, circle1.density() * circle1.getArea() * hotHeight)

    def expansionConservationColdHeightDefined(self, mat: str):
        """
        Demonstrate that material is conserved at during expansion.

        Notes
        -----
        - height taken as cold height and show how quantity is conserved with
          inputHeightsConsideredHot = False
        """
        coldHeight = 1.0
        circle1 = Circle("circle", mat, self.tCold, self.tWarm, self.coldOuterDiameter)
        circle2 = Circle("circle", mat, self.tCold, self.tHot, self.coldOuterDiameter)
        # same as 1 but we will make like 2
        circle1AdjustTo2 = Circle("circle", mat, self.tCold, self.tWarm, self.coldOuterDiameter)

        # make it hot like 2
        circle1AdjustTo2.adjustDensityForHeightExpansion(self.tHot)
        circle1AdjustTo2.setTemperature(self.tHot)
        # check that its like 2
        self.assertAlmostEqual(circle2.density(), circle1AdjustTo2.density())
        self.assertAlmostEqual(circle2.getArea(), circle1AdjustTo2.getArea())

        for circle in [circle1, circle2, circle1AdjustTo2]:
            self.assertAlmostEqual(
                circle.density(),
                circle.material.density(Tc=circle.temperatureInC),
            )
            # total mass consistent between hot and cold. Hot height will be taller
            hotHeight = coldHeight * circle.getThermalExpansionFactor()
            self.assertAlmostEqual(
                coldHeight * circle.getArea(cold=True) * circle.material.density(Tc=circle.inputTemperatureInC),
                hotHeight * circle.getArea() * circle.density(),
            )


class TestTriangle(TestShapedComponent):
    """Test triangle shaped component."""

    componentCls = Triangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "base": 3.0,
        "height": 2.0,
        "mult": 30,
    }

    def test_getArea(self):
        """Calculate area of triangle.

        .. test:: Calculate area of triangle.
            :id: T_ARMI_COMP_VOL2
            :tests: R_ARMI_COMP_VOL

        .. test:: Triangle shaped component
            :id: T_ARMI_COMP_SHAPES1
            :tests: R_ARMI_COMP_SHAPES
        """
        b = self.component.getDimension("base")
        h = self.component.getDimension("height")
        mult = self.component.getDimension("mult")
        ref = mult * 0.5 * b * h
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expand a triangle."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        # linear dims expand; mult must not
        expandedDims = ["base", "height", "mult"]
        ref = [True, True, False]
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])


class TestRectangle(TestShapedComponent):
    """Test rectangle shaped component."""

    componentCls = Rectangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 6.0,
        "lengthInner": 4.0,
        "widthOuter": 5.0,
        "widthInner": 3.0,
        "mult": 2,
    }

    def test_negativeArea(self):
        # inner dims exceed outer dims: allowed for Void, an error for solids
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "lengthOuter": 1.0,
            "lengthInner": 2.0,
            "widthOuter": 5.0,
            "widthInner": 6.0,
            "mult": 2,
        }
        refArea = dims["mult"] * (dims["lengthOuter"] * dims["widthOuter"] - dims["lengthInner"] * dims["widthInner"])
        negativeRectangle = Rectangle("test", "Void", **dims)
        self.assertAlmostEqual(negativeRectangle.getArea(), refArea)
        with self.assertRaises(ArithmeticError):
            negativeRectangle = Rectangle("test", "UZr", **dims)
            negativeRectangle.getArea()

    def test_getBoundingCircleOuterDiam(self):
        """Get outer diameter bounding circle.

        .. test:: Rectangle shaped component
            :id: T_ARMI_COMP_SHAPES2
            :tests: R_ARMI_COMP_SHAPES
        """
        # bounding diameter is the outer rectangle's diagonal: sqrt(6^2 + 5^2)
        ref = math.sqrt(61.0)
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

        # verify the area of the rectangle is correct
        ref = self.componentDims["lengthOuter"] * self.componentDims["widthOuter"]
        ref -= self.componentDims["lengthInner"] * self.componentDims["widthInner"]
        ref *= self.componentDims["mult"]
        cur = self.component.getArea(cold=True)
        self.assertAlmostEqual(cur, ref)

    def test_getCircleInnerDiam(self):
        cur = self.component.getCircleInnerDiameter(cold=True)
        self.assertAlmostEqual(math.sqrt(25.0), cur)

    def test_getArea(self):
        """Calculate area of rectangle.

        .. test:: Calculate area of rectangle.
            :id: T_ARMI_COMP_VOL3
            :tests: R_ARMI_COMP_VOL
        """
        outerL = self.component.getDimension("lengthOuter")
        innerL = self.component.getDimension("lengthInner")
        outerW = self.component.getDimension("widthOuter")
        innerW = self.component.getDimension("widthInner")
        mult = self.component.getDimension("mult")
        ref = mult * (outerL * outerW - innerL * innerW)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expand a rectangle."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = [
            "lengthInner",
            "lengthOuter",
            "widthInner",
            "widthOuter",
            "mult",
        ]
        ref = [True, True, True, True, False]
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])


class TestSolidRectangle(TestShapedComponent):
    """Test solid rectangle shaped component."""

    componentCls = SolidRectangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 5.0,
        "widthOuter": 5.0,
        "mult": 1,
    }

    def test_getBoundingCircleOuterDiam(self):
        """Test get bounding circle of the outer diameter."""
        ref = math.sqrt(50)
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        """Calculate area of solid rectangle.

        .. test:: Calculate area of solid rectangle.
            :id: T_ARMI_COMP_VOL4
            :tests: R_ARMI_COMP_VOL
        """
        outerL = self.component.getDimension("lengthOuter")
        outerW = self.component.getDimension("widthOuter")
        mult = self.component.getDimension("mult")
        ref = mult * (outerL * outerW)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expand a solid rectangle."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["lengthOuter", "widthOuter", "mult"]
        ref = [True, True, False]
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])


class TestSquare(TestShapedComponent):
    """Test square shaped component."""

    componentCls = Square
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "widthOuter": 3.0,
        "widthInner": 2.0,
        "mult": 1,
    }

    def test_negativeArea(self):
        # inner width exceeds outer width: allowed for Void, an error for solids
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "widthOuter": 1.0,
            "widthInner": 5.0,
            "mult": 1,
        }
        refArea = dims["mult"] * (dims["widthOuter"] * dims["widthOuter"] - dims["widthInner"] * dims["widthInner"])
        negativeRectangle = Square("test", "Void", **dims)
        self.assertAlmostEqual(negativeRectangle.getArea(), refArea)
        with self.assertRaises(ArithmeticError):
            negativeRectangle = Square("test", "UZr", **dims)
            negativeRectangle.getArea()

    def test_getBoundingCircleOuterDiam(self):
        """Get bounding circle outer diameter.

        .. test:: Square shaped component
            :id: T_ARMI_COMP_SHAPES3
            :tests: R_ARMI_COMP_SHAPES
        """
        ref = math.sqrt(18.0)
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

        # verify the area of the circle is correct
        ref = self.componentDims["widthOuter"] ** 2 - self.componentDims["widthInner"] ** 2
        cur = self.component.getComponentArea(cold=True)
        self.assertAlmostEqual(cur, ref)

    def test_getCircleInnerDiam(self):
        ref = math.sqrt(8.0)
        cur = self.component.getCircleInnerDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        """Calculate area of square.

        .. test:: Calculate area of square.
            :id: T_ARMI_COMP_VOL5
            :tests: R_ARMI_COMP_VOL
        """
        outerW = self.component.getDimension("widthOuter")
        innerW = self.component.getDimension("widthInner")
        mult = self.component.getDimension("mult")
        ref = mult * (outerW * outerW - innerW * innerW)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expand a square."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["widthOuter", "widthInner", "mult"]
        ref = [True, True, False]
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])


class TestCube(TestShapedComponent):
    """Test cube shaped component."""

    componentCls = Cube
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 5.0,
        "lengthInner": 4.0,
        "widthOuter": 5.0,
        "widthInner": 3.0,
        "heightOuter": 20.0,
        "heightInner": 10.0,
        "mult": 2,
    }

    def test_negativeVolume(self):
        # inner dims exceed outer dims: allowed for Void, an error for solids
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "lengthOuter": 5.0,
            "lengthInner": 20.0,
            "widthOuter": 5.0,
            "widthInner": 30.0,
            "heightOuter": 20.0,
            "heightInner": 30.0,
            "mult": 2,
        }
        refVolume = dims["mult"] * (
            dims["lengthOuter"] * dims["widthOuter"] * dims["heightOuter"]
            - dims["lengthInner"] * dims["widthInner"] * dims["heightInner"]
        )
        negativeCube = Cube("test", "Void", **dims)
self.assertAlmostEqual(negativeCube.getVolume(), refVolume) with self.assertRaises(ArithmeticError): negativeCube = Cube("test", "UZr", **dims) negativeCube.getVolume() def test_getVolume(self): """Calculate area of cube. .. test:: Calculate area of cube. :id: T_ARMI_COMP_VOL6 :tests: R_ARMI_COMP_VOL """ lengthO = self.component.getDimension("lengthOuter") widthO = self.component.getDimension("widthOuter") heightO = self.component.getDimension("heightOuter") lengthI = self.component.getDimension("lengthInner") widthI = self.component.getDimension("widthInner") heightI = self.component.getDimension("heightInner") mult = self.component.getDimension("mult") ref = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI) cur = self.component.getVolume() self.assertAlmostEqual(cur, ref) def test_thermallyExpands(self): """Test that ARMI can thermally expands a cube.""" self.assertFalse(self.component.THERMAL_EXPANSION_DIMS) class TestHexagon(TestShapedComponent): """Test hexagon shaped component.""" componentCls = Hexagon componentDims = {"Tinput": 25.0, "Thot": 430.0, "op": 10.0, "ip": 5.0, "mult": 1} def test_getBoundingCircleOuterDiam(self): ref = 2.0 * 10 / math.sqrt(3) cur = self.component.getBoundingCircleOuterDiameter(cold=True) self.assertAlmostEqual(ref, cur) def test_getCircleInnerDiameter(self): ref = 2.0 * 5.0 / math.sqrt(3) cur = self.component.getCircleInnerDiameter(cold=True) self.assertAlmostEqual(ref, cur) def test_getArea(self): """Calculate area of hexagon. .. test:: Calculate area of hexagon. 
:id: T_ARMI_COMP_VOL7 :tests: R_ARMI_COMP_VOL """ cur = self.component.getArea() mult = self.component.getDimension("mult") op = self.component.getDimension("op") ip = self.component.getDimension("ip") ref = math.sqrt(3.0) / 2.0 * (op**2 - ip**2) * mult self.assertAlmostEqual(cur, ref) def test_thermallyExpands(self): """Test that ARMI can thermally expands a hexagon.""" self.assertTrue(self.component.THERMAL_EXPANSION_DIMS) def test_dimensionThermallyExpands(self): expandedDims = ["op", "ip", "mult"] ref = [True, True, False] for i, d in enumerate(expandedDims): cur = d in self.component.THERMAL_EXPANSION_DIMS self.assertEqual(cur, ref[i]) class TestFilletedHexagon(TestShapedComponent): """Test FilletedHexagon shaped component.""" componentCls = FilletedHexagon componentDims = { "Tinput": 25.0, "Thot": 430.0, "op": 10.0, "ip": 5.0, "mult": 1, "oR": 0.2, "iR": 0.1, } def test_getBoundingCircleOuterDiameter(self): ref = 2.0 * 10 / math.sqrt(3) cur = self.component.getBoundingCircleOuterDiameter(cold=True) self.assertAlmostEqual(ref, cur) def test_getCircleInnerDiameter(self): ref = 2.0 * 5.0 / math.sqrt(3) cur = self.component.getCircleInnerDiameter(cold=True) self.assertAlmostEqual(ref, cur) def test_getComponentArea(self): cur = self.component.getComponentArea() op = self.component.getDimension("op") ip = self.component.getDimension("ip") oR = self.component.getDimension("oR") iR = self.component.getDimension("iR") mult = self.component.getDimension("mult") ref = mult * (FilletedHexagon._area(op, oR) - FilletedHexagon._area(ip, iR)) self.assertAlmostEqual(cur, ref) def test_thermallyExpands(self): """Test that ARMI can thermally expands a Hexagon.""" self.assertTrue(self.component.THERMAL_EXPANSION_DIMS) def test_dimensionThermallyExpands(self): expandedDims = ["op", "ip", "iR", "oR", "mult"] ref = [True, True, True, True, False] for i, d in enumerate(expandedDims): cur = d in self.component.THERMAL_EXPANSION_DIMS self.assertEqual(cur, ref[i]) def 
test_filletedMatchesNormal(self): """Prove that if the radius of curvature is 0.0, FilletedHexagon is just a hexagon.""" for ip in np.arange(0.1, 1, 0.1): for op in np.arange(1.1, 5, 0.4): componentDims = { "Tinput": 25.0, "Thot": 430.0, "op": op, "ip": ip, "mult": 1.0, } f = FilletedHexagon("xyz", "HT9", **componentDims) h = Hexagon("xyz", "HT9", **componentDims) self.assertAlmostEqual(f.getComponentArea(), h.getComponentArea(), delta=1e-7) self.assertGreaterEqual(h.getArea(), f.getArea() - 1e-7) def test_filletedBecomesACircle(self): """Prove that as the radius of curvature becomes D/2, the shape becomes a circle.""" for op in np.arange(1.0, 5.0, 0.5): componentDims = { "Tinput": 425.0, "Thot": 425.0, "op": op, "ip": 0.0, "oR": op / 2.0, "iR": 0.0, "mult": 1.0, } f = FilletedHexagon("circleHex", "HT9", **componentDims) self.assertAlmostEqual(f.getComponentArea(), math.pi * (op / 2.0) ** 2, delta=1e-7) class TestHoledHexagon(TestShapedComponent): """Test holed hexagon shaped component.""" componentCls = HoledHexagon componentDims = { "Tinput": 25.0, "Thot": 430.0, "op": 16.5, "holeOD": 3.6, "nHoles": 7, "mult": 1.0, } def test_getBoundingCircleOuterDiameter(self): ref = 2.0 * 16.5 / math.sqrt(3) cur = self.component.getBoundingCircleOuterDiameter(cold=True) self.assertAlmostEqual(ref, cur) def test_getCircleInnerDiameter(self): ref = 0 # there are multiple holes, so the function should return 0 cur = self.component.getCircleInnerDiameter(cold=True) self.assertEqual(ref, cur) # make and test another one with just 1 hole simpleHoledHexagon = HoledHexagon( "hex", "Void", self.componentDims["Tinput"], self.componentDims["Thot"], self.componentDims["op"], self.componentDims["holeOD"], nHoles=1, ) self.assertEqual( self.componentDims["holeOD"], simpleHoledHexagon.getCircleInnerDiameter(cold=True), ) def test_getArea(self): """Calculate area of holed hexagon. .. test:: Calculate area of holed hexagon. 
:id: T_ARMI_COMP_VOL8 :tests: R_ARMI_COMP_VOL """ op = self.component.getDimension("op") odHole = self.component.getDimension("holeOD") nHoles = self.component.getDimension("nHoles") mult = self.component.getDimension("mult") hexarea = math.sqrt(3.0) / 2.0 * (op**2) holeArea = nHoles * math.pi * ((odHole / 2.0) ** 2) ref = mult * (hexarea - holeArea) cur = self.component.getArea() self.assertAlmostEqual(cur, ref) def test_thermallyExpands(self): """Test that ARMI can thermally expands a holed hexagon.""" self.assertTrue(self.component.THERMAL_EXPANSION_DIMS) def test_dimensionThermallyExpands(self): expandedDims = ["op", "holeOD", "mult"] ref = [True, True, False] for i, d in enumerate(expandedDims): cur = d in self.component.THERMAL_EXPANSION_DIMS self.assertEqual(cur, ref[i]) class TestHexHoledCircle(TestShapedComponent): componentCls = HexHoledCircle componentDims = { "Tinput": 25.0, "Thot": 430.0, "od": 16.5, "holeOP": 3.6, "mult": 1.0, } def test_getCircleInnerDiameter(self): simpleHexHoledCircle = HexHoledCircle( "Circle", "Void", self.componentDims["Tinput"], self.componentDims["Thot"], self.componentDims["od"], self.componentDims["holeOP"], ) self.assertEqual( self.componentDims["holeOP"], simpleHexHoledCircle.getCircleInnerDiameter(cold=True), ) def test_getArea(self): """Calculate area of hex holed circle. .. test:: Calculate area of hex holed circle. 
:id: T_ARMI_COMP_VOL9 :tests: R_ARMI_COMP_VOL """ od = self.component.getDimension("od") holeOP = self.component.getDimension("holeOP") mult = self.component.getDimension("mult") hexarea = math.sqrt(3.0) / 2.0 * (holeOP**2) holeArea = math.pi * ((od / 2.0) ** 2) ref = mult * (holeArea - hexarea) cur = self.component.getArea() self.assertAlmostEqual(cur, ref) def test_thermallyExpands(self): """Test that ARMI can thermally expands a holed hexagon.""" self.assertTrue(self.component.THERMAL_EXPANSION_DIMS) def test_dimensionThermallyExpands(self): expandedDims = ["od", "holeOP", "mult"] ref = [True, True, False] for i, d in enumerate(expandedDims): cur = d in self.component.THERMAL_EXPANSION_DIMS self.assertEqual(cur, ref[i]) class TestHoledRectangle(TestShapedComponent): """Tests HoledRectangle, and provides much support for HoledSquare test.""" componentCls = HoledRectangle componentDims = { "Tinput": 25.0, "Thot": 430.0, "lengthOuter": 16.0, "widthOuter": 10.0, "holeOD": 3.6, "mult": 1.0, } dimsToTestExpansion = ["lengthOuter", "widthOuter", "holeOD", "mult"] def setUp(self): TestShapedComponent.setUp(self) self.setClassDims() def setClassDims(self): # This enables subclassing testing for square self.length = self.component.getDimension("lengthOuter") self.width = self.component.getDimension("widthOuter") def test_getBoundingCircleOuterDiameter(self): # hypotenuse ref = (self.length**2 + self.width**2) ** 0.5 cur = self.component.getBoundingCircleOuterDiameter() self.assertAlmostEqual(ref, cur) def test_getCircleInnerDiameter(self): ref = self.componentDims["holeOD"] cur = self.component.getCircleInnerDiameter(cold=True) self.assertEqual(ref, cur) def test_getArea(self): """Calculate area of holed rectangle. .. test:: Calculate area of holed rectangle. 
        :id: T_ARMI_COMP_VOL10
        :tests: R_ARMI_COMP_VOL
        """
        rectArea = self.length * self.width
        odHole = self.component.getDimension("holeOD")
        mult = self.component.getDimension("mult")
        holeArea = math.pi * ((odHole / 2.0) ** 2)
        ref = mult * (rectArea - holeArea)
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        ref = [True] * len(self.dimsToTestExpansion)
        ref[-1] = False  # mult shouldn't expand
        for i, d in enumerate(self.dimsToTestExpansion):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])


class TestHoledSquare(TestHoledRectangle):
    """Test holed square shaped component."""

    componentCls = HoledSquare
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "widthOuter": 16.0,
        "holeOD": 3.6,
        "mult": 1.0,
    }

    dimsToTestExpansion = ["widthOuter", "holeOD", "mult"]

    def setClassDims(self):
        # This enables subclassing testing for square
        self.width = self.length = self.component.getDimension("widthOuter")

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_getCircleInnerDiameter(self):
        ref = self.componentDims["holeOD"]
        cur = self.component.getCircleInnerDiameter(cold=True)
        self.assertEqual(ref, cur)


class TestHelix(TestShapedComponent):
    """Test helix shaped component."""

    componentCls = Helix
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "od": 0.25,
        "axialPitch": 1.0,
        "mult": 1.5,
        "helixDiameter": 2.0,
        "id": 0.1,
    }

    def test_getBoundingCircleOuterDiameter(self):
        # helixDiameter + wire od
        ref = 2.0 + 0.25
        cur = self.component.getBoundingCircleOuterDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getCircleInnerDiameter(self):
        # helixDiameter - wire od
        ref = 2.0 - 0.25
        cur = self.component.getCircleInnerDiameter(cold=True)
        self.assertAlmostEqual(ref, cur)

    def test_getArea(self):
        """Calculate area of helix.

        .. test:: Calculate area of helix.
        :id: T_ARMI_COMP_VOL11
        :tests: R_ARMI_COMP_VOL
        """
        cur = self.component.getArea()
        axialPitch = self.component.getDimension("axialPitch")
        helixDiameter = self.component.getDimension("helixDiameter")
        innerDiameter = self.component.getDimension("id")
        outerDiameter = self.component.getDimension("od")
        mult = self.component.getDimension("mult")
        # annular wire cross-section scaled by the helical path-length factor
        c = axialPitch / (2.0 * math.pi)
        helixFactor = math.sqrt((helixDiameter / 2.0) ** 2 + c**2) / c
        ref = mult * math.pi * (outerDiameter**2 / 4.0 - innerDiameter**2 / 4.0) * helixFactor
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        expandedDims = ["od", "id", "axialPitch", "helixDiameter", "mult"]
        ref = [True, True, True, True, False]
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])

    def test_validParameters(self):
        """Testing the Helix class performs as expected with various inputs."""
        # stupid/simple inputs
        h = Helix("thing", "Cu", 0, 0, 1, 1, 1)
        self.assertEqual(h.getDimension("axialPitch"), 1)

        # standard case / inputs ordered well
        h = Helix(
            "what",
            "Cu",
            Tinput=25.0,
            Thot=425.0,
            id=0.1,
            od=0.35,
            mult=1.0,
            axialPitch=1.123,
            helixDiameter=1.5,
        )
        self.assertTrue(1.123 < h.getDimension("axialPitch") < 1.15)

        # inputs ordered crazy
        h = Helix(
            material="Cu",
            id=0.1,
            mult=1.0,
            Tinput=25.0,
            Thot=425.0,
            axialPitch=1.123,
            name="stuff",
            od=0.35,
            helixDiameter=1.5,
        )
        self.assertTrue(1.123 < h.getDimension("axialPitch") < 1.15)

        # missing helixDiameter input
        with self.assertRaises(TypeError):
            h = Helix(
                name="helix",
                material="Cu",
                Tinput=25.0,
                Thot=425.0,
                id=0.1,
                od=0.35,
                mult=1.0,
                axialPitch=1.123,
            )


class TestSphere(TestShapedComponent):
    componentCls = Sphere
    componentDims = {"Tinput": 25.0, "Thot": 430.0, "od": 1.0, "id": 0.0, "mult": 3}

    def test_getVolume(self):
        """Calculate area of sphere.

        .. test:: Calculate volume of sphere.
        :id: T_ARMI_COMP_VOL12
        :tests: R_ARMI_COMP_VOL
        """
        od = self.component.getDimension("od")
        idd = self.component.getDimension("id")
        mult = self.component.getDimension("mult")
        # volume of a (possibly hollow) sphere shell
        ref = mult * 4.0 / 3.0 * math.pi * ((od / 2.0) ** 3 - (idd / 2.0) ** 3)
        cur = self.component.getVolume()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)


class TestRadialSegment(TestShapedComponent):
    componentCls = RadialSegment
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "inner_radius": 110,
        "outer_radius": 170,
        "height": 160,
        "mult": 1,
    }

    def test_getVolume(self):
        mult = self.component.getDimension("mult")
        outerRad = self.component.getDimension("outer_radius")
        innerRad = self.component.getDimension("inner_radius")
        outerTheta = self.component.getDimension("outer_theta")
        innerTheta = self.component.getDimension("inner_theta")
        height = self.component.getDimension("height")
        # annulus area scaled by the azimuthal fraction swept by the segment
        radialArea = math.pi * (outerRad**2 - innerRad**2)
        aziFraction = (outerTheta - innerTheta) / (math.pi * 2.0)
        ref = mult * radialArea * aziFraction * height
        cur = self.component.getVolume()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)

    def test_getBoundingCircleOuterDiameter(self):
        self.assertEqual(self.component.getBoundingCircleOuterDiameter(cold=True), 340.0)


class TestDifferentialRadialSegment(TestShapedComponent):
    componentCls = DifferentialRadialSegment
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "inner_radius": 110,
        "radius_differential": 60,
        "inner_axial": 60,
        "height": 160,
    }

    def test_getVolume(self):
        mult = self.component.getDimension("mult")
        outerRad = self.component.getDimension("outer_radius")
        innerRad = self.component.getDimension("inner_radius")
        outerTheta = self.component.getDimension("outer_theta")
        innerTheta = self.component.getDimension("inner_theta")
        height = self.component.getDimension("height")
        radialArea = math.pi * (outerRad**2 - innerRad**2)
        aziFraction = (outerTheta - innerTheta) / (math.pi * 2.0)
        ref = mult * radialArea * aziFraction * height
        cur = self.component.getVolume()
        self.assertAlmostEqual(cur, ref)

    def test_updateDims(self):
        """
        Test Update dimensions.

        .. test:: Dimensions can be updated.
            :id: T_ARMI_COMP_VOL13
            :tests: R_ARMI_COMP_VOL
        """
        self.assertEqual(self.component.getDimension("inner_radius"), 110)
        self.assertEqual(self.component.getDimension("radius_differential"), 60)
        self.component.updateDims()
        # outer dims are derived from inner + differential by updateDims()
        self.assertEqual(self.component.getDimension("outer_radius"), 170)
        self.assertEqual(self.component.getDimension("outer_axial"), 220)
        self.assertEqual(self.component.getDimension("outer_theta"), 2 * math.pi)

    def test_thermallyExpands(self):
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)

    def test_getBoundingCircleOuterDiameter(self):
        self.assertEqual(self.component.getBoundingCircleOuterDiameter(cold=True), 340)


class TestMaterialAdjustments(unittest.TestCase):
    """Tests to make sure enrichment and mass fractions can be adjusted properly."""

    def setUp(self):
        dims = {"Tinput": 25.0, "Thot": 600.0, "od": 10.0, "id": 5.0, "mult": 1.0}
        self.fuel = Circle("fuel", "UZr", **dims)

        class FakeBlock:
            # minimal stand-in parent so the component can resolve a reactor ancestor
            reactor = Reactor("testMatReactor", None)

            def getHeight(self):
                # unit height
                return 1.0

            def getSymmetryFactor(self):
                return 1.0

            def getAncestor(self, fn):
                return self.reactor

        self.fuel.parent = FakeBlock()

    def test_setMassFrac(self):
        """Make sure we can set a mass fraction properly."""
        target35 = 0.2
        self.fuel.setMassFrac("U235", target35)
        self.assertAlmostEqual(self.fuel.getMassFrac("U235"), target35)

    def test_setMassFracOnComponentMaterial(self):
        """Checks for valid and invalid mass fraction assignments on a component's material."""
        # Negative value is not acceptable.
        with self.assertRaises(ValueError):
            self.fuel.material.setMassFrac("U235", -0.1)

        # Greater than 1.0 value is not acceptable.
        with self.assertRaises(ValueError):
            self.fuel.material.setMassFrac("U235", 1.1)

        # String is not acceptable.
with self.assertRaises(TypeError): self.fuel.material.setMassFrac("U235", "") # `NoneType` is not acceptable. with self.assertRaises(TypeError): self.fuel.material.setMassFrac("U235", None) # Zero is acceptable self.fuel.material.setMassFrac("U235", 0.0) self.assertAlmostEqual(self.fuel.material.getMassFrac("U235"), 0.0) # One is acceptable self.fuel.material.setMassFrac("U235", 1.0) self.assertAlmostEqual(self.fuel.material.getMassFrac("U235"), 1.0) def test_adjustMassFrac_invalid(self): with self.assertRaises(ValueError): self.fuel.adjustMassFrac(nuclideToAdjust="ZR", val=-0.23) with self.assertRaises(ValueError): self.fuel.adjustMassFrac(nuclideToAdjust="ZR", val=1.12) alwaysFalse = lambda a: False self.fuel.parent = None self.assertIsNone(self.fuel.getAncestorAndDistance(alwaysFalse)) def test_adjustMassFrac_U235(self): zrMass = self.fuel.getMass("ZR") uMass = self.fuel.getMass("U") zrFrac = zrMass / (uMass + zrMass) enrichmentFrac = 0.3 u235Frac = enrichmentFrac * uMass / (uMass + zrMass) u238Frac = (1.0 - enrichmentFrac) * uMass / (uMass + zrMass) self.fuel.adjustMassFrac(nuclideToAdjust="U235", elementToHoldConstant="ZR", val=u235Frac) self.assertAlmostEqual(self.fuel.getMassFrac("U235"), u235Frac) self.assertAlmostEqual(self.fuel.getMassFrac("U238"), u238Frac) self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), zrFrac) def test_adjustMassFrac_U(self): self.fuel.adjustMassFrac(elementToAdjust="U", val=0.7) uFrac = self.fuel.getMassFrac("U") u235Enrichment = 0.1 u238Frac = (1.0 - u235Enrichment) * uFrac u235Frac = u235Enrichment * uFrac self.assertAlmostEqual(self.fuel.getMassFrac("U235"), u235Frac) self.assertAlmostEqual(self.fuel.getMassFrac("U238"), u238Frac) self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), 0.30) def test_adjustMassFrac_clear_ZR(self): self.fuel.adjustMassFrac(nuclideToAdjust="ZR", val=0.0) self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), 0.0) self.assertAlmostEqual(self.fuel.getNumberDensity("ZR"), 0.0) 
        # remaining mass fractions must renormalize to unity
        self.assertAlmostEqual(self.fuel.getMassFrac("U235") + self.fuel.getMassFrac("U238"), 1.0)

    def test_adjustMassFrac_set_ZR(self):
        u235Enrichment = 0.1
        zrFrac = 0.1
        uFrac = 1.0 - zrFrac
        u238Frac = (1.0 - u235Enrichment) * uFrac
        u235Frac = u235Enrichment * uFrac
        self.fuel.adjustMassFrac(nuclideToAdjust="ZR", val=zrFrac)
        self.assertAlmostEqual(self.fuel.getMassFrac("U235"), u235Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("U238"), u238Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), zrFrac)

    def test_adjustMassFrac_leave_same(self):
        zrFrac = 0.1
        u238Enrichment = 0.9
        uFrac = 1.0 - zrFrac
        u238Frac = uFrac * u238Enrichment
        self.fuel.adjustMassFrac(nuclideToAdjust="ZR", val=zrFrac)
        self.assertAlmostEqual(self.fuel.getMassFrac("U238"), u238Frac)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), zrFrac)

    def test_adjustMassEnrichment(self):
        self.fuel.adjustMassEnrichment(0.2)
        self.assertAlmostEqual(self.fuel.getMassFrac("U235"), 0.18)
        self.assertAlmostEqual(self.fuel.getMassFrac("U238"), 0.72)
        self.assertAlmostEqual(self.fuel.getMassFrac("ZR"), 0.1)

    def test_getEnrichment(self):
        self.fuel.adjustMassEnrichment(0.3)
        self.assertAlmostEqual(self.fuel.getEnrichment(), 0.3)

    def test_finalizeLoadDBAdjustsTD(self):
        """Ensure component is fully loaded through finalize methods."""
        tdFrac = 0.54321
        comp = self.fuel
        comp.p.theoreticalDensityFrac = tdFrac
        comp.finalizeLoadingFromDB()
        self.assertEqual(comp.material.getTD(), tdFrac)


class TestPinQuantities(unittest.TestCase):
    """Test methods that involve retrieval of pin quantities."""

    def setUp(self):
        self.r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")[1]

    def test_getPinMgFluxes(self):
        """Test proper retrieval of pin multigroup flux for fuel component."""
        # Get a fuel block and its fuel component from the core
        fuelBlock: Block = self.r.core.getFirstBlock(flags.Flags.FUEL)
        fuelComponent: Component = fuelBlock.getComponent(flags.Flags.FUEL)
        numPins = int(fuelComponent.p.mult)
self.assertEqual(numPins, 169) # Set pin fluxes at block level fuelBlock.assignPinIndices() pinMgFluxes = np.random.rand(numPins, 33) pinMgFluxesAdj = np.random.rand(numPins, 33) pinMgFluxesGamma = np.random.rand(numPins, 33) fuelBlock.setPinMgFluxes(pinMgFluxes) fuelBlock.setPinMgFluxes(pinMgFluxesAdj, adjoint=True) fuelBlock.setPinMgFluxes(pinMgFluxesGamma, gamma=True) # Retrieve from component to ensure they match simPinMgFluxes = fuelComponent.getPinMgFluxes() simPinMgFluxesAdj = fuelComponent.getPinMgFluxes(adjoint=True) simPinMgFluxesGamma = fuelComponent.getPinMgFluxes(gamma=True) assert_equal(pinMgFluxes, simPinMgFluxes) assert_equal(pinMgFluxesAdj, simPinMgFluxesAdj) assert_equal(pinMgFluxesGamma, simPinMgFluxesGamma) # Check assertion for adjoint gamma flux with self.assertRaisesRegex(ValueError, "Adjoint gamma flux is currently unsupported."): fuelComponent.getPinMgFluxes(adjoint=True, gamma=True) # Check assertion for not-found parameter fuelBlock.p.pinMgFluxes = None with self.assertRaisesRegex( ValueError, f"Failure getting pinMgFluxes from {fuelComponent} via parent {fuelBlock}", ): fuelComponent.getPinMgFluxes() ================================================ FILE: armi/reactor/tests/test_composites.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for the composite pattern."""

import itertools
import logging
import unittest
from copy import deepcopy

from armi import nuclearDataIO, runLog, settings, utils
from armi.nucDirectory import nucDir
from armi.nucDirectory.nuclideBases import NuclideBase, NuclideBases
from armi.physics.neutronics.fissionProductModel.tests.test_lumpedFissionProduct import (
    getDummyLFPFile,
)
from armi.reactor import assemblies, components, composites, grids, parameters
from armi.reactor.blueprints import assemblyBlueprint
from armi.reactor.components import basicShapes
from armi.reactor.flags import Flags, TypeSpec
from armi.reactor.tests.test_blocks import loadTestBlock
from armi.testing import loadTestReactor
from armi.tests import ISOAA_PATH, TEST_ROOT, mockRunLogs


class MockBP:
    # Minimal blueprints stand-in exposing the nuclide bookkeeping attributes.
    nuclideBases = NuclideBases()
    allNuclidesInProblem = set(nuclideBases.byName.keys())
    """:meta hide-value:"""
    activeNuclides = allNuclidesInProblem
    """:meta hide-value:"""
    inactiveNuclides = set()
    elementsToExpand = set()
    customIsotopics = {}


def getDummyParamDefs():
    """Build a minimal parameter definition collection with a single fake ``type`` parameter."""
    dummyDefs = parameters.ParameterDefinitionCollection()
    with dummyDefs.createBuilder() as pb:
        pb.defParam("type", units=utils.units.UNITLESS, description="Fake type")
    return dummyDefs


# Shared grid so every dummy object can carry a spatial locator.
_testGrid = grids.CartesianGrid.fromRectangle(0.01, 0.01)


class DummyComposite(composites.Composite):
    pDefs = getDummyParamDefs()

    def __init__(self, name, i=0):
        composites.Composite.__init__(self, name)
        self.p.type = name
        self.spatialLocator = grids.IndexLocation(i, i, i, _testGrid)


class DummyLeaf(composites.Composite):
    pDefs = getDummyParamDefs()

    def __init__(self, name, i=0):
        composites.Composite.__init__(self, name)
        self.p.type = name
        self.spatialLocator = grids.IndexLocation(i, i, i, _testGrid)
        # Some special material attribute for testing getChildren(includeMaterials=True)
        self.material = ("hello", "world")

    def getChildren(self, deep=False, generationNum=1, includeMaterials=False, predicate=None):
        """Return empty list, representing that this object
        has no children."""
        return []

    def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=True):
        """Return empty list, representing that this object has no children."""
        return []

    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
        return 1.0

    def iterComponents(self, typeSpec=None, exact=False):
        # a leaf acts as its own (only) component when flags match
        if self.hasFlags(typeSpec, exact):
            yield self


class TestCompositePattern(unittest.TestCase):
    def setUp(self):
        self.cs = settings.Settings()
        runLog.setVerbosity("error")
        self.container = DummyComposite("inner test fuel", 99)

        # Make sure the Composite is within the Reactor
        _o, r = loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        r.core.getFirstBlock().add(self.container)
        lib = nuclearDataIO.isotxs.readBinary(ISOAA_PATH)
        r.core.lib = lib

        for i in range(5):
            leaf = DummyLeaf(f"duct {i}", i + 100)
            leaf.setType("duct")
            self.container.add(leaf)
        nested = DummyComposite("clad", 98)
        nested.setType("clad")
        self.cladChild = nested
        self.secondGen = DummyComposite("liner", 97)
        self.thirdGen = DummyLeaf("pin 77", 33)
        self.secondGen.add(self.thirdGen)
        nested.add(self.secondGen)
        self.container.add(nested)
        # Composite tree structure in list of lists for testing. tree[i] contains the children at generation / depth i
        self.tree: list[list[composites.Composite]] = [
            [self.container],
            list(self.container),
            [self.secondGen],
            [self.thirdGen],
        ]

    def test_composite(self):
        """Test basic Composite things.

        .. test:: Composites are part of a hierarchical model.
            :id: T_ARMI_CMP0
            :tests: R_ARMI_CMP
        """
        container = self.container

        children = container.getChildren()
        for child in children:
            self.assertEqual(child.parent, container)

        allChildren = container.getChildren(deep=True)
        self.assertEqual(len(allChildren), 8)

    def test_printContents(self):
        with mockRunLogs.BufferLog() as mock:
            self.assertEqual("", mock.getStdout())
            testName = "test_printContents"
            runLog.LOG.startLog(testName)
            runLog.LOG.setVerbosity(logging.IMPORTANT)
            self.container.printContents(includeNuclides=True)

            logMsg = mock.getStdout()
            self.assertIn("DummyComposite", logMsg)
            self.assertIn("DummyLeaf", logMsg)

    def test_iterComponents(self):
        self.assertIn(self.thirdGen, list(self.container.iterComponents()))

    def test_getChildren(self):
        """Test the get children method.

        .. test:: Composites are part of a hierarchical model.
            :id: T_ARMI_CMP1
            :tests: R_ARMI_CMP
        """
        firstGen = self.container.getChildren()
        self.assertEqual(firstGen, self.tree[1])

        secondGen = self.container.getChildren(generationNum=2)
        self.assertEqual(secondGen, self.tree[2])
        self.assertIs(secondGen[0], self.secondGen)

        third = self.container.getChildren(generationNum=3)
        self.assertEqual(third, self.tree[3])
        self.assertIs(third[0], self.thirdGen)

        allC = self.container.getChildren(deep=True)
        expected = self.tree[1] + self.tree[2] + self.tree[3]
        self.assertTrue(
            all(a is e for a, e in itertools.zip_longest(allC, expected)),
            msg=f"Deep traversal differs: {allC=} != {expected=}",
        )

        onlyLiner = self.container.getChildren(deep=True, predicate=lambda o: o.p.type == "liner")
        self.assertEqual(len(onlyLiner), 1)
        self.assertIs(onlyLiner[0], self.secondGen)

    def test_getChildrenWithMaterials(self):
        """Test the ability for getChildren to place the material after the object."""
        withMaterials = self.container.getChildren(deep=True, includeMaterials=True)
        # Grab the iterable so we can control the progression
        items = iter(withMaterials)
        for item in items:
            expectedMat = getattr(item, "material", None)
            if expectedMat is None:
                continue
            # Material should be the next item in the list
            actualMat = next(items)
            self.assertIs(actualMat, expectedMat)
            break
        else:
            raise RuntimeError("No materials found with includeMaterials=True")

    def test_iterChildren(self):
        """Detailed testing on Composite.iterChildren."""

        def compareIterables(actual, expected: list[composites.Composite]):
            for e in expected:
                a = next(actual)
                self.assertIs(a, e)
            # Ensure we've consumed the actual iterator and there's nothing left
            with self.assertRaises(StopIteration):
                next(actual)

        compareIterables(self.container.iterChildren(), self.tree[1])
        compareIterables(self.container.iterChildren(generationNum=2), self.tree[2])
        compareIterables(self.container.iterChildren(generationNum=3), self.tree[3])
        compareIterables(
            self.container.iterChildren(deep=True),
            self.tree[1] + self.tree[2] + self.tree[3],
        )

    def test_iterAndGetChildren(self):
        """Compare that iter children and get children are consistent."""
        self._compareIterGetChildren()
        self._compareIterGetChildren(deep=True)
        self._compareIterGetChildren(generationNum=2)
        # Some wacky predicate just to check we can use that too
        self._compareIterGetChildren(deep=True, predicate=lambda c: len(c.name) % 3)

    def _compareIterGetChildren(self, **kwargs):
        # helper: iterChildren and getChildren must produce the same sequence
        fromIter = self.container.iterChildren(**kwargs)
        fromGetter = self.container.getChildren(**kwargs)
        msg = repr(kwargs)
        # Use zip longest just in case one iterator comes up short
        for count, (it, gt) in enumerate(itertools.zip_longest(fromIter, fromGetter)):
            self.assertIs(it, gt, msg=f"{count=} :: {msg}")

    def test_simpleIterChildren(self):
        """Test that C.iterChildren() is identical to iter(C)."""
        for count, (fromNative, fromIterChildren) in enumerate(
            itertools.zip_longest(self.container, self.container.iterChildren())
        ):
            self.assertIs(fromIterChildren, fromNative, msg=count)

    def test_iterChildrenWithMaterials(self):
        """Test that C.iterChildrenWithMaterials gets materials following their parent component."""
        items = iter(self.container.iterChildrenWithMaterials(deep=True))
        for item in items:
            if isinstance(item, components.Component):
                mat = next(items)
                self.assertIs(mat, item.material)

    def test_getName(self):
        """Test the getName method."""
        self.assertEqual(self.secondGen.getName(), "liner")
        self.assertEqual(self.thirdGen.getName(), "pin 77")
        self.assertEqual(self.secondGen.getName(), "liner")
        self.assertEqual(self.container.getName(), "inner test fuel")

    def test_sort(self):
        # in this case, the children should start sorted
        c0 = [c.name for c in self.container]
        self.container.sort()
        c1 = [c.name for c in self.container]
        self.assertNotEqual(c0, c1)

        # verify repeated sorting behave
        for _ in range(3):
            self.container.sort()
            ci = [c.name for c in self.container]
            self.assertEqual(c1, ci)

        # break the order
        children = self.container.getChildren()
        self.container._children = children[2:] + children[:2]
        c2 = [c.name for c in self.container]
        self.assertNotEqual(c1, c2)

        # verify the sort order
        self.container.sort()
        c3 = [c.name for c in self.container]
        self.assertEqual(c1, c3)

    def test_areChildernOfType(self):
        expectedResults = [False, False, False, False, False, True]
        for i, b in enumerate(self.container.doChildrenHaveFlags(Flags.CLAD)):
            self.assertEqual(b, expectedResults[i])

    def test_containsAtLeastOneChildOfType(self):
        c = self.container
        self.assertTrue(c.containsAtLeastOneChildWithFlags(Flags.DUCT))
        self.assertTrue(c.containsAtLeastOneChildWithFlags(Flags.CLAD))

    def test_containsOnlyChildrenOfType(self):
        c = self.container
        for b in c:
            b.setType("bond")
        self.assertTrue(c.containsOnlyChildrenWithFlags(Flags.BOND))

    def test_nameContains(self):
        # name matching is case-insensitive and accepts a list of candidates
        c = self.container
        c.setName("test one two three")
        self.assertTrue(c.nameContains("one"))
        self.assertTrue(c.nameContains("One"))
        self.assertTrue(c.nameContains("THREE"))
        self.assertFalse(c.nameContains("nope"))
        self.assertFalse(c.nameContains(["nope"]))
        self.assertTrue(c.nameContains(["one", "TWO", "three"]))
        self.assertTrue(c.nameContains(["nope", "dope", "three"]))

    def test_nucSpec(self):
        self.assertEqual(self.container._getNuclidesFromSpecifier("U235"), ["U235"])

        uNucs = self.container._getNuclidesFromSpecifier("U")
        self.assertIn("U235", uNucs)
        self.assertIn("U241", uNucs)
        self.assertIn("U227", uNucs)
        self.assertEqual(self.container._getNuclidesFromSpecifier(["U238", "U235"]), ["U235", "U238"])

        uzr = self.container._getNuclidesFromSpecifier(["U238", "U235", "ZR"])
        self.assertIn("U235", uzr)
        self.assertIn("ZR92", uzr)
        self.assertNotIn("ZR", uzr)

        puIsos = self.container._getNuclidesFromSpecifier(["PU"])
        # PU is special because it has no natural isotopics
        self.assertIn("PU239", puIsos)
        self.assertNotIn("PU", puIsos)

        self.assertEqual(self.container._getNuclidesFromSpecifier(["FE", "FE56"]).count("FE56"), 1)

    def test_hasFlags(self):
        """Ensure flags are queryable.

        .. test:: Flags can be queried.
            :id: T_ARMI_CMP_FLAG
            :tests: R_ARMI_CMP_FLAG
        """
        self.container.setType("fuel")
        self.assertFalse(self.container.hasFlags(Flags.SHIELD | Flags.FUEL, exact=True))
        self.assertTrue(self.container.hasFlags(Flags.FUEL))
        self.assertTrue(self.container.hasFlags(None))

    def test_hasFlagsSubstring(self):
        """Make sure typespecs with the same word in them no longer match."""
        self.container.setType("intercoolant")
        self.assertFalse(self.container.hasFlags(Flags.COOLANT))
        self.assertFalse(self.container.hasFlags(Flags.COOLANT, exact=True))
        self.assertTrue(self.container.hasFlags(Flags.INTERCOOLANT, exact=True))

        self.container.setType("innerduct")
        self.assertFalse(self.container.hasFlags(Flags.DUCT, exact=True))

    def test_hasFlagsNoTypeSpecified(self):
        self.container.setType("fuel")
        types = [None, [], [None]]
        for t in types:
            self.assertTrue(self.container.hasFlags(t))
            self.assertFalse(self.container.hasFlags(t, exact=True))

    def test_calcTotalParam(self):
        minSerialNumberCount = 21.0
        kids = self.container.getChildren()
        tot = self.container.calcTotalParam("serialNum", kids)
        self.assertGreaterEqual(tot, minSerialNumberCount)

        tot = self.container.calcTotalParam("serialNum", kids, calcBasedOnFullObj=True)
        self.assertGreaterEqual(tot, minSerialNumberCount)

        tot = self.container.calcTotalParam("serialNum", kids, typeSpec=Flags.FUEL)
        self.assertEqual(tot, 0.0)

        with self.assertRaises(ValueError):
            self.container.calcTotalParam(
                "power", self.container.getChildren(), addSymmetricPositions=True, calcBasedOnFullObj=True
            )

    def test_getBoundingCirlceOuterDiameter(self):
        od = self.container.getBoundingCircleOuterDiameter()
        self.assertAlmostEqual(od, len(list(self.container.iterComponents())))

    def test_getParamNames(self):
        params = self.container.getParamNames()
        self.assertEqual(len(params), 3)
        self.assertIn("flags", params)
        self.assertIn("serialNum", params)
        self.assertIn("type", params)

    def test_updateVolume(self):
        self.assertAlmostEqual(self.container.getVolume(), 0)
        self.container._updateVolume()
        self.assertAlmostEqual(self.container.getVolume(), 0)

    def test_expandLFPs(self):
        # simple test, with no lumped fission product mappings
        numDens = {"NA23": 1.0}
        numDens = self.container._expandLFPs(numDens)
        self.assertEqual(len(numDens), 1)

        # set the lumped fission product mapping
        fpd = getDummyLFPFile()
        lfps = fpd.createLFPsFromFile()
        self.container.setLumpedFissionProducts(lfps)

        # get back the lumped fission product mapping, just to check
        lfp = self.container.getLumpedFissionProductCollection()
        self.assertEqual(len(lfp), 3)
        self.assertIn("LFP35", lfp)
        self.assertIn("LFP38", lfp)
        self.assertIn("LFP39", lfp)

        # quick test WITH some lumped fission products in the mix
        numDens = {"NA23": 1.0, "LFP35": 2.0}
        numDens = self.container._expandLFPs(numDens)
        self.assertEqual(len(numDens), 9)
        self.assertEqual(numDens["MO99"], 0)

    def test_setChildrenLumpedFissionProducts(self):
        # build a lumped fission product collection
        fpd = getDummyLFPFile()
        lfps = fpd.createLFPsFromFile()

        # validate that the LFP collection is None
        self.container.setChildrenLumpedFissionProducts(None)
        for c in self.container:
            self.assertIsNone(c._lumpedFissionProducts)

        # validate that the LFP collection is not None
        self.container.setChildrenLumpedFissionProducts(lfps)
        for c in self.container:
            self.assertIsNotNone(c._lumpedFissionProducts)

    def test_requiresLumpedFissionProds(self):
        # build a lumped fission product collection
        fpd = getDummyLFPFile()
        lfps = fpd.createLFPsFromFile()
        self.container.setChildrenLumpedFissionProducts(lfps)

        # test the null case
        result = self.container.requiresLumpedFissionProducts(None)
        self.assertFalse(result)

        # test the usual case
        result = self.container.requiresLumpedFissionProducts(set())
        self.assertFalse(result)

        # test a positive case
        result = self.container.requiresLumpedFissionProducts(["LFP35"])
        self.assertTrue(result)

    def test_getLumpedFissionProdsIfNullCase(self):
        # build a lumped fission product collection
        fpd = getDummyLFPFile()
        lfps = fpd.createLFPsFromFile()
        self.container.setChildrenLumpedFissionProducts(lfps)

        # test the null case
        result = self.container.getLumpedFissionProductsIfNecessary(None)
        self.assertEqual(len(result), 0)

        # test a positive case
        result = self.container.getLumpedFissionProductsIfNecessary(["LFP35"])
        self.assertGreater(len(result), 0)

    def test_getIntegratedMgFlux(self):
        mgFlux = self.container.getIntegratedMgFlux()
        self.assertEqual(mgFlux, [0.0])

    def test_getReactionRates(self):
        # test the null case
        rRates = self.container.getReactionRates("U235")
        self.assertEqual(len(rRates), 6)
        self.assertEqual(sum([r for r in rRates.values()]), 0)

        # init reactor
        _o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        lib = nuclearDataIO.isotxs.readBinary(ISOAA_PATH)
        r.core.lib = lib

        # test on a Component
        b = r.core.getFirstAssembly().getFirstBlock()
        b.p.mgFlux = 1
        c = b.getComponents()[0]
        rRatesComp = c.getReactionRates("U235")
        self.assertEqual(len(rRatesComp), 6)
        self.assertGreater(sum([r for r in rRatesComp.values()]), 0)

        # test on a Block
        rRatesBlock = b.getReactionRates("U235")
        self.assertEqual(len(rRatesBlock), 6)
        self.assertGreater(sum([r for r in rRatesBlock.values()]), 0)

        # test on an Assembly
        assem = r.core.getFirstAssembly()
        rRatesAssem = assem.getReactionRates("U235")
        self.assertEqual(len(rRatesAssem), 6)
        self.assertGreater(sum([r for r in rRatesAssem.values()]), 0)

        # test on a Core
        rRatesCore = r.core.getReactionRates("U235")
        self.assertEqual(len(rRatesCore), 6)
        self.assertGreater(sum([r for r in rRatesCore.values()]), 0)

        # test on a Reactor
        rRatesReactor = r.getReactionRates("U235")
        self.assertEqual(len(rRatesReactor), 6)
        self.assertGreater(sum([r for r in rRatesReactor.values()]), 0)

        # test that all different levels of the hierarchy have the same reaction rates
        for key, val in rRatesBlock.items():
            self.assertAlmostEqual(rRatesAssem[key], val)
            self.assertAlmostEqual(rRatesCore[key], val)
            self.assertAlmostEqual(rRatesReactor[key], val)

    def test_getFirstComponent(self):
        c = self.container.getComponents()[0]
        c0 = self.container.getFirstComponent()
        self.assertIs(c, c0)
        self.assertIsInstance(c0, composites.Composite)

        c = self.cladChild.getComponents()[0]
        c0 = self.cladChild.getFirstComponent()
        self.assertIs(c, c0)
        self.assertIsInstance(c0, composites.Composite)

        c = self.secondGen.getComponents()[0]
        c0 = self.secondGen.getFirstComponent()
        self.assertIs(c, c0)
        self.assertIsInstance(c0, composites.Composite)

        b = loadTestBlock()
        c = b.getComponents()[0]
        c0 = b.getFirstComponent()
        self.assertIs(c, c0)
        self.assertIsInstance(c0, composites.Composite)

        # covering edge case: someone passes in a flag that doesn't exist on on the object
        with self.assertRaises(ValueError):
            b.getFirstComponent(typeSpec=Flags.POISON)

    def test_getReactionRateDict(self):
        lib = nuclearDataIO.isotxs.readBinary(ISOAA_PATH)
        rxRatesDict = self.container._getReactionRateDict(nucName="PU239", lib=lib, xsSuffix="AA", mgFlux=1, nDens=1)
        self.assertEqual(rxRatesDict["nG"], sum(lib["PU39AA"].micros.nGamma))

    def test_syncParameters(self):
        data = [{"serialNum": 123}, {"flags": "FAKE"}]
        numSynced = self.container._syncParameters(data, {})
        self.assertEqual(numSynced, 2)

    def test_iterChildrenWithFlags(self):
        expectedChildren = {c for c in self.container if c.hasFlags(Flags.DUCT)}
        found = set()
        for c in self.container.iterChildrenWithFlags(Flags.DUCT):
            self.assertIn(c, expectedChildren)
            found.add(c)
        self.assertSetEqual(found, expectedChildren)

    def test_iterChildrenOfType(self):
        clads = self.container.iterChildrenOfType("clad")
        first = next(clads)
        self.assertIs(first, self.cladChild)
        with self.assertRaises(StopIteration):
            next(clads)

    def test_removeAll(self):
        """Test the ability to remove all children of a composite."""
        self.container.removeAll()
        self.assertEqual(len(self.container), 0)
        # Nothing to iterate over
        items = iter(self.container)
        with self.assertRaises(StopIteration):
            next(items)

        for child in self.tree[1]:
            self.assertIsNone(child.parent)

    def test_setChildren(self):
        """Test the ability to override children on a composite."""
        newChildren = self.tree[2] + self.tree[3]
        oldChildren = list(self.container)
        self.container.setChildren(newChildren)
        self.assertEqual(len(self.container), len(newChildren))

        for old in oldChildren:
            self.assertIsNone(old.parent)

        for actualNew, expectedNew in zip(newChildren, self.container):
            self.assertIs(actualNew, expectedNew)

    def test_add(self):
        # get the size of the container at the start
        lenContainer = len(self.container)

        # add a dummy leaf to the container
        leaf = DummyLeaf("duct 9", 99)
        leaf.setType("duct")
        self.container.add(leaf)

        # verify the container's size has increased by one
        self.assertEqual(len(self.container), lenContainer + 1)

    def test_extend(self):
        # generate a list of elements to add to this container
        elements = []
        lenElements = 5
        for i in range(lenElements):
            leaf = DummyLeaf(f"duct {i}", i + 100)
            leaf.setType("duct")
            elements.append(leaf)

        # extend the container by the above list
        lenContainer = len(self.container)
        self.container.extend(elements)
        self.assertEqual(len(self.container), lenContainer + lenElements)

        # show all the composites in the block have the block as the parent
        for c in self.container:
            self.assertIs(c.parent, self.container)


class TestCompositeTree(unittest.TestCase):
    # NOTE(review): the exact indentation of this YAML literal was reconstructed
    # from a whitespace-mangled source — confirm against the repository copy.
    blueprintYaml = """
    name: test assembly
    height: [1, 1]  # 2 blocks
    axial mesh points: [1, 1]
    xs types: [A, A]
    specifier: AA
    blocks:
        - &block_metal_fuel
            name: metal fuel
            fuel: &component_metal_fuel_fuel
                shape: Circle
                material: UZr
                Tinput: 500
                Thot: 500.0
                id: 0.0
                od: 1.0
                mult: 7
            clad: &component_metal_fuel_clad
                shape: Circle
                material: HT9
                Tinput: 450.0
                Thot: 450.0
                id: 1.09
                od: 1.1
                mult: 7
            bond: &component_metal_fuel_bond
                shape: Circle
                material: Sodium
                Tinput: 450.0
                Thot: 450.0
                id: fuel.od
                od: clad.id
                mult: 7
            coolant: &component_metal_fuel_coolant
                shape: DerivedShape
                material: Sodium
                Tinput: 450.0
                Thot: 450.0
            duct: &component_metal_fuel_duct
                shape: Hexagon
                material: HT9
                Tinput: 25.0
                Thot: 450.0
                ip: 16.0
                mult: 1.0
                op: 16.6
        - &block_oxide_fuel
            name: mox fuel
            fuel:
                <<: *component_metal_fuel_fuel
                material: MOX
            clad: *component_metal_fuel_clad
            bond: *component_metal_fuel_bond
            coolant: *component_metal_fuel_coolant
            duct: *component_metal_fuel_duct
    """

    def setUp(self):
        self.block = loadTestBlock()
        self.r = self.block.core.r
        self.block.setHeight(100.0)
        self.refDict = {
            "U235": 0.00275173784234,
            "U238": 0.0217358415457,
            "W182": 1.09115150103e-05,
            "W183": 5.89214392093e-06,
            "W184": 1.26159558164e-05,
            "W186": 1.17057432664e-05,
            "V": 2e-2,
            "NA23": 2e-2,
            "ZR": 0.00709003962772,
        }
        self.block.setNumberDensities(self.refDict)

    def test_ordering(self):
        a = assemblies.Assembly("dummy")
        a.spatialGrid = grids.AxialGrid.fromNCells(2, armiObject=a)
        otherBlock = deepcopy(self.block)
        a.add(self.block)
        a.add(otherBlock)
        self.assertTrue(self.block < otherBlock)
        # swapping locators swaps the ordering
        locator = self.block.spatialLocator
        self.block.spatialLocator = otherBlock.spatialLocator
        otherBlock.spatialLocator = locator
        self.assertTrue(otherBlock < self.block)

        # test some edge cases
        otherBlock.spatialLocator._grid = None
        with self.assertRaises(ValueError):
otherBlock < self.block otherBlock.spatialLocator = None with self.assertRaises(ValueError): otherBlock < self.block def test_getAncestorWithFlags(self): # this test block is not part of an assembly, so it should not have a parent/ancestor parent = self.block.getAncestorWithFlags(Flags.FUEL) self.assertIsNone(parent) # pick a component that is not part of a fuel composite, so it should not have a fuel ancestor grandchild = self.block.getFirstComponent() child = grandchild.getAncestorWithFlags(Flags.FUEL) self.assertIsNone(child) # test the usual case: get a ancestor with the fuel flag child = self.block.getChildrenWithFlags(Flags.FUEL)[0] grandchild = child.getFirstComponent() child1 = grandchild.getAncestorWithFlags(Flags.FUEL) self.assertEqual(child1, grandchild) # default case: the only ancestor with the fuel flag is the composite itself, so return that child2 = child.getAncestorWithFlags(Flags.FUEL) self.assertEqual(child2, child) def test_changeNDensByFactor(self): b = deepcopy(self.block.getChildrenWithFlags(Flags.FUEL)[0]) # test inital state dens = b.getNumberDensities() zrDens = dens["ZR"] u235Dens = dens["U235"] u238Dens = dens["U238"] b.changeNDensByFactor(0.5) # test new state dens = b.getNumberDensities() self.assertAlmostEqual(dens["ZR"], zrDens / 2, delta=1e-6) self.assertAlmostEqual(dens["U235"], u235Dens / 2, delta=1e-6) self.assertAlmostEqual(dens["U238"], u238Dens / 2, delta=1e-6) def test_summing(self): a = assemblies.Assembly("dummy") a.spatialGrid = grids.AxialGrid.fromNCells(2, armiObject=a) otherBlock = deepcopy(self.block) a.add(self.block) a.add(otherBlock) b = self.block + otherBlock self.assertEqual(len(b), 26) self.assertFalse(b[0].is3D) self.assertIn("Circle", str(b[0])) self.assertFalse(b[-1].is3D) self.assertIn("Hexagon", str(b[-1])) def test_constituentReport(self): runLog.info(self.r.core.constituentReport()) runLog.info(self.r.core.getFirstAssembly().constituentReport()) runLog.info(self.r.core.getFirstBlock().constituentReport()) 
runLog.info(self.r.core.getFirstBlock().getComponents()[0].constituentReport()) def test_getNuclides(self): """ The getNuclides should return all keys that have ever been in this block, including values that are at trace. """ cur = self.block.getNuclides() ref = self.refDict.keys() for key in ref: self.assertIn(key, cur) self.assertIn("FE", cur) # this is in at trace value. def test_getFuelMass(self): """ This test creates a dummy assembly and ensures that the assembly, block, and fuel component masses are consistent. `getFuelMass` ensures that the fuel component is used to `getMass`. """ cs = settings.Settings() assemDesign = assemblyBlueprint.AssemblyBlueprint.load(self.blueprintYaml) a = assemDesign.construct(cs, MockBP) fuelMass = 0.0 for b in a: fuel = b.getComponent(Flags.FUEL) fuelMass += fuel.getMass() self.assertEqual(b.getFuelMass(), fuel.getMass()) self.assertEqual(fuelMass, a.getFuelMass()) def test_getChildrenIncludeMaterials(self): """Test that the ``StateRetainer`` retains material properties when they are modified.""" cs = settings.Settings() assemDesign = assemblyBlueprint.AssemblyBlueprint.load(self.blueprintYaml) a = assemDesign.construct(cs, MockBP) component = a[0][0] referenceDensity = component.material.pseudoDensity(Tc=200) self.assertEqual(component.material.pseudoDensity(Tc=200), referenceDensity) def test_getHMMass(self): fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 0.76, "id": 0.0, "mult": 1.0} self.fuelComponent = components.Circle("fuel", "UZr", **fuelDims) self.block.add(self.fuelComponent) self.block.clearNumberDensities() self.refDict = { "U235": 0.00275173784234, "U238": 0.0217358415457, "W182": 1.09115150103e-05, "W183": 5.89214392093e-06, "W184": 1.26159558164e-05, "W186": 1.17057432664e-05, "V": 3e-2, "NA23": 2e-2, "ZR": 0.00709003962772, } self.block.setNumberDensities(self.refDict) cur = self.block.getHMMass() mass = 0.0 for nucName in self.refDict.keys(): if nucDir.isHeavyMetal(nucName): mass += 
self.block.getMass(nucName) places = 6 self.assertAlmostEqual(cur, mass, places=places) def test_getFPMass(self): fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 0.76, "id": 0.0, "mult": 1.0} self.fuelComponent = components.Circle("fuel", "UZr", **fuelDims) self.fuelComponent.material.setMassFrac("LFP38", 0.25) self.block.add(self.fuelComponent) refDict = {"LFP35": 0.1, "LFP38": 0.05, "LFP39": 0.7} self.fuelComponent.setNumberDensities(refDict) cur = self.block.getFPMass() mass = 0.0 for nucName in refDict.keys(): mass += self.block.getMass(nucName) ref = mass places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_setMassFrac(self): # build test component c = DummyComposite("test_setMassFrac") c.getHeight = lambda: 1.0 fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 0.76, "id": 0.0, "mult": 1.0} fuelComponent = components.Circle("fuel", "UZr", **fuelDims) c.add(fuelComponent) # test initial state self.assertEqual(c.getFPMass(), 0.0) self.assertAlmostEqual(c.getHMMass(), 6.468105962375698, delta=1e-6) self.assertAlmostEqual(c.getMass(), 7.186784402639664, delta=1e-6) # use setMassFrac c.setMassFrac("U235", 0.99) c.setMassFrac("U238", 0.01) # test new state self.assertEqual(c.getFPMass(), 0.0) self.assertAlmostEqual(c.getHMMass(), 7.178895593948443, delta=1e-6) self.assertAlmostEqual(c.getMass(), 7.186784402639666, delta=1e-6) # test edge case were zero density c.setNumberDensities({}) with self.assertRaises(ValueError): c.setMassFrac("U235", 0.98) def test_getFissileMass(self): cur = self.block.getFissileMass() mass = 0.0 for nucName in self.refDict.keys(): if nucName in NuclideBase.fissile: mass += self.block.getMass(nucName) ref = mass places = 6 self.assertAlmostEqual(cur, ref, places=places) def test_getMaxParam(self): """Test getMaxParam(). .. test:: Composites have parameter collections. 
:id: T_ARMI_CMP_PARAMS0 :tests: R_ARMI_CMP_PARAMS """ for ci, c in enumerate(self.block): if isinstance(c, basicShapes.Circle): c.p.id = ci lastSeen = c lastIndex = ci cMax, comp = self.block.getMaxParam("id", returnObj=True) self.assertEqual(cMax, lastIndex) self.assertIs(comp, lastSeen) def test_getMinParam(self): """Test getMinParam(). .. test:: Composites have parameter collections. :id: T_ARMI_CMP_PARAMS1 :tests: R_ARMI_CMP_PARAMS """ for ci, c in reversed(list(enumerate(self.block))): if isinstance(c, basicShapes.Circle): c.p.id = ci lastSeen = c lastIndex = ci cMax, comp = self.block.getMinParam("id", returnObj=True) self.assertEqual(cMax, lastIndex) self.assertIs(comp, lastSeen) class TestFlagSerializer(unittest.TestCase): class TestFlagsA(utils.Flag): A = utils.flags.auto() B = utils.flags.auto() C = utils.flags.auto() D = utils.flags.auto() class TestFlagsB(utils.Flag): A = utils.flags.auto() B = utils.flags.auto() BPRIME = utils.flags.auto() C = utils.flags.auto() D = utils.flags.auto() def test_flagSerialization(self): data = [ Flags.FUEL, Flags.FUEL | Flags.INNER, Flags.A | Flags.B | Flags.CONTROL, ] flagsArray, attrs = composites.FlagSerializer.pack(data) data2 = composites.FlagSerializer.unpack(flagsArray, composites.FlagSerializer.version, attrs) self.assertEqual(data, data2) # discrepant versions with self.assertRaises(ValueError): data2 = composites.FlagSerializer.unpack(flagsArray, "0", attrs) # missing flags in current version Flags attrs["flag_order"].append("NONEXISTANTFLAG") with mockRunLogs.BufferLog() as mock: self.assertEqual("", mock.getStdout()) testName = "test_flagSerialization" runLog.LOG.startLog(testName) runLog.LOG.setVerbosity(logging.WARNING) data2 = composites.FlagSerializer.unpack(flagsArray, composites.FlagSerializer.version, attrs) flagLog = mock.getStdout() self.assertIn("The set of flags", flagLog) self.assertIn("NONEXISTANTFLAG", flagLog) def test_flagConversion(self): data = [ self.TestFlagsA.A, self.TestFlagsA.A | 
self.TestFlagsA.C, self.TestFlagsA.A | self.TestFlagsA.C | self.TestFlagsA.D, ] serialized, attrs = composites.FlagSerializer._packImpl(data, self.TestFlagsA) data2 = composites.FlagSerializer._unpackImpl( serialized, composites.FlagSerializer.version, attrs, self.TestFlagsB ) expected = [ self.TestFlagsB.A, self.TestFlagsB.A | self.TestFlagsB.C, self.TestFlagsB.A | self.TestFlagsB.C | self.TestFlagsB.D, ] self.assertEqual(data2, expected) class TestMiscMethods(unittest.TestCase): """ Test a variety of methods on the composite. these may get moved to composted classes in the future. """ def setUp(self): self.obj = loadTestBlock() def test_setMass(self): """Test setting and retrieving mass. .. test:: Mass of a composite is retrievable. :id: T_ARMI_CMP_GET_MASS :tests: R_ARMI_CMP_GET_MASS """ masses = {"U235": 5.0, "U238": 3.0} self.obj.setMasses(masses) self.assertAlmostEqual(self.obj.getMass("U235"), 5.0) self.assertAlmostEqual(self.obj.getMass("U238"), 3.0) self.assertAlmostEqual(self.obj.getMass(), 8.0) self.obj.addMasses(masses) self.assertAlmostEqual(self.obj.getMass("U238"), 6.0) # make sure it works with groups of groups group = composites.Composite("group") group.add(self.obj) group.add(loadTestBlock()) group.setMass("U235", 5) self.assertAlmostEqual(group.getMass("U235"), 5) # ad a second block, and confirm it works group.add(loadTestBlock()) self.assertGreater(group.getMass("U235"), 5) self.assertAlmostEqual(group.getMass("U235"), 1364.28376185) def test_getNumberDensities(self): """Get number densities from composite. .. test:: Number density of composite is retrievable. 
:id: T_ARMI_CMP_GET_NDENS0 :tests: R_ARMI_CMP_GET_NDENS """ # verify the number densities from the composite ndens = self.obj.getNumberDensities() self.assertAlmostEqual(0.0001096, ndens["SI"], 7) self.assertAlmostEqual(0.0000368, ndens["W"], 7) ndens = self.obj.getNumberDensity("SI") self.assertAlmostEqual(0.0001096, ndens, 7) # sum nuc densities from children components totalVolume = self.obj.getVolume() childDensities = {} for o in self.obj: m = o.getVolume() d = o.getNumberDensities() for nuc, val in d.items(): if nuc not in childDensities: childDensities[nuc] = val * (m / totalVolume) else: childDensities[nuc] += val * (m / totalVolume) # verify the children match this composite for nuc in ["FE", "SI"]: self.assertAlmostEqual(self.obj.getNumberDensity(nuc), childDensities[nuc], 4, msg=nuc) def test_getNumDensWithExpandedFissProds(self): """Get number densities from composite. .. test:: Get number densities. :id: T_ARMI_CMP_NUC :tests: R_ARMI_CMP_NUC """ # verify the number densities from the composite ndens = self.obj.getNumberDensities(expandFissionProducts=True) self.assertAlmostEqual(0.0001096, ndens["SI"], 7) self.assertAlmostEqual(0.0000368, ndens["W"], 7) ndens = self.obj.getNumberDensity("SI") self.assertAlmostEqual(0.0001096, ndens, 7) # set the lumped fission product mapping fpd = getDummyLFPFile() lfps = fpd.createLFPsFromFile() self.obj.setLumpedFissionProducts(lfps) # sum nuc densities from children components totalVolume = self.obj.getVolume() childDensities = {} for o in self.obj: # get the number densities with and without fission products d0 = o.getNumberDensities(expandFissionProducts=False) d = o.getNumberDensities(expandFissionProducts=True) # prove that the expanded fission products have more isotopes if len(d0) > 0: self.assertGreater(len(d), len(d0)) # sum the child nuclide densites (weighted by mass fraction) m = o.getVolume() for nuc, val in d.items(): if nuc not in childDensities: childDensities[nuc] = val * (m / totalVolume) else: 
childDensities[nuc] += val * (m / totalVolume) # verify the children match this composite for nuc in ["FE", "SI"]: self.assertAlmostEqual(self.obj.getNumberDensity(nuc), childDensities[nuc], 4, msg=nuc) def test_dimensionReport(self): report = self.obj.setComponentDimensionsReport() self.assertEqual(len(report), len(self.obj)) def test_getAtomicWeight(self): weight = self.obj.getAtomicWeight() self.assertTrue(50 < weight < 100) def test_containsHeavyMetal(self): self.assertTrue(self.obj.containsHeavyMetal()) def test_copyParamsToChildren(self): self.obj.p.percentBu = 5 self.obj.copyParamsToChildren(["percentBu"]) for child in self.obj: self.assertEqual(child.p.percentBu, self.obj.p.percentBu) def test_copyParamsFrom(self): obj2 = loadTestBlock() obj2.p.percentBu = 15.2 self.obj.copyParamsFrom(obj2) self.assertEqual(obj2.p.percentBu, self.obj.p.percentBu) ================================================ FILE: armi/reactor/tests/test_cores.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import itertools
import random
import typing
import unittest
from unittest import mock
from unittest.mock import patch

from armi.nuclearDataIO.xsLibraries import IsotxsLibrary
from armi.reactor.assemblies import HexAssembly
from armi.reactor.blocks import Block
from armi.reactor.flags import Flags
from armi.reactor.tests.test_reactors import TEST_ROOT, loadTestReactor
from armi.testing import TESTING_ROOT
from armi.tests import ISOAA_PATH
from armi.utils import directoryChangers


class HexCoreTests(unittest.TestCase):
    """Tests on a hex reactor core."""

    @classmethod
    def setUpClass(cls):
        # load the shared test reactor once for the whole class; tearDownClass
        # restores the working directory
        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)
        cls.directoryChanger.open()
        r = loadTestReactor(
            inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml"
        )[1]
        cls.core = r.core

    def assertAllIs(self, actuals: typing.Iterable[typing.Any], expecteds: typing.Iterable[typing.Any], fill=None):
        """Assert that all items in two iterables are the same objects."""
        # zip_longest so a length mismatch surfaces as a fill-vs-object failure
        for actual, expected in itertools.zip_longest(actuals, expecteds, fillvalue=fill):
            self.assertIs(actual, expected)

    @classmethod
    def tearDownClass(cls):
        cls.directoryChanger.close()

    def test_getAllAssem(self):
        """Test the ability to produce all assemblies."""
        expectedAll = list(self.core)
        actualAll = self.core.getAssemblies()
        self.assertAllIs(actualAll, expectedAll)

    def test_getAllAssemWithFlag(self):
        """Test the ability to produce assemblies with a flag."""
        for spec in (Flags.FUEL, Flags.CONTROL):
            expected = self.core.getChildrenWithFlags(spec)
            actual = self.core.getAssemblies(typeSpec=spec)
            for a in actual:
                self.assertIsInstance(a, HexAssembly)
                self.assertTrue(a.hasFlags(spec))
            self.assertAllIs(actual, expected)

    def test_iterChildrenWithFlags(self):
        """Iterating core children with flags filters assemblies as expected."""
        aa = list(self.core.iterChildrenWithFlags(Flags.BOOSTER))
        self.assertEqual(len(aa), 0)
        aa = list(self.core.iterChildrenWithFlags(Flags.FUEL))
        self.assertTrue(1 < len(aa) < 10)
        aa = list(self.core.iterChildrenWithFlags(Flags.CONTROL))
        self.assertEqual(len(aa), 0)

    def test_getAssemsInZones(self):
        """Test the ability to produce assemblies in a zone."""
        # Grab a few assemblies and add their locations to those in the zones
        selection = random.choices(self.core.getAssemblies(), k=5)
        locations = [a.getLocation() for a in selection]
        fakeZones = ["hot", "cold"]
        # patch zone lookup so the chosen locations form the "zones"
        with mock.patch.object(self.core.zones, "getZoneLocations", mock.Mock(return_value=locations)):
            actuals = self.core.getAssemblies(zones=fakeZones)
            for a in actuals:
                self.assertIn(a.getLocation(), locations, msg=str(a))

    def test_getBlocks(self):
        """Test the ability to get all blocks in the core."""
        blocks = []
        for a in self.core:
            blocks.extend(a)
        actual = self.core.iterBlocks()
        self.assertAllIs(actual, blocks)

    def test_getBlocksWithFlag(self):
        """Test the ability to get all blocks with a flag in the core."""
        blocks = []
        for a in self.core:
            blocks.extend(filter(lambda b: b.hasFlags(Flags.FUEL), a))
        actual = self.core.getBlocks(Flags.FUEL)
        self.assertAllIs(actual, blocks)

    def test_traverseAllBlocks(self):
        """Test the ability to iterate over all blocks in the core."""
        blocks = []
        for a in self.core:
            blocks.extend(a)
        actual = self.core.iterBlocks()
        self.assertAllIs(actual, blocks)

    def test_traverseAllBlocksWithFlag(self):
        """Test the ability to traverse blocks in the core with a flag."""
        blocks: list[Block] = []
        for a in self.core:
            blocks.extend(a)
        for spec in (Flags.FUEL, Flags.CONTROL, Flags.FUEL | Flags.CONTROL):
            expected = list(filter(lambda b: b.hasFlags(spec), blocks))
            actual = self.core.iterBlocks(spec)
            self.assertAllIs(actual, expected)
            # Fake the flag check with hasFlags as predicate
            actual = self.core.iterBlocks(predicate=lambda b: b.hasFlags(spec))
            self.assertAllIs(actual, expected)

    def test_traverseBlocksWithPredicate(self):
        """Test the ability to traverse blocks that meet some criteria with a flag."""
        fuelBlocks: list[Block] = []
        for a in self.core:
            fuelBlocks.extend(filter(lambda b: b.hasFlags(Flags.FUEL), a))
        # Make some contrived condition to exclude some blocks
        meanElevation = sum(b.p.z for b in fuelBlocks) / len(fuelBlocks)
        checker = lambda b: b.p.z >= meanElevation
        expected = list(filter(checker, fuelBlocks))
        actual = self.core.iterBlocks(Flags.FUEL, predicate=checker)
        self.assertAllIs(actual, expected)

    @patch("armi.nuclearDataIO.getExpectedISOTXSFileName")
    def test_lib(self, mockFileName):
        """Core cross-section library is lazily loaded from the expected ISOTXS path."""
        # the default case will look something like this
        mockFileName.return_value = "ISOTXS-c0n0"
        self.assertIsNone(self.core.lib)
        self.assertFalse(self.core.hasLib())
        # we can inject some mock data, and retrieve it
        mockFileName.return_value = ISOAA_PATH
        self.assertTrue(isinstance(self.core.lib, IsotxsLibrary))
        self.assertTrue(self.core.hasLib())

    def test_getAssembliesInRing(self):
        """Ring 0 is empty; ring 1 holds exactly the central assembly."""
        assems = self.core.getAssembliesInRing(0)
        self.assertEqual(len(assems), 0)
        assems = self.core.getAssembliesInRing(1)
        self.assertEqual(len(assems), 1)
        self.assertIsInstance(assems[0], HexAssembly)

    def test_getAssembliesInSquareOrHexRing(self):
        assems = self.core.getAssembliesInSquareOrHexRing(0)
        self.assertEqual(len(assems), 0)
        assems = self.core.getAssembliesInSquareOrHexRing(1)
        self.assertEqual(len(assems), 1)
        self.assertIsInstance(assems[0], HexAssembly)

    def test_getAssembliesInCircularRing(self):
        assems = self.core.getAssembliesInCircularRing(0)
        self.assertEqual(len(assems), 0)
        assems = self.core.getAssembliesInCircularRing(1)
        self.assertEqual(len(assems), 5)
        self.assertIsInstance(assems[0], HexAssembly)

    def test_getBlockByName(self):
        """Looking up a block by name raises KeyError for unknown names."""
        with self.assertRaises(KeyError):
            self.core.getBlockByName("badName")
        b = self.core.getBlockByName("B0004-000")
        self.assertIsInstance(b, Block)

    def test_getFirstBlock(self):
        b = self.core.getFirstBlock()
        self.assertIsInstance(b, Block)

    def test_getFirstAssembly(self):
        a = self.core.getFirstAssembly()
        self.assertIsInstance(a, HexAssembly)


================================================
FILE: armi/reactor/tests/test_excoreStructures.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Direct tests of the Excore Structures and Spent Fuel Pools."""

from unittest import TestCase
from unittest.mock import MagicMock

from armi.reactor import grids
from armi.reactor.composites import Composite
from armi.reactor.excoreStructure import ExcoreCollection, ExcoreStructure
from armi.reactor.reactors import Reactor
from armi.reactor.spentFuelPool import SpentFuelPool
from armi.reactor.tests.test_assemblies import makeTestAssembly


class TestExcoreStructure(TestCase):
    """Tests for the generic ex-core structure composite."""

    def test_constructor(self):
        """A fresh ExcoreStructure has no parent or grid; parents can be chained."""
        evst1 = ExcoreStructure("evst1")
        self.assertEqual(evst1.name, "evst1")
        self.assertIsNone(evst1.parent)
        self.assertIsNone(evst1.spatialGrid)
        evst2 = ExcoreStructure("evst2", parent=evst1)
        self.assertEqual(evst2.name, "evst2")
        self.assertEqual(evst2.parent, evst1)
        self.assertIsNone(evst2.spatialGrid)

    def test_representation(self):
        """repr() includes the class name, the structure name, and an id."""
        evst7 = ExcoreStructure("evst7")
        rep = evst7.__repr__()
        self.assertIn("ExcoreStructure", rep)
        self.assertIn("evst7", rep)
        self.assertIn("id:", rep)

    def test_parentReactor(self):
        """The .r property resolves to the parent Reactor."""
        fr = Reactor("Reactor", MagicMock())
        evst3 = ExcoreStructure("evst3", parent=fr)
        self.assertEqual(evst3.r, fr)

    def test_add(self):
        """Composites can be added to an ex-core structure at grid locations."""
        # build an ex-core structure
        ivs = ExcoreStructure("ivs")
        ivs.spatialGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0)
        # add one composite object and validate
        comp1 = Composite("thing1")
        loc = ivs.spatialGrid[(-5, -5, 0)]
        self.assertEqual(len(ivs.getChildren()), 0)
        ivs.add(comp1, loc)
        self.assertEqual(len(ivs.getChildren()), 1)
        # add another composite object and validate
        comp1 = Composite("thing2")
        loc = ivs.spatialGrid[(1, -4, 0)]
        ivs.add(comp1, loc)
        self.assertEqual(len(ivs.getChildren()), 2)


class TestSpentFuelPool(TestCase):
    """Tests for the spent fuel pool ex-core structure."""

    def setUp(self):
        self.sfp = SpentFuelPool("sfp")
        self.sfp.spatialGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0)

    def test_constructor(self):
        """Show that the spent fuel pool is a composite.

        .. test:: The spent fuel pool is a Composite structure.
            :id: T_ARMI_SFP0
            :tests: R_ARMI_SFP
        """
        self.assertEqual(self.sfp.name, "sfp")
        self.assertIsNone(self.sfp.parent)
        self.assertIsNone(self.sfp.numColumns)
        self.assertTrue(isinstance(self.sfp, Composite))
        self.assertTrue(isinstance(self.sfp, ExcoreStructure))
        self.assertTrue(isinstance(self.sfp.spatialGrid, grids.CartesianGrid))

    def test_representation(self):
        """repr() includes the class name, the pool name, and an id."""
        rep = self.sfp.__repr__()
        self.assertIn("SpentFuelPool", rep)
        self.assertIn("sfp", rep)
        self.assertIn("id:", rep)

    def test_addRemove(self):
        """Show that we can add and remove Assemblies from the spent fuel pool.

        .. test:: Show that we can add and remove Assemblies from the spent fuel pool.
            :id: T_ARMI_SFP1
            :tests: R_ARMI_SFP
        """
        self.assertEqual(len(self.sfp.getChildren()), 0)
        # add one assembly object and validate
        a0 = makeTestAssembly(1, 987, spatialGrid=self.sfp.spatialGrid)
        self.sfp.add(a0)
        self.assertEqual(len(self.sfp.getChildren()), 1)
        # add another assembly object and validate
        a1 = makeTestAssembly(1, 988, spatialGrid=self.sfp.spatialGrid)
        loc = self.sfp.spatialGrid[(1, -4, 0)]
        self.sfp.add(a1, loc)
        self.assertEqual(len(self.sfp.getChildren()), 2)
        # remove the first assembly we added and validate
        self.sfp.remove(a0)
        self.assertEqual(len(self.sfp.getChildren()), 1)

    def test_getAssembly(self):
        """Assemblies can be retrieved from the pool by name."""
        a0 = makeTestAssembly(1, 678, spatialGrid=self.sfp.spatialGrid)
        self.sfp.add(a0)
        aReturn = self.sfp.getAssembly("A0678")
        self.assertEqual(aReturn, a0)

    def test_updateNumberOfColumns(self):
        self.assertIsNone(self.sfp.numColumns)
        self.sfp._updateNumberOfColumns()
        self.assertEqual(self.sfp.numColumns, 10)

    def test_getNextLocation(self):
        """The next open location on an empty grid is the origin."""
        self.sfp._updateNumberOfColumns()
        # test against an empty grid
        loc = self.sfp._getNextLocation()
        self.assertEqual(loc._i, 0)
        self.assertEqual(loc._j, 0)
        self.assertEqual(loc._k, 0)
        # test against a non-empty grid
        a0 = makeTestAssembly(1, 234, spatialGrid=self.sfp.spatialGrid)
        self.sfp.add(a0)

    def test_normalizeNames(self):
        """normalizeNames returns the running count of renamed assemblies."""
        # test against an empty grid
        self.assertEqual(self.sfp.normalizeNames(), 0)
        self.assertEqual(self.sfp.normalizeNames(17), 17)
        # test against a non-empty grid
        a0 = makeTestAssembly(1, 456, spatialGrid=self.sfp.spatialGrid)
        self.sfp.add(a0)
        self.assertEqual(self.sfp.normalizeNames(), 1)
        self.assertEqual(self.sfp.normalizeNames(17), 18)


class TestExcoreCollection(TestCase):
    """The ExcoreCollection supports both dict-style and attribute-style access."""

    def test_addLikeDict(self):
        sfp = SpentFuelPool("sfp")
        excore = ExcoreCollection()
        excore["sfp"] = sfp
        self.assertTrue(isinstance(excore["sfp"], SpentFuelPool))
        self.assertTrue(isinstance(excore.sfp, SpentFuelPool))

    def test_addLikeAttribute(self):
        ivs = ExcoreStructure("ivs")
        excore = ExcoreCollection()
        excore.ivs = ivs
        self.assertTrue(isinstance(excore["ivs"], ExcoreStructure))
        self.assertTrue(isinstance(excore.ivs, ExcoreStructure))


================================================
FILE: armi/reactor/tests/test_flags.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flags."""

import pickle
import unittest

from armi.reactor import flags


class TestFlags(unittest.TestCase):
    """Tests for flags system."""

    def test_fromString(self):
        """The forgiving parser maps unknown strings to the empty flag."""
        self._help_fromString(flags.Flags.fromStringIgnoreErrors)
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("invalid"), flags.Flags(0))

    def test_fromStringWithNumbers(self):
        """Trailing numbers are ignored when converting names to flags."""
        # testing pure numbers
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("1"), flags.Flags(0))
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("7"), flags.Flags(0))
        # testing fuel naming logic
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("Fuel1"), flags.Flags.FUEL)
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("Fuel123"), flags.Flags.FUEL)
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("fuel 1"), flags.Flags.FUEL)
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("fuel 123"), flags.Flags.FUEL)

    def test_flagsDefinedWithNumbers(self):
        """Test that if we DEFINE flags with numbers in them, those are treated as exceptions."""
        # define flags TYPE1 and TYPE1B (arbitrary example)
        flags.Flags.extend({"TYPE1": flags.auto(), "TYPE1B": flags.auto()})
        # verify that these flags are correctly found
        self.assertEqual(flags.Flags["TYPE1"], flags.Flags.TYPE1)
        self.assertEqual(flags.Flags["TYPE1B"], flags.Flags.TYPE1B)
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("type1"), flags.Flags.TYPE1)
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("Type1b"), flags.Flags.TYPE1B)
        # the more complicated situation where our exceptions are mixed with the usual flag logic
        self.assertEqual(flags.Flags.fromString("type1 fuel"), flags.Flags.TYPE1 | flags.Flags.FUEL)
        self.assertEqual(
            flags.Flags.fromString("type1 fuel 123 bond"),
            flags.Flags.TYPE1 | flags.Flags.FUEL | flags.Flags.BOND,
        )
        self.assertEqual(
            flags.Flags.fromString("type1 fuel123 bond"),
            flags.Flags.TYPE1 | flags.Flags.FUEL | flags.Flags.BOND,
        )

    def test_flagsToAndFromString(self):
        """
        Convert flag to and from string for serialization.

        .. test:: Convert flag to a string.
            :id: T_ARMI_FLAG_TO_STR
            :tests: R_ARMI_FLAG_TO_STR
        """
        f = flags.Flags.FUEL
        self.assertEqual(flags.Flags.toString(f), "FUEL")
        self.assertEqual(f, flags.Flags.fromString("FUEL"))

    def test_toStringAlphabetical(self):
        """Ensure that, for multiple flags, toString() returns them in alphabetical order."""
        flagz = flags.Flags.AXIAL | flags.Flags.LOWER
        self.assertEqual(flags.Flags.toString(flagz), "AXIAL LOWER")
        flagz = flags.Flags.LOWER | flags.Flags.AXIAL
        self.assertEqual(flags.Flags.toString(flagz), "AXIAL LOWER")

    def test_fromStringStrict(self):
        """The strict parser raises InvalidFlagsError on any unknown token."""
        self._help_fromString(flags.Flags.fromString)
        with self.assertRaises(flags.InvalidFlagsError):
            flags.Flags.fromString("invalid")
        with self.assertRaises(flags.InvalidFlagsError):
            flags.Flags.fromString("fuel invalid")

    def _help_fromString(self, method):
        """Shared assertions that hold for both strict and forgiving parsers."""
        self.assertEqual(method("bond"), flags.Flags.BOND)
        self.assertEqual(method("bond1"), flags.Flags.BOND)
        self.assertEqual(method("bond 2"), flags.Flags.BOND)
        self.assertEqual(method("fuel test"), flags.Flags.FUEL | flags.Flags.TEST)
        # test the more strict GRID conversion, which can cause collisions with GRID_PLATE
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("grid_plate"), flags.Flags.GRID_PLATE)
        # test that "nozzle" is not consumed in the conversion, leaving behind "inlet_"
        # and leading to an error. Interesting thing here is that if the IgnoreErrors
        # variant is used, this works out fine since the "inlet_" is ignored and
        # "nozzle" -> INLET_NOZZLE.
        self.assertEqual(flags.Flags.fromString("inlet_nozzle"), flags.Flags.INLET_NOZZLE)

    def test_lookup(self):
        """Make sure lookup table is working."""
        self.assertEqual(flags.Flags.fromString("GAP1"), flags.Flags.GAP | flags.Flags.A)
        self.assertEqual(flags.Flags.fromString("handLing sOcket"), flags.Flags.HANDLING_SOCKET)
        # order in CONVERSIONS can matter for multi word flags.
        # tests that order is good.
        for conv, flag in flags._CONVERSIONS.items():
            # the conversions are specified as RE patterns, so we need to do a little
            # work to get them into something that can serve as candidate input (i.e. a
            # string that the pattern would match). Since we are only using \b and \s+,
            # this is pretty straightforward. If any more complicated patterns work
            # their way in there, this will need to become more sophisticated. One might
            # be tempted to bake the plain-text versions of the conversions in the
            # collection in the flags module, but this is pretty much only needed for
            # testing, so that wouldn't be appropriate.
            exampleInput = conv.pattern.replace(r"\b", "")
            exampleInput = exampleInput.replace(r"\s+", " ")
            self.assertEqual(flags.Flags.fromString(exampleInput), flag)

    def test_convertsStringsWithNonFlags(self):
        # Useful for verifying block / assembly names convert to Flags.
        self.assertEqual(flags.Flags.fromStringIgnoreErrors("banana bond banana"), flags.Flags.BOND)
        self.assertEqual(
            flags.Flags.fromStringIgnoreErrors("banana socket"),
            flags.Flags.HANDLING_SOCKET,
        )
        self.assertEqual(
            flags.Flags.fromStringIgnoreErrors("grid plate banana"),
            flags.Flags.GRID_PLATE,
        )
        self.assertEqual(
            flags.Flags.fromStringIgnoreErrors("handling socket socket"),
            flags.Flags.HANDLING_SOCKET,
        )

    def test_defaultIsFalse(self):
        """The empty flag is falsy."""
        self.assertFalse(flags.Flags(0))

    def test_isPickleable(self):
        """Must be pickleable to use syncMpiState."""
        stream = pickle.dumps(flags.Flags.BOND | flags.Flags.A)
        flag = pickle.loads(stream)
        self.assertEqual(flag, flags.Flags.BOND | flags.Flags.A)


================================================
FILE: armi/reactor/tests/test_geometry.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the geometry (loading input) file.""" import unittest from armi.reactor import geometry class TestGeomType(unittest.TestCase): def test_fromStr(self): # note the bonkers case and extra whitespace to exercise the canonicalization self.assertEqual(geometry.GeomType.fromStr("HeX"), geometry.GeomType.HEX) self.assertEqual(geometry.GeomType.fromStr("cARTESIAN"), geometry.GeomType.CARTESIAN) self.assertEqual(geometry.GeomType.fromStr(" thetaRZ"), geometry.GeomType.RZT) self.assertEqual(geometry.GeomType.fromStr("rZ "), geometry.GeomType.RZ) with self.assertRaises(ValueError): geometry.GeomType.fromStr("what even is this?") def test_label(self): gt = geometry.GeomType.fromStr("hex") self.assertEqual(gt.label, "Hexagonal") gt = geometry.GeomType.fromStr("cartesian") self.assertEqual(gt.label, "Cartesian") gt = geometry.GeomType.fromStr("rz") self.assertEqual(gt.label, "R-Z") gt = geometry.GeomType.fromStr("thetarz") self.assertEqual(gt.label, "R-Z-Theta") def test_str(self): for geom in {geometry.HEX, geometry.CARTESIAN, geometry.RZ, geometry.RZT}: self.assertEqual(str(geometry.GeomType.fromStr(geom)), geom) class TestSymmetryType(unittest.TestCase): def test_fromStr(self): # note the bonkers case and extra whitespace to exercise the canonicalization self.assertEqual( geometry.SymmetryType.fromStr("thiRd periodic ").domain, geometry.DomainType.THIRD_CORE, ) st = geometry.SymmetryType.fromStr("sixteenth reflective") self.assertEqual(st.boundary, geometry.BoundaryType.REFLECTIVE) self.assertEqual(str(st), "sixteenth reflective") with self.assertRaises(ValueError): geometry.SymmetryType.fromStr("what even is this?") def test_fromAny(self): st = geometry.SymmetryType.fromAny("eighth reflective through center assembly") self.assertTrue(st.isThroughCenterAssembly) self.assertEqual(st.domain, geometry.DomainType.EIGHTH_CORE) self.assertEqual(st.boundary, geometry.BoundaryType.REFLECTIVE) st = geometry.SymmetryType(geometry.DomainType.EIGHTH_CORE, 
geometry.BoundaryType.REFLECTIVE, True) self.assertTrue(st.isThroughCenterAssembly) self.assertEqual(st.domain, geometry.DomainType.EIGHTH_CORE) self.assertEqual(st.boundary, geometry.BoundaryType.REFLECTIVE) newST = geometry.SymmetryType.fromAny(st) self.assertTrue(newST.isThroughCenterAssembly) self.assertEqual(newST.domain, geometry.DomainType.EIGHTH_CORE) self.assertEqual(newST.boundary, geometry.BoundaryType.REFLECTIVE) def test_baseConstructor(self): self.assertEqual( geometry.SymmetryType(geometry.DomainType.SIXTEENTH_CORE, geometry.BoundaryType.REFLECTIVE).domain, geometry.DomainType.SIXTEENTH_CORE, ) self.assertEqual( str(geometry.SymmetryType(geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY).boundary), "", ) def test_label(self): st = geometry.SymmetryType(geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY) self.assertEqual(st.domain.label, "Full") self.assertEqual(st.boundary.label, "No Symmetry") st = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC) self.assertEqual(st.domain.label, "Third") self.assertEqual(st.boundary.label, "Periodic") st = geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.REFLECTIVE) self.assertEqual(st.domain.label, "Quarter") self.assertEqual(st.boundary.label, "Reflective") st = geometry.SymmetryType(geometry.DomainType.EIGHTH_CORE, geometry.BoundaryType.REFLECTIVE) self.assertEqual(st.domain.label, "Eighth") st = geometry.SymmetryType(geometry.DomainType.SIXTEENTH_CORE, geometry.BoundaryType.REFLECTIVE) self.assertEqual(st.domain.label, "Sixteenth") def test_SymmetryFactor(self): st = geometry.SymmetryType(geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY) self.assertEqual(st.symmetryFactor(), 1.0) st = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC) self.assertEqual(st.symmetryFactor(), 3.0) st = geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, 
geometry.BoundaryType.REFLECTIVE) self.assertEqual(st.symmetryFactor(), 4.0) st = geometry.SymmetryType(geometry.DomainType.EIGHTH_CORE, geometry.BoundaryType.REFLECTIVE) self.assertEqual(st.symmetryFactor(), 8.0) st = geometry.SymmetryType(geometry.DomainType.SIXTEENTH_CORE, geometry.BoundaryType.REFLECTIVE) self.assertEqual(st.symmetryFactor(), 16.0) def test_domainTypeNulls(self): self.assertEqual(geometry.DomainType.NULL.label, "") self.assertEqual(str(geometry.DomainType.NULL), "") with self.assertRaises(ValueError): geometry.DomainType.NULL.symmetryFactor() def test_checkValidGeomSymmetryCombo(self): geomHex = geometry.GeomType.HEX geomCart = geometry.GeomType.CARTESIAN geomRZT = geometry.GeomType.RZT geomRZ = geometry.GeomType.RZ fullCore = geometry.SymmetryType(geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY) thirdPeriodic = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC) quarterCartesian = geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.REFLECTIVE) self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomHex, thirdPeriodic)) self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomHex, fullCore)) self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomCart, quarterCartesian)) self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomRZT, quarterCartesian)) self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomRZ, fullCore)) with self.assertRaises(ValueError): _ = geometry.SymmetryType( geometry.DomainType.THIRD_CORE, geometry.BoundaryType.REFLECTIVE, False, ) with self.assertRaises(ValueError): geometry.checkValidGeomSymmetryCombo(geomHex, quarterCartesian) with self.assertRaises(ValueError): geometry.checkValidGeomSymmetryCombo(geomCart, thirdPeriodic) ================================================ FILE: armi/reactor/tests/test_hexBlockRotate.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, 
Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the ability to rotate a hexagonal block.""" import copy import math import unittest import numpy as np from armi.reactor.blocks import HexBlock from armi.reactor.components import Component from armi.reactor.grids import ( CoordinateLocation, HexGrid, IndexLocation, MultiIndexLocation, ) from armi.reactor.tests.test_blocks import NUM_PINS_IN_TEST_BLOCK, loadTestBlock from armi.utils import iterables class HexBlockRotateTests(unittest.TestCase): """Tests for various rotation aspects of a hex block.""" BOUNDARY_PARAMS = [ "cornerFastFlux", "pointsCornerDpa", "pointsCornerDpaRate", "pointsCornerFastFluxFr", "pointsEdgeDpa", "pointsEdgeDpaRate", "pointsEdgeFastFluxFr", "THedgeTemp", "THcornTemp", ] BOUNDARY_DATA = np.arange(6, dtype=float) * 10 PIN_PARAMS = [ "percentBuByPin", "linPowByPin", ] PIN_DATA = np.arange(NUM_PINS_IN_TEST_BLOCK, dtype=float) def setUp(self): self.baseBlock = loadTestBlock() self._assignParamData(self.BOUNDARY_PARAMS, self.BOUNDARY_DATA) self._assignParamData(self.PIN_PARAMS, self.PIN_DATA) def _assignParamData(self, names: list[str], referenceData: np.ndarray): """Assign initial rotatable pararameter data. Make some arrays, some lists to make sure we have good coverage of usage. 
""" # Yes we're putting the variable type in the name but that's why this method exists listData = referenceData.tolist() for ix, name in enumerate(names): self.baseBlock.p[name] = referenceData if (ix % 2) else listData def test_orientationVector(self): """Test the z-value in the orientation vector matches rotation. .. test:: Demonstrate that a HexBlock can be rotated in 60 degree increments, and the resultant orientation parameter reflects the current rotation. :id: T_ARMI_ROTATE_HEX_BLOCK :tests: R_ARMI_ROTATE_HEX """ for nRotations in range(-10, 10): rotationAmount = 60 * nRotations fresh = copy.deepcopy(self.baseBlock) self.assertEqual(fresh.p.orientation[2], 0.0, msg=nRotations) fresh.rotate(math.radians(rotationAmount)) # Ensure rotation is bounded [0, 360) postRotationOrientation = fresh.p.orientation[2] self.assertTrue(0 <= postRotationOrientation < 360, msg=nRotations) # Trim off any extra rotation if beyond 360 or negative # What is the effective counter clockwise rotation? expectedOrientation = rotationAmount % 360 self.assertEqual(postRotationOrientation, expectedOrientation, msg=nRotations) def test_rotateBoundaryParameters(self): """Test that boundary parameters are correctly rotated. .. test:: Rotating a hex block updates parameters on the boundary of the hexagon. 
:id: T_ARMI_ROTATE_HEX_BOUNDARY :tests: R_ARMI_ROTATE_HEX """ # No rotation == no changes to data self._rotateAndCompareBoundaryParams(0, self.BOUNDARY_DATA) for rotNum in range(1, 6): expected = iterables.pivot(self.BOUNDARY_DATA, -rotNum) self._rotateAndCompareBoundaryParams(rotNum * 60, expected) # Six rotations of 60 degrees puts us back to the original layout self._rotateAndCompareBoundaryParams(360, self.BOUNDARY_DATA) def _rotateAndCompareBoundaryParams(self, degrees: float, expected: np.ndarray): fresh = copy.deepcopy(self.baseBlock) fresh.rotate(math.radians(degrees)) for name in self.BOUNDARY_PARAMS: data = fresh.p[name] msg = f"{name=} :: {degrees=} :: {data=}" np.testing.assert_array_equal(data, expected, err_msg=msg) def assertIndexLocationEquivalent(self, actual: IndexLocation, expected: IndexLocation): """More flexible equivalency check on index locations. Specifically focused on locations on hex grids because this file is testing things on hex blocks. Checks that 1. ``i``, ``j``, and ``k`` are equal 2. Grids are both hex grid 3. Grids have same pitch and orientation. """ self.assertEqual(actual.i, expected.i) self.assertEqual(actual.j, expected.j) self.assertEqual(actual.k, expected.k) self.assertIsInstance(actual.grid, HexGrid) self.assertIsInstance(expected.grid, HexGrid) self.assertEqual(actual.grid.cornersUp, expected.grid.cornersUp) self.assertEqual(actual.grid.pitch, expected.grid.pitch) def test_pinRotationLocations(self): """Test that pin locations are updated through rotation. .. test:: HexBlock.getPinLocations is consistent with rotation. 
:id: T_ARMI_ROTATE_HEX_PIN_LOCS :tests: R_ARMI_ROTATE_HEX """ preRotation = self.baseBlock.getPinLocations() for nRotations in range(-10, 10): degrees = 60 * nRotations fresh = copy.deepcopy(self.baseBlock) g = fresh.spatialGrid fresh.rotate(math.radians(degrees)) postRotation = fresh.getPinLocations() self.assertEqual(len(preRotation), len(postRotation)) for pre, post in zip(preRotation, postRotation): expected = g.rotateIndex(pre, nRotations) self.assertIndexLocationEquivalent(post, expected) def test_pinRotationCoordinates(self): """Test that pin coordinates are updated through rotation. .. test:: HexBlock.getPinCoordinates is consistent through rotation. :id: T_ARMI_ROTATE_HEX_PIN_COORDS :tests: R_ARMI_ROTATE_HEX """ preRotation = self.baseBlock.getPinCoordinates() # Over- and under-rotate to make sure we can handle clockwise and counter # clockwise rotations, and cases that wrap around a full rotation for degrees in range(-600, 600, 60): fresh = copy.deepcopy(self.baseBlock) rads = math.radians(degrees) fresh.rotate(rads) rotationMatrix = np.array( [ [math.cos(rads), -math.sin(rads)], [math.sin(rads), math.cos(rads)], ] ) postRotation = fresh.getPinCoordinates() self.assertEqual(len(preRotation), len(postRotation)) for pre, post in zip(preRotation, postRotation): start = pre[:2] finish = post[:2] if np.allclose(start, 0): np.testing.assert_equal(start, finish) continue expected = rotationMatrix.dot(start) np.testing.assert_allclose(expected, finish, atol=1e-8) def test_updateChildLocations(self): """Test that locations of all children are updated through rotation. .. test:: Rotating a hex block updates the spatial coordinates on contained objects. 
:id: T_ARMI_ROTATE_HEX_CHILD_LOCS :tests: R_ARMI_ROTATE_HEX """ for nRotations in range(-10, 10): fresh = copy.deepcopy(self.baseBlock) degrees = 60 * nRotations rads = math.radians(degrees) fresh.rotate(rads) for originalC, newC in zip(self.baseBlock, fresh): self._compareComponentLocationsAfterRotation(originalC, newC, nRotations, rads) def _compareComponentLocationsAfterRotation( self, original: Component, updated: Component, nRotations: int, radians: float ): if isinstance(original.spatialLocator, MultiIndexLocation): for originalLoc, newLoc in zip(original.spatialLocator, updated.spatialLocator): expected = originalLoc.grid.rotateIndex(originalLoc, nRotations) self.assertIndexLocationEquivalent(newLoc, expected) elif isinstance(original.spatialLocator, CoordinateLocation): ox, oy, oz = original.spatialLocator.getLocalCoordinates() nx, ny, nz = updated.spatialLocator.getLocalCoordinates() self.assertEqual(nz, oz, msg=f"{original=} :: {radians=}") rotationMatrix = np.array( [ [math.cos(radians), -math.sin(radians)], [math.sin(radians), math.cos(radians)], ] ) expectedX, expectedY = rotationMatrix.dot((ox, oy)) np.testing.assert_allclose((nx, ny), (expectedX, expectedY), err_msg=f"{original=} :: {radians=}") def test_pinParametersUnmodified(self): """Test that pin data are not modified through rotation. Reinforces the idea that data like ``linPowByPin[i]`` are assigned to pin ``i``, wherever it may be. Locations are defined instead by ``getPinCoordinates()[i]``. """ fresh = copy.deepcopy(self.baseBlock) fresh.rotate(math.radians(60)) for paramName in self.PIN_PARAMS: actual = fresh.p[paramName] np.testing.assert_equal(actual, self.PIN_DATA, err_msg=paramName) class EmptyBlockRotateTest(unittest.TestCase): """Rotation tests on an empty hexagonal block. Useful for enforcing rotation works on blocks without pins. 
""" def setUp(self): self.block = HexBlock("empty") def test_orientation(self): """Test the orientation parameter is updated on a rotated empty block.""" rotDegrees = 60 preRotateOrientation = self.block.p.orientation[2] self.block.rotate(math.radians(rotDegrees)) postRotationOrientation = self.block.p.orientation[2] self.assertNotEqual(preRotateOrientation, postRotationOrientation) self.assertEqual(postRotationOrientation, rotDegrees) ================================================ FILE: armi/reactor/tests/test_parameters.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for assorted Parameters tools.""" import copy import os import unittest from glob import glob from shutil import copyfile from armi.reactor import parameters from armi.reactor.reactorParameters import makeParametersReadOnly from armi.testing import loadTestReactor from armi.tests import TEST_ROOT from armi.utils.directoryChangers import TemporaryDirectoryChanger class MockComposite: def __init__(self, name): self.name = name self.p = {} class MockCompositeGrandParent(MockComposite): pass class MockCompositeParent(MockCompositeGrandParent): pass class MockCompositeChild(MockCompositeParent): pass class ParameterTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.defs = parameters.ALL_DEFINITIONS._paramDefs @classmethod def tearDownClass(cls): parameters.ALL_DEFINITIONS._paramDefs = cls.defs def setUp(self): parameters.ALL_DEFINITIONS._paramDefs = [] def test_mutableDefaultsNotSupported(self): class Mock(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: with self.assertRaises(AssertionError): pb.defParam("units", "description", "location", default=[]) with self.assertRaises(AssertionError): pb.defParam("units", "description", "location", default={}) with self.assertRaises(AssertionError): fail = pDefs.createBuilder(default=[]) with self.assertRaises(AssertionError): fail = pDefs.createBuilder(default={}) def test_writeSomeParamsToDB(self): """ This tests the ability to specify which parameters should be written to the database. It assumes that the list returned by ParameterDefinitionCollection.toWriteToDB() is used to filter for which parameters to include in the database. .. test:: Restrict parameters from DB write. :id: T_ARMI_PARAM_DB :tests: R_ARMI_PARAM_DB .. test:: Ensure that new parameters can be defined. 
:id: T_ARMI_PARAM0 :tests: R_ARMI_PARAM """ pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("write_me", "units", "description", "location", default=42) pb.defParam("and_me", "units", "description", "location", default=42) pb.defParam( "dont_write_me", "units", "description", "location", default=42, saveToDB=False, ) db_params = pDefs.toWriteToDB(32) self.assertListEqual(["write_me", "and_me"], [p.name for p in db_params]) def test_serializer_pack_unpack(self): """ This tests the ability to add a serializer to a parameter instantiation line. It assumes that if this parameter is not None, that the pack and unpack methods will be called during storage to and reading from the database. See database._writeParams for an example use of this functionality. .. test:: Custom parameter serializer :id: T_ARMI_PARAM_SERIALIZE :tests: R_ARMI_PARAM_SERIALIZE """ class TestSerializer(parameters.Serializer): @staticmethod def pack(data): array = [d + 1 for d in data] return array @staticmethod def unpack(data): array = [d - 1 for d in data] return array param = parameters.Parameter( name="myparam", units="kg", description="a param", location=None, saveToDB=True, default=[1], setter=None, categories=None, serializer=TestSerializer(), ) param.assigned = [1] packed = param.serializer.pack(param.assigned) unpacked = param.serializer.unpack(packed) self.assertEqual(packed, [2]) self.assertEqual(unpacked, [1]) def test_paramPropertyDoesNotConflict(self): class Mock(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("doodle", "units", "description", "location", default=42) with pDefs.createBuilder(MockComposite, default=0.0) as pb: pb.defParam("cheese", "kg", "pressed curds of milk", "avg") pb.defParam("fudge", "kg", "saturated chocolate", "avg", default=19) pb.defParam( "noodles", "kg", "strip, ring, or tube of pasta", "avg", default=None, ) mock1 = Mock() mock2 
= Mock() self.assertEqual(42, mock1.doodle) self.assertEqual(42, mock2.doodle) self.assertEqual(0.0, mock1.cheese) # make sure factory default is applied self.assertEqual(19, mock2.fudge) # make sure we can override the factory default self.assertEqual(None, mock2.noodles) # make sure we can override the factory default mock1.doodle = 17 self.assertEqual(17, mock1.doodle) self.assertEqual(42, mock2.doodle) def test_paramPropNoConflictNoneDefault(self): """Parameter property does not conflict with None default.""" class Mock(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("noneDefault", "units", "description", "location", default=None) mock1 = Mock() mock2 = Mock() self.assertIsNone(mock1.noneDefault) self.assertIsNone(mock2.noneDefault) mock1.noneDefault = 1.234 self.assertEqual(1.234, mock1.noneDefault) self.assertEqual(None, mock2.noneDefault) def test_getNoDefaultRaisesError(self): """Get without default raises parameter error.""" class Mock(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("noDefault", "units", "description", "location") mock = Mock() with self.assertRaises(parameters.ParameterError): print(mock.noDefault) def test_setParamWithoutSetter(self): """Attempting to set paramter without setter fails.""" class Mock(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam( "noSetter", "noSetter", "units", "description", "location", default="encapsulated", setter=None, ) mock = Mock() self.assertEqual("encapsulated", mock.noSetter) with self.assertRaises(parameters.ParameterError): mock.noSetter = False self.assertEqual("encapsulated", mock.noSetter) def test_setter(self): """Test the Parameter setter() tooling, that signifies if a Parameter has been updated. .. 
test:: Tooling that allows a Parameter to signal it needs to be updated across processes. :id: T_ARMI_PARAM_PARALLEL0 :tests: R_ARMI_PARAM_PARALLEL """ class Mock(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: def n(self, value): self._p_n = value self._p_nPlus1 = value + 1 pb.defParam("n", "units", "description", "location", setter=n) def nPlus1(self, value): self._p_nPlus1 = value self._p_n = value - 1 pb.defParam("nPlus1", "units", "description", "location", setter=nPlus1) mock = Mock() self.assertTrue(all(pd.assigned == parameters.NEVER for pd in mock.paramDefs if pd.name != "serialNum")) with self.assertRaises(parameters.ParameterError): print(mock.n) with self.assertRaises(parameters.ParameterError): print(mock.nPlus1) mock.n = 15 self.assertEqual(15, mock.n) self.assertEqual(16, mock.nPlus1) mock.nPlus1 = 22 self.assertEqual(21, mock.n) self.assertEqual(22, mock.nPlus1) self.assertTrue(all(pd.assigned != parameters.NEVER for pd in mock.paramDefs)) def test_setterGetterBasics(self): """Test the Parameter setter/getter tooling, through the lifecycle of a Parameter being updated. .. test:: Tooling that allows a Parameter to signal it needs to be updated across processes. 
:id: T_ARMI_PARAM_PARALLEL1 :tests: R_ARMI_PARAM_PARALLEL """ class Mock(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: def n(self, value): self._p_n = value self._p_nPlus1 = value + 1 pb.defParam("n", "units", "description", "location", setter=n) def nPlus1(self, value): self._p_nPlus1 = value self._p_n = value - 1 pb.defParam("nPlus1", "units", "description", "location", setter=nPlus1) mock = Mock() mock.n = 15 mock.nPlus1 = 22 # basic tests of setters and getters self.assertEqual(mock["n"], 21) self.assertEqual(mock["nPlus1"], 22) with self.assertRaises(parameters.exceptions.UnknownParameterError): _ = mock["fake"] with self.assertRaises(KeyError): _ = mock[123] # basic test of __delitem__ method del mock["n"] with self.assertRaises(parameters.exceptions.UnknownParameterError): _ = mock["n"] # basic tests of __in__ method self.assertNotIn("n", mock) self.assertIn("nPlus1", mock) # basic tests of __eq__ method mock2 = copy.deepcopy(mock) self.assertEqual(mock, mock) self.assertNotEqual(mock, mock2) # basic tests of get() method self.assertEqual(mock.get("nPlus1"), 22) self.assertIsNone(mock.get("fake")) self.assertEqual(mock.get("fake", default=333), 333) # basic test of values() method vals = mock.values() self.assertEqual(len(vals), 2) self.assertEqual(vals[0], 22) # basic test of update() method mock.update({"nPlus1": 100}) self.assertEqual(mock.get("nPlus1"), 100) # basic test of getSyncData() method data = mock.getSyncData() self.assertEqual(data["n"], 99) self.assertEqual(data["nPlus1"], 100) def test_cannotDefineParamWithSameName(self): with self.assertRaises(parameters.ParameterDefinitionError): class MockParamCollection(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("sameName", "units", "description 1", "location") pb.defParam("sameName", "units", "description 2", "location") _ = MockParamCollection() 
def test_paramDefinitionsCompose(self): class MockBaseParamCollection(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("base1", "units", "a param on the base collection", "avg") pb.defParam("base2", "units", "another param on the base collection", "avg") class MockDerivedACollection(MockBaseParamCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("derAp1", "units", "derived a p 1", "centroid") pb.defParam("derAp2", "units", "derived a p 2", "centroid") class MockDerivedBCollection(MockDerivedACollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("derBp", "units", "derived b param", "centroid") base = MockBaseParamCollection() derA = MockDerivedACollection() derB = MockDerivedBCollection() self.assertTrue(set(base.paramDefs._paramDefs).issubset(set(derA.paramDefs._paramDefs))) self.assertTrue(set(base.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs))) self.assertTrue(set(derA.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs))) def test_cannotDefineParamSameNameColSubclass(self): class MockPCParent(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("sameName", "units", "description 3", "location") with self.assertRaises(parameters.ParameterDefinitionError): class MockPCChild(MockPCParent): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("sameName", "units", "description 4", "location") _ = MockPCChild() # same name along a different branch from the base ParameterCollection should # be fine class MockPCUncle(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("sameName", "units", "description 5", "location") def 
test_cannotCreateAttrOnParamColSubclass(self): class MockPC(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("someParam", "units", "description", "location") _ = MockPC() def test_cannotCreateInstanceOf_NoDefault(self): with self.assertRaises(NotImplementedError): _ = parameters.NoDefault() def test_cannotCreateInstanceOf_Undefined(self): with self.assertRaises(NotImplementedError): _ = parameters.parameterDefinitions._Undefined() def test_defaultLocation(self): class MockPC(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(location=parameters.ParamLocation.AVERAGE) as pb: pb.defParam("p1", "units", "p1 description") pb.defParam("p2", "units", "p2 description", parameters.ParamLocation.TOP) pc = MockPC() self.assertEqual(pc.paramDefs["p1"].location, parameters.ParamLocation.AVERAGE) self.assertEqual(pc.paramDefs["p2"].location, parameters.ParamLocation.TOP) def test_categories(self): class MockPC0(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder() as pb: pb.defParam("p0", "units", "p0 description", "location") pc = MockPC0() self.assertEqual(pc.paramDefs.categories, set()) class MockPC(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(categories=["awesome", "stuff"]) as pb: pb.defParam("p1", "units", "p1 description", "location") pb.defParam("p2", "units", "p2 description", "location", categories=["bacon"]) with pDefs.createBuilder() as pb: pb.defParam("p3", "units", "p3 description", "location", categories=["bacon"]) pc = MockPC() self.assertEqual(pc.paramDefs.categories, set(["awesome", "stuff", "bacon"])) p1 = pc.paramDefs["p1"] p2 = pc.paramDefs["p2"] p3 = pc.paramDefs["p3"] self.assertEqual(p1.categories, set(["awesome", "stuff"])) self.assertEqual(p2.categories, set(["awesome", "stuff", "bacon"])) 
        self.assertEqual(p3.categories, set(["bacon"]))
        # every parameter must agree with its own category bookkeeping
        for p in [p1, p2, p3]:
            self._testCategoryConsistency(p)
        # and the collection-level category index must agree with the parameters
        self.assertEqual(set(pc.paramDefs.inCategory("awesome")), set([p1, p2]))
        self.assertEqual(set(pc.paramDefs.inCategory("stuff")), set([p1, p2]))
        self.assertEqual(set(pc.paramDefs.inCategory("bacon")), set([p2, p3]))

    def _testCategoryConsistency(self, p: parameters.Parameter):
        """Helper: assert a parameter reports membership in each of its own categories only."""
        for category in p.categories:
            self.assertTrue(p.hasCategory(category))
        self.assertFalse(p.hasCategory("this_shouldnot_exist"))

    def test_paramColHaveSlots(self):
        """Tests we prevent accidental creation of attributes."""
        # the base collection defines exactly this fixed set of slots
        self.assertEqual(
            set(
                [
                    "_hist",
                    "_backup",
                    "assigned",
                    "_p_serialNum",
                    "serialNum",
                    "readOnly",
                ]
            ),
            set(parameters.ParameterCollection._slots),
        )

        class MockPC(parameters.ParameterCollection):
            pass

        pc = MockPC()
        # __slots__ means arbitrary attribute assignment must fail
        with self.assertRaises(AssertionError):
            pc.whatever = 22

        # try again after using a ParameterBuilder
        class MockPC(parameters.ParameterCollection):
            pDefs = parameters.ParameterDefinitionCollection()
            # use of the ParameterBuilder creates an empty __slots__
            with pDefs.createBuilder() as pb:
                pb.defParam("p0", "units", "p0 description", "location")

        pc = MockPC()
        # each defined parameter gets a "_p_<name>" slot on the class
        self.assertIn("_p_p0", MockPC._slots)
        # Make sure we aren't making any weird copies of anything
        self.assertEqual(pc._slots, MockPC._slots)
        with self.assertRaises(AssertionError):
            pc.whatever = 33

        # only assigned parameters show up in keys(); serialNum is always present
        self.assertEqual(["serialNum"], pc.keys())
        pc.p0 = "hi"
        self.assertEqual({"p0", "serialNum"}, set(pc.keys()))

        # Also make sure that subclasses of ParameterCollection subclasses use __slots__
        class MockPCChild(MockPC):
            pDefs = parameters.ParameterDefinitionCollection()
            with pDefs.createBuilder() as pb:
                pb.defParam("p2", "foo", "bar")

        pcc = MockPCChild()
        with self.assertRaises(AssertionError):
            pcc.whatever = 33


class ParamCollectionWhere(unittest.TestCase):
    """Tests for ParameterCollection.where."""

    class ScopeParamCollection(parameters.ParameterCollection):
        # Parameters spanning different locations/categories so the filters
        # in the tests below have something to discriminate between.
        pDefs = parameters.ParameterDefinitionCollection()
        with pDefs.createBuilder() as pb:
            pb.defParam(
                name="empty",
                description="Bare",
                location=None,
                categories=None,
                units="",
            )
            pb.defParam(
                name="keff",
                description="keff",
                location=parameters.ParamLocation.VOLUME_INTEGRATED,
                categories=[parameters.Category.neutronics],
                units="",
            )
            pb.defParam(
                name="cornerFlux",
                description="corner flux",
                location=parameters.ParamLocation.CORNERS,
                categories=[
                    parameters.Category.neutronics,
                ],
                units="",
            )
            pb.defParam(
                name="edgeTemperature",
                description="edge temperature",
                location=parameters.ParamLocation.EDGES,
                categories=[parameters.Category.thermalHydraulics],
                units="",
            )

    @classmethod
    def setUpClass(cls) -> None:
        """Define a couple useful parameters with categories, locations, etc."""
        cls.pc = cls.ScopeParamCollection()

    def test_onCategory(self):
        """Test the use of Parameter.hasCategory on filtering."""
        names = {"keff", "cornerFlux"}
        for p in self.pc.where(lambda pd: pd.hasCategory(parameters.Category.neutronics)):
            self.assertTrue(p.hasCategory(parameters.Category.neutronics), msg=p)
            names.remove(p.name)
        # each expected parameter must have been yielded exactly once
        self.assertFalse(names, msg=f"{names=} should be empty!")

    def test_onLocation(self):
        """Test the use of Parameter.atLocation in filtering."""
        names = {"edgeTemperature"}
        for p in self.pc.where(lambda pd: pd.atLocation(parameters.ParamLocation.EDGES)):
            self.assertTrue(p.atLocation(parameters.ParamLocation.EDGES), msg=p)
            names.remove(p.name)
        self.assertFalse(names, msg=f"{names=} should be empty!")

    def test_complicated(self):
        """Test a multi-condition filter."""
        names = {"cornerFlux"}

        def check(p: parameters.Parameter) -> bool:
            # both location AND category must match
            return p.atLocation(parameters.ParamLocation.CORNERS) and p.hasCategory(parameters.Category.neutronics)

        for p in self.pc.where(check):
            self.assertTrue(check(p), msg=p)
            names.remove(p.name)
        self.assertFalse(names, msg=f"{names=} should be empty")


class TestMakeParametersReadOnly(unittest.TestCase):
    def test_makeParametersReadOnly(self):
        """Parameters on a reactor become immutable after makeParametersReadOnly()."""
        with TemporaryDirectoryChanger():
            # copy test reactor to local
            yamls = glob(os.path.join(TEST_ROOT, "smallestTestReactor", "*.yaml"))
            for yamlFile in yamls:
                copyfile(yamlFile, os.path.basename(yamlFile))

            # load some random test reactor
            _o, r = loadTestReactor(os.getcwd(), inputFileName="armiRunSmallest.yaml")

            # prove we can edit various params at will
            r.core.p.keff = 1.01
            b = r.core.getFirstBlock()
            b.p.power = 123.4

            makeParametersReadOnly(r)

            # now show we can no longer edit those parameters
            with self.assertRaises(RuntimeError):
                r.core.p.keff = 0.99

            with self.assertRaises(RuntimeError):
                b.p.power = 432.1


================================================ FILE: armi/reactor/tests/test_reactors.py ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing for reactors.py."""

import copy
import logging
import os
import pickle
import unittest
from math import sqrt
from unittest.mock import patch

from numpy.testing import assert_allclose, assert_equal

from armi import operators, runLog, settings, tests
from armi.materials import uZr
from armi.physics.neutronics.settings import CONF_XS_KERNEL
from armi.reactor import assemblies, blocks, geometry, grids, reactors
from armi.reactor.components import Hexagon, Rectangle
from armi.reactor.composites import Composite
from armi.reactor.converters import geometryConverters
from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger
from armi.reactor.flags import Flags
from armi.reactor.grids.hexagonal import HexGrid
from armi.reactor.spentFuelPool import SpentFuelPool
from armi.settings.fwSettings.globalSettings import (
    CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP,
    CONF_SORT_REACTOR,
)
from armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings  # noqa: F401
from armi.tests import TEST_ROOT, mockRunLogs
from armi.utils import directoryChangers

_THIS_DIR = os.path.dirname(__file__)


def buildOperatorOfEmptyHexBlocks(customSettings=None):
    """
    Build an operator w/ a reactor object with some hex assemblies and blocks, but all are empty.

    Doesn't depend on inputs and loads quickly.

    Parameters
    ----------
    customSettings : dict
        Dictionary of off-default settings to update
    """
    cs = settings.Settings()  # fetch new
    if customSettings is None:
        customSettings = {}
    customSettings["db"] = False  # stop use of database
    cs = cs.modified(newSettings=customSettings)
    r = tests.getEmptyHexReactor()
    r.core.setOptionsFromCs(cs)
    o = operators.Operator(cs)
    o.initializeInterfaces(r)

    # one assembly with one single-component fuel block
    a = assemblies.HexAssembly("fuel")
    a.spatialGrid = grids.AxialGrid.fromNCells(1)
    b = blocks.HexBlock("TestBlock")
    b.setType("fuel")
    dims = {"Tinput": 600, "Thot": 600, "op": 16.0, "ip": 1, "mult": 1}
    c = Hexagon("fuel", uZr.UZr(), **dims)
    b.add(c)
    a.add(b)
    a.spatialLocator = r.core.spatialGrid[1, 0, 0]
    o.r.core.add(a)
    o.r.sort()
    return o


def buildOperatorOfEmptyCartesianBlocks(customSettings=None):
    """
    Build an operator w/ a reactor object with some Cartesian assemblies and blocks, but all are empty.

    Doesn't depend on inputs and loads quickly.

    Parameters
    ----------
    customSettings : dict
        Off-default settings to update
    """
    cs = settings.Settings()  # fetch new
    if customSettings is None:
        customSettings = {}
    customSettings["db"] = False  # stop use of database
    cs = cs.modified(newSettings=customSettings)
    r = tests.getEmptyCartesianReactor()
    r.core.setOptionsFromCs(cs)
    o = operators.Operator(cs)
    o.initializeInterfaces(r)

    # one assembly with one single-component fuel block
    a = assemblies.CartesianAssembly("fuel")
    a.spatialGrid = grids.AxialGrid.fromNCells(1)
    b = blocks.CartesianBlock("TestBlock")
    b.setType("fuel")
    dims = {
        "Tinput": 600,
        "Thot": 600,
        "widthOuter": 16.0,
        "lengthOuter": 10.0,
        "widthInner": 1,
        "lengthInner": 1,
        "mult": 1,
    }
    c = Rectangle("fuel", uZr.UZr(), **dims)
    b.add(c)
    a.add(b)
    a.spatialLocator = r.core.spatialGrid[1, 0, 0]
    o.r.core.add(a)
    o.r.sort()
    return o


class ReactorTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Prepare the input files. This is important so the unit tests run from wherever they need to run from.
        cls.td = directoryChangers.TemporaryDirectoryChanger()
        cls.td.__enter__()

    @classmethod
    def tearDownClass(cls):
        cls.td.__exit__(None, None, None)


class HexReactorTests(ReactorTests):
    """
    This is meant to pair with the ``HexReactorReadOnlyTests`` unit test class.

    The tests in this class all modify the Reactor object, so we need to create a new test reactor
    for each test.
    """

    def setUp(self):
        """Load a fresh third-core hex test reactor; track removed assemblies in the SFP."""
        self.o, self.r = loadTestReactor(
            inputFilePath=TESTING_ROOT,
            inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml",
            customSettings={"trackAssems": True},
        )
        self.r.excore["sfp"].spatialGrid = HexGrid(unitSteps=((2, 0, 0), (0, 3, 0), (0, 0, 0)))

    def test_getAssembliesInCircularRing(self):
        """Count assemblies per circular ring."""
        expectedAssemsInRing = [5, 2]
        actualAssemsInRing = []
        for ring in range(1, self.r.core.getNumRings()):
            actualAssemsInRing.append(len(self.r.core.getAssembliesInCircularRing(ring)))
        self.assertSequenceEqual(actualAssemsInRing, expectedAssemsInRing)

    def test_getAssembliesInHexRing(self):
        """Count assemblies per hex ring."""
        expectedAssemsInRing = [1, 2, 4]
        actualAssemsInRing = []
        for ring in range(1, self.r.core.getNumRings() + 1):
            actualAssemsInRing.append(len(self.r.core.getAssembliesInSquareOrHexRing(ring)))
        self.assertSequenceEqual(actualAssemsInRing, expectedAssemsInRing)

    def test_factorySortSetting(self):
        """Create a core object from an input yaml."""
        # get a sorted Reactor (the default)
        cs = settings.Settings(fName=os.path.join(TEST_ROOT, "armiRun.yaml"))
        r0 = reactors.loadFromCs(cs)

        # get an unsorted Reactor (for whatever reason)
        customSettings = {CONF_SORT_REACTOR: False}
        cs = cs.modified(newSettings=customSettings)
        r1 = reactors.loadFromCs(cs)

        # the reactor / core should be the same size
        self.assertEqual(len(r0), len(r1))
        self.assertEqual(len(r0.core), len(r1.core))

        # the reactor / core should be in a different order
        a0 = [a.name for a in r0.core]
        a1 = [a.name for a in r1.core]
        self.assertNotEqual(a0, a1)

        # The reactor object is a Composite
        self.assertTrue(isinstance(r0.core, Composite))

    def test_getSetParameters(self):
        """
        This test works through multiple levels of the data model hierarchy to test ability to
        modify parameters.

        .. test:: Parameters are accessible throughout the armi tree.
            :id: T_ARMI_PARAM1
            :tests: R_ARMI_PARAM

        .. test:: Ensure there is a setting for total core power.
            :id: T_ARMI_SETTINGS_POWER0
            :tests: R_ARMI_SETTINGS_POWER
        """
        # Test at reactor level
        self.assertEqual(self.r.p.cycle, 0)
        self.assertEqual(self.r.p.availabilityFactor, 1.0)

        # Test at core level
        core = self.r.core
        self.assertGreater(core.p.power, -1)
        core.p.power = 123
        self.assertEqual(core.p.power, 123)

        # Test at assembly level
        assembly = core.getFirstAssembly()
        self.assertGreater(assembly.p.crRodLength, -1)
        assembly.p.crRodLength = 234
        self.assertEqual(assembly.p.crRodLength, 234)

        # Test at block level
        block = core.getFirstBlock()
        self.assertIsNone(block.p.mgFlux)
        block.p.mgFlux = 57
        self.assertEqual(block.p.mgFlux, 57)

        # Test at component level
        component = block[0]
        self.assertEqual(component.p.temperatureInC, 450.0)

    def test_sortChildren(self):
        """Core iteration order matches the sorted children list."""
        self.assertEqual(next(self.r.core.__iter__()), self.r.core[0])
        self.assertEqual(self.r.core._children, sorted(self.r.core._children))

    def test_sortAssemByRing(self):
        """Demonstrate ring/pos sorting."""
        self.r.core.sortAssemsByRing()
        self.assertEqual((1, 1), self.r.core[0].spatialLocator.getRingPos())
        currentRing = -1
        currentPos = -1
        for a in self.r.core:
            ring, pos = a.spatialLocator.getRingPos()
            self.assertGreaterEqual(ring, currentRing)
            if ring > currentRing:
                # NOTE(review): this looks inverted — `currentRing = ring` was
                # probably intended. As written, currentRing stays -1, the branch
                # is taken every iteration, and the position-monotonicity check
                # below is vacuous. Verify and fix separately.
                ring = currentRing
                currentPos = -1
            self.assertGreater(pos, currentPos)
            currentPos = pos

    def test_growToFullCore(self):
        """Third-core to full-core growth yields unique assemblies/blocks and 1x power multiplier."""
        nAssemThird = len(self.r.core)
        self.assertEqual(self.r.core.powerMultiplier, 3.0)
        self.assertFalse(self.r.core.isFullCore)
        self.r.core.growToFullCore(self.o.cs)
        aNums = []
        for a in self.r.core:
            self.assertNotIn(a.getNum(), aNums)
            aNums.append(a.getNum())

        bNames = [b.getName() for b in self.r.core.iterBlocks()]
        for bName in bNames:
            self.assertEqual(bNames.count(bName), 1)
        self.assertEqual(self.r.core.powerMultiplier, 1.0)
        self.assertTrue(self.r.core.isFullCore)
        nAssemFull = len(self.r.core)
        # center assembly is not tripled when unfolding a third core
        self.assertEqual(nAssemFull, (nAssemThird - 1) * 3 + 1)

    def test_genBlocksByLocName(self):
        """Blocks are indexed by their ring-pos-axial location name."""
        self.r.core.genBlocksByLocName()
        self.assertGreater(len(self.r.core.blocksByLocName), 20)
        self.assertIn("003-002-002", self.r.core.blocksByLocName)

    def test_setPitchUniform(self):
        """Changing assembly pitch conserves heavy-metal/solid mass; only liquid mass changes."""
        # 1. Original reactor state
        originalPitch = 16.142
        hmMassBefore = 0.0
        solidMassBefore = 0.0
        liquidMassBefore = 0.0
        for b in self.r.core.iterBlocks():
            self.assertEqual(b.getPitch(), originalPitch)
            for c in b:
                hmMassBefore += c.getHMMass()
                for comp in c:
                    if comp.containsSolidMaterial():
                        solidMassBefore += comp.getMass()
                    else:
                        liquidMassBefore += comp.getMass()

        # 2. decrease pitch size
        hmMassAfter = 0.0
        solidMassAfter = 0.0
        liquidMassAfter = 0.0
        self.r.core.setPitchUniform(4.0)
        for b in self.r.core.iterBlocks():
            # verify pitch has correctly reduced
            self.assertEqual(b.getPitch(), 4.0)
            for c in b:
                hmMassAfter += c.getHMMass()
                for comp in c:
                    if comp.containsSolidMaterial():
                        solidMassAfter += comp.getMass()
                    else:
                        liquidMassAfter += comp.getMass()

        # verify HM mass has not changed
        self.assertAlmostEqual(hmMassBefore, hmMassAfter, delta=1e-8)
        # check that solid masses and liquid masses return to the normal state
        self.assertAlmostEqual(solidMassBefore, solidMassAfter, delta=1e-8)
        # smaller pitch means less coolant volume, hence less liquid mass
        self.assertLessEqual(liquidMassAfter, liquidMassBefore)

        # 3. increase pitch size back to original
        hmMassFinal = 0.0
        solidMassFinal = 0.0
        liquidMassFinal = 0.0
        self.r.core.setPitchUniform(originalPitch)
        for b in self.r.core.iterBlocks():
            # verify pitch has correctly reduced
            self.assertEqual(b.getPitch(), originalPitch)
            for c in b:
                hmMassFinal += c.getHMMass()
                for comp in c:
                    if comp.containsSolidMaterial():
                        solidMassFinal += comp.getMass()
                    else:
                        liquidMassFinal += comp.getMass()

        # verify HM mass goes back to original
        self.assertAlmostEqual(hmMassBefore, hmMassFinal, delta=1e-8)
        # check that solid masses and liquid masses return to original
        self.assertAlmostEqual(solidMassBefore, solidMassFinal, delta=1e-8)
        self.assertAlmostEqual(liquidMassBefore, liquidMassFinal)

    def test_normalizeNames(self):
        """Renumbering assembly names is correct and idempotent."""
        # these are the correct, normalized names
        numAssems = 7
        a = self.r.core.getFirstAssembly()
        correctNames = [a.makeNameFromAssemNum(n) for n in range(numAssems)]

        # validate the reactor is what we think now
        self.assertEqual(len(self.r.core), numAssems)
        currentNames = [a.getName() for a in self.r.core]
        self.assertNotEqual(correctNames, currentNames)

        # validate that we can normalize the names correctly once
        self.r.normalizeNames()
        currentNames = [a.getName() for a in self.r.core]
        self.assertEqual(correctNames, currentNames)

        # validate that repeated applications of this method are stable
        for _ in range(3):
            self.r.normalizeNames()
            currentNames = [a.getName() for a in self.r.core]
            self.assertEqual(correctNames, currentNames)

    def test_setB10VolOnCreation(self):
        """Test the setting of b.p.initialB10ComponentVol."""
        for controlBlock in self.r.core.iterBlocks(Flags.CONTROL):
            controlComps = [c for c in controlBlock if c.getNumberDensity("B10") > 0]
            self.assertEqual(len(controlComps), 1)
            controlComp = controlComps[0]
            startingVol = controlBlock.p.initialB10ComponentVol
            self.assertGreater(startingVol, 0)
            # initial volume = cold area * block height
            self.assertAlmostEqual(controlComp.getArea(cold=True) * controlBlock.getHeight(), startingVol)

            # input temp is same as hot temp, so change input temp
            # to test that behavior
            controlComp.inputTemperatureInC = 30
            # somewhat non-sensical since its hot, not cold but we just want to check the ratio
            controlBlock.setB10VolParam(True)
            self.assertGreater(startingVol, controlBlock.p.initialB10ComponentVol)
            self.assertAlmostEqual(
                startingVol / controlComp.getThermalExpansionFactor(),
                controlBlock.p.initialB10ComponentVol,
            )

    def test_getReactor(self):
        """The Core object can return its Reactor parent; test that getter."""
        self.assertTrue(isinstance(self.r.core.r, reactors.Reactor))
        # detached core has no reactor parent
        self.r.core.parent = None
        self.assertIsNone(self.r.core.r)

    def test_addMoreNodes(self):
        """addMoreNodes fills in axial mesh points; returns (newMesh, meshWasAlreadyGood)."""
        originalMesh = self.r.core.p.axialMesh
        bigMesh = list(originalMesh)
        bigMesh[2] = 30.0
        smallMesh = originalMesh[0:2] + [40.0, 47.0] + originalMesh[2:]
        newMesh1, originalMeshGood = self.r.core.addMoreNodes(originalMesh)
        newMesh2, bigMeshGood = self.r.core.addMoreNodes(bigMesh)
        newMesh3, smallMeshGood = self.r.core.addMoreNodes(smallMesh)
        expectedMesh = [0.0, 15.0, 25.16, 35.32, 59.2125, 83.105, 106.9975, 130.89, 154.7825, 178.675, 202.5675, 226.46]
        expectedBigMesh = [
            0.0,
            15.0,
            30.0,
            35.32,
            59.2125,
            83.105,
            106.9975,
            130.89,
            154.7825,
            178.675,
            202.5675,
            226.46,
        ]
        expectedSmallMesh = [
            0.0,
            15.0,
            25.16,
            35.32,
            40.0,
            43.724,
            47.0,
            59.2125,
            83.105,
            106.9975,
            130.89,
            154.7825,
            178.675,
            202.5675,
            226.46,
        ]
        self.assertListEqual(expectedMesh, newMesh1)
        self.assertListEqual(expectedBigMesh, newMesh2)
        for i in range(len(expectedSmallMesh)):
            self.assertAlmostEqual(expectedSmallMesh[i], newMesh3[i], delta=1e-8)
        self.assertTrue(originalMeshGood)
        self.assertTrue(bigMeshGood)
        # the too-fine mesh required the insertion of new points
        self.assertFalse(smallMeshGood)

    def test_restoreReactor(self):
        """Restore a reactor after growing it from third to full core.

        .. test:: Convert a third-core to a full-core geometry and then restore it.
            :id: T_ARMI_THIRD_TO_FULL_CORE1
            :tests: R_ARMI_THIRD_TO_FULL_CORE
        """
        numOfAssembliesOneThird = len(self.r.core)
        self.assertFalse(self.r.core.isFullCore)
        self.assertEqual(
            self.r.core.symmetry,
            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),
        )

        # grow to full core
        converter = self.r.core.growToFullCore(self.o.cs)
        self.assertTrue(self.r.core.isFullCore)
        self.assertGreater(len(self.r.core), numOfAssembliesOneThird)
        self.assertEqual(self.r.core.symmetry.domain, geometry.DomainType.FULL_CORE)

        # restore back to 1/3 core
        converter.restorePreviousGeometry(self.r)
        self.assertEqual(numOfAssembliesOneThird, len(self.r.core))
        self.assertEqual(
            self.r.core.symmetry,
            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),
        )
        self.assertFalse(self.r.core.isFullCore)
        self.assertEqual(numOfAssembliesOneThird, len(self.r.core))
        self.assertEqual(
            self.r.core.symmetry,
            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),
        )

    def test_saveAllFlux(self):
        """saveAllFlux runs cleanly when blocks carry mgFlux/adjMgFlux data."""
        # need a lightweight library to indicate number of groups.
        class MockLib:
            numGroups = 5

        self.r.core.lib = MockLib()
        for b in self.r.core.iterBlocks():
            b.p.mgFlux = range(5)
            b.p.adjMgFlux = range(5)
        # write the flux files into a throwaway directory
        with directoryChangers.TemporaryDirectoryChanger(root=_THIS_DIR):
            self.r.core.saveAllFlux()

    def test_getFluxVector(self):
        """Flux vectors are flattened in the requested ordering (energy-major vs block-major)."""
        class MockLib:
            numGroups = 5

        self.r.core.lib = MockLib()
        for b in self.r.core.iterBlocks():
            b.p.mgFlux = range(5)
            b.p.adjMgFlux = [i + 0.1 for i in range(5)]
            b.p.extSrc = [i + 0.2 for i in range(5)]
        mgFlux = self.r.core.getFluxVector(energyOrder=1)
        adjFlux = self.r.core.getFluxVector(adjoint=True)
        srcVec = self.r.core.getFluxVector(extSrc=True)
        fluxVol = self.r.core.getFluxVector(volumeIntegrated=True)

        blocks = self.r.core.getBlocks()
        # energyOrder=1 groups by energy first; the others group by block first
        expFlux = [i for i in range(5) for _ in blocks]
        expAdjFlux = [i + 0.1 for _ in blocks for i in range(5)]
        expSrcVec = [i + 0.2 for _ in blocks for i in range(5)]
        expFluxVol = list(range(5)) * len(blocks)
        assert_allclose(expFlux, mgFlux)
        assert_allclose(expAdjFlux, adjFlux)
        assert_allclose(expSrcVec, srcVec)
        assert_allclose(expFluxVol, fluxVol)

    def test_getFuelBottomHeight(self):
        """Fuel bottom height equals the height of the first (non-fuel) block below the fuel."""
        # move fuel off the bottom: bottom block becomes "mud", second becomes fuel
        for a in self.r.core.getAssemblies(Flags.FUEL):
            if a[0].hasFlags(Flags.FUEL):
                a[0].setType("mud")
            a[1].setType("fuel")
        fuelBottomHeightRef = self.r.core.getFirstAssembly(Flags.FUEL)[0].getHeight()
        fuelBottomHeightInCm = self.r.core.getFuelBottomHeight()
        self.assertEqual(fuelBottomHeightInCm, fuelBottomHeightRef)

    def test_isPickleable(self):
        """A reactor round-trips through pickle with grids/locators re-linked, not shared."""
        loaded = pickle.loads(pickle.dumps(self.r))

        # ensure we didn't break the current reactor
        self.assertIs(self.r.core.spatialGrid.armiObject, self.r.core)

        # make sure that the loaded reactor and grid are aligned
        self.assertIs(loaded.core.spatialGrid.armiObject, loaded.core)
        self.assertTrue(all(isinstance(key, grids.LocationBase) for key in loaded.core.childrenByLocator.keys()))
        loc = loaded.core.spatialGrid[0, 0, 0]
        loaded.core.sortAssemsByRing()
        self.r.core.sortAssemsByRing()
        self.assertIs(loc.grid, loaded.core.spatialGrid)
        self.assertAlmostEqual(fuelBlock.getUraniumMassEnrich(), 0.195)

        # creation with modified enrichment on an expanded BOL assem.
        fuelComp = fuelBlock.getComponent(Flags.FUEL)
        bol = self.r.blueprints.assemblies[aOld.getType()]
        changer = AxialExpansionChanger()
        changer.performPrescribedAxialExpansion(bol, [fuelComp], [0.05])
        aNew3 = self.r.core.createAssemblyOfType(aOld.getType(), 0.195, self.o.cs)
        self.assertAlmostEqual(aNew3.getFirstBlock(Flags.FUEL).getUraniumMassEnrich(), 0.195)
        # new assembly matches the (expanded) BOL blueprint mass
        self.assertAlmostEqual(aNew3.getMass(), bol.getMass())

    def test_createAssemOfTypeExpandCore(self):
        """Test creation of new assemblies in an expanded core."""
        # change the mesh of inner blocks
        mesh = self.r.core.p.referenceBlockAxialMesh[1:]
        lastIndex = len(mesh) - 1
        mesh = [val + 5 for val in mesh]
        mesh[0] -= 5
        mesh[lastIndex] -= 5

        # expand the core
        self.r.core.p.referenceBlockAxialMesh = [0] + mesh
        for a in self.r.core:
            a.setBlockMesh(mesh)
        aType = self.r.core.getFirstAssembly(Flags.FUEL).getType()

        # demonstrate we can still create assemblies
        self.assertTrue(self.r.core.createAssemblyOfType(aType, cs=self.o.cs))

    def test_getScalarEvolution(self):
        """Scalar values stored on the core are retrievable by key."""
        self.r.core.scalarVals["fake"] = 123
        x = self.r.core.getScalarEvolution("fake")
        self.assertEqual(x, 123)

    def test_ifMissingSpatialGrid(self):
        """symmetry/geomType raise when the core has no spatial grid."""
        self.r.core.spatialGrid = None

        with self.assertRaises(ValueError):
            self.r.core.symmetry

        with self.assertRaises(ValueError):
            self.r.core.geomType

    def test_pinCoordsAllBlocks(self):
        """Make sure all blocks can get pin coords."""
        for b in self.r.core.iterBlocks():
            coords = b.getPinCoordinates()
            # only checks the call succeeds; an empty result still passes
            self.assertGreater(len(coords), -1)

    def test_updateBlockBOLHeights_DBLoad(self):
        """Test that blueprints assemblies are expanded in DB load."""
        # Perform this test by comparing blueprints assemblies from original hot height reactor
        originalAssems = sorted(a for a in self.r.blueprints.assemblies.values())
        # to the same reactor loaded with cold heights
        nonEqualParameters = ["heightBOL", "molesHmBOL", "massHmBOL"]
        equalParameters = ["smearDensity", "nHMAtBOL", "enrichmentBOL"]
        _o, coldHeightR = loadTestReactor(
            inputFilePath=TESTING_ROOT,
            inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml",
            customSettings={"inputHeightsConsideredHot": False},
        )
        coldHeightAssems = sorted(a for a in coldHeightR.blueprints.assemblies.values())
        for a, coldHeightA in zip(originalAssems, coldHeightAssems):
            if not a.hasFlags(Flags.CONTROL):
                # skip the bottom-most block in each assembly ([1:])
                for b, coldHeightB in zip(a[1:], coldHeightA[1:]):
                    for param in nonEqualParameters:
                        p, coldHeightP = b.p[param], coldHeightB.p[param]
                        if p and coldHeightP:
                            self.assertNotEqual(p, coldHeightP)
                        else:
                            self.assertAlmostEqual(p, coldHeightP)
                    for param in equalParameters:
                        p, coldHeightP = b.p[param], coldHeightB.p[param]
                        self.assertAlmostEqual(p, coldHeightP)

    def test_buildManualZones(self):
        """Zones defined in settings are built onto the core."""
        # define some manual zones in the settings
        newSettings = {}
        newSettings["zoneDefinitions"] = [
            "ring-1: 001-001",
            "ring-2: 002-001, 002-002",
            "ring-3: 003-001, 003-002, 003-003",
        ]
        cs = self.o.cs.modified(newSettings=newSettings)
        self.r.core.buildManualZones(cs)

        zonez = self.r.core.zones
        self.assertEqual(len(list(zonez)), 3)
        self.assertIn("002-001", zonez["ring-2"])
        self.assertIn("003-002", zonez["ring-3"])

    def test_buildManualZonesEmpty(self):
        """Building zones with no definitions yields an empty zone collection."""
        # ensure there are no zone definitions in the settings
        newSettings = {}
        newSettings["zoneDefinitions"] = []
        cs = self.o.cs.modified(newSettings=newSettings)

        # verify that buildZones behaves well when no zones are defined
        self.r.core.buildManualZones(cs)
        self.assertEqual(len(list(self.r.core.zones)), 0)

    def test_setPowerIfNecessary(self):
        """power is derived from powerDensity * HM mass only when power is unset."""
        self.assertAlmostEqual(self.r.core.p.power, 0)
        self.assertAlmostEqual(self.r.core.p.powerDensity, 0)

        # to start, this method shouldn't do anything
        self.r.core.setPowerIfNecessary()
        self.assertAlmostEqual(self.r.core.p.power, 0)

        # take the powerDensity when needed
        self.r.core.p.power = 0
        self.r.core.p.powerDensity = 1e9
        mass = self.r.core.getHMMass()
        self.r.core.setPowerIfNecessary()
        self.assertAlmostEqual(self.r.core.p.power, 1e9 * mass)

        # don't take the powerDensity when not needed
        self.r.core.p.power = 3e9
        self.r.core.p.powerDensity = 2e9
        self.r.core.setPowerIfNecessary()
        self.assertAlmostEqual(self.r.core.p.power, 3e9)

    def test_findAllMeshPoints(self):
        """Test findAllMeshPoints().

        .. test:: Test that the reactor can calculate its core block mesh.
            :id: T_ARMI_R_MESH
            :tests: R_ARMI_R_MESH
        """
        # lets do some basic sanity checking of the meshpoints
        x, y, z = self.r.core.findAllMeshPoints()

        # no two meshpoints should be the same, and they should all be monotonically increasing
        for xx in range(1, len(x)):
            self.assertGreater(x[xx], x[xx - 1], msg=f"x={xx}")

        for yy in range(1, len(y)):
            self.assertGreater(y[yy], y[yy - 1], msg=f"y={yy}")

        for zz in range(1, len(z)):
            self.assertGreater(z[zz], z[zz - 1], msg=f"z={zz}")

        # the z-index should start at zero (the bottom)
        self.assertEqual(z[0], 0)

        # ensure the X and Y mesh spacing is correct (for a hex core)
        pitch = self.r.core.spatialGrid.pitch
        xPitch = pitch / 2
        for xx in range(1, len(x)):
            self.assertAlmostEqual(x[xx] - x[xx - 1], xPitch, delta=0.0001)

        yPitch = sqrt(3) * pitch / 2
        for yy in range(1, len(y)):
            self.assertAlmostEqual(y[yy] - y[yy - 1], yPitch, delta=0.001)

    def test_removeAssembliesInRing(self):
        """Assemblies removed from a ring are relocated to the SFP grid."""
        aLoc = [self.r.core.spatialGrid.getLocatorFromRingAndPos(3, i + 1) for i in range(12)]
        assems = {
            i: self.r.core.childrenByLocator[loc]
            for i, loc in enumerate(aLoc)
            if loc in self.r.core.childrenByLocator
        }
        self.r.core.removeAssembliesInRing(3, self.o.cs)
        for i, a in assems.items():
            self.assertNotEqual(aLoc[i], a.spatialLocator)
            self.assertEqual(a.spatialLocator.grid, self.r.excore["sfp"].spatialGrid)

    def test_removeAssembly(self):
        """Test the removeAssembly method.

        In particular, the Settings here set trackAssems to True, so when an Assembly is removed
        from the Core, it shows up in the SFP.
        """
        a = self.r.core[-1]  # last assembly
        b = a[-1]  # use the last block in case we ever figure out stationary blocks
        aLoc = a.spatialLocator
        self.assertIsNotNone(aLoc.grid)
        bLoc = b.spatialLocator

        self.r.core.removeAssembly(a)
        self.assertNotEqual(aLoc, a.spatialLocator)

        # confirm the Assembly is now in the SFP
        self.assertEqual(a.spatialLocator.grid, self.r.excore["sfp"].spatialGrid)

        # confirm only attached to removed assem
        self.assertIs(bLoc, b.spatialLocator)  # block location does not change
        self.assertIs(a, b.parent)
        self.assertIs(a, b.spatialLocator.grid.armiObject)

    def test_removeAssembliesInRingHex(self):
        """
        Since the test reactor is hex, we need to use the overrideCircularRingMode option to remove
        assemblies from it.
        """
        self.assertEqual(self.r.core.getNumRings(), 3)
        # rings 6-9 don't exist; removal of non-existent rings must be a no-op
        for ringNum in range(6, 10):
            self.r.core.removeAssembliesInRing(ringNum, self.o.cs, overrideCircularRingMode=True)
        self.assertEqual(self.r.core.getNumRings(), 3)


class HexReactorReadOnlyTests(unittest.TestCase):
    """
    This is meant to pair with the ``HexReactorTests`` unit test class.

    The tests in this class only READ, and not WRITE to the Reactor object, so we only have to
    create one test reactor.
    """

    @classmethod
    def setUpClass(cls):
        cls.td = directoryChangers.TemporaryDirectoryChanger()
        cls.td.__enter__()
        cls.o, cls.r = loadTestReactor(
            inputFilePath=TESTING_ROOT,
            inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml",
            customSettings={"trackAssems": True},
        )

    @classmethod
    def tearDownClass(cls):
        cls.td.__exit__(None, None, None)

    def test_coreSfp(self):
        """The reactor object includes a core and an SFP.

        .. test:: The reactor object is a composite.
            :id: T_ARMI_R
            :tests: R_ARMI_R
        """
        self.assertTrue(isinstance(self.r.core, reactors.Core))
        self.assertTrue(isinstance(self.r.excore["sfp"], SpentFuelPool))

        self.assertTrue(isinstance(self.r, Composite))
        self.assertTrue(isinstance(self.r.core, Composite))
        self.assertTrue(isinstance(self.r.excore["sfp"], Composite))

    def test_getTotalParam(self):
        """Summed block params scale with the symmetry power multiplier."""
        # verify that the block params are being read.
        val = self.r.core.getTotalBlockParam("power")
        val2 = self.r.core.getTotalBlockParam("power", addSymmetricPositions=True)
        self.assertEqual(val2 / self.r.core.powerMultiplier, val)

        with self.assertRaises(ValueError):
            self.r.core.getTotalBlockParam(generationNum=1)

    def test_geomType(self):
        """The test core reports hex geometry."""
        self.assertEqual(self.r.core.geomType, geometry.GeomType.HEX)

    def test_getBlocksByIndices(self):
        """Blocks are retrievable by (i, j, k) grid indices."""
        indices = [(1, 1, 1)]
        actualBlocks = self.r.core.getBlocksByIndices(indices)
        actualNames = [b.getName() for b in actualBlocks]
        expectedNames = ["B0005-001"]
        self.assertListEqual(expectedNames, actualNames)

    def test_getAllXsSuffixes(self):
        """All cross-section suffixes in the core are found."""
        actualSuffixes = self.r.core.getAllXsSuffixes()
        expectedSuffixes = ["AA", "BA"]
        self.assertListEqual(expectedSuffixes, actualSuffixes)

    def test_countBlocksOfType(self):
        """Counting blocks by flag combinations."""
        numControlBlocks = self.r.core.countBlocksWithFlags([Flags.DUCT, Flags.CONTROL])
        self.assertEqual(numControlBlocks, 0)

        numControlBlocks = self.r.core.countBlocksWithFlags([Flags.DUCT, Flags.CONTROL, Flags.FUEL], Flags.CONTROL)
        self.assertEqual(numControlBlocks, 0)

    def test_countFuelAxialBlocks(self):
        """Tests that the users definition of fuel blocks is preserved."""
        numFuelBlocks = self.r.core.countFuelAxialBlocks()
        self.assertEqual(numFuelBlocks, 1)

    def test_getFirstFuelBlockAxialNode(self):
        """The first fuel block sits at axial node 1 in this reactor."""
        firstFuelBlock = self.r.core.getFirstFuelBlockAxialNode()
        self.assertEqual(firstFuelBlock, 1)

    def test_getMaxAssembliesInHexRing(self):
        """Max assemblies in ring 3 of a third-core hex model."""
        maxAssems = self.r.core.getMaxAssembliesInHexRing(3)
        self.assertEqual(maxAssems, 4)

    def test_getMaxNumPins(self):
        """Max pin count across all blocks."""
        numPins = self.r.core.getMaxNumPins()
        self.assertEqual(271, numPins)

    def test_findAxialMeshIndexOf(self):
        """Heights map to the correct axial mesh index, clamped at the top."""
        numMeshPoints = len(self.r.core.p.axialMesh) - 2  # -1 for typical reason, -1 more because mesh includes 0
        self.assertEqual(self.r.core.findAxialMeshIndexOf(0.0), 0)
        self.assertEqual(self.r.core.findAxialMeshIndexOf(0.1), 0)
        self.assertEqual(self.r.core.findAxialMeshIndexOf(self.r.core[0].getHeight()), numMeshPoints)
        self.assertEqual(
            self.r.core.findAxialMeshIndexOf(self.r.core[0].getHeight() - 0.1),
            numMeshPoints,
        )
        self.assertEqual(self.r.core.findAxialMeshIndexOf(self.r.core[0][0].getHeight() + 0.1), 1)

    def test_findAllAxialMeshPoints(self):
        """Without sub-mesh, the core mesh matches the fuel assembly's grid bounds."""
        mesh = self.r.core.findAllAxialMeshPoints(applySubMesh=False)

        self.assertEqual(mesh[0], 0)
        self.assertAlmostEqual(mesh[-1], self.r.core[0].getHeight())

        blockMesh = self.r.core.getFirstAssembly(Flags.FUEL).spatialGrid._bounds[2]
        assert_allclose(blockMesh, mesh)

    def test_findAxialMeshsSubmesh(self):
        """Find all axial mesh points with a provided sub-mesh."""
        referenceMesh = [
            0.0,
            15.0,
            25.16,
            35.32,
            59.2125,
            83.105,
            106.9975,
            130.89,
            154.7825,
            178.675,
            202.5675,
            226.46,
        ]
        mesh = self.r.core.findAllAxialMeshPoints(assems=[self.r.core.getFirstAssembly(Flags.FUEL)], applySubMesh=True)
        self.assertListEqual(referenceMesh, mesh)

    def test_findAllAziMeshPoints(self):
        """Azimuthal mesh points for the hex core."""
        aziPoints = self.r.core.findAllAziMeshPoints()
        expectedPoints = [-16.142, -8.071, 0.0, 8.071, 16.142, 24.213]
        assert_allclose(expectedPoints, aziPoints)

    def test_findAllRadMeshPoints(self):
        """Radial mesh points for the hex core."""
        radPoints = self.r.core.findAllRadMeshPoints()
        expectedPoints = [-13.979382, 0.0, 13.979382, 27.958764, 41.938146]
        assert_allclose(expectedPoints, radPoints)

    def test_getAssemblyPitch(self):
        """Assembly pitch matches the input geometry."""
        self.assertEqual(self.r.core.getAssemblyPitch(), 16.142)

    def test_getNumAssemsAllRingsFilled(self):
        """Basic test of getNumAssembliesWithAllRingsFilledOut."""
        nRings = self.r.core.getNumRings(indexBased=True)
        nAssmWithBlanks = self.r.core.getNumAssembliesWithAllRingsFilledOut(nRings)
        self.assertEqual(8, nAssmWithBlanks)

    @patch("armi.reactor.reactors.Core.powerMultiplier", 1)
    def test_getNumAssemsWithAllRingsBipass(self):
        """Test edge case in getNumAssembliesWithAllRingsFilledOut by bypassing some of the logic."""
        nAssems = self.r.core.getNumAssembliesWithAllRingsFilledOut(3)
        self.assertEqual(19, nAssems)

    def test_getNumEnergyGroups(self):
        # this Core doesn't have a loaded ISOTXS library, so this test is minimally useful
        with self.assertRaises(AttributeError):
            self.r.core.getNumEnergyGroups()

    def test_getMinimumPercentFluxInFuel(self):
        # there is no flux in the test reactor YET, so this test is minimally useful
        with self.assertRaises(ZeroDivisionError):
            _targetRing, _fluxFraction = self.r.core.getMinimumPercentFluxInFuel()

    def test_getAssemblyWithLoc(self):
        """
        Get assembly by location, in a couple different ways to ensure they all work.

        .. test:: Get assembly by location.
            :id: T_ARMI_R_GET_ASSEM0
            :tests: R_ARMI_R_GET_ASSEM
        """
        a0 = self.r.core.getAssemblyWithStringLocation("003-012")
        a1 = self.r.core.getAssemblyWithAssemNum(assemNum=1)
        a2 = self.r.core.getAssembly(locationString="003-012")

        self.assertEqual(a0, a2)
        self.assertEqual(a1, a2)
        self.assertEqual(a1.getLocation(), "003-012")

    def test_getAssemblyWithName(self):
        """Test getting an assembly by name.

        .. test:: Get assembly by name.
            :id: T_ARMI_R_GET_ASSEM1
            :tests: R_ARMI_R_GET_ASSEM
        """
        a1 = self.r.core.getAssemblyWithAssemNum(assemNum=1)
        a2 = self.r.core.getAssembly(assemblyName="A0001")

        self.assertEqual(a1, a2)
        self.assertEqual(a1.name, "A0001")

    def test_getDominantMaterial(self):
        """Dominant material per component type."""
        dominantDuct = self.r.core.getDominantMaterial(Flags.DUCT)
        dominantFuel = self.r.core.getDominantMaterial(Flags.FUEL)
        dominantCool = self.r.core.getDominantMaterial(Flags.COOLANT)

        self.assertEqual(dominantDuct.getName(), "HT9")
        self.assertEqual(dominantFuel.getName(), "UraniumOxide")
        self.assertEqual(dominantCool.getName(), "Sodium")

    def test_getSymmetryFactor(self):
        """
        Test getSymmetryFactor().

        .. test:: Get the core symmetry.
            :id: T_ARMI_R_SYMM
            :tests: R_ARMI_R_SYMM
        """
        for b in self.r.core.iterBlocks():
            sym = b.getSymmetryFactor()
            i, j, _ = b.spatialLocator.getCompleteIndices()
            if i == 0 and j == 0:
                # center assembly of a third core carries a 3x symmetry factor
                self.assertEqual(sym, 3.0)
            else:
                self.assertEqual(sym, 1.0)

    def test_getAssembliesOnSymmetryLine(self):
        """Assemblies on the third-core symmetry boundaries."""
        center = self.r.core.getAssembliesOnSymmetryLine(grids.BOUNDARY_CENTER)
        self.assertEqual(len(center), 1)
        upper = self.r.core.getAssembliesOnSymmetryLine(grids.BOUNDARY_120_DEGREES)
        self.assertEqual(len(upper), 0)
        lower = self.r.core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES)
        self.assertEqual(len(lower), 1)

    def test_getGridBounds(self):
        """Test getGridBounds() works on different scales.

        .. test:: Test that assembly grids nest inside core grids.
            :id: T_ARMI_GRID_NEST
            :tests: R_ARMI_GRID_NEST
        """
        (minI, maxI), (minJ, maxJ), (_minK, _maxK) = self.r.core.getBoundingIndices()
        self.assertEqual((minI, maxI), (0, 2))
        self.assertEqual((minJ, maxJ), (-1, 2))

        randomBlock = self.r.core.getFirstAssembly()
        (minI, maxI), (minJ, maxJ), (_minK, _maxK) = randomBlock.getBoundingIndices()
        self.assertEqual((minI, maxI), (2, 2))
        self.assertEqual((minJ, maxJ), (-1, -1))

    def test_locations(self):
        """Locator indices and location strings agree for an assembly and its blocks."""
        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(3, 2)
        a = self.r.core.childrenByLocator[loc]
        assert_allclose(a.spatialLocator.indices, [1, 1, 0])
        for bi, b in enumerate(a):
            assert_allclose(b.spatialLocator.getCompleteIndices(), [1, 1, bi])

        self.assertEqual(a.getLocation(), "003-002")
        self.assertEqual(a[0].getLocation(), "003-002-000")

    def test_getMass(self):
        # If these are not in agreement check on block symmetry factor being applied to volumes
        mass1 = self.r.core.getMass()
        mass2 = sum([b.getMass() for b in self.r.core.iterBlocks()])
        assert_allclose(mass1, mass2)

    def test_getNumRings(self):
        self.assertEqual(len(self.r.core.circularRingList), 0)
        self.assertEqual(self.r.core.getNumRings(indexBased=True), 3)
        self.assertEqual(self.r.core.getNumRings(indexBased=False), 3)

        self.r.core.circularRingList = {1,
2, 3} self.assertEqual(len(self.r.core.circularRingList), 3) self.assertEqual(self.r.core.getNumRings(indexBased=True), 3) self.assertEqual(self.r.core.getNumRings(indexBased=False), 3) @patch("armi.reactor.reactors.Core.getAssemblies") def test_whenNoAssemblies(self, mockGetAssemblies): """Test various edge cases when there are no assemblies.""" mockGetAssemblies.return_value = [] self.assertEqual(self.r.core.countBlocksWithFlags(Flags.FUEL), 0) self.assertEqual(self.r.core.countFuelAxialBlocks(), 0) self.assertGreater(self.r.core.getFirstFuelBlockAxialNode(), 9e9) def test_addMultipleCores(self): """Test the catch that a reactor can only have one core.""" with self.assertRaises(RuntimeError): self.r.add(self.r.core) def test_getNozzleTypes(self): nozzleTypes = self.r.core.getNozzleTypes() expectedTypes = ["Default"] for nozzle in expectedTypes: self.assertIn(nozzle, nozzleTypes) def test_getAvgTemp(self): t0 = self.r.core.getAvgTemp([Flags.CLAD, Flags.WIRE, Flags.DUCT]) self.assertAlmostEqual(t0, 450.0, delta=0.01) t1 = self.r.core.getAvgTemp([Flags.CLAD, Flags.FUEL]) self.assertAlmostEqual(t1, 450.04232366477936, delta=0.01) t2 = self.r.core.getAvgTemp([Flags.CLAD, Flags.WIRE, Flags.DUCT, Flags.FUEL]) self.assertAlmostEqual(t2, 450.02442095419906, delta=0.01) def test_getNuclideCategories(self): """Test that nuclides are categorized correctly.""" self.r.core.getNuclideCategories() self.assertIn("coolant", self.r.core._nuclideCategories) self.assertIn("structure", self.r.core._nuclideCategories) self.assertIn("fuel", self.r.core._nuclideCategories) self.assertEqual(self.r.core._nuclideCategories["coolant"], set(["NA23"])) self.assertIn("FE56", self.r.core._nuclideCategories["structure"]) self.assertIn("U235", self.r.core._nuclideCategories["fuel"]) def test_differentNuclideModels(self): self.assertEqual(self.o.cs[CONF_XS_KERNEL], "MC2v3") _o2, r2 = loadTestReactor( inputFilePath=TESTING_ROOT, 
inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", customSettings={CONF_XS_KERNEL: "MC2v2"}, ) self.assertNotEqual(set(self.r.blueprints.elementsToExpand), set(r2.blueprints.elementsToExpand)) for b2, b3 in zip(r2.core.iterBlocks(), self.r.core.iterBlocks()): for element in self.r.blueprints.elementsToExpand: # nucspec allows elemental mass to be computed mass2 = b2.getMass(element.symbol) mass3 = b3.getMass(element.symbol) assert_allclose(mass2, mass3) constituentNucs = [nn.name for nn in element.nuclides if nn.a > 0] nuclideLevelMass3 = b3.getMass(constituentNucs) assert_allclose(mass3, nuclideLevelMass3) def test_applyThermalExpanCoreConst(self): """Test that assemblies in core are correctly expanded. Notes ----- All assertions skip the first block as it has no 'Delta T' and does not expand. """ originalAssems = self.r.core.getAssemblies() # stash original axial mesh info oldRefBlockAxialMesh = self.r.core.p.referenceBlockAxialMesh oldAxialMesh = self.r.core.p.axialMesh nonEqualParameters = ["heightBOL", "molesHmBOL", "massHmBOL"] equalParameters = ["smearDensity", "nHMAtBOL", "enrichmentBOL"] o, coldHeightR = loadTestReactor( inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml", customSettings={ "inputHeightsConsideredHot": False, "assemFlagsToSkipAxialExpansion": ["feed fuel"], }, ) aToSkip = list(Flags.fromStringIgnoreErrors(t) for t in o.cs[CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP]) for i, val in enumerate(oldRefBlockAxialMesh[1:]): self.assertNotEqual(val, coldHeightR.core.p.referenceBlockAxialMesh[i]) for i, val in enumerate(oldAxialMesh[1:]): self.assertNotEqual(val, coldHeightR.core.p.axialMesh[i]) coldHeightAssems = coldHeightR.core.getAssemblies() for a, coldHeightA in zip(originalAssems, coldHeightAssems): if a.hasFlags(Flags.CONTROL) or any(a.hasFlags(aFlags) for aFlags in aToSkip): continue for b, coldHeightB in zip(a[1:], coldHeightA[1:]): for param in nonEqualParameters: p, coldHeightP = 
b.p[param], coldHeightB.p[param] if p and coldHeightP: self.assertNotEqual(p, coldHeightP, f"{param} {p} {coldHeightP}") else: self.assertAlmostEqual(p, coldHeightP) for param in equalParameters: p, coldHeightP = b.p[param], coldHeightB.p[param] self.assertAlmostEqual(p, coldHeightP) class HexReactorSoloTests(ReactorTests): """ This is meant to pair with the ``HexReactorTests`` unit test class. Each test here creates its own, slightly unique, test reactor. """ def test_nonUniformAssems(self): o, r = loadTestReactor(customSettings={"nonUniformAssemFlags": ["primary control"]}) a = o.r.core.getFirstAssembly(Flags.FUEL) self.assertTrue(all(b.p.topIndex != 0 for b in a[1:])) a = o.r.core.getFirstAssembly(Flags.PRIMARY) self.assertTrue(all(b.p.topIndex == 0 for b in a)) originalHeights = [b.p.height for b in a] differntMesh = [val + 2 for val in r.core.p.referenceBlockAxialMesh] # won't change because nonUniform assem doesn't conform to reference mesh a.setBlockMesh(differntMesh) heights = [b.p.height for b in a] self.assertEqual(originalHeights, heights) class BigHexReactorTests(ReactorTests): """ This is meant to pair with the ``HexReactorTests`` unit test class. These tests all need a larger test reactor. Ideally, we will migrate these to smaller test reactors one day. 
class BigHexReactorTests(ReactorTests):
    """
    This is meant to pair with the ``HexReactorTests`` unit test class.

    These tests all need a larger test reactor. Ideally, we will migrate these to
    smaller test reactors one day.
    """

    def setUp(self):
        self.o, self.r = loadTestReactor(inputFilePath=TEST_ROOT, customSettings={"trackAssems": True})

    def test_genAssembliesAddedThisCycle(self):
        everyAssem = self.r.core.getAssemblies()
        self.assertTrue(all(x is y for x, y in zip(everyAssem, self.r.core.genAssembliesAddedThisCycle())))

        oldAssem = self.r.core.getFirstAssembly()
        freshAssem = copy.deepcopy(oldAssem)
        freshAssem.name = None
        self.r.p.cycle = 1
        # nothing has been added yet this (new) cycle
        self.assertEqual(len(list(self.r.core.genAssembliesAddedThisCycle())), 0)

        self.r.core.removeAssembly(oldAssem)
        self.r.core.add(freshAssem)
        self.assertEqual(next(self.r.core.genAssembliesAddedThisCycle()), freshAssem)

    def test_createFreshFeed(self):
        # basic creation
        aOld = self.r.core.getFirstAssembly(Flags.FEED)
        aNew = self.r.core.createFreshFeed(cs=self.o.cs)
        self.assertAlmostEqual(aOld.getMass(), aNew.getMass())

    def test_getAssemblies(self):
        """Basic test of getAssemblies, with and without including the SFP.

        .. test:: The spent fuel pool is a Composite structure.
            :id: T_ARMI_SFP2
            :tests: R_ARMI_SFP
        """
        # where are we starting
        numCoreStart = len(self.r.core)
        numTotalStart = len(self.r.core.getAssemblies(includeSFP=True))

        # remove one assembly at a time and confirm behavior
        for i in range(1, 5):
            self.r.core.removeAssembly(self.r.core.getFirstAssembly())
            self.assertEqual(len(self.r.core), numCoreStart - i)
            # removed assemblies land in the SFP, so the total is unchanged
            self.assertEqual(len(self.r.core.getAssemblies(includeSFP=True)), numTotalStart)

    def test_findNeighbors(self):
        """
        Find neighbors of a given assembly.

        .. test:: Retrieve neighboring assemblies of a given assembly.
            :id: T_ARMI_R_FIND_NEIGHBORS
            :tests: R_ARMI_R_FIND_NEIGHBORS
        """
        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(1, 1)
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)
        locs = [n.spatialLocator.getRingPos() for n in neighbs]
        self.assertEqual(len(neighbs), 6)
        self.assertIn((2, 1), locs)
        self.assertIn((2, 2), locs)
        self.assertEqual(locs.count((2, 1)), 3)

        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(1, 1)
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)
        locs = [n.spatialLocator.getRingPos() for n in neighbs]
        self.assertEqual(locs, [(2, 1), (2, 2)] * 3, 6)

        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)
        locs = [n.spatialLocator.getRingPos() for n in neighbs]
        self.assertEqual(len(neighbs), 6)
        self.assertEqual(locs, [(3, 2), (3, 3), (3, 12), (2, 1), (1, 1), (2, 1)])

        # try with edge assemblies
        # With edges, the neighbor is the one that's actually next to it.
        converter = geometryConverters.EdgeAssemblyChanger()
        converter.addEdgeAssemblies(self.r.core)
        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)
        locs = [n.spatialLocator.getRingPos() for n in neighbs]
        self.assertEqual(len(neighbs), 6)
        # in this case no locations that aren't actually in the core should be returned
        self.assertEqual(locs, [(3, 2), (3, 3), (3, 4), (2, 1), (1, 1), (2, 1)])
        converter.removeEdgeAssemblies(self.r.core)

        # try with full core
        self.r.core.growToFullCore(self.o.cs)
        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(3, 4)
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a)
        self.assertEqual(len(neighbs), 6)
        locs = [n.spatialLocator.getRingPos() for n in neighbs]
        for expected in [(2, 2), (2, 3), (3, 3), (3, 5), (4, 5), (4, 6)]:
            self.assertIn(expected, locs)

        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a)
        locs = [n.spatialLocator.getRingPos() for n in neighbs]
        for expected in [(1, 1), (2, 1), (2, 3), (3, 2), (3, 3), (3, 4)]:
            self.assertIn(expected, locs)

        # Try the duplicate option in full core as well
        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)
        locs = [n.spatialLocator.getRingPos() for n in neighbs]
        self.assertEqual(len(neighbs), 6)
        self.assertEqual(locs, [(3, 2), (3, 3), (3, 4), (2, 3), (1, 1), (2, 1)])
class CartesianReactorTests(ReactorTests):
    """Tests for a reactor whose core uses a Cartesian (square) grid."""

    def setUp(self):
        self.o = buildOperatorOfEmptyCartesianBlocks()
        self.r = self.o.r

    def test_add(self):
        """Only blocks of matching geometry may be added to a Cartesian assembly."""
        a = self.r.core.getFirstAssembly()
        numA = len(a)
        a.add(blocks.CartesianBlock("test cart block"))
        self.assertEqual(len(a), numA + 1)

        with self.assertRaises(TypeError):
            a.add(blocks.HexBlock("test hex block"))

    def test_getAssemblyPitch(self):
        # Cartesian pitch should have 2 dims since it could be a rectangle that is not square.
        assert_equal(self.r.core.getAssemblyPitch(), [10.0, 16.0])

    def test_getAssembliesInSquareRing(self):
        """Count assemblies per square ring.

        NOTE(fix): the former signature carried an unused ``exclusions=[2]`` parameter —
        a mutable default argument that unittest never supplies; it was dead code and
        has been removed.
        """
        expectedAssemsInRing = [1, 0]
        actualAssemsInRing = []
        for ring in range(1, self.r.core.getNumRings() + 1):
            actualAssemsInRing.append(len(self.r.core.getAssembliesInSquareOrHexRing(ring)))
        self.assertSequenceEqual(actualAssemsInRing, expectedAssemsInRing)

    def test_getNuclideCategoriesLogging(self):
        """Simplest possible test of the getNuclideCategories method and its logging."""
        log = mockRunLogs.BufferLog()
        # this strange namespace-stomping is used to the test to set the logger in reactors.Core
        from armi.reactor import reactors

        reactors.runLog = runLog
        runLog.LOG = log

        # run the actual method in question
        self.r.core.getNuclideCategories()
        messages = log.getStdout()
        self.assertIn("Nuclide categorization", messages)
        self.assertIn("Structure", messages)


class CartesianReactorNeighborTests(ReactorTests):
    def setUp(self):
        self.r = loadTestReactor(TEST_ROOT, inputFileName="zpprTest.yaml")[1]

    def test_findNeighborsCartesian(self):
        """Find neighbors of a given assembly in a Cartesian grid."""
        loc = self.r.core.spatialGrid[1, 1, 0]
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a)
        locs = [tuple(n.spatialLocator.indices[:2]) for n in neighbs]
        self.assertEqual(len(neighbs), 4)
        self.assertIn((2, 1), locs)
        self.assertIn((1, 2), locs)
        self.assertIn((0, 1), locs)
        self.assertIn((1, 0), locs)

        # try with edge assembly
        loc = self.r.core.spatialGrid[0, 0, 0]
        a = self.r.core.childrenByLocator[loc]
        neighbs = self.r.core.findNeighbors(a, showBlanks=False)
        locs = [tuple(n.spatialLocator.indices[:2]) for n in neighbs]
        self.assertEqual(len(neighbs), 2)
        # in this case no locations that aren't actually in the core should be returned
        self.assertIn((1, 0), locs)
        self.assertIn((0, 1), locs)
class TestRZTReactorModern(unittest.TestCase):
    """Load and inspect the Theta-RZ (Godiva) benchmark reactor model."""

    @classmethod
    def setUpClass(cls):
        cs = settings.Settings(fName=os.path.join(TESTING_ROOT, "reactors", "godiva", "godiva.armi.unittest.yaml"))
        cls.r = reactors.loadFromCs(cs)

    def test_loadRZT_reactor(self):
        """
        The Godiva benchmark model is a HEU sphere with a radius of 8.74 cm.

        This tests loading and verifies the reactor is loaded correctly by
        comparing volumes against expected volumes for full core (including
        void boundary conditions) and just the fuel.
        """
        godivaRadius = 8.7407
        reactorRadius = 9
        reactorHeight = 17.5
        # one-eighth symmetry of the cylinder / sphere
        refReactorVolume = math.pi * reactorRadius**2 * reactorHeight / 8
        refFuelVolume = 4.0 / 3.0 * math.pi * (godivaRadius) ** 3 / 8

        reactorVolumes, fuelVolumes = [], []
        for b in self.r.core.iterBlocks():
            reactorVolumes.append(b.getVolume())
            fuelVolumes.extend(c.getVolume() for c in b if "godiva" in c.name)

        tolerance = 1e-3
        # verify the total reactor volume is as expected
        relErr = math.fabs((refReactorVolume - sum(reactorVolumes)) / refReactorVolume)
        self.assertLess(relErr, tolerance)

        # verify the total fuel volume is as expected
        relErr = math.fabs((refFuelVolume - sum(fuelVolumes)) / refFuelVolume)
        self.assertLess(relErr, tolerance)

    def test_loadRZT(self):
        self.assertEqual(len(self.r.core), 3)
        radMeshes = [a.p.RadMesh for a in self.r.core]
        aziMeshes = [a.p.AziMesh for a in self.r.core]
        print(f"radMeshes: {radMeshes}")
        print(f"aziMeshes: {aziMeshes}")
        self.assertTrue(all(radMesh == 2 for radMesh in radMeshes))
        self.assertTrue(all(aziMesh == 7 for aziMesh in aziMeshes))

    def test_findAllMeshPoints(self):
        """Test findAllMeshPoints()."""
        i, _, _ = self.r.core.findAllMeshPoints()
        # azimuthal mesh must not wrap past a full revolution
        self.assertLess(i[-1], 2 * math.pi)
class TestZone(unittest.TestCase):
    def setUp(self):
        # set up a Reactor, for the spatialLocator
        bp = blueprints.Blueprints()
        r = reactors.Reactor("zonetest", bp)
        r.add(reactors.Core("Core"))
        r.core.spatialGrid = grids.HexGrid.fromPitch(1.0)
        r.core.spatialGrid._bounds = (
            [0, 1, 2, 3, 4],
            [0, 10, 20, 30, 40],
            [0, 20, 40, 60, 80],
        )
        r.core.spatialGrid.symmetry = geometry.SymmetryType(
            geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC
        )
        r.core.spatialGrid.geomType = geometry.HEX

        # some testing constants
        self.numAssems = 5
        self.numBlocks = 5

        # build a list of Assemblies
        self.aList = []
        for ring in range(self.numAssems):
            assem = assemblies.HexAssembly("fuel")
            assem.spatialGrid = r.core.spatialGrid
            assem.spatialLocator = r.core.spatialGrid[ring, 1, 0]
            assem.parent = r.core
            self.aList.append(assem)

        # build a list of Blocks, all attached to the first assembly
        self.bList = []
        for _ in range(self.numBlocks):
            block = blocks.HexBlock("TestHexBlock")
            block.setType("defaultType")
            block.p.nPins = 3
            block.setHeight(3.0)
            self.aList[0].add(block)
            self.bList.append(block)

    def test_addItem(self):
        """
        Test adding an item.

        .. test:: Add item to a zone.
            :id: T_ARMI_ZONE0
            :tests: R_ARMI_ZONE
        """
        zone = zones.Zone("test_addItem")
        zone.addItem(self.aList[0])
        self.assertIn(self.aList[0].getLocation(), zone)
        self.assertRaises(AssertionError, zone.addItem, "nope")

    def test_removeItem(self):
        zone = zones.Zone("test_removeItem", [a.getLocation() for a in self.aList])
        zone.removeItem(self.aList[0])
        self.assertNotIn(self.aList[0].getLocation(), zone)
        self.assertRaises(AssertionError, zone.removeItem, "also nope")

    def test_addItems(self):
        """
        Test adding items.

        .. test:: Add multiple items to a zone.
            :id: T_ARMI_ZONE1
            :tests: R_ARMI_ZONE
        """
        zone = zones.Zone("test_addItems")
        zone.addItems(self.aList)
        for a in self.aList:
            self.assertIn(a.getLocation(), zone)

    def test_removeItems(self):
        zone = zones.Zone("test_removeItems", [a.getLocation() for a in self.aList])
        zone.removeItems(self.aList)
        for a in self.aList:
            self.assertNotIn(a.getLocation(), zone)

    def test_addLoc(self):
        """
        Test adding a location.

        .. test:: Add location to a zone.
            :id: T_ARMI_ZONE2
            :tests: R_ARMI_ZONE
        """
        zone = zones.Zone("test_addLoc")
        zone.addLoc(self.aList[0].getLocation())
        self.assertIn(self.aList[0].getLocation(), zone)
        self.assertRaises(AssertionError, zone.addLoc, 1234)

    def test_removeLoc(self):
        zone = zones.Zone("test_removeLoc", [a.getLocation() for a in self.aList])
        zone.removeLoc(self.aList[0].getLocation())
        self.assertNotIn(self.aList[0].getLocation(), zone)
        self.assertRaises(AssertionError, zone.removeLoc, 1234)

    def test_addLocs(self):
        """
        Test adding locations.

        .. test:: Add multiple locations to a zone.
            :id: T_ARMI_ZONE3
            :tests: R_ARMI_ZONE
        """
        zone = zones.Zone("test_addLocs")
        zone.addLocs([a.getLocation() for a in self.aList])
        for a in self.aList:
            self.assertIn(a.getLocation(), zone)

    def test_removeLocs(self):
        zone = zones.Zone("test_removeLocs", [a.getLocation() for a in self.aList])
        zone.removeLocs([a.getLocation() for a in self.aList])
        for a in self.aList:
            self.assertNotIn(a.getLocation(), zone)

    def test_iteration(self):
        locs = [a.getLocation() for a in self.aList]
        zone = zones.Zone("test_iteration")

        # BONUS TEST: Zone.__len__()
        self.assertEqual(len(zone), 0)
        zone.addLocs(locs)
        self.assertEqual(len(zone), self.numAssems)

        # loop once to prove looping works
        for aLoc in zone:
            self.assertIn(aLoc, locs)
            self.assertTrue(aLoc in zone)  # Tests Zone.__contains__()

        # loop twice to make sure it iterates nicely.
        for aLoc in zone:
            self.assertIn(aLoc, locs)
            self.assertTrue(aLoc in zone)  # Tests Zone.__contains__()

    def test_repr(self):
        zone = zones.Zone("test_repr")
        zone.addItems(self.aList)
        zStr = "Zone test_repr with 5 Assemblies"
        self.assertIn(zStr, str(zone))

    def test_blocks(self):
        zone = zones.Zone("test_blocks", zoneType=blocks.Block)

        # test the blocks were correctly added
        self.assertEqual(len(zone), 0)
        zone.addItems(self.bList)
        self.assertEqual(len(zone), self.numBlocks)

        # loop once to prove looping works
        for aLoc in zone:
            self.assertIn(aLoc, zone.locs)
            self.assertTrue(aLoc in zone)  # test Zone.__contains__()
class TestZones(unittest.TestCase):
    def setUp(self):
        # spin up the test reactor
        self.o, self.r = loadTestReactor(
            inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml"
        )

        # build some generic test zones to get started with
        newSettings = {
            "zoneDefinitions": [
                "ring-1: 001-001",
                "ring-2: 002-001, 002-002",
                "ring-3: 003-001, 003-002, 003-003",
            ]
        }
        cs = self.o.cs.modified(newSettings=newSettings)
        self.r.core.buildManualZones(cs)
        self.zonez = self.r.core.zones

    def test_dictionaryInterface(self):
        """
        Test creating and interacting with the Zones object.

        .. test:: Create collection of Zones.
            :id: T_ARMI_ZONE4
            :tests: R_ARMI_ZONE
        """
        zs = zones.Zones()

        # validate the addZone() and __len__() work
        self.assertEqual(len(zs.names), 0)
        zs.addZone(self.zonez["ring-2"])
        self.assertEqual(len(zs.names), 1)

        # validate that __contains__() works
        self.assertFalse("ring-1" in zs)
        self.assertTrue("ring-2" in zs)
        self.assertFalse("ring-3" in zs)

        # validate that __remove__() works
        del zs["ring-2"]
        self.assertEqual(len(zs.names), 0)

        # validate that addZones() works
        zs.addZones(self.zonez)
        self.assertEqual(len(zs.names), 3)
        self.assertTrue("ring-1" in zs)
        self.assertTrue("ring-2" in zs)
        self.assertTrue("ring-3" in zs)

        # validate that get() works
        ring3 = zs["ring-3"]
        self.assertEqual(len(ring3), 3)
        self.assertIn("003-002", ring3)

        # validate that removeZones() works
        zonesToRemove = [z.name for z in self.zonez]
        zs.removeZones(zonesToRemove)
        self.assertEqual(len(zs.names), 0)
        self.assertFalse("ring-1" in zs)
        self.assertFalse("ring-2" in zs)
        self.assertFalse("ring-3" in zs)

    def test_findZoneItIsIn(self):
        # customize settings for this test
        newSettings = {
            "zoneDefinitions": [
                "ring-1: 001-001",
                "ring-2: 002-001, 002-002",
            ]
        }
        cs = self.o.cs.modified(newSettings=newSettings)
        self.r.core.buildManualZones(cs)

        daZones = self.r.core.zones
        for zone in daZones:
            a = self.r.core.getAssemblyWithStringLocation(sorted(zone.locs)[0])
            aZone = daZones.findZoneItIsIn(a)
            self.assertEqual(aZone, zone)

        # get assem from first zone
        a = self.r.core.getAssemblyWithStringLocation(sorted(daZones[daZones.names[0]].locs)[0])
        # remove the zone
        daZones.removeZone(daZones.names[0])
        # ensure that we can no longer find the assembly in the zone
        self.assertEqual(daZones.findZoneItIsIn(a), None)

    def test_getZoneLocations(self):
        # customize settings for this test
        newSettings = {
            "zoneDefinitions": [
                "ring-1: 001-001",
                "ring-2: 002-001, 002-002",
            ]
        }
        cs = self.o.cs.modified(newSettings=newSettings)
        self.r.core.buildManualZones(cs)

        # test the retrieval of zone locations
        self.assertEqual(set(["002-001", "002-002"]), self.r.core.zones.getZoneLocations("ring-2"))

    def test_getAllLocations(self):
        # customize settings for this test
        newSettings = {
            "zoneDefinitions": [
                "ring-1: 001-001",
                "ring-2: 002-001, 002-002",
            ]
        }
        cs = self.o.cs.modified(newSettings=newSettings)
        self.r.core.buildManualZones(cs)

        # test the retrieval of zone locations
        self.assertEqual(set(["001-001", "002-001", "002-002"]), self.r.core.zones.getAllLocations())

    def test_summary(self):
        # make sure we have a couple of zones to test on
        for name0 in ["ring-1", "ring-2", "ring-3"]:
            self.assertIn(name0, self.zonez.names)

        # test the summary (in the log)
        with mockRunLogs.BufferLog() as mock:
            runLog.LOG.startLog("test_summary")
            runLog.LOG.setVerbosity(logging.INFO)
            self.assertEqual("", mock.getStdout())

            self.zonez.summary()
            self.assertIn("zoneDefinitions:", mock.getStdout())
            self.assertIn("- ring-1: ", mock.getStdout())
            self.assertIn("- ring-2: ", mock.getStdout())
            self.assertIn("- ring-3: ", mock.getStdout())
            self.assertIn("003-001, 003-002, 003-003", mock.getStdout())

    def test_sortZones(self):
        # create some zones in non-alphabetical order
        zs = zones.Zones()
        zs.addZone(self.zonez["ring-3"])
        zs.addZone(self.zonez["ring-1"])
        zs.addZone(self.zonez["ring-2"])

        # check the initial order of the zones
        self.assertEqual(list(zs._zones.keys())[0], "ring-3")
        self.assertEqual(list(zs._zones.keys())[1], "ring-1")
        self.assertEqual(list(zs._zones.keys())[2], "ring-2")

        # sort the zones
        zs.sortZones()

        # check the final order of the zones
        self.assertEqual(list(zs._zones.keys())[0], "ring-1")
        self.assertEqual(list(zs._zones.keys())[1], "ring-2")
        self.assertEqual(list(zs._zones.keys())[2], "ring-3")
class TestZonesFile(unittest.TestCase):
    def setUp(self):
        # spin up the test reactor
        self.o, self.r = loadTestReactor()

        # build zones based on a file
        cs = self.o.cs.modified(newSettings={"zonesFile": os.path.join(THIS_DIR, "zonesFile.yaml")})
        self.r.core.buildManualZones(cs)
        self.zonez = self.r.core.zones

    def test_zonesFile(self):
        """
        Test creating and interacting with a zones file.

        .. test:: Create collection of Zones based on a yaml file.
            :id: T_ARMI_ZONE5
            :tests: R_ARMI_ZONE
        """
        self.assertEqual(set(["001-001"]), self.r.core.zones.getZoneLocations("a_zone"))
        self.assertEqual(set(["002-001"]), self.r.core.zones.getZoneLocations("a_different_zone"))
class Zone:
    """
    A group of locations in the Core, used to divide it up for analysis.
    Each location represents an Assembly or a Block.

    .. impl:: A user can define a collection of armi locations.
        :id: I_ARMI_ZONE0
        :implements: R_ARMI_ZONE

        The Zone class facilitates the creation of a Zone object representing a
        collection of locations in the Core. A Zone contains a group of locations
        in the Core, used to subdivide it for analysis. Each location represents
        an Assembly or a Block, where a single Zone must contain items of the same
        type (i.e., Assembly or Block). Methods are provided to add or remove one
        or more locations to/from the Zone, and similarly, add or remove one or
        more items with a Core location (i.e., Assemblies or Blocks) to/from the
        Zone. In addition, several methods are provided to facilitate the
        retrieval of locations from a Zone by performing functions to check if a
        location exists in the Zone, looping through the locations in the Zone in
        alphabetical order, and returning the number of locations in the Zone, etc.
    """

    # A single Zone must contain items of exactly one of these types.
    VALID_TYPES = (Assembly, Block)

    def __init__(self, name: str, locations: Optional[List] = None, zoneType: type = Assembly):
        self.name = name

        # A single Zone must contain items of the same type
        if zoneType not in Zone.VALID_TYPES:
            raise TypeError("Invalid Type {0}; A Zone can only be of type {1}".format(zoneType, Zone.VALID_TYPES))
        self.zoneType = zoneType

        # a Zone is mostly just a collection of locations in the Reactor
        if locations is None:
            self.locs = set()
        else:
            # NOTE: We are not validating the locations.
            self.locs = set(locations)

    def __contains__(self, loc: str) -> bool:
        return loc in self.locs

    def __iter__(self) -> Iterator[str]:
        """Loop through the locations, in alphabetical order."""
        for loc in sorted(self.locs):
            yield loc

    def __len__(self) -> int:
        """Return the number of locations."""
        return len(self.locs)

    def __repr__(self) -> str:
        zType = "Assemblies"
        if self.zoneType == Block:
            zType = "Blocks"
        return "<Zone {0} with {1} {2}>".format(self.name, len(self), zType)

    def addLoc(self, loc: str) -> None:
        """
        Adds the location to this Zone.

        Parameters
        ----------
        loc : str
            Location within the Core.

        Notes
        -----
        This method does not validate that the location given is somehow "valid".
        We are not doing any reverse lookups in the Reactor to prove that the type
        or location is valid. Because this would require heavier computation, and
        would add some chicken-and-the-egg problems into instantiating a new Reactor.
        """
        assert isinstance(loc, str), "The location must be a str: {0}".format(loc)
        self.locs.add(loc)

    def removeLoc(self, loc: str) -> None:
        """
        Removes the location from this Zone.

        Parameters
        ----------
        loc : str
            Location within the Core.

        Notes
        -----
        This method does not validate that the location given is somehow "valid".
        We are not doing any reverse lookups in the Reactor to prove that the type
        or location is valid. Because this would require heavier computation, and
        would add some chicken-and-the-egg problems into instantiating a new Reactor.
        """
        assert isinstance(loc, str), "The location must be a str: {0}".format(loc)
        self.locs.remove(loc)

    def addLocs(self, locs: List) -> None:
        """
        Adds the locations to this Zone.

        Parameters
        ----------
        locs : list
            List of str location labels to add.
        """
        for loc in locs:
            self.addLoc(loc)

    def removeLocs(self, locs: List) -> None:
        """
        Removes the locations from this Zone.

        Parameters
        ----------
        locs : list
            List of str location labels to remove.
        """
        for loc in locs:
            self.removeLoc(loc)

    def addItem(self, item: Union[Assembly, Block]) -> None:
        """
        Adds the location of an Assembly or Block to a zone.

        Parameters
        ----------
        item : Assembly or Block
            A single item with Core location (Assembly or Block)
        """
        # NOTE(fix): the assertion message previously read "but be have a type in"
        assert issubclass(type(item), self.zoneType), "The item ({0}) must have a type in: {1}".format(
            item, Zone.VALID_TYPES
        )
        self.addLoc(item.getLocation())

    def removeItem(self, item: Union[Assembly, Block]) -> None:
        """
        Removes the location of an Assembly or Block from a zone.

        Parameters
        ----------
        item : Assembly or Block
            A single item with Core location (Assembly or Block)
        """
        # NOTE(fix): the assertion message previously read "but be have a type in"
        assert issubclass(type(item), self.zoneType), "The item ({0}) must have a type in: {1}".format(
            item, Zone.VALID_TYPES
        )
        self.removeLoc(item.getLocation())

    def addItems(self, items: List) -> None:
        """
        Adds the locations of a list of Assemblies or Blocks to a zone.

        Parameters
        ----------
        items : list
            List of Assembly/Block objects
        """
        for item in items:
            self.addItem(item)

    def removeItems(self, items: List) -> None:
        """
        Removes the locations of a list of Assemblies or Blocks from a zone.

        Parameters
        ----------
        items : list
            List of Assembly/Block objects
        """
        for item in items:
            self.removeItem(item)
Returns ------- list Alphabetical collection of Zone names """ return sorted(self._zones.keys()) def __contains__(self, name: str) -> bool: return name in self._zones def __delitem__(self, name: str) -> None: del self._zones[name] def __getitem__(self, name: str) -> Zone: """Access a zone by name.""" return self._zones[name] def __iter__(self) -> Iterator[Zone]: """Loop through the zones in order.""" for nm in sorted(self._zones.keys()): yield self._zones[nm] def __len__(self) -> int: """Return the number of Zone objects.""" return len(self._zones) def addZone(self, zone: Zone) -> None: """Add a zone to the collection. Parameters ---------- zone: Zone A new Zone to add to this collection. """ if zone.name in self._zones: raise ValueError("Cannot add {} because a zone of that name already exists.".format(zone.name)) self._zones[zone.name] = zone def addZones(self, zones: List) -> None: """ Add multiple zones to the collection, and validate the Zones collection still make sense. Parameters ---------- zones: List (or Zones) A multiple new Zone objects to add to this collection. """ for zone in zones: self.addZone(zone) self.checkDuplicates() def removeZone(self, name: str) -> None: """Delete a zone by name. Parameters ---------- name: str Name of zone to remove """ del self[name] def removeZones(self, names: List) -> None: """ Delete multiple zones by name. Parameters ---------- names: List (or names) Multiple Zone names to remove from this collection. """ for name in names: self.removeZone(name) def checkDuplicates(self) -> None: """ Validate that the zones are mutually exclusive. That is, make sure that no item appears in more than one Zone. 
""" allLocs = [] for zone in self: allLocs.extend(list(zone.locs)) # use set lotic to test for duplicates if len(allLocs) == len(set(allLocs)): # no duplicates return # find duplicates by removing unique locs from the full list for uniqueLoc in set(allLocs): allLocs.remove(uniqueLoc) # there are duplicates, so raise an error locs = sorted(set(allLocs)) raise RuntimeError("Duplicate items found in Zones: {0}".format(locs)) def getZoneLocations(self, zoneNames: List) -> Set: """ Get the location labels of a particular (or a few) zone(s). Parameters ---------- zoneNames : str, or list the zone name or list of names Returns ------- zoneLocs : set List of location labels of this/these zone(s) """ if not isinstance(zoneNames, list): zoneNames = [zoneNames] zoneLocs = set() for zn in zoneNames: try: thisZoneLocs = set(self[zn]) except KeyError: runLog.error("The zone {0} does not exist. Please define it.".format(zn)) raise zoneLocs.update(thisZoneLocs) return zoneLocs def getAllLocations(self) -> Set: """Return all locations across every Zone in this Zones object. Returns ------- set A combination set of all locations, from every Zone """ locs = set() for zone in self: locs.update(self[zone.name]) return locs def findZoneItIsIn(self, a: Union[Assembly, Block]) -> Optional[Zone]: """ Return the zone object that this Assembly/Block is in. Parameters ---------- a : Assembly or Block The item to locate Returns ------- zone : Zone object that the input item resides in. """ aLoc = a.getLocation() zoneFound = False for zone in self: if aLoc in zone.locs: zoneFound = True return zone if not zoneFound: runLog.debug(f"Was not able to find which zone {a} is in", single=True) return None def sortZones(self, reverse=False) -> None: """Sorts the Zone objects alphabetically. 
Parameters ---------- reverse : bool, optional Whether to sort in reverse order, by default False """ self._zones = dict(sorted(self._zones.items(), reverse=reverse)) def summary(self) -> None: """ Summarize the zone definitions clearly, and in a way that can be copy/pasted back into a settings file under "zoneDefinitions", if the user wants to manually reuse these zones later. Examples -------- zoneDefinitions: - ring-1: 001-001 - ring-2: 002-001, 002-002 - ring-3: 003-001, 003-002, 003-003 """ # log a quick header runLog.info("zoneDefinitions:") # log the zone definitions in a way that can be copy/pasted back into a settings file for name in sorted(self._zones.keys()): locs = sorted(self._zones[name].locs) line = "- {0}: ".format(name) + ", ".join(locs) runLog.info(line) ================================================ FILE: armi/resources/burn-chain.yaml ================================================ AM241: - transmutation: branch: 1.0 products: - PU240 type: n2n - transmutation: branch: 0.1384 products: - PU242 type: nGamma - transmutation: branch: 0.6616 products: - CM242 - DUMP2 type: nGamma - transmutation: branch: 0.2 products: - AM242M - DUMP2 type: nGamma - transmutation: branch: 1.0 products: - LFP41 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.6500e-04 products: - H3 - DUMP1 type: fission - decay: branch: 1.0 products: - NP237 type: ad - decay: branch: 4.120055e-12 products: - LFP41 type: sf AM242G: - transmutation: branch: 1.0 products: - AM241 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - AM243 type: nGamma - decay: branch: 0.173 products: - PU242 type: ec - decay: branch: 0.827 products: - CM242 type: bmd AM242M: - transmutation: branch: 1.0 products: - AM241 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - AM243 type: nGamma - decay: branch: 
0.822865 products: - PU242 type: ec - decay: branch: 0.172135 products: - CM242 type: bmd - decay: branch: 0.005 products: - NP238 type: ad AM243: - transmutation: branch: 0.5 products: - AM242M type: n2n - transmutation: branch: 0.0865 products: - CM242 type: n2n - transmutation: branch: 0.4135 products: - PU242 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CM244 type: nGamma - decay: branch: 1.0 products: - NP239 - PU239 type: ad B10: - transmutation: branch: 1.0 products: - B11 type: nGamma - transmutation: branch: 1.0 products: - LI7 - DUMP1 type: nalph - transmutation: branch: 1.0 products: - DUMP1 type: n2n - transmutation: branch: 1.0 products: - BE9 - DUMP1 type: nd - transmutation: branch: 1.0 products: - BE10 - DUMP1 type: np B11: - transmutation: branch: 1.0 products: - DUMP1 type: nGamma - transmutation: # n-alphas to Li-8 -> Be-8 -> 2 alphas branch: 1.0 products: - HE4 - DUMP1 type: nalph productParticle: HE4 - transmutation: branch: 1.0 products: - B10 type: n2n - transmutation: branch: 1.0 products: - BE9 - DUMP1 type: nt BE9: - transmutation: branch: 1.0 products: - LI6 - DUMP1 type: nalph - transmutation: branch: 1.0 products: - LI7 - DUMP1 type: nt BK249: - transmutation: branch: 1.0 products: - CM244 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CF250 type: nGamma - decay: branch: 1.0 products: - CF249 type: bmd - decay: branch: 4.755215e-10 products: - LFP41 type: sf CF249: - transmutation: branch: 1.0 products: - CM244 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CF250 type: nGamma - decay: branch: 1.0 products: - CM245 type: ad - decay: branch: 5.00000e-09 products: - LFP41 type: sf CF250: - transmutation: branch: 1.0 products: - CF249 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - 
CF251 type: nGamma - decay: branch: 1.0 products: - CM246 type: ad - decay: branch: 7.70000e-04 products: - LFP41 type: sf CF251: - transmutation: branch: 1.0 products: - CF250 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CF252 type: nGamma - decay: branch: 1.0 products: - CM247 type: ad CF252: - transmutation: branch: 1.0 products: - CF251 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - DUMP2 type: nGamma - decay: branch: 9.69080e-01 products: - CM248 type: ad - decay: branch: 3.093567e-02 products: - LFP41 type: sf CM242: - transmutation: branch: 0.99 products: - AM241 type: n2n - transmutation: branch: 0.01 products: - NP237 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CM243 type: nGamma - decay: branch: 1.0 products: - PU238 type: ad - decay: branch: 6.794544e-08 products: - LFP41 type: sf CM243: - transmutation: branch: 1.0 products: - CM242 type: n2n - transmutation: branch: 1.0 products: - CM244 type: nGamma - transmutation: branch: 1.0 products: - LFP41 type: fission - decay: branch: 0.9971 products: - PU239 type: ad - decay: branch: 0.0029 products: - AM243 type: ec - decay: branch: 5.30000e-11 products: - LFP41 type: sf CM244: - transmutation: branch: 1.0 products: - CM243 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CM245 type: nGamma - decay: branch: 1.0 products: - PU240 type: ad - decay: branch: 1.340741e-06 products: - LFP41 type: sf CM245: - transmutation: branch: 1.0 products: - CM244 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CM246 type: nGamma - decay: branch: 1.0 products: - PU241 type: ad - decay: branch: 6.10000e-09 products: - LFP41 type: sf CM246: - transmutation: branch: 1.0 products: - CM245 type: n2n - 
transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CM247 type: nGamma - decay: branch: 1.0 products: - PU242 type: ad - decay: branch: 2.61500e-04 products: - LFP41 type: sf CM247: - transmutation: branch: 1.0 products: - CM246 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - CM248 - DUMP2 type: nGamma - decay: branch: 1.0 products: - AM243 type: ad CM248: - transmutation: branch: 1.0 products: - CM247 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission - transmutation: branch: 1.0 products: - BK249 type: nGamma - decay: branch: 0.9161 products: - DUMP2 type: ad - decay: branch: 8.39000e-02 products: - LFP41 type: sf H3: - decay: branch: 1.0 products: - HE3 - DUMP1 type: bmd HE3: - transmutation: branch: 1.0 products: - HE4 type: nGamma - transmutation: branch: 1.0 products: - H3 type: np HE4: [] IN113: - transmutation: branch: 0.995 products: - SN114 - DUMP1 type: nGamma - transmutation: branch: 0.005 products: - CD114 - DUMP1 type: nGamma IN115: - transmutation: branch: 0.9997 products: - SN116 - DUMP1 type: nGamma - transmutation: branch: 0.0003 products: - CD116 - DUMP1 type: nGamma LI6: - transmutation: branch: 1.0 products: - LI7 type: nGamma - transmutation: branch: 1.0 products: - HE4 - DUMP1 type: nt LI7: # LI7 n,gammas to Be8 which splits into two alphas, so we model both here by setting the productParticle to HE4 - transmutation: branch: 1.0 products: - HE4 - DUMP1 type: nGamma productParticle: HE4 - transmutation: branch: 1.0 products: - LI6 type: n2n NP237: - transmutation: branch: 1.0 products: - LFP38 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.2500e-04 products: - H3 - DUMP1 type: fission - transmutation: branch: 1.0 products: - NP238 - PU238 type: nGamma - transmutation: branch: 0.346 products: - PU236 type: n2n - transmutation: branch: 0.374 
products: - U236 type: n2n - transmutation: branch: 0.28 products: - DUMP2 type: n2n - decay: branch: 1.0 products: - PA233 - DUMP2 type: ad - decay: branch: 2.139954e-12 products: - DUMP1 type: sf NP238: - transmutation: branch: 1.0 products: - LFP38 type: fission - transmutation: branch: 1.0 products: - NP239 - PU239 type: nGamma - transmutation: branch: 1.0 products: - NP237 type: n2n - decay: branch: 1.0 products: - PU238 type: bmd PA231: - transmutation: branch: 1.0 products: - U232 type: nGamma - transmutation: branch: 1.0 products: - DUMP2 type: n2n PA233: - transmutation: branch: 1.0 products: - U234 type: nGamma - transmutation: branch: 1.0 products: - LFP35 type: fission - transmutation: branch: 1.0 products: - DUMP2 type: n2n - decay: branch: 1.0 products: - U233 type: bmd PU236: - transmutation: branch: 1.0 products: - NP237 type: nGamma - transmutation: branch: 1.0 products: - LFP35 type: fission - transmutation: branch: 1.0 products: - DUMP2 type: n2n - decay: branch: 1.0 products: - U232 - DUMP2 type: ad - decay: branch: 1.90000e-09 products: - LFP38 type: sf PU238: - transmutation: branch: 1.0 products: - LFP38 type: fission - transmutation: branch: 1.0 products: - PU239 type: nGamma - transmutation: branch: 1.0 products: - NP237 type: n2n - decay: branch: 1.0 products: - U234 type: ad - decay: branch: 1.838574e-09 products: - LFP38 type: sf PU239: - transmutation: branch: 1.0 products: - PU238 type: n2n - transmutation: branch: 1.0 products: - LFP39 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.4200e-04 products: - H3 - DUMP1 type: fission - transmutation: branch: 1.0 products: - PU240 type: nGamma - decay: branch: 1.0 products: - U235 type: ad - decay: branch: 4.399635e-12 products: - LFP39 type: sf PU240: - transmutation: branch: 1.0 products: - PU239 type: n2n - transmutation: branch: 1.0 products: - LFP40 type: fission # add tritium from ternary fission from JEFF3.1.1 on 
top of the LFPs (400 keV) - transmutation: branch: 1.9179e-04 products: - H3 - DUMP1 type: fission - transmutation: branch: 1.0 products: - PU241 type: nGamma - decay: branch: 1.0 products: - U236 type: ad - decay: branch: 5.656034e-08 products: - LFP40 type: sf PU241: - transmutation: branch: 1.0 products: - PU240 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.4100e-04 products: - H3 - DUMP1 type: fission - transmutation: branch: 1.0 products: - PU242 type: nGamma - decay: branch: 1.0 products: - AM241 type: bmd - decay: branch: 5.729878e-15 products: - LFP41 type: sf PU242: - transmutation: branch: 1.0 products: - PU241 type: n2n - transmutation: branch: 1.0 products: - LFP41 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.6348e-04 products: - H3 - DUMP1 type: fission - transmutation: branch: 1.0 products: - AM243 type: nGamma - decay: branch: 1.0 products: - U238 type: ad - decay: branch: 5.482456e-06 products: - LFP41 type: sf TH232: - transmutation: branch: 1.0 products: - PA233 type: nGamma - transmutation: branch: 1.0 products: - LFP35 type: fission - transmutation: branch: 1.0 products: - PA231 type: n2n - decay: branch: 1.0 products: - DUMP2 type: ad - decay: branch: 1.410000e-11 products: - LFP35 type: sf U232: - transmutation: branch: 1.0 products: - U233 type: nGamma - transmutation: branch: 1.0 products: - LFP35 type: fission - transmutation: branch: 1.0 products: - PA231 type: n2n - decay: branch: 1.0 products: - DUMP2 type: ad - decay: branch: 8.612316e-13 products: - LFP35 type: sf U233: - transmutation: branch: 1.0 products: - U234 type: nGamma - transmutation: branch: 1.0 products: - LFP35 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.1400e-04 products: - H3 - DUMP1 type: 
fission - transmutation: branch: 1.0 products: - U232 type: n2n - decay: branch: 1.0 products: - DUMP2 type: ad - decay: branch: 1.326638e-12 products: - LFP35 type: sf U234: - transmutation: branch: 1.0 products: - U233 - DUMP2 type: n2n - transmutation: branch: 1.0 products: - LFP35 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.5925e-04 products: - H3 - DUMP1 type: fission - transmutation: branch: 1.0 products: - U235 type: nGamma - decay: branch: 1.0 products: - DUMP2 type: ad - decay: branch: 1.169048e-11 products: - LFP35 type: sf U235: - transmutation: branch: 1.0 products: - U234 - DUMP2 type: n2n - transmutation: branch: 1.0 products: - LFP35 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.0800e-04 products: - H3 - DUMP1 type: fission - transmutation: branch: 1.0 products: - U236 type: nGamma - decay: branch: 1.0 products: - DUMP2 type: ad - decay: branch: 2.011429e-09 products: - LFP35 type: sf U236: - transmutation: branch: 1.0 products: - U235 type: n2n - transmutation: branch: 1.0 products: - NP237 type: nGamma - transmutation: branch: 1.0 products: - LFP35 type: fission - transmutation: branch: 1.3094e-04 products: - H3 - DUMP1 type: fission - decay: branch: 1.0 products: - DUMP2 type: ad - decay: branch: 1.201026e-09 products: - LFP35 type: sf U238: - transmutation: branch: 1.0 products: - NP237 type: n2n - transmutation: branch: 1.0 products: - LFP38 type: fission # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV) - transmutation: branch: 1.0262e-04 products: - H3 - DUMP1 type: fission - transmutation: branch: 1.0 products: - NP239 - PU239 type: nGamma - decay: branch: 5.448780e-07 products: - LFP38 type: sf NP239: - transmutation: branch: 1.0 products: - NP238 type: n2n - transmutation: branch: 1.0 products: - LFP38 type: fission - transmutation: branch: 1.0 products: - PU240 
type: nGamma - decay: branch: 1.0 products: - PU239 type: bmd Y89: - transmutation: branch: 1.0 products: - SR89 type: np SR89: - transmutation: branch: 1.0 products: - SR90 type: nGamma - decay: branch: 1.0 products: - Y89 type: bmd SR90: - decay: branch: 1.0 products: - DUMP2 type: bmd S32: - transmutation: branch: 1.0 products: - P32 type: np - transmutation: branch: 1.0 products: - S33 type: nGamma P32: - decay: branch: 1.0 products: - S32 type: bmd S33: - transmutation: branch: 1.0 products: - S34 type: nGamma S34: - transmutation: branch: 1.0 products: - S35 type: nGamma S35: - decay: branch: 1.0 products: - DUMP2 type: bmd S36: - transmutation: branch: 1.0 products: - S37 type: nGamma S37: - decay: branch: 1.0 products: - DUMP2 type: bmd TI46: - transmutation: branch: 1.0 products: - TI47 type: nGamma TI47: - transmutation: branch: 1.0 products: - SC47 type: np - transmutation: branch: 1.0 products: - TI48 type: nGamma SC47: - decay: branch: 1.0 products: - TI47 type: bmd TI48: - transmutation: branch: 1.0 products: - TI49 type: nGamma TI49: - transmutation: branch: 1.0 products: - TI50 type: nGamma TI50: - transmutation: branch: 1.0 products: - TI51 type: nGamma TI51: - decay: branch: 1.0 products: - DUMP2 type: bmd ================================================ FILE: armi/resources/mcc-nuclides.yaml ================================================ # This file contains the nuclides that are defined by the MC2-2 and MC2-3 # codes. The MC2-2 code base uses ENDF/B-V.2 and the MC2-3 code base uses # ENDF/B-VII.0 or ENDF/B-VII.1. This file can be amended in the future for # MC2-3 as the code base changes, but the nuclides that MC2-3 models are # consistent with the data that is supplied by ENDF/B-VII.0. # See: Appendix B of ANL/NE-11/41 Rev.3 for V.2 and VII.0 isotopes # Public Link: https://publications.anl.gov/anlpubs/2018/10/147840.pdf. 
AC225: ENDF/B-V.2: null ENDF/B-VII.0: AC2257 ENDF/B-VII.1: AC2257 AC226: ENDF/B-V.2: null ENDF/B-VII.0: AC2267 ENDF/B-VII.1: AC2267 AC227: ENDF/B-V.2: null ENDF/B-VII.0: AC2277 ENDF/B-VII.1: AC2277 AG107: ENDF/B-V.2: AG1075 ENDF/B-VII.0: AG1077 ENDF/B-VII.1: AG1077 AG109: ENDF/B-V.2: AG1095 ENDF/B-VII.0: AG1097 ENDF/B-VII.1: AG1097 AG110M: ENDF/B-V.2: null ENDF/B-VII.0: AG10M7 ENDF/B-VII.1: AG10M7 AG111: ENDF/B-V.2: AG1115 ENDF/B-VII.0: AG1117 ENDF/B-VII.1: AG1117 AL27: ENDF/B-V.2: AL27 5 ENDF/B-VII.0: AL27_7 ENDF/B-VII.1: AL27_7 AM240: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: AM2407 AM241: ENDF/B-V.2: AM2415 ENDF/B-VII.0: AM2417 ENDF/B-VII.1: AM2417 AM242G: ENDF/B-V.2: AM2425 ENDF/B-VII.0: AM2427 ENDF/B-VII.1: AM2427 AM242M: ENDF/B-V.2: AM242M ENDF/B-VII.0: AM42M7 ENDF/B-VII.1: AM42M7 AM243: ENDF/B-V.2: AM243V ENDF/B-VII.0: AM2437 ENDF/B-VII.1: AM2437 AM244: ENDF/B-V.2: null ENDF/B-VII.0: AM2447 ENDF/B-VII.1: AM2447 AM244M: ENDF/B-V.2: null ENDF/B-VII.0: AM44M7 ENDF/B-VII.1: AM44M7 AR36: ENDF/B-V.2: null ENDF/B-VII.0: AR36_7 ENDF/B-VII.1: AR36_7 AR38: ENDF/B-V.2: null ENDF/B-VII.0: AR38_7 ENDF/B-VII.1: AR38_7 AR40: ENDF/B-V.2: null ENDF/B-VII.0: AR40_7 ENDF/B-VII.1: AR40_7 AS74: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: AS74_7 AS75: ENDF/B-V.2: AS75 5 ENDF/B-VII.0: AS75_7 ENDF/B-VII.1: AS75_7 AU197: ENDF/B-V.2: AU1975 ENDF/B-VII.0: AU1977 ENDF/B-VII.1: AU1977 B10: ENDF/B-V.2: B-10 5 ENDF/B-VII.0: B10__7 ENDF/B-VII.1: B10__7 B11: ENDF/B-V.2: B-11 5 ENDF/B-VII.0: B11__7 ENDF/B-VII.1: B11__7 BA130: ENDF/B-V.2: null ENDF/B-VII.0: BA1307 ENDF/B-VII.1: BA1307 BA132: ENDF/B-V.2: null ENDF/B-VII.0: BA1327 ENDF/B-VII.1: BA1327 BA133: ENDF/B-V.2: null ENDF/B-VII.0: BA1337 ENDF/B-VII.1: BA1337 BA134: ENDF/B-V.2: BA1345 ENDF/B-VII.0: BA1347 ENDF/B-VII.1: BA1347 BA135: ENDF/B-V.2: BA1355 ENDF/B-VII.0: BA1357 ENDF/B-VII.1: BA1357 BA136: ENDF/B-V.2: BA1365 ENDF/B-VII.0: BA1367 ENDF/B-VII.1: BA1367 BA137: ENDF/B-V.2: BA1375 ENDF/B-VII.0: BA1377 ENDF/B-VII.1: 
BA1377 BA138: ENDF/B-V.2: BA1385 ENDF/B-VII.0: BA1387 ENDF/B-VII.1: BA1387 BA140: ENDF/B-V.2: BA1405 ENDF/B-VII.0: BA1407 ENDF/B-VII.1: BA1407 BE7: ENDF/B-V.2: null ENDF/B-VII.0: BE7__7 ENDF/B-VII.1: BE7__7 BE9: ENDF/B-V.2: BE-9 3 ENDF/B-VII.0: BE9__7 ENDF/B-VII.1: BE9__7 BI209: ENDF/B-V.2: BI2095 ENDF/B-VII.0: BI2097 ENDF/B-VII.1: BI2097 BK245: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: BK2457 BK246: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: BK2467 BK247: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: BK2477 BK248: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: BK2487 BK249: ENDF/B-V.2: BK2495 ENDF/B-VII.0: BK2497 ENDF/B-VII.1: BK2497 BK250: ENDF/B-V.2: null ENDF/B-VII.0: BK2507 ENDF/B-VII.1: BK2507 BR79: ENDF/B-V.2: BR79 5 ENDF/B-VII.0: BR79_7 ENDF/B-VII.1: BR79_7 BR81: ENDF/B-V.2: BR81 5 ENDF/B-VII.0: BR81_7 ENDF/B-VII.1: BR81_7 C: ENDF/B-V.2: C 5 ENDF/B-VII.0: C____7 ENDF/B-VII.1: C____7 CA: ENDF/B-V.2: CA 5 ENDF/B-VII.0: null ENDF/B-VII.1: null CA40: ENDF/B-V.2: null ENDF/B-VII.0: CA40_7 ENDF/B-VII.1: CA40_7 CA42: ENDF/B-V.2: null ENDF/B-VII.0: CA42_7 ENDF/B-VII.1: CA42_7 CA43: ENDF/B-V.2: null ENDF/B-VII.0: CA43_7 ENDF/B-VII.1: CA43_7 CA44: ENDF/B-V.2: null ENDF/B-VII.0: CA44_7 ENDF/B-VII.1: CA44_7 CA46: ENDF/B-V.2: null ENDF/B-VII.0: CA46_7 ENDF/B-VII.1: CA46_7 CA48: ENDF/B-V.2: null ENDF/B-VII.0: CA48_7 ENDF/B-VII.1: CA48_7 CD: ENDF/B-V.2: CD 5 ENDF/B-VII.0: null ENDF/B-VII.1: null CD106: ENDF/B-V.2: CD1065 ENDF/B-VII.0: CD1067 ENDF/B-VII.1: CD1067 CD108: ENDF/B-V.2: CD1085 ENDF/B-VII.0: CD1087 ENDF/B-VII.1: CD1087 CD110: ENDF/B-V.2: CD1105 ENDF/B-VII.0: CD1107 ENDF/B-VII.1: CD1107 CD111: ENDF/B-V.2: CD1115 ENDF/B-VII.0: CD1117 ENDF/B-VII.1: CD1117 CD112: ENDF/B-V.2: CD1125 ENDF/B-VII.0: CD1127 ENDF/B-VII.1: CD1127 CD113: ENDF/B-V.2: CD1135 ENDF/B-VII.0: CD1137 ENDF/B-VII.1: CD1137 CD114: ENDF/B-V.2: CD1145 ENDF/B-VII.0: CD1147 ENDF/B-VII.1: CD1147 CD115M: ENDF/B-V.2: CD115M ENDF/B-VII.0: CD15M7 ENDF/B-VII.1: CD15M7 CD116: ENDF/B-V.2: 
CD1165 ENDF/B-VII.0: CD1167 ENDF/B-VII.1: CD1167 CE136: ENDF/B-V.2: null ENDF/B-VII.0: CE1367 ENDF/B-VII.1: CE1367 CE138: ENDF/B-V.2: null ENDF/B-VII.0: CE1387 ENDF/B-VII.1: CE1387 CE139: ENDF/B-V.2: null ENDF/B-VII.0: CE1397 ENDF/B-VII.1: CE1397 CE140: ENDF/B-V.2: CE1405 ENDF/B-VII.0: CE1407 ENDF/B-VII.1: CE1407 CE141: ENDF/B-V.2: CE1415 ENDF/B-VII.0: CE1417 ENDF/B-VII.1: CE1417 CE142: ENDF/B-V.2: CE1425 ENDF/B-VII.0: CE1427 ENDF/B-VII.1: CE1427 CE143: ENDF/B-V.2: CE1435 ENDF/B-VII.0: CE1437 ENDF/B-VII.1: CE1437 CE144: ENDF/B-V.2: CE1445 ENDF/B-VII.0: CE1447 ENDF/B-VII.1: CE1447 CF246: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: CF2467 CF248: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: CF2487 CF249: ENDF/B-V.2: CF2495 ENDF/B-VII.0: CF2497 ENDF/B-VII.1: CF2497 CF250: ENDF/B-V.2: CF2505 ENDF/B-VII.0: CF2507 ENDF/B-VII.1: CF2507 CF251: ENDF/B-V.2: CF2515 ENDF/B-VII.0: CF2517 ENDF/B-VII.1: CF2517 CF252: ENDF/B-V.2: CF2525 ENDF/B-VII.0: CF2527 ENDF/B-VII.1: CF2527 CF253: ENDF/B-V.2: CF2535 ENDF/B-VII.0: CF2537 ENDF/B-VII.1: CF2537 CF254: ENDF/B-V.2: null ENDF/B-VII.0: CF2547 ENDF/B-VII.1: CF2547 CL: ENDF/B-V.2: CL 5 ENDF/B-VII.0: null ENDF/B-VII.1: null CL35: ENDF/B-V.2: CL35_7 ENDF/B-VII.0: CL35_7 ENDF/B-VII.1: CL35_7 CL37: ENDF/B-V.2: CL37_7 ENDF/B-VII.0: CL37_7 ENDF/B-VII.1: CL37_7 CM240: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: CM2407 CM241: ENDF/B-V.2: CM2415 ENDF/B-VII.0: CM2417 ENDF/B-VII.1: CM2417 CM242: ENDF/B-V.2: CM2425 ENDF/B-VII.0: CM2427 ENDF/B-VII.1: CM2427 CM243: ENDF/B-V.2: CM2435 ENDF/B-VII.0: CM2437 ENDF/B-VII.1: CM2437 CM244: ENDF/B-V.2: CM2445 ENDF/B-VII.0: CM2447 ENDF/B-VII.1: CM2447 CM245: ENDF/B-V.2: CM2455 ENDF/B-VII.0: CM2457 ENDF/B-VII.1: CM2457 CM246: ENDF/B-V.2: CM2465 ENDF/B-VII.0: CM2467 ENDF/B-VII.1: CM2467 CM247: ENDF/B-V.2: CM2475 ENDF/B-VII.0: CM2477 ENDF/B-VII.1: CM2477 CM248: ENDF/B-V.2: CM2485 ENDF/B-VII.0: CM2487 ENDF/B-VII.1: CM2487 CM249: ENDF/B-V.2: null ENDF/B-VII.0: CM2497 ENDF/B-VII.1: CM2497 CM250: 
ENDF/B-V.2: null ENDF/B-VII.0: CM2507 ENDF/B-VII.1: CM2507 CO58: ENDF/B-V.2: null ENDF/B-VII.0: CO58_7 ENDF/B-VII.1: CO58_7 CO58M: ENDF/B-V.2: null ENDF/B-VII.0: CO58M7 ENDF/B-VII.1: CO58M7 CO59: ENDF/B-V.2: CO59 5 ENDF/B-VII.0: CO59_7 ENDF/B-VII.1: CO59_7 CR: ENDF/B-V.2: CR S ENDF/B-VII.0: null ENDF/B-VII.1: null CR50: ENDF/B-V.2: null ENDF/B-VII.0: CR50_7 ENDF/B-VII.1: CR50_7 CR52: ENDF/B-V.2: null ENDF/B-VII.0: CR52_7 ENDF/B-VII.1: CR52_7 CR53: ENDF/B-V.2: null ENDF/B-VII.0: CR53_7 ENDF/B-VII.1: CR53_7 CR54: ENDF/B-V.2: null ENDF/B-VII.0: CR54_7 ENDF/B-VII.1: CR54_7 CS133: ENDF/B-V.2: CS1335 ENDF/B-VII.0: CS1337 ENDF/B-VII.1: CS1337 CS134: ENDF/B-V.2: CS1345 ENDF/B-VII.0: CS1347 ENDF/B-VII.1: CS1347 CS135: ENDF/B-V.2: CS1355 ENDF/B-VII.0: CS1357 ENDF/B-VII.1: CS1357 CS136: ENDF/B-V.2: CS1365 ENDF/B-VII.0: CS1367 ENDF/B-VII.1: CS1367 CS137: ENDF/B-V.2: CS1375 ENDF/B-VII.0: CS1377 ENDF/B-VII.1: CS1377 CU: ENDF/B-V.2: CU 5 ENDF/B-VII.0: null ENDF/B-VII.1: null CU63: ENDF/B-V.2: null ENDF/B-VII.0: CU63_7 ENDF/B-VII.1: CU63_7 CU65: ENDF/B-V.2: null ENDF/B-VII.0: CU65_7 ENDF/B-VII.1: CU65_7 DUMP1: ENDF/B-V.2: DUMMY1 ENDF/B-VII.0: DUMMY ENDF/B-VII.1: DUMMY DUMP2: ENDF/B-V.2: DUMMY2 ENDF/B-VII.0: DUMMY ENDF/B-VII.1: DUMMY DY156: ENDF/B-V.2: null ENDF/B-VII.0: DY1567 ENDF/B-VII.1: DY1567 DY158: ENDF/B-V.2: null ENDF/B-VII.0: DY1587 ENDF/B-VII.1: DY1587 DY160: ENDF/B-V.2: DY1605 ENDF/B-VII.0: DY1607 ENDF/B-VII.1: DY1607 DY161: ENDF/B-V.2: DY1615 ENDF/B-VII.0: DY1617 ENDF/B-VII.1: DY1617 DY162: ENDF/B-V.2: DY1625 ENDF/B-VII.0: DY1627 ENDF/B-VII.1: DY1627 DY163: ENDF/B-V.2: DY1635 ENDF/B-VII.0: DY1637 ENDF/B-VII.1: DY1637 DY164: ENDF/B-V.2: DY1645 ENDF/B-VII.0: DY1647 ENDF/B-VII.1: DY1647 ER162: ENDF/B-V.2: null ENDF/B-VII.0: ER1627 ENDF/B-VII.1: ER1627 ER164: ENDF/B-V.2: null ENDF/B-VII.0: ER1647 ENDF/B-VII.1: ER1647 ER166: ENDF/B-V.2: ER1665 ENDF/B-VII.0: ER1667 ENDF/B-VII.1: ER1667 ER167: ENDF/B-V.2: ER1675 ENDF/B-VII.0: ER1677 ENDF/B-VII.1: ER1677 ER168: ENDF/B-V.2: 
null ENDF/B-VII.0: ER1687 ENDF/B-VII.1: ER1687 ER170: ENDF/B-V.2: null ENDF/B-VII.0: ER1707 ENDF/B-VII.1: ER1707 ES251: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ES2517 ES252: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ES2527 ES253: ENDF/B-V.2: ES2535 ENDF/B-VII.0: ES2537 ENDF/B-VII.1: ES2537 ES254: ENDF/B-V.2: null ENDF/B-VII.0: ES2547 ENDF/B-VII.1: ES2547 ES254M: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ES54M7 ES255: ENDF/B-V.2: null ENDF/B-VII.0: ES2557 ENDF/B-VII.1: ES2557 EU151: ENDF/B-V.2: EU1515 ENDF/B-VII.0: EU1517 ENDF/B-VII.1: EU1517 EU152: ENDF/B-V.2: EU1525 ENDF/B-VII.0: EU1527 ENDF/B-VII.1: EU1527 EU153: ENDF/B-V.2: EU1535 ENDF/B-VII.0: EU1537 ENDF/B-VII.1: EU1537 EU154: ENDF/B-V.2: EU1545 ENDF/B-VII.0: EU1547 ENDF/B-VII.1: EU1547 EU155: ENDF/B-V.2: EU1555 ENDF/B-VII.0: EU1557 ENDF/B-VII.1: EU1557 EU156: ENDF/B-V.2: EU1565 ENDF/B-VII.0: EU1567 ENDF/B-VII.1: EU1567 EU157: ENDF/B-V.2: EU1575 ENDF/B-VII.0: EU1577 ENDF/B-VII.1: EU1577 F19: ENDF/B-V.2: F-19 5 ENDF/B-VII.0: F19__7 ENDF/B-VII.1: F19__7 FE: ENDF/B-V.2: FE SV ENDF/B-VII.0: null ENDF/B-VII.1: null FE54: ENDF/B-V.2: null ENDF/B-VII.0: FE54_7 ENDF/B-VII.1: FE54_7 FE56: ENDF/B-V.2: null ENDF/B-VII.0: FE56_7 ENDF/B-VII.1: FE56_7 FE57: ENDF/B-V.2: null ENDF/B-VII.0: FE57_7 ENDF/B-VII.1: FE57_7 FE58: ENDF/B-V.2: null ENDF/B-VII.0: FE58_7 ENDF/B-VII.1: FE58_7 FM255: ENDF/B-V.2: null ENDF/B-VII.0: FM2557 ENDF/B-VII.1: FM2557 GA: ENDF/B-V.2: GA 5 ENDF/B-VII.0: null ENDF/B-VII.1: null GA69: ENDF/B-V.2: null ENDF/B-VII.0: GA69_7 ENDF/B-VII.1: GA69_7 GA71: ENDF/B-V.2: null ENDF/B-VII.0: GA71_7 ENDF/B-VII.1: GA71_7 GD152: ENDF/B-V.2: GD1525 ENDF/B-VII.0: GD1527 ENDF/B-VII.1: GD1527 GD153: ENDF/B-V.2: null ENDF/B-VII.0: GD1537 ENDF/B-VII.1: GD1537 GD154: ENDF/B-V.2: GD1545 ENDF/B-VII.0: GD1547 ENDF/B-VII.1: GD1547 GD155: ENDF/B-V.2: GD1555 ENDF/B-VII.0: GD1557 ENDF/B-VII.1: GD1557 GD156: ENDF/B-V.2: GD1565 ENDF/B-VII.0: GD1567 ENDF/B-VII.1: GD1567 GD157: ENDF/B-V.2: GD1575 
ENDF/B-VII.0: GD1577 ENDF/B-VII.1: GD1577 GD158: ENDF/B-V.2: GD1585 ENDF/B-VII.0: GD1587 ENDF/B-VII.1: GD1587 GD160: ENDF/B-V.2: GD1605 ENDF/B-VII.0: GD1607 ENDF/B-VII.1: GD1607 GE70: ENDF/B-V.2: null ENDF/B-VII.0: GE70_7 ENDF/B-VII.1: GE70_7 GE72: ENDF/B-V.2: GE72 5 ENDF/B-VII.0: GE72_7 ENDF/B-VII.1: GE72_7 GE73: ENDF/B-V.2: GE73 5 ENDF/B-VII.0: GE73_7 ENDF/B-VII.1: GE73_7 GE74: ENDF/B-V.2: GE74 5 ENDF/B-VII.0: GE74_7 ENDF/B-VII.1: GE74_7 GE76: ENDF/B-V.2: GE76 5 ENDF/B-VII.0: GE76_7 ENDF/B-VII.1: GE76_7 H1: ENDF/B-V.2: HYDRGN ENDF/B-VII.0: H1___7 ENDF/B-VII.1: H1___7 H2: ENDF/B-V.2: H-2 5 ENDF/B-VII.0: H2___7 ENDF/B-VII.1: H2___7 H3: ENDF/B-V.2: H-3 5 ENDF/B-VII.0: H3___7 ENDF/B-VII.1: H3___7 HE3: ENDF/B-V.2: HE3 5 ENDF/B-VII.0: HE3__7 ENDF/B-VII.1: HE3__7 HE4: ENDF/B-V.2: HE4 5 ENDF/B-VII.0: HE4__7 ENDF/B-VII.1: HE4__7 HF: ENDF/B-V.2: HF 5 ENDF/B-VII.0: null ENDF/B-VII.1: null HF174: ENDF/B-V.2: HF1745 ENDF/B-VII.0: HF1747 ENDF/B-VII.1: HF1747 HF176: ENDF/B-V.2: HF1765 ENDF/B-VII.0: HF1767 ENDF/B-VII.1: HF1767 HF177: ENDF/B-V.2: HF1775 ENDF/B-VII.0: HF1777 ENDF/B-VII.1: HF1777 HF178: ENDF/B-V.2: HF1785 ENDF/B-VII.0: HF1787 ENDF/B-VII.1: HF1787 HF179: ENDF/B-V.2: HF1795 ENDF/B-VII.0: HF1797 ENDF/B-VII.1: HF1797 HF180: ENDF/B-V.2: HF1805 ENDF/B-VII.0: HF1807 ENDF/B-VII.1: HF1807 HG196: ENDF/B-V.2: null ENDF/B-VII.0: HG1967 ENDF/B-VII.1: HG1967 HG198: ENDF/B-V.2: null ENDF/B-VII.0: HG1987 ENDF/B-VII.1: HG1987 HG199: ENDF/B-V.2: null ENDF/B-VII.0: HG1997 ENDF/B-VII.1: HG1997 HG200: ENDF/B-V.2: null ENDF/B-VII.0: HG2007 ENDF/B-VII.1: HG2007 HG201: ENDF/B-V.2: null ENDF/B-VII.0: HG2017 ENDF/B-VII.1: HG2017 HG202: ENDF/B-V.2: null ENDF/B-VII.0: HG2027 ENDF/B-VII.1: HG2027 HG204: ENDF/B-V.2: null ENDF/B-VII.0: HG2047 ENDF/B-VII.1: HG2047 HO165: ENDF/B-V.2: HO1655 ENDF/B-VII.0: HO1657 ENDF/B-VII.1: HO1657 HO166M: ENDF/B-V.2: null ENDF/B-VII.0: HO66M7 ENDF/B-VII.1: HO66M7 I127: ENDF/B-V.2: I-1275 ENDF/B-VII.0: I127_7 ENDF/B-VII.1: I127_7 I129: ENDF/B-V.2: I-1295 
ENDF/B-VII.0: I129_7 ENDF/B-VII.1: I129_7 I130: ENDF/B-V.2: I-1305 ENDF/B-VII.0: I130_7 ENDF/B-VII.1: I130_7 I131: ENDF/B-V.2: I-1315 ENDF/B-VII.0: I131_7 ENDF/B-VII.1: I131_7 I135: ENDF/B-V.2: I-1355 ENDF/B-VII.0: I135_7 ENDF/B-VII.1: I135_7 IN113: ENDF/B-V.2: IN1135 ENDF/B-VII.0: IN1137 ENDF/B-VII.1: IN1137 IN115: ENDF/B-V.2: IN1155 ENDF/B-VII.0: IN1157 ENDF/B-VII.1: IN1157 IR191: ENDF/B-V.2: null ENDF/B-VII.0: IR1917 ENDF/B-VII.1: IR1917 IR193: ENDF/B-V.2: null ENDF/B-VII.0: IR1937 ENDF/B-VII.1: IR1937 K: ENDF/B-V.2: K 5 ENDF/B-VII.0: null ENDF/B-VII.1: null K39: ENDF/B-V.2: null ENDF/B-VII.0: K39__7 ENDF/B-VII.1: K39__7 K40: ENDF/B-V.2: null ENDF/B-VII.0: K40__7 ENDF/B-VII.1: K40__7 K41: ENDF/B-V.2: null ENDF/B-VII.0: K41__7 ENDF/B-VII.1: K41__7 KR78: ENDF/B-V.2: KR78 5 ENDF/B-VII.0: KR78_7 ENDF/B-VII.1: KR78_7 KR80: ENDF/B-V.2: KR80 5 ENDF/B-VII.0: KR80_7 ENDF/B-VII.1: KR80_7 KR82: ENDF/B-V.2: KR82 5 ENDF/B-VII.0: KR82_7 ENDF/B-VII.1: KR82_7 KR83: ENDF/B-V.2: KR83 5 ENDF/B-VII.0: KR83_7 ENDF/B-VII.1: KR83_7 KR84: ENDF/B-V.2: KR84 5 ENDF/B-VII.0: KR84_7 ENDF/B-VII.1: KR84_7 KR85: ENDF/B-V.2: KR85 5 ENDF/B-VII.0: KR85_7 ENDF/B-VII.1: KR85_7 KR86: ENDF/B-V.2: KR86 5 ENDF/B-VII.0: KR86_7 ENDF/B-VII.1: KR86_7 LA138: ENDF/B-V.2: null ENDF/B-VII.0: LA1387 ENDF/B-VII.1: LA1387 LA139: ENDF/B-V.2: LA1395 ENDF/B-VII.0: LA1397 ENDF/B-VII.1: LA1397 LA140: ENDF/B-V.2: LA1405 ENDF/B-VII.0: LA1407 ENDF/B-VII.1: LA1407 LI6: ENDF/B-V.2: LI-6 5 ENDF/B-VII.0: LI6__7 ENDF/B-VII.1: LI6__7 LI7: ENDF/B-V.2: LI-7 V ENDF/B-VII.0: LI7__7 ENDF/B-VII.1: LI7__7 LU175: ENDF/B-V.2: LU1755 ENDF/B-VII.0: LU1757 ENDF/B-VII.1: LU1757 LU176: ENDF/B-V.2: LU1765 ENDF/B-VII.0: LU1767 ENDF/B-VII.1: LU1767 MG: ENDF/B-V.2: MG 5 ENDF/B-VII.0: null ENDF/B-VII.1: null MG24: ENDF/B-V.2: null ENDF/B-VII.0: MG24_7 ENDF/B-VII.1: MG24_7 MG25: ENDF/B-V.2: null ENDF/B-VII.0: MG25_7 ENDF/B-VII.1: MG25_7 MG26: ENDF/B-V.2: null ENDF/B-VII.0: MG26_7 ENDF/B-VII.1: MG26_7 MN55: ENDF/B-V.2: MN55 S ENDF/B-VII.0: MN55_7 
ENDF/B-VII.1: MN55_7 MO: ENDF/B-V.2: MO S ENDF/B-VII.0: null ENDF/B-VII.1: null MO100: ENDF/B-V.2: MO1005 ENDF/B-VII.0: MO1007 ENDF/B-VII.1: MO1007 MO92: ENDF/B-V.2: MO92 5 ENDF/B-VII.0: MO92_7 ENDF/B-VII.1: MO92_7 MO94: ENDF/B-V.2: MO94 5 ENDF/B-VII.0: MO94_7 ENDF/B-VII.1: MO94_7 MO95: ENDF/B-V.2: MO95 5 ENDF/B-VII.0: MO95_7 ENDF/B-VII.1: MO95_7 MO96: ENDF/B-V.2: MO96 5 ENDF/B-VII.0: MO96_7 ENDF/B-VII.1: MO96_7 MO97: ENDF/B-V.2: MO97 5 ENDF/B-VII.0: MO97_7 ENDF/B-VII.1: MO97_7 MO98: ENDF/B-V.2: MO98 5 ENDF/B-VII.0: MO98_7 ENDF/B-VII.1: MO98_7 MO99: ENDF/B-V.2: MO99 5 ENDF/B-VII.0: MO99_7 ENDF/B-VII.1: MO99_7 N14: ENDF/B-V.2: N-14 5 ENDF/B-VII.0: N14__7 ENDF/B-VII.1: N14__7 N15: ENDF/B-V.2: N-15 5 ENDF/B-VII.0: N15__7 ENDF/B-VII.1: N15__7 NA22: ENDF/B-V.2: null ENDF/B-VII.0: NA22_7 ENDF/B-VII.1: NA22_7 NA23: ENDF/B-V.2: NA23 S ENDF/B-VII.0: NA23_7 ENDF/B-VII.1: NA23_7 NB93: ENDF/B-V.2: NB93 5 ENDF/B-VII.0: NB93_7 ENDF/B-VII.1: NB93_7 NB94: ENDF/B-V.2: NB94 5 ENDF/B-VII.0: NB94_7 ENDF/B-VII.1: NB94_7 NB95: ENDF/B-V.2: NB95 5 ENDF/B-VII.0: NB95_7 ENDF/B-VII.1: NB95_7 ND142: ENDF/B-V.2: ND1425 ENDF/B-VII.0: ND1427 ENDF/B-VII.1: ND1427 ND143: ENDF/B-V.2: ND1435 ENDF/B-VII.0: ND1437 ENDF/B-VII.1: ND1437 ND144: ENDF/B-V.2: ND1445 ENDF/B-VII.0: ND1447 ENDF/B-VII.1: ND1447 ND145: ENDF/B-V.2: ND1455 ENDF/B-VII.0: ND1457 ENDF/B-VII.1: ND1457 ND146: ENDF/B-V.2: ND1465 ENDF/B-VII.0: ND1467 ENDF/B-VII.1: ND1467 ND147: ENDF/B-V.2: ND1475 ENDF/B-VII.0: ND1477 ENDF/B-VII.1: ND1477 ND148: ENDF/B-V.2: ND1485 ENDF/B-VII.0: ND1487 ENDF/B-VII.1: ND1487 ND150: ENDF/B-V.2: ND1505 ENDF/B-VII.0: ND1507 ENDF/B-VII.1: ND1507 NI: ENDF/B-V.2: NI S ENDF/B-VII.0: null ENDF/B-VII.1: null NI58: ENDF/B-V.2: null ENDF/B-VII.0: NI58_7 ENDF/B-VII.1: NI58_7 NI59: ENDF/B-V.2: null ENDF/B-VII.0: NI59_7 ENDF/B-VII.1: NI59_7 NI60: ENDF/B-V.2: null ENDF/B-VII.0: NI60_7 ENDF/B-VII.1: NI60_7 NI61: ENDF/B-V.2: null ENDF/B-VII.0: NI61_7 ENDF/B-VII.1: NI61_7 NI62: ENDF/B-V.2: null ENDF/B-VII.0: NI62_7 
ENDF/B-VII.1: NI62_7 NI64: ENDF/B-V.2: null ENDF/B-VII.0: NI64_7 ENDF/B-VII.1: NI64_7 NP234: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: NP2347 NP235: ENDF/B-V.2: null ENDF/B-VII.0: NP2357 ENDF/B-VII.1: NP2357 NP236: ENDF/B-V.2: null ENDF/B-VII.0: NP2367 ENDF/B-VII.1: NP2367 NP237: ENDF/B-V.2: NP237V ENDF/B-VII.0: NP2377 ENDF/B-VII.1: NP2377 NP238: ENDF/B-V.2: NP2385 ENDF/B-VII.0: NP2387 ENDF/B-VII.1: NP2387 NP239: ENDF/B-V.2: null ENDF/B-VII.0: NP2397 ENDF/B-VII.1: NP2397 O16: ENDF/B-V.2: O-16 5 ENDF/B-VII.0: O16__7 ENDF/B-VII.1: O16__7 O17: ENDF/B-V.2: O-17 5 ENDF/B-VII.0: O17__7 ENDF/B-VII.1: O17__7 P31: ENDF/B-V.2: P-31 5 ENDF/B-VII.0: P31__7 ENDF/B-VII.1: P31__7 PA229: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: PA2297 PA230: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: PA2307 PA231: ENDF/B-V.2: PA2315 ENDF/B-VII.0: PA2317 ENDF/B-VII.1: PA2317 PA232: ENDF/B-V.2: null ENDF/B-VII.0: PA2327 ENDF/B-VII.1: PA2327 PA233: ENDF/B-V.2: PA2335 ENDF/B-VII.0: PA2337 ENDF/B-VII.1: PA2337 PB: ENDF/B-V.2: PB 5 ENDF/B-VII.0: null ENDF/B-VII.1: null PB204: ENDF/B-V.2: null ENDF/B-VII.0: PB2047 ENDF/B-VII.1: PB2047 PB206: ENDF/B-V.2: null ENDF/B-VII.0: PB2067 ENDF/B-VII.1: PB2067 PB207: ENDF/B-V.2: null ENDF/B-VII.0: PB2077 ENDF/B-VII.1: PB2077 PB208: ENDF/B-V.2: null ENDF/B-VII.0: PB2087 ENDF/B-VII.1: PB2087 PD102: ENDF/B-V.2: PD1025 ENDF/B-VII.0: PD1027 ENDF/B-VII.1: PD1027 PD104: ENDF/B-V.2: PD1045 ENDF/B-VII.0: PD1047 ENDF/B-VII.1: PD1047 PD105: ENDF/B-V.2: PD1055 ENDF/B-VII.0: PD1057 ENDF/B-VII.1: PD1057 PD106: ENDF/B-V.2: PD1065 ENDF/B-VII.0: PD1067 ENDF/B-VII.1: PD1067 PD107: ENDF/B-V.2: PD1075 ENDF/B-VII.0: PD1077 ENDF/B-VII.1: PD1077 PD108: ENDF/B-V.2: PD1085 ENDF/B-VII.0: PD1087 ENDF/B-VII.1: PD1087 PD110: ENDF/B-V.2: PD1105 ENDF/B-VII.0: PD1107 ENDF/B-VII.1: PD1107 PM147: ENDF/B-V.2: PM1475 ENDF/B-VII.0: PM1477 ENDF/B-VII.1: PM1477 PM148: ENDF/B-V.2: PM1485 ENDF/B-VII.0: PM1487 ENDF/B-VII.1: PM1487 PM148M: ENDF/B-V.2: PM148M ENDF/B-VII.0: PM48M7 
ENDF/B-VII.1: PM48M7 PM149: ENDF/B-V.2: PM1495 ENDF/B-VII.0: PM1497 ENDF/B-VII.1: PM1497 PM151: ENDF/B-V.2: PM1515 ENDF/B-VII.0: PM1517 ENDF/B-VII.1: PM1517 PR141: ENDF/B-V.2: PR1415 ENDF/B-VII.0: PR1417 ENDF/B-VII.1: PR1417 PR142: ENDF/B-V.2: PR1425 ENDF/B-VII.0: PR1427 ENDF/B-VII.1: PR1427 PR143: ENDF/B-V.2: PR1435 ENDF/B-VII.0: PR1437 ENDF/B-VII.1: PR1437 PU236: ENDF/B-V.2: PU2365 ENDF/B-VII.0: PU2367 ENDF/B-VII.1: PU2367 PU237: ENDF/B-V.2: PU2375 ENDF/B-VII.0: PU2377 ENDF/B-VII.1: PU2377 PU238: ENDF/B-V.2: PU2385 ENDF/B-VII.0: PU2387 ENDF/B-VII.1: PU2387 PU239: ENDF/B-V.2: PU239V ENDF/B-VII.0: PU2397 ENDF/B-VII.1: PU2397 PU240: ENDF/B-V.2: PU2405 ENDF/B-VII.0: PU2407 ENDF/B-VII.1: PU2407 PU241: ENDF/B-V.2: PU2415 ENDF/B-VII.0: PU2417 ENDF/B-VII.1: PU2417 PU242: ENDF/B-V.2: PU2425 ENDF/B-VII.0: PU2427 ENDF/B-VII.1: PU2427 PU243: ENDF/B-V.2: PU2435 ENDF/B-VII.0: PU2437 ENDF/B-VII.1: PU2437 PU244: ENDF/B-V.2: PU2445 ENDF/B-VII.0: PU2447 ENDF/B-VII.1: PU2447 PU246: ENDF/B-V.2: null ENDF/B-VII.0: PU2467 ENDF/B-VII.1: PU2467 RA223: ENDF/B-V.2: null ENDF/B-VII.0: RA2237 ENDF/B-VII.1: RA2237 RA224: ENDF/B-V.2: null ENDF/B-VII.0: RA2247 ENDF/B-VII.1: RA2247 RA225: ENDF/B-V.2: null ENDF/B-VII.0: RA2257 ENDF/B-VII.1: RA2257 RA226: ENDF/B-V.2: null ENDF/B-VII.0: RA2267 ENDF/B-VII.1: RA2267 RB85: ENDF/B-V.2: RB85 5 ENDF/B-VII.0: RB85_7 ENDF/B-VII.1: RB85_7 RB86: ENDF/B-V.2: RB86 5 ENDF/B-VII.0: RB86_7 ENDF/B-VII.1: RB86_7 RB87: ENDF/B-V.2: RB87 5 ENDF/B-VII.0: RB87_7 ENDF/B-VII.1: RB87_7 RE185: ENDF/B-V.2: RE1855 ENDF/B-VII.0: RE1857 ENDF/B-VII.1: RE1857 RE187: ENDF/B-V.2: RE1875 ENDF/B-VII.0: RE1877 ENDF/B-VII.1: RE1877 RH103: ENDF/B-V.2: RH1035 ENDF/B-VII.0: RH1037 ENDF/B-VII.1: RH1037 RH105: ENDF/B-V.2: RH1055 ENDF/B-VII.0: RH1057 ENDF/B-VII.1: RH1057 RU100: ENDF/B-V.2: RU1005 ENDF/B-VII.0: RU1007 ENDF/B-VII.1: RU1007 RU101: ENDF/B-V.2: RU1015 ENDF/B-VII.0: RU1017 ENDF/B-VII.1: RU1017 RU102: ENDF/B-V.2: RU1025 ENDF/B-VII.0: RU1027 ENDF/B-VII.1: RU1027 RU103: ENDF/B-V.2: 
RU1035 ENDF/B-VII.0: RU1037 ENDF/B-VII.1: RU1037 RU104: ENDF/B-V.2: RU1045 ENDF/B-VII.0: RU1047 ENDF/B-VII.1: RU1047 RU105: ENDF/B-V.2: RU1055 ENDF/B-VII.0: RU1057 ENDF/B-VII.1: RU1057 RU106: ENDF/B-V.2: RU1065 ENDF/B-VII.0: RU1067 ENDF/B-VII.1: RU1067 RU96: ENDF/B-V.2: RU96 5 ENDF/B-VII.0: RU96_7 ENDF/B-VII.1: RU96_7 RU98: ENDF/B-V.2: RU98 5 ENDF/B-VII.0: RU98_7 ENDF/B-VII.1: RU98_7 RU99: ENDF/B-V.2: RU99 5 ENDF/B-VII.0: RU99_7 ENDF/B-VII.1: RU99_7 S: ENDF/B-V.2: S 5 ENDF/B-VII.0: null ENDF/B-VII.1: null S32: ENDF/B-V.2: S-32 5 ENDF/B-VII.0: S32__7 ENDF/B-VII.1: S32__7 S33: ENDF/B-V.2: null ENDF/B-VII.0: S33__7 ENDF/B-VII.1: S33__7 S34: ENDF/B-V.2: null ENDF/B-VII.0: S34__7 ENDF/B-VII.1: S34__7 S36: ENDF/B-V.2: null ENDF/B-VII.0: S36__7 ENDF/B-VII.1: S36__7 SB121: ENDF/B-V.2: SB1215 ENDF/B-VII.0: SB1217 ENDF/B-VII.1: SB1217 SB123: ENDF/B-V.2: SB1235 ENDF/B-VII.0: SB1237 ENDF/B-VII.1: SB1237 SB124: ENDF/B-V.2: SB1245 ENDF/B-VII.0: SB1247 ENDF/B-VII.1: SB1247 SB125: ENDF/B-V.2: SB1255 ENDF/B-VII.0: SB1257 ENDF/B-VII.1: SB1257 SB126: ENDF/B-V.2: SB1265 ENDF/B-VII.0: SB1267 ENDF/B-VII.1: SB1267 SC45: ENDF/B-V.2: null ENDF/B-VII.0: SC45_7 ENDF/B-VII.1: SC45_7 SE74: ENDF/B-V.2: SE74 5 ENDF/B-VII.0: SE74_7 ENDF/B-VII.1: SE74_7 SE76: ENDF/B-V.2: SE76 5 ENDF/B-VII.0: SE76_7 ENDF/B-VII.1: SE76_7 SE77: ENDF/B-V.2: SE77 5 ENDF/B-VII.0: SE77_7 ENDF/B-VII.1: SE77_7 SE78: ENDF/B-V.2: SE78 5 ENDF/B-VII.0: SE78_7 ENDF/B-VII.1: SE78_7 SE79: ENDF/B-V.2: null ENDF/B-VII.0: SE79_7 ENDF/B-VII.1: SE79_7 SE80: ENDF/B-V.2: SE80 5 ENDF/B-VII.0: SE80_7 ENDF/B-VII.1: SE80_7 SE82: ENDF/B-V.2: SE82 5 ENDF/B-VII.0: SE82_7 ENDF/B-VII.1: SE82_7 SI: ENDF/B-V.2: SI 5 ENDF/B-VII.0: null ENDF/B-VII.1: null SI28: ENDF/B-V.2: null ENDF/B-VII.0: SI28_7 ENDF/B-VII.1: SI28_7 SI29: ENDF/B-V.2: null ENDF/B-VII.0: SI29_7 ENDF/B-VII.1: SI29_7 SI30: ENDF/B-V.2: null ENDF/B-VII.0: SI30_7 ENDF/B-VII.1: SI30_7 SM144: ENDF/B-V.2: SM1445 ENDF/B-VII.0: SM1447 ENDF/B-VII.1: SM1447 SM147: ENDF/B-V.2: SM1475 
ENDF/B-VII.0: SM1477 ENDF/B-VII.1: SM1477 SM148: ENDF/B-V.2: SM1485 ENDF/B-VII.0: SM1487 ENDF/B-VII.1: SM1487 SM149: ENDF/B-V.2: SM1495 ENDF/B-VII.0: SM1497 ENDF/B-VII.1: SM1497 SM150: ENDF/B-V.2: SM1505 ENDF/B-VII.0: SM1507 ENDF/B-VII.1: SM1507 SM151: ENDF/B-V.2: SM1515 ENDF/B-VII.0: SM1517 ENDF/B-VII.1: SM1517 SM152: ENDF/B-V.2: SM1525 ENDF/B-VII.0: SM1527 ENDF/B-VII.1: SM1527 SM153: ENDF/B-V.2: SM1535 ENDF/B-VII.0: SM1537 ENDF/B-VII.1: SM1537 SM154: ENDF/B-V.2: SM1545 ENDF/B-VII.0: SM1547 ENDF/B-VII.1: SM1547 SN112: ENDF/B-V.2: SN1125 ENDF/B-VII.0: SN1127 ENDF/B-VII.1: SN1127 SN113: ENDF/B-V.2: null ENDF/B-VII.0: SN1137 ENDF/B-VII.1: SN1137 SN114: ENDF/B-V.2: SN1145 ENDF/B-VII.0: SN1147 ENDF/B-VII.1: SN1147 SN115: ENDF/B-V.2: SN1155 ENDF/B-VII.0: SN1157 ENDF/B-VII.1: SN1157 SN116: ENDF/B-V.2: SN1165 ENDF/B-VII.0: SN1167 ENDF/B-VII.1: SN1167 SN117: ENDF/B-V.2: SN1175 ENDF/B-VII.0: SN1177 ENDF/B-VII.1: SN1177 SN118: ENDF/B-V.2: SN1185 ENDF/B-VII.0: SN1187 ENDF/B-VII.1: SN1187 SN119: ENDF/B-V.2: SN1195 ENDF/B-VII.0: SN1197 ENDF/B-VII.1: SN1197 SN120: ENDF/B-V.2: SN1205 ENDF/B-VII.0: SN1207 ENDF/B-VII.1: SN1207 SN122: ENDF/B-V.2: SN1225 ENDF/B-VII.0: SN1227 ENDF/B-VII.1: SN1227 SN123: ENDF/B-V.2: SN1235 ENDF/B-VII.0: SN1237 ENDF/B-VII.1: SN1237 SN124: ENDF/B-V.2: SN1245 ENDF/B-VII.0: SN1247 ENDF/B-VII.1: SN1247 SN125: ENDF/B-V.2: SN1255 ENDF/B-VII.0: SN1257 ENDF/B-VII.1: SN1257 SN126: ENDF/B-V.2: SN1265 ENDF/B-VII.0: SN1267 ENDF/B-VII.1: SN1267 SR84: ENDF/B-V.2: SR84 5 ENDF/B-VII.0: SR84_7 ENDF/B-VII.1: SR84_7 SR86: ENDF/B-V.2: SR86 5 ENDF/B-VII.0: SR86_7 ENDF/B-VII.1: SR86_7 SR87: ENDF/B-V.2: SR87 5 ENDF/B-VII.0: SR87_7 ENDF/B-VII.1: SR87_7 SR88: ENDF/B-V.2: SR88 5 ENDF/B-VII.0: SR88_7 ENDF/B-VII.1: SR88_7 SR89: ENDF/B-V.2: SR89 5 ENDF/B-VII.0: SR89_7 ENDF/B-VII.1: SR89_7 SR90: ENDF/B-V.2: SR90 5 ENDF/B-VII.0: SR90_7 ENDF/B-VII.1: SR90_7 TA180: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: TA1807 TA181: ENDF/B-V.2: TA1815 ENDF/B-VII.0: TA1817 ENDF/B-VII.1: 
TA1817 TA182: ENDF/B-V.2: TA1825 ENDF/B-VII.0: TA1827 ENDF/B-VII.1: TA1827 TB159: ENDF/B-V.2: TB1595 ENDF/B-VII.0: TB1597 ENDF/B-VII.1: TB1597 TB160: ENDF/B-V.2: TB1605 ENDF/B-VII.0: TB1607 ENDF/B-VII.1: TB1607 TC99: ENDF/B-V.2: TC99 5 ENDF/B-VII.0: TC99_7 ENDF/B-VII.1: TC99_7 TE120: ENDF/B-V.2: TE1205 ENDF/B-VII.0: TE1207 ENDF/B-VII.1: TE1207 TE122: ENDF/B-V.2: TE1225 ENDF/B-VII.0: TE1227 ENDF/B-VII.1: TE1227 TE123: ENDF/B-V.2: TE1235 ENDF/B-VII.0: TE1237 ENDF/B-VII.1: TE1237 TE124: ENDF/B-V.2: TE1245 ENDF/B-VII.0: TE1247 ENDF/B-VII.1: TE1247 TE125: ENDF/B-V.2: TE1255 ENDF/B-VII.0: TE1257 ENDF/B-VII.1: TE1257 TE126: ENDF/B-V.2: TE1265 ENDF/B-VII.0: TE1267 ENDF/B-VII.1: TE1267 TE127M: ENDF/B-V.2: TE127M ENDF/B-VII.0: TE27M7 ENDF/B-VII.1: TE27M7 TE128: ENDF/B-V.2: TE1285 ENDF/B-VII.0: TE1287 ENDF/B-VII.1: TE1287 TE129M: ENDF/B-V.2: TE129M ENDF/B-VII.0: TE29M7 ENDF/B-VII.1: TE29M7 TE130: ENDF/B-V.2: TE1305 ENDF/B-VII.0: TE1307 ENDF/B-VII.1: TE1307 TE132: ENDF/B-V.2: TE1325 ENDF/B-VII.0: TE1327 ENDF/B-VII.1: TE1327 TH227: ENDF/B-V.2: null ENDF/B-VII.0: TH2277 ENDF/B-VII.1: TH2277 TH228: ENDF/B-V.2: null ENDF/B-VII.0: TH2287 ENDF/B-VII.1: TH2287 TH229: ENDF/B-V.2: null ENDF/B-VII.0: TH2297 ENDF/B-VII.1: TH2297 TH230: ENDF/B-V.2: TH2305 ENDF/B-VII.0: TH2307 ENDF/B-VII.1: TH2307 TH231: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: TH2317 TH232: ENDF/B-V.2: TH2325 ENDF/B-VII.0: TH2327 ENDF/B-VII.1: TH2327 TH233: ENDF/B-V.2: null ENDF/B-VII.0: TH2337 ENDF/B-VII.1: TH2337 TH234: ENDF/B-V.2: null ENDF/B-VII.0: TH2347 ENDF/B-VII.1: TH2347 TI: ENDF/B-V.2: TI 5 ENDF/B-VII.0: null ENDF/B-VII.1: null TI46: ENDF/B-V.2: null ENDF/B-VII.0: TI46_7 ENDF/B-VII.1: TI46_7 TI47: ENDF/B-V.2: null ENDF/B-VII.0: TI47_7 ENDF/B-VII.1: TI47_7 TI48: ENDF/B-V.2: null ENDF/B-VII.0: TI48_7 ENDF/B-VII.1: TI48_7 TI49: ENDF/B-V.2: null ENDF/B-VII.0: TI49_7 ENDF/B-VII.1: TI49_7 TI50: ENDF/B-V.2: null ENDF/B-VII.0: TI50_7 ENDF/B-VII.1: TI50_7 TM168: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: 
TM1687 TM169: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: TM1697 TM170: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: TM1707 TL203: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: TL2037 TL205: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: TL2057 U230: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: U230_7 U231: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: U231_7 U232: ENDF/B-V.2: U-2325 ENDF/B-VII.0: U232_7 ENDF/B-VII.1: U232_7 U233: ENDF/B-V.2: U-2335 ENDF/B-VII.0: U233_7 ENDF/B-VII.1: U233_7 U234: ENDF/B-V.2: U-2345 ENDF/B-VII.0: U234_7 ENDF/B-VII.1: U234_7 U235: ENDF/B-V.2: U-2355 ENDF/B-VII.0: U235_7 ENDF/B-VII.1: U235_7 U236: ENDF/B-V.2: U-2365 ENDF/B-VII.0: U236_7 ENDF/B-VII.1: U236_7 U237: ENDF/B-V.2: U-2375 ENDF/B-VII.0: U237_7 ENDF/B-VII.1: U237_7 U238: ENDF/B-V.2: U-2385 ENDF/B-VII.0: U238_7 ENDF/B-VII.1: U238_7 U239: ENDF/B-V.2: null ENDF/B-VII.0: U239_7 ENDF/B-VII.1: U239_7 U240: ENDF/B-V.2: null ENDF/B-VII.0: U240_7 ENDF/B-VII.1: U240_7 U241: ENDF/B-V.2: null ENDF/B-VII.0: U241_7 ENDF/B-VII.1: U241_7 V: ENDF/B-V.2: V 5 ENDF/B-VII.0: V____7 ENDF/B-VII.1: null V50: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: V50__7 V51: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: V51__7 W180: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: W180_7 W182: ENDF/B-V.2: W-182V ENDF/B-VII.0: W182_7 ENDF/B-VII.1: W182_7 W183: ENDF/B-V.2: W-183V ENDF/B-VII.0: W183_7 ENDF/B-VII.1: W183_7 W184: ENDF/B-V.2: W-184V ENDF/B-VII.0: W184_7 ENDF/B-VII.1: W184_7 W186: ENDF/B-V.2: W-186V ENDF/B-VII.0: W186_7 ENDF/B-VII.1: W186_7 XE123: ENDF/B-V.2: null ENDF/B-VII.0: XE1237 ENDF/B-VII.1: XE1237 XE124: ENDF/B-V.2: XE1245 ENDF/B-VII.0: XE1247 ENDF/B-VII.1: XE1247 XE126: ENDF/B-V.2: XE1265 ENDF/B-VII.0: XE1267 ENDF/B-VII.1: XE1267 XE128: ENDF/B-V.2: XE1285 ENDF/B-VII.0: XE1287 ENDF/B-VII.1: XE1287 XE129: ENDF/B-V.2: XE1295 ENDF/B-VII.0: XE1297 ENDF/B-VII.1: XE1297 XE130: ENDF/B-V.2: XE1305 ENDF/B-VII.0: XE1307 ENDF/B-VII.1: XE1307 XE131: ENDF/B-V.2: XE1315 
ENDF/B-VII.0: XE1317 ENDF/B-VII.1: XE1317 XE132: ENDF/B-V.2: XE1325 ENDF/B-VII.0: XE1327 ENDF/B-VII.1: XE1327 XE133: ENDF/B-V.2: XE1335 ENDF/B-VII.0: XE1337 ENDF/B-VII.1: XE1337 XE134: ENDF/B-V.2: XE1345 ENDF/B-VII.0: XE1347 ENDF/B-VII.1: XE1347 XE135: ENDF/B-V.2: XE1355 ENDF/B-VII.0: XE1357 ENDF/B-VII.1: XE1357 XE136: ENDF/B-V.2: XE1365 ENDF/B-VII.0: XE1367 ENDF/B-VII.1: XE1367 Y89: ENDF/B-V.2: Y89 5 ENDF/B-VII.0: Y89__7 ENDF/B-VII.1: Y89__7 Y90: ENDF/B-V.2: Y90 5 ENDF/B-VII.0: null ENDF/B-VII.1: Y90__7 Y91: ENDF/B-V.2: Y91 5 ENDF/B-VII.0: Y91__7 ENDF/B-VII.1: Y91__7 ZN: ENDF/B-V.2: null ENDF/B-VII.0: ZN___7 ENDF/B-VII.1: null ZN64: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ZN64_7 ZN65: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ZN65_7 ZN66: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ZN66_7 ZN67: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ZN67_7 ZN68: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ZN68_7 ZN70: ENDF/B-V.2: null ENDF/B-VII.0: null ENDF/B-VII.1: ZN70_7 ZR: ENDF/B-V.2: ZIRCSV ENDF/B-VII.0: null ENDF/B-VII.1: null ZR90: ENDF/B-V.2: ZR90SV ENDF/B-VII.0: ZR90_7 ENDF/B-VII.1: ZR90_7 ZR91: ENDF/B-V.2: ZR91SV ENDF/B-VII.0: ZR91_7 ENDF/B-VII.1: ZR91_7 ZR92: ENDF/B-V.2: ZR92SV ENDF/B-VII.0: ZR92_7 ENDF/B-VII.1: ZR92_7 ZR93: ENDF/B-V.2: ZR93 5 ENDF/B-VII.0: ZR93_7 ENDF/B-VII.1: ZR93_7 ZR94: ENDF/B-V.2: ZR94SV ENDF/B-VII.0: ZR94_7 ENDF/B-VII.1: ZR94_7 ZR95: ENDF/B-V.2: ZR95 5 ENDF/B-VII.0: ZR95_7 ENDF/B-VII.1: ZR95_7 ZR96: ENDF/B-V.2: ZR96 5 ENDF/B-VII.0: ZR96_7 ENDF/B-VII.1: ZR96_7 ================================================ FILE: armi/runLog.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module handles logging of console during a simulation.

The default way of calling and the global armi logger is to just import it:

.. code::

    from armi import runLog

You may want a logger specific to a single module, say to provide debug logging for only one module.
That functionality is provided by a global override of logging imports:

.. code::

    import logging
    runLog = logging.getLogger(__name__)

In either case, you can then log things the same way:

.. code::

    runLog.info('information here')
    runLog.error('extra error info here')
    raise SomeException  # runLog.error() implies that the code will crash!

Or change the log level the same way:

.. code::

    runLog.setVerbosity('debug')
"""

import collections
import logging
import operator
import os
import sys
import time
from glob import glob

from armi import context

# global constants
# Source template exec'd in _setLogLevels to patch convenience methods (e.g. Logger.extra)
# onto logging.Logger for each custom level.
_ADD_LOG_METHOD_STR = """def {0}(self, message, *args, **kws):
    if self.isEnabledFor({1}):
        self._log({1}, message, args, **kws)
logging.Logger.{0} = {0}"""
OS_SECONDS_TIMEOUT = 2 * 60
SEP = "|"
STDERR_LOGGER_NAME = "ARMI_ERROR"
STDOUT_LOGGER_NAME = "ARMI"


class _RunLog:
    """
    Handles all the logging.

    For the parent process, things are allowed to print to stdout and stderr,
    but the stdout prints are formatted like log statements.
    For the child processes, everything is piped to log files.
    """

    STDERR_NAME = "{0}.{1:04d}.stderr"
    STDOUT_NAME = "{0}.{1:04d}.stdout"

    def __init__(self, mpiRank=0):
        """
        Build a log object.

        Parameters
        ----------
        mpiRank : int
            If this is zero, we are in the parent process, otherwise child process.
            This should not be adjusted after instantiation.
        """
        self._mpiRank = mpiRank
        self._verbosity = logging.INFO
        self.initialErr = None
        self.logLevels = None
        self._logLevelNumbers = []
        self.logger = None
        self.stderrLogger = None

        self.setNullLoggers()
        self._setLogLevels()

    def setNullLoggers(self):
        """Helper method to set both of our loggers to Null handlers."""
        self.logger = NullLogger("NULL")
        self.stderrLogger = NullLogger("NULL2", isStderr=True)

    @staticmethod
    def getLogLevels(mpiRank):
        """Helper method to build an important data object this class needs.

        Parameters
        ----------
        mpiRank : int
            If this is zero, we are in the parent process, otherwise child process.
            This should not be adjusted after instantiation.
        """
        rank = "" if mpiRank == 0 else f"-{mpiRank:>03d}"

        # NOTE: using ordereddict so we can get right order of options in GUI
        return collections.OrderedDict(
            [
                ("debug", (logging.DEBUG, f"[dbug{rank}] ")),
                ("extra", (15, f"[xtra{rank}] ")),
                ("info", (logging.INFO, f"[info{rank}] ")),
                ("important", (25, f"[impt{rank}] ")),
                ("prompt", (27, f"[prmt{rank}] ")),
                ("warning", (logging.WARNING, f"[warn{rank}] ")),
                ("error", (logging.ERROR, f"[err {rank}] ")),
                ("header", (100, f"{rank}")),
            ]
        )

    @staticmethod
    def getWhiteSpace(mpiRank):
        """Helper method to build the white space used to left-adjust the log lines.

        Parameters
        ----------
        mpiRank : int
            If this is zero, we are in the parent process, otherwise child process.
            This should not be adjusted after instantiation.
        """
        logLevels = _RunLog.getLogLevels(mpiRank)
        # NOTE: This intends the length of the LONGEST prefix string. A bare
        # max(strings) is lexicographic and only worked by accident because all
        # bracketed prefixes share one length; compute the max length explicitly.
        return " " * max(len(ll[1]) for ll in logLevels.values())

    def _setLogLevels(self):
        """Here we fill the logLevels dict with custom strings that depend on the MPI rank."""
        self.logLevels = self.getLogLevels(self._mpiRank)
        self._logLevelNumbers = sorted([ll[0] for ll in self.logLevels.values()])

        # modify the logging module strings for printing
        for longLogString, (logValue, shortLogString) in self.logLevels.items():
            # add the log string name (upper and lower) to logging module
            logging.addLevelName(logValue, shortLogString.upper())
            logging.addLevelName(logValue, shortLogString)

            # ensure that we add any custom logging levels as constants to the module, e.g. logging.HEADER
            try:
                getattr(logging, longLogString.upper())
            except AttributeError:
                setattr(logging, longLogString.upper(), logValue)

            # Add logging methods for our new custom levels: LOG.extra("message")
            try:
                getattr(logging, longLogString)
            except AttributeError:
                # exec on an internal, constant template string (not user input)
                exec(_ADD_LOG_METHOD_STR.format(longLogString, logValue))

    def log(self, msgType, msg, single=False, label=None, **kwargs):
        """
        This is a wrapper around logger.log() that does most of the work and is
        used by all message passers (e.g. info, warning, etc.).

        In this situation, we do the mangling needed to get the log level to the correct number.
        And we do some custom string manipulation so we can handle de-duplicating warnings.
        """
        # Determine the log level: users can optionally pass in custom strings ("debug")
        msgLevel = msgType if isinstance(msgType, int) else self.logLevels[msgType][0]

        # If this is a special "don't duplicate me" string, we need to add that info to the msg temporarily
        msg = str(msg)

        # Do the actual logging
        self.logger.log(msgLevel, msg, single=single, label=label)

    def getDuplicatesFilter(self):
        """If it exists, find the top-level ARMI logger 'should have a no duplicates' filter."""
        if not self.logger or not isinstance(self.logger, logging.Logger):
            return None

        return self.logger.getDuplicatesFilter()

    def clearSingleLogs(self):
        """Reset the list of de-duplicated warnings, so users can see those warnings again."""
        dupsFilter = self.getDuplicatesFilter()
        if dupsFilter:
            dupsFilter.singleMessageLabels.clear()

    def warningReport(self):
        """Summarize all warnings for the run."""
        self.logger.warningReport()

    def getLogVerbosityRank(self, level):
        """Return integer verbosity rank given the string verbosity name."""
        try:
            return self.logLevels[level][0]
        except KeyError:
            log_strs = list(self.logLevels.keys())
            raise KeyError(f"{level} is not a valid verbosity level: {log_strs}")

    def setVerbosity(self, level):
        """
        Sets the minimum output verbosity for the logger.

        Any message with a higher verbosity than this will be emitted.

        Parameters
        ----------
        level : int or str
            The level to set the log output verbosity to.
            Valid numbers are 0-50 and valid strings are keys of logLevels

        Examples
        --------
        >>> setVerbosity('debug') -> sets to 0
        >>> setVerbosity(0) -> sets to 0
        """
        # first, we have to get a valid integer from the input level
        if isinstance(level, str):
            self._verbosity = self.getLogVerbosityRank(level)
        elif isinstance(level, int):
            # The logging module does strange things if you set the log level to something other
            # than DEBUG, INFO, etc. So, if someone tries, we HAVE to set the log level at a
            # canonical value. Otherwise, nearly all log statements will be silently dropped.
            if level in self._logLevelNumbers:
                self._verbosity = level
            elif level < self._logLevelNumbers[0]:
                # below the lowest known level: clamp up to the lowest
                self._verbosity = self._logLevelNumbers[0]
            else:
                # otherwise, round DOWN to the nearest canonical level
                for i in range(len(self._logLevelNumbers) - 1, -1, -1):
                    if level >= self._logLevelNumbers[i]:
                        self._verbosity = self._logLevelNumbers[i]
                        break
        else:
            raise TypeError(f"Invalid verbosity rank {level}.")

        # Finally, set the log level
        if self.logger is not None:
            for handler in self.logger.handlers:
                handler.setLevel(self._verbosity)
            self.logger.setLevel(self._verbosity)

    def getVerbosity(self):
        """Return the global runLog verbosity."""
        return self._verbosity

    def restoreStandardStreams(self):
        """Set the system stderr back to its default (as it was when the run started)."""
        if self.initialErr is not None and self._mpiRank > 0:
            sys.stderr = self.initialErr

    def startLog(self, name):
        """Initialize the streams when parallel processing."""
        # open the main logger
        self.logger = logging.getLogger(STDOUT_LOGGER_NAME + SEP + name + SEP + str(self._mpiRank))

        # if there was a pre-existing _verbosity, use it now
        if self._verbosity != logging.INFO:
            self.setVerbosity(self._verbosity)

        if self._mpiRank != 0:
            # init stderr intercepting logging
            filePath = os.path.join(getLogDir(), _RunLog.STDERR_NAME.format(name, self._mpiRank))
            self.stderrLogger = logging.getLogger(STDERR_LOGGER_NAME)
            h = logging.FileHandler(filePath, delay=True)
            fmt = "%(message)s"
            form = logging.Formatter(fmt)
            h.setFormatter(form)
            h.setLevel(logging.WARNING)
            self.stderrLogger.handlers = [h]
            self.stderrLogger.setLevel(logging.WARNING)

            # force the error logger onto stderr
            self.initialErr = sys.stderr
            sys.stderr = self.stderrLogger


def getLogDir():
    """This returns a file path for the `logs` directory, first checking if the user set the
    ARMI_TEMP_ROOT_PATH environment variable.
    """
    if os.environ.get("ARMI_TEMP_ROOT_PATH"):
        return os.path.join(os.environ["ARMI_TEMP_ROOT_PATH"], "logs")
    else:
        return os.path.join(os.getcwd(), "logs")


def close(mpiRank=None):
    """End use of the log.

    Concatenate if needed and restore defaults.
    """
    mpiRank = context.MPI_RANK if mpiRank is None else mpiRank

    if mpiRank == 0:
        try:
            concatenateLogs()
        except IOError as ee:
            warning("Failed to concatenate logs due to IOError.")
            error(ee)
    else:
        # child processes only need to close their file handlers
        if LOG.stderrLogger:
            _ = [h.close() for h in LOG.stderrLogger.handlers]
        if LOG.logger:
            _ = [h.close() for h in LOG.logger.handlers]

    LOG.setNullLoggers()
    LOG.restoreStandardStreams()


def concatenateLogs(logDir=None):
    """
    Concatenate the armi run logs and delete them.

    Should only ever be called by parent.

    .. impl:: Log files from different processes are combined.
        :id: I_ARMI_LOG_MPI
        :implements: R_ARMI_LOG_MPI

        The log files are plain text files. Since ARMI is frequently run in parallel, the
        situation arises where each ARMI process generates its own plain text log file.
        This function combines the separate log files, per process, into one log file.

        The files are written in numerical order, with the lead process stdout first then
        the lead process stderr. Then each other process is written to the combined file,
        in order, stdout then stderr. Finally, the original stdout and stderr files are
        deleted.
    """
    if logDir is None:
        logDir = getLogDir()

    # find all the logging-module-based log files
    stdoutFiles = sorted(glob(os.path.join(logDir, "*.stdout")))
    if not stdoutFiles:
        info("No log files found to concatenate.")
        return

    info(f"Concatenating {len(stdoutFiles)} log files")

    # default worker log name if none is found
    caseTitle = "armi-workers"
    for stdoutPath in stdoutFiles:
        stdoutFile = os.path.normpath(stdoutPath).split(os.sep)[-1]
        prefix = STDOUT_LOGGER_NAME + "."
        if stdoutFile.startswith(prefix):
            # NOTE: relies on the "<logger>|<case>|<rank>.<rank:04d>.stdout" name format
            candidate = stdoutFile.split(".")[-3]
            if len(candidate) > 0:
                caseTitle = candidate
                break

    combinedLogName = os.path.join(logDir, f"{caseTitle}-mpi.log")
    with open(combinedLogName, "w") as workerLog:
        workerLog.write("\n{0} CONCATENATED WORKER LOG FILES {1}\n".format("-" * 10, "-" * 10))

        for stdoutName in stdoutFiles:
            # NOTE: If the log file name format changes, this will need to change.
            rank = int(stdoutName.split(".")[-2])
            with open(stdoutName, "r") as logFile:
                data = logFile.read()
                # only write if there's something to write
                if data:
                    rankId = "\n{0} RANK {1:03d} STDOUT {2}\n".format("-" * 10, rank, "-" * 60)
                    if rank == 0:
                        print(rankId, file=sys.stdout)
                        print(data, file=sys.stdout)
                    else:
                        workerLog.write(rankId)
                        workerLog.write(data)
            try:
                os.remove(stdoutName)
            except OSError:
                warning(f"Could not delete {stdoutName}")

            # then print the stderr messages for that child process
            stderrName = stdoutName[:-3] + "err"
            if os.path.exists(stderrName):
                with open(stderrName) as logFile:
                    data = logFile.read()
                    if data:
                        # only write if there's something to write.
                        rankId = "\n{0} RANK {1:03d} STDERR {2}\n".format("-" * 10, rank, "-" * 60)
                        print(rankId, file=sys.stderr)
                        print(data, file=sys.stderr)
                try:
                    os.remove(stderrName)
                except OSError:
                    warning(f"Could not delete {stderrName}")


# Here are all the module-level functions that should be used for most outputs. They use the Log
# object behind the scenes.
def raw(msg): """Print raw text without any special functionality.""" LOG.log("header", msg, single=False) def extra(msg, single=False, label=None): LOG.log("extra", msg, single=single, label=label) def debug(msg, single=False, label=None): LOG.log("debug", msg, single=single, label=label) def info(msg, single=False, label=None): LOG.log("info", msg, single=single, label=label) def important(msg, single=False, label=None): LOG.log("important", msg, single=single, label=label) def warning(msg, single=False, label=None): LOG.log("warning", msg, single=single, label=label) def error(msg, single=False, label=None): LOG.log("error", msg, single=single, label=label) def header(msg, single=False, label=None): LOG.log("header", msg, single=single, label=label) def warningReport(): LOG.warningReport() def setVerbosity(level): LOG.setVerbosity(level) def getVerbosity(): return LOG.getVerbosity() class DeduplicationFilter(logging.Filter): """ Important logging filter. * allow users to turn off duplicate warnings * handles special indentation rules for our logs """ def __init__(self, *args, **kwargs): logging.Filter.__init__(self, *args, **kwargs) self.singleMessageLabels = set() self.warningCounts = {} def filter(self, record): # determine if this is a "do not duplicate" message msg = str(record.msg) single = getattr(record, "single", False) # grab the label if it exist, otherwise use the message itself as the label label = getattr(record, "label", msg) label = msg if label is None else label # Track all warnings, for warning report if record.levelno in (logging.WARNING, logging.CRITICAL): if label not in self.warningCounts: self.warningCounts[label] = 1 else: self.warningCounts[label] += 1 if single: return False # If the message is set to "do not duplicate" we may filter it out if single: # in sub-warning cases, hash the label, for faster lookup label = hash(label) if label not in self.singleMessageLabels: self.singleMessageLabels.add(label) else: return False # Handle some 
special string-mangling we want to do, for multi-line messages whiteSpace = _RunLog.getWhiteSpace(context.MPI_RANK) record.msg = msg.rstrip().replace("\n", "\n" + whiteSpace) return True class RunLogger(logging.Logger): """Custom Logger to support our specific desires. 1. Giving users the option to de-duplicate warnings 2. Piping stderr to a log file .. impl:: A simulation-wide log, with user-specified verbosity. :id: I_ARMI_LOG :implements: R_ARMI_LOG Log statements are any text a user wants to record during a run. For instance, basic notifications of what is happening in the run, simple warnings, or hard errors. Every log message has an associated log level, controlled by the "verbosity" of the logging statement in the code. In the ARMI codebase, you can see many examples of logging: .. code-block:: python runLog.error("This sort of error might usually terminate the run.") runLog.warning("Users probably want to know.") runLog.info("This is the usual verbosity.") runLog.debug("This is only logged during a debug run.") The full list of logging levels is defined in ``_RunLog.getLogLevels()``, and the developer specifies the verbosity of a run via ``_RunLog.setVerbosity()``. At the end of the ARMI-based simulation, the analyst will have a full record of potentially interesting information they can use to understand their run. .. impl:: Logging is done to the screen and to file. :id: I_ARMI_LOG_IO :implements: R_ARMI_LOG_IO This logger makes it easy for users to add log statements to and ARMI application, and ARMI will control the flow of those log statements. In particular, ARMI overrides the normal Python logging tooling, to allow developers to pipe their log statements to both screen and file. This works for stdout and stderr. At any place in the ARMI application, developers can interject a plain text logging message, and when that code is hit during an ARMI simulation, the text will be piped to screen and a log file. 
By default, the ``logging`` module only logs to screen, but ARMI adds a ``FileHandler`` in the ``RunLog`` constructor and in ``_RunLog.startLog``. """ FMT = "%(levelname)s%(message)s" # This is being set as a class attribute so it only runs once, before the class is initialized. For some bespoke # MPI use cases, calling the function when setting the `filePath` causes issues. This sidesteps the problem. LOG_DIR = getLogDir() def __init__(self, *args, **kwargs): # optionally, the user can pass in the MPI_RANK by putting it in the logger name after a separator string # args[0].split(SEP): 0 = "ARMI", 1 = caseTitle, 2 = MPI_RANK if SEP in args[0]: mpiRank = int(args[0].split(SEP)[-1].strip()) args = (".".join(args[0].split(SEP)[0:2]),) else: mpiRank = context.MPI_RANK logging.Logger.__init__(self, *args, **kwargs) self.allowStopDuplicates() if mpiRank == 0: handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.INFO) self.setLevel(logging.INFO) else: filePath = os.path.join(RunLogger.LOG_DIR, _RunLog.STDOUT_NAME.format(args[0], mpiRank)) handler = logging.FileHandler(filePath, delay=True) handler.setLevel(logging.WARNING) self.setLevel(logging.WARNING) form = logging.Formatter(RunLogger.FMT) handler.setFormatter(form) self.addHandler(handler) def log(self, msgType, msg, single=False, label=None, *args, **kwargs): """ This is a wrapper around logger.log() that does most of the work. This is used by all message passers (e.g. info, warning, etc.). In this situation, we do the mangling needed to get the log level to the correct number. And we do some custom string manipulation so we can handle de-duplicating warnings. 
""" # Determine the log level: users can optionally pass in custom strings ("debug") msgLevel = msgType if isinstance(msgType, int) else LOG.logLevels[msgType][0] # Do the actual logging logging.Logger.log(self, msgLevel, str(msg), extra={"single": single, "label": label}) def _log(self, *args, **kwargs): """ Wrapper around the standard library Logger._log() method. The primary goal here is to allow us to support the deduplication of warnings. Notes ----- All of the ``*args`` and ``**kwargs`` logic here are mandatory, as the standard library implementation of this method changed the number of kwargs between Python v3.4 and v3.9. """ # we need 'extra' as an output keyword, even if empty if "extra" not in kwargs: kwargs["extra"] = {} # make sure to populate the single/label data for de-duplication if "single" not in kwargs["extra"]: msg = args[1] single = kwargs.pop("single", False) label = kwargs.pop("label", None) label = msg if label is None else label kwargs["extra"]["single"] = single kwargs["extra"]["label"] = label logging.Logger._log(self, *args, **kwargs) def allowStopDuplicates(self): """Helper method to allow us to safely add the deduplication filter at any time.""" for f in self.filters: if isinstance(f, DeduplicationFilter): return self.addFilter(DeduplicationFilter()) def write(self, msg, **kwargs): """The redirect method that allows to do stderr piping.""" self.error(msg) def flush(self, *args, **kwargs): """Stub, purely to allow stderr piping.""" pass def close(self): """Helper method, to shutdown and delete a Logger.""" self.handlers.clear() del self def getDuplicatesFilter(self): """This object should have a no-duplicates filter. 
If it exists, find it.""" for f in self.filters: if isinstance(f, DeduplicationFilter): return f return None def warningReport(self): """Summarize all warnings for the run.""" self.info("----- Final Warning Count --------") self.info(" {0:^10s} {1:^25s}".format("COUNT", "LABEL")) # grab the no-duplicates filter, and exit early if it doesn't exist dupsFilter = self.getDuplicatesFilter() if dupsFilter is None: self.info(" {0:^10s} {1:^25s}".format(str(0), str("None Found"))) self.info("------------------------------------") return # sort by labcollections.defaultdict(lambda: 1) total = 0 for label, count in sorted(dupsFilter.warningCounts.items(), key=operator.itemgetter(1), reverse=True): self.info(f" {str(count):^10s} {str(label):^25s}") total += count self.info("------------------------------------") # add a totals line self.info(f" {str(total):^10s} Total Number of Warnings") self.info("------------------------------------") def setVerbosity(self, intLevel): """A helper method to try to partially support the local, historical method of the same name.""" self.setLevel(intLevel) class NullLogger(RunLogger): """This is really just a placeholder for logging before or after the span of a normal armi run. It will forward all logging to stdout/stderr, as you'd normally expect. But it will preserve the formatting and duplication tools of the armi library. 
""" def __init__(self, name, isStderr=False): RunLogger.__init__(self, name) if isStderr: self.handlers = [logging.StreamHandler(sys.stderr)] else: self.handlers = [logging.StreamHandler(sys.stdout)] def addHandler(self, *args, **kwargs): """Ensure this STAYS a null logger.""" pass # Setting the default logging class to be ours logging.RunLogger = RunLogger logging.setLoggerClass(RunLogger) def createLogDir(logDir: str = None) -> None: """A helper method to create the log directory.""" # the usual case is the user does not pass in a log dir path, so we use the global one if logDir is None: logDir = getLogDir() # create the directory if not os.path.exists(logDir): try: os.makedirs(logDir) except FileExistsError: # If we hit this race condition, we still win. return # potentially, wait for directory to be created secondsWait = 0.5 loopCounter = 0 while not os.path.exists(logDir): loopCounter += 1 if loopCounter > (OS_SECONDS_TIMEOUT / secondsWait): raise OSError(f"Was unable to create the log directory: {logDir}") time.sleep(secondsWait) if not os.path.exists(getLogDir()): createLogDir(getLogDir()) def logFactory(): """Create the default logging object.""" return _RunLog(int(context.MPI_RANK)) LOG = logFactory() ================================================ FILE: armi/settings/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Settings are various key-value pairs that determine a bunch of modeling and simulation behaviors. They are one of the key inputs to an ARMI run. They say which modules to run and which modeling approximations to apply and how many cycles to run and at what power and availability fraction and things like that. The ARMI Framework itself has many settings of its own, and plugins typically register some of their own settings as well. """ import fnmatch import glob import os from typing import List from ruamel import yaml from armi import runLog from armi.settings.caseSettings import Settings from armi.settings.setting import ( Default, # noqa: F401 Option, # noqa: F401 Setting, ) from armi.utils.customExceptions import InvalidSettingsFileError NOT_ENABLED = "" # An empty setting value implies that the feature def isBoolSetting(setting: Setting) -> bool: """Return whether the passed setting represents a boolean value.""" return isinstance(setting.default, bool) def recursivelyLoadSettingsFiles( rootDir, patterns: List[str], recursive=True, ignorePatterns: List[str] = None, handleInvalids=True, ): """ Scans path for valid xml files and returns their paths. Parameters ---------- rootDir : str The base path to scan for settings files patterns : list file patterns to match file names recursive : bool (optional) load files recursively ignorePatterns : list (optional) list of filename patterns to ignore handleInvalids : bool option to suppress errors generated when finding files that appear to be settings files but fail to load. This may happen when old settings are present. Returns ------- csFiles : list list of :py:class:`~armi.settings.caseSettings.Settings` objects. """ assert not isinstance(ignorePatterns, str), "Bare string passed as ignorePatterns. Make sure to pass a list" assert not isinstance(patterns, str), "Bare string passed as patterns. 
Make sure to pass a list" possibleSettings = [] runLog.info("Finding potential settings files matching {}.".format(patterns)) if recursive: for directory, _list, files in os.walk(rootDir): matches = set() for pattern in patterns: matches |= set(fnmatch.filter(files, pattern)) if ignorePatterns is not None: for ignorePattern in ignorePatterns: matches -= set(fnmatch.filter(files, ignorePattern)) possibleSettings.extend([os.path.join(directory, fname) for fname in matches]) else: for pattern in patterns: possibleSettings.extend(glob.glob(pattern)) csFiles = [] runLog.info("Checking for valid settings files.") for possibleSettingsFile in possibleSettings: if os.path.getsize(possibleSettingsFile) > 1e6: runLog.info("skipping {} -- looks too big".format(possibleSettingsFile)) continue try: cs = Settings() cs.loadFromInputFile(possibleSettingsFile, handleInvalids=handleInvalids) csFiles.append(cs) runLog.extra("loaded {}".format(possibleSettingsFile)) except InvalidSettingsFileError as ee: runLog.info("skipping {}\n {}".format(possibleSettingsFile, ee)) except yaml.composer.ComposerError as ee: runLog.info( "skipping {}; it appears to be an incomplete YAML snippet\n {}".format(possibleSettingsFile, ee) ) except Exception as ee: runLog.error( "Failed to parse {}.\nIt looked like a settings file but gave this exception:\n{}: {}".format( possibleSettingsFile, type(ee).__name__, ee ) ) raise csFiles.sort(key=lambda csFile: csFile.caseTitle) return csFiles def promptForSettingsFile(choice=None): """ Allows the user to select an ARMI input from the input files in the directory. Parameters ---------- choice : int, optional The item in the list of valid YAML files to load """ runLog.info("Welcome to the ARMI Loader") runLog.info("Scanning for ARMI settings files...") files = sorted(glob.glob("*.yaml")) if not files: runLog.info("No eligible settings files found. 
Creating settings without choice") return None if choice is None: for i, pathToFile in enumerate(files): runLog.info("[{0}] - {1}".format(i, os.path.split(pathToFile)[-1])) choice = int(input("Enter choice: ")) return files[choice] ================================================ FILE: armi/settings/caseSettings.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This defines a Settings object that acts mostly like a dictionary. It is meant so that each ARMI run has one-and-only-one Settings object. It records user settings like the core power level, the input file names, the number of cycles to run, the run type, the environment setup, and hundreds of other things. A Settings object can be saved as or loaded from an YAML file. The ARMI GUI is designed to create this settings file, which is then loaded by an ARMI process on the cluster. """ import io import logging import os from copy import copy, deepcopy from ruamel.yaml import YAML from armi import context, runLog from armi.settings import settingsIO from armi.settings.setting import Setting from armi.utils import pathTools from armi.utils.customExceptions import NonexistentSetting SIMPLE_CYCLES_INPUTS = { "availabilityFactor", "availabilityFactors", "powerFractions", "burnSteps", "cycleLength", "cycleLengths", } class Settings: """ A container for run settings, such as case title, power level, and many more. .. 
impl:: Settings are used to define an ARMI run. :id: I_ARMI_SETTING0 :implements: R_ARMI_SETTING The Settings object is accessible to most ARMI objects through self.cs (for 'case settings'). It acts largely as a dictionary, and setting values are accessed by keys. The Settings object has a 1-to-1 correspondence with the ARMI settings input file. This file may be created by hand or by a GUI. Notes ----- While it is possible to modify case settings during the course of a run, this is highly discouraged because there will be no record of this happening in your results or in the database produced from your run. There is no guarantee that doing so will not cause unexpected problems with your calculation. """ defaultCaseTitle = "armi" def __init__(self, fName=None): """ Instantiate a Settings object. Parameters ---------- fName : str, optional Path to a valid yaml settings file that will be loaded """ # if the "loadingFile" is not set, this better be True, or there are no blueprints at all self.filelessBP = False self._failOnLoad = False """This is state information. The command line can take settings, which override a value in the current settings file; however, if the settings file is listed after a setting value, the setting from the settings file will be used rather than the one explicitly provided by the user on the command line. Therefore, _failOnLoad is used to prevent this from happening. """ from armi import getApp self.path = "" app = getApp() assert app is not None self.__settings = app.getSettings() if fName: self.loadFromInputFile(fName) @property def inputDirectory(self): """Getter for settings file path.""" if self.path is None: return os.getcwd() else: return os.path.dirname(self.path) @property def caseTitle(self): """Getter for settings case title. .. impl:: Define a case title to go with the settings. :id: I_ARMI_SETTINGS_META0 :implements: R_ARMI_SETTINGS_META Every Settings object has a "case title"; a string for users to help identify their run. 
This case title is used in log file names, it is printed during a run, it is frequently used to name the settings file. It is designed to be an easy-to-use and easy-to-understand way to keep track of simulations. The general idea here is that the average analyst that is using ARMI will run many ARMI-based simulations, and there needs to be an easy to identify them all. """ if not self.path: return self.defaultCaseTitle else: return os.path.splitext(os.path.basename(self.path))[0] @caseTitle.setter def caseTitle(self, value): """Setter for the case title.""" self.path = os.path.join(self.inputDirectory, value + ".yaml") @property def environmentSettings(self): """Getter for environment settings.""" return [setting.name for setting in self.__settings.values() if setting.isEnvironment] def __contains__(self, key): return key in self.__settings def __repr__(self): total = len(self.__settings.keys()) isAltered = lambda s: 1 if s.value != s.default else 0 altered = sum([isAltered(setting) for setting in self.__settings.values()]) return "<{} name:{} total:{} altered:{}>".format(self.__class__.__name__, self.caseTitle, total, altered) def _directAccessOfSettingAllowed(self, key): """ A way to check if specific settings can be grabbed out of the case settings. Could be updated with other specific instances as necessary. Notes ----- Checking the validity of grabbing specific settings at this point, as is done for the SIMPLE_CYCLES_INPUT's, feels a bit intrusive and out of place. In particular, the fact that the check is done every time that a setting is reached for, no matter if it is the setting in question, is quite clunky. In the future, it would be desirable if the settings system were more flexible to control this type of thing at a deeper level. 
""" if key not in self.__settings: return False, NonexistentSetting(key) if key in SIMPLE_CYCLES_INPUTS and self.__settings["cycles"].value != []: err = ValueError( "Cannot grab simple cycles information from the case settings when detailed cycles " "information is also entered. In general cycles information should be pulled off " "the operator or parsed using the appropriate getter in the utils." ) return False, err return True, None def __getitem__(self, key): settingIsOkayToGrab, err = self._directAccessOfSettingAllowed(key) if settingIsOkayToGrab: return self.__settings[key].value else: raise err def getSetting(self, key, default=None): """ Return a copy of an actual Setting object, instead of just its value. Notes ----- This is used very rarely, try to organize your code to only need a Setting value. """ if key in self.__settings: return copy(self.__settings[key]) elif default is not None: return default else: raise NonexistentSetting(key) def __setitem__(self, key, val): """ Notes ----- This potentially allows for invisible settings mutations. """ if key in self.__settings: self.__settings[key].setValue(val) else: raise NonexistentSetting(key) def __setstate__(self, state): """ Rebuild schema upon unpickling since schema is unpickleable. Pickling happens during mpi broadcasts and also during testing where the test reactor is cached. 
See Also -------- armi.settings.setting.Setting.__getstate__ : removes schema """ from armi import getApp self.__settings = getApp().getSettings() # restore non-setting instance attrs for key, val in state.items(): if key != "_Settings__settings": setattr(self, key, val) # with schema restored, restore all setting values for name, settingState in state["_Settings__settings"].items(): if name in self.__settings: self.__settings[name]._value = settingState.value elif isinstance(settingState, Setting): self.__settings[name] = copy(settingState) else: raise NonexistentSetting(name) def keys(self): return self.__settings.keys() def values(self): return self.__settings.values() def items(self): return self.__settings.items() def duplicate(self): """Return a duplicate copy of this settings object.""" cs = deepcopy(self) cs._failOnLoad = False # It's not really protected access since it is a new Settings object. _failOnLoad is set to # false, because this new settings object should be independent of the command line return cs def revertToDefaults(self): """Sets every setting back to its default value.""" for setting in self.__settings.values(): setting.revertToDefault() def failOnLoad(self): """This method is used to force loading a file to fail. After command line processing of settings has begun, the settings should be fully defined. If the settings are loaded """ self._failOnLoad = True def loadFromInputFile(self, fName, handleInvalids=True, setPath=True): """ Read in settings from an input YAML file. Passes the reader back out in case you want to know something about how the reading went like for knowing if a file contained deprecated settings, etc. 
""" reader, path = self._prepToRead(fName) reader.readFromFile(fName, handleInvalids) self._applyReadSettings(path if setPath else None) self.registerUserPlugins() return reader def registerUserPlugins(self): """Add any ad-hoc 'user' plugins that are referenced in the settings file.""" userPlugins = self["userPlugins"] if len(userPlugins): from armi import getApp app = getApp() app.registerUserPlugins(userPlugins) def _prepToRead(self, fName): if self._failOnLoad: raise RuntimeError( "Cannot load settings file after processing of command line options begins.\nYou " "may be able to fix this by reordering the command line arguments, and making sure " f"the settings file `{fName}` comes before any modified settings." ) path = pathTools.armiAbsPath(fName) return settingsIO.SettingsReader(self), path def loadFromString(self, string, handleInvalids=True): """Read in settings from a YAML string. Passes the reader back out in case you want to know something about how the reading went like for knowing if a file contained deprecated settings, etc. """ if self._failOnLoad: raise RuntimeError( "Cannot load settings after processing of command line options begins.\nYou may be " "able to fix this by reordering the command line arguments." ) reader = settingsIO.SettingsReader(self) reader.readFromStream(io.StringIO(string), handleInvalids=handleInvalids) self.initLogVerbosity() return reader def _applyReadSettings(self, path=None): self.initLogVerbosity() if path: self.path = path # can't set this before a chance to fail occurs def initLogVerbosity(self): """ Central location to init logging verbosity. Notes ----- This means that creating a Settings object sets the global logging level of the entire code base. """ if context.MPI_RANK == 0: runLog.setVerbosity(self["verbosity"]) else: runLog.setVerbosity(self["branchVerbosity"]) self.setModuleVerbosities(force=True) def writeToYamlFile(self, fName, style="short", fromFile=None): """ Write settings to a yaml file. 
Notes ----- This resets the current CS's path to the newly written absolute path. Parameters ---------- fName : str the file to write to style : str (optional) the method of output to be used when creating the file for the current state of settings (short, medium, or full) fromFile : str (optional) if the source file and destination file are different (i.e. for cloning) and the style argument is ``medium``, then this arg is used """ self.path = pathTools.armiAbsPath(fName) if style == "medium": getSettingsPath = self.path if fromFile is None else pathTools.armiAbsPath(fromFile) settingsSetByUser = self.getSettingsSetByUser(getSettingsPath) else: settingsSetByUser = [] with open(self.path, "w") as stream: writer = self.writeToYamlStream(stream, style, settingsSetByUser) return writer def getSettingsSetByUser(self, fPath): """ Grabs the list of settings in the user-defined input file so that the settings can be tracked outside of a Settings object. Parameters ---------- fPath : str The absolute file path of the settings file Returns ------- userSettingsNames : list The settings names read in from a yaml settings file """ # We do not want to load these as settings, but just grab the dictionary straight from the # settings file to know which settings are user-defined. with open(fPath, "r") as stream: yaml = YAML() yaml.allow_duplicate_keys = False tree = yaml.load(stream) userSettings = tree[settingsIO.Roots.CUSTOM] userSettingsNames = list(userSettings.keys()) return userSettingsNames def writeToYamlStream(self, stream, style="short", settingsSetByUser=[]): """ Write settings in yaml format to an arbitrary stream. Parameters ---------- stream : file object Writable file stream style : str (optional) Writing style for settings file. Can be short, medium, or full. 
settingsSetByUser : list List of settings names in user-defined settings file Returns ------- writer : SettingsWriter """ writer = settingsIO.SettingsWriter(self, style=style, settingsSetByUser=settingsSetByUser) writer.writeYaml(stream) return writer def updateEnvironmentSettingsFrom(self, otherCs): """Updates the environment settings in this object based on some other cs (from the GUI, most likely). Parameters ---------- otherCs : Settings A cs object that environment settings will be inherited from. This enables users to run tests with their environment rather than the reference environment """ for replacement in self.environmentSettings: self[replacement] = otherCs[replacement] def modified(self, caseTitle=None, newSettings=None): """Return a new Settings object containing the provided modifications.""" settings = self.duplicate() if caseTitle: settings.caseTitle = caseTitle if newSettings: for key, val in newSettings.items(): if isinstance(val, Setting): settings.__settings[key] = copy(val) elif key in settings.__settings: settings.__settings[key].setValue(val) else: settings.__settings[key] = Setting(key, val, description="Description from cs.modified()") return settings def setModuleVerbosities(self, force=False): """Attempt to grab the module-level logger verbosities from the settings file, and then set their log levels (verbosities). Parameters ---------- force : bool, optional If force is False, don't overwrite the log verbosities if the logger already exists. IF this needs to be used mid-run, force=False is safer. Notes ----- This method is only meant to be called once per run. 
""" # try to get the setting dict verbs = self["moduleVerbosity"] # set, but don't use, the module-level loggers for mName, mLvl in verbs.items(): # by default, we init module-level logging, not change it mid-run if force or mName not in logging.Logger.manager.loggerDict: # cast verbosity to integer lvl = int(mLvl) if mLvl.isnumeric() else runLog.LOG.logLevels[mLvl][0] log = logging.getLogger(mName) log.setVerbosity(lvl) ================================================ FILE: armi/settings/fwSettings/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This package contains the settings that control the base/framework-level ARMI functionality.""" from typing import List from armi.settings import setting from armi.settings.fwSettings import databaseSettings, globalSettings, reportSettings def getFrameworkSettings() -> List[setting.Setting]: settings = [] for mod in ( globalSettings, databaseSettings, reportSettings, ): settings.extend(mod.defineSettings()) return settings ================================================ FILE: armi/settings/fwSettings/databaseSettings.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# Setting names, exposed as constants so other modules can reference them safely.
CONF_DB = "db"
CONF_RELOAD_DB_NAME = "reloadDBName"
CONF_LOAD_FROM_DB_EVERY_NODE = "loadFromDBEveryNode"
CONF_SYNC_AFTER_WRITE = "syncDbAfterWrite"
CONF_FORCE_DB_PARAMS = "forceDbParams"


def defineSettings():
    """
    Define the framework settings related to the ARMI database.

    Returns
    -------
    list of setting.Setting
        Setting definitions controlling how state is written to, loaded from,
        and synchronized with the database.
    """
    settings = [
        # Master switch: write state to the database at every time step.
        setting.Setting(
            CONF_DB,
            default=True,
            label="Activate Database",
            description="Write the state information to a database at every timestep",
        ),
        # Database file to initialize a run from (e.g. for restarts/snapshots).
        setting.Setting(
            CONF_RELOAD_DB_NAME,
            default="",
            label="Database Input File",
            description="Name of the database file to load initial conditions from",
            oldNames=[("snapShotDB", None)],
        ),
        # When True, state is re-loaded from the reference database at each node.
        setting.Setting(
            CONF_LOAD_FROM_DB_EVERY_NODE,
            default=False,
            label="Load Database at EveryNode",
            description="Every node loaded from reference database",
        ),
        # Copy the DB off fast scratch space after each write so it survives crashes.
        setting.Setting(
            CONF_SYNC_AFTER_WRITE,
            default=True,
            label="Sync Database After Write",
            description=(
                "Copy the output database from the fast scratch space to the shared network drive after each write."
            ),
        ),
        # Force specific parameters to be saved regardless of their saveToDB flag.
        setting.Setting(
            CONF_FORCE_DB_PARAMS,
            default=[],
            label="Force Database Write of Parameters",
            description=(
                "A list of parameter names that should always be written to the "
                "database, regardless of their Parameter Definition's typical saveToDB "
                "status. This is only honored if the DatabaseInterface is used."
            ),
        ),
    ]
    return settings
"cycleLengths" CONF_CYCLES = "cycles" CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION = "cyclesSkipTightCouplingInteraction" CONF_DEBUG_MEM = "debugMem" CONF_DEBUG_MEM_SIZE = "debugMemSize" CONF_DECAY_CONSTANTS = "decayConstants" CONF_DEFAULT_SNAPSHOTS = "defaultSnapshots" CONF_DEFERRED_INTERFACE_NAMES = "deferredInterfaceNames" CONF_DEFERRED_INTERFACES_CYCLE = "deferredInterfacesCycle" CONF_DETAIL_ALL_ASSEMS = "detailAllAssems" CONF_DETAIL_ASSEM_LOCATIONS_BOL = "detailAssemLocationsBOL" CONF_DETAIL_ASSEM_NUMS = "detailAssemNums" CONF_DETAILED_AXIAL_EXPANSION = "detailedAxialExpansion" CONF_DUMP_SNAPSHOT = "dumpSnapshot" CONF_EQ_DIRECT = "eqDirect" # fuelCycle/equilibrium coupling CONF_EXPLICIT_REPEAT_SHUFFLES = "explicitRepeatShuffles" CONF_FLUX_RECON = "fluxRecon" # strange coupling in fuel handlers CONF_FRESH_FEED_TYPE = "freshFeedType" CONF_GROW_TO_FULL_CORE_AFTER_LOAD = "growToFullCoreAfterLoad" CONF_INDEPENDENT_VARIABLES = "independentVariables" CONF_INITIALIZE_BURN_CHAIN = "initializeBurnChain" CONF_INPUT_HEIGHTS_HOT = "inputHeightsConsideredHot" CONF_LOAD_STYLE = "loadStyle" CONF_LOADING_FILE = "loadingFile" CONF_MATERIAL_NAMESPACE_ORDER = "materialNamespaceOrder" CONF_MIN_MESH_SIZE_RATIO = "minMeshSizeRatio" CONF_MODULE_VERBOSITY = "moduleVerbosity" CONF_N_CYCLES = "nCycles" CONF_N_TASKS = "nTasks" CONF_NON_UNIFORM_ASSEM_FLAGS = "nonUniformAssemFlags" CONF_OUTPUT_CACHE_LOCATION = "outputCacheLocation" CONF_OUTPUT_FILE_EXTENSION = "outputFileExtension" CONF_PHYSICS_FILES = "savePhysicsFiles" CONF_PLOTS = "plots" CONF_POWER = "power" CONF_POWER_DENSITY = "powerDensity" CONF_POWER_FRACTIONS = "powerFractions" CONF_PROFILE = "profile" CONF_REMOVE_PER_CYCLE = "removePerCycle" CONF_RM_EXT_FILES_AT_BOC = "rmExternalFilesAtBOC" CONF_RUN_TYPE = "runType" CONF_SKIP_CYCLES = "skipCycles" CONF_SMALL_RUN = "rmExternalFilesAtEOL" CONF_SORT_REACTOR = "sortReactor" CONF_START_CYCLE = "startCycle" CONF_START_NODE = "startNode" CONF_STATIONARY_BLOCK_FLAGS = 
"stationaryBlockFlags" CONF_T_IN = "Tin" CONF_T_OUT = "Tout" CONF_TARGET_K = "targetK" # lots of things use this CONF_TEMP_GROUPS = "tempGroups" CONF_TIGHT_COUPLING = "tightCoupling" CONF_TIGHT_COUPLING_MAX_ITERS = "tightCouplingMaxNumIters" CONF_TIGHT_COUPLING_SETTINGS = "tightCouplingSettings" CONF_TRACE = "trace" CONF_TRACK_ASSEMS = "trackAssems" CONF_UNIFORM_MESH_MINIMUM_SIZE = "uniformMeshMinimumSize" CONF_USER_PLUGINS = "userPlugins" CONF_VERBOSITY = "verbosity" CONF_VERSIONS = "versions" CONF_ZONE_DEFINITIONS = "zoneDefinitions" CONF_ZONES_FILE = "zonesFile" def defineSettings() -> List[setting.Setting]: """ Return a list of global framework settings. .. impl:: There is a setting for total core power. :id: I_ARMI_SETTINGS_POWER :implements: R_ARMI_SETTINGS_POWER ARMI defines a collection of settings by default to be associated with all runs, and one such setting is ``power``. This is the total thermal power of the reactor. This is designed to be the standard power of the reactor core, to be easily set by the user. There is frequently the need to adjust the power of the reactor at different cycles. That is done by setting the ``powerFractions`` setting to a list of fractions of this power. .. impl:: Define a comment and a versions list to go with the settings. :id: I_ARMI_SETTINGS_META1 :implements: R_ARMI_SETTINGS_META Because nuclear analysts have a lot to keep track of when doing various simulations of a reactor, ARMI provides a ``comment`` setting that takes an arbitrary string and stores it. This string will be preserved in the settings file and thus in the database, and can provide helpful notes for analysts in the future. Likewise, it is helpful to know what versions of software were used in an ARMI application. There is a dictionary-like setting called ``versions`` that allows users to track the versions of: ARMI, their ARMI application, and the versions of all the plugins in their simulation. 
While it is always helpful to know what versions of software you run, it is particularly needed in nuclear engineering where demands will be made to track the exact versions of code used in simulations. """ settings = [ setting.Setting( CONF_N_TASKS, default=1, label="parallel tasks", description="Number of parallel tasks to request on the cluster", schema=vol.All(vol.Coerce(int), vol.Range(min=1)), oldNames=[("numProcessors", None)], ), setting.Setting( CONF_INITIALIZE_BURN_CHAIN, default=True, label="Initialize Burn Chain", description=( f"This setting is paired with the `{CONF_BURN_CHAIN_FILE_NAME}` setting. " "When enabled, this will initialize the burn-chain on initializing the case and " "is required for running depletion calculations where the transmutations and decays " "are controlled by the framework. If an external software, such as ORIGEN, contains " "data for the burn-chain already embedded then this may be disabled." ), ), setting.Setting( CONF_BURN_CHAIN_FILE_NAME, default=os.path.join(context.RES, "burn-chain.yaml"), label="Burn Chain File", description="Path to YAML file that has the depletion chain defined in it", ), setting.Setting( CONF_AXIAL_MESH_REFINEMENT_FACTOR, default=1, label="Axial Mesh Refinement Factor", description="Multiplicative factor on the Global Flux number of mesh per " "block. Used for axial mesh refinement.", schema=vol.All(vol.Coerce(int), vol.Range(min=0, min_included=False)), ), setting.Setting( CONF_UNIFORM_MESH_MINIMUM_SIZE, default=None, label="Minimum axial mesh size in cm for uniform mesh", description="Minimum mesh size used when generating an axial mesh for the " "uniform mesh converter. 
Providing a value for this setting allows fuel " "and control material boundaries to be enforced better in uniform mesh.", schema=vol.All(vol.Coerce(float), vol.Range(min=0.0, min_included=False)), ), setting.Setting( CONF_DETAILED_AXIAL_EXPANSION, default=False, label="Detailed Axial Expansion", description=( "Allow each assembly to expand independently of the others. Results in non-uniform " "axial mesh. Neutronics kernel must be able to handle." ), ), setting.Setting( CONF_NON_UNIFORM_ASSEM_FLAGS, default=[], label="Non Uniform Assem Flags", description=( "Assemblies that match a flag group on this list will not have their " "mesh changed with the reference mesh of the core for uniform mesh cases (non-" "detailed axial expansion). Another plugin may need to make the mesh uniform if " "necessary." ), ), setting.Setting( CONF_INPUT_HEIGHTS_HOT, default=True, label="Input Height Considered Hot", description=( "This is a flag to determine if block heights, as provided in blueprints, are at " "hot dimensions. If false, block heights are at cold/as-built dimensions and will " "be thermally expanded as appropriate." ), ), setting.Setting( CONF_TRACE, default=False, label="Use the Python Tracer", description="Activate Python trace module to print out each line as it's executed", isEnvironment=True, ), setting.Setting( CONF_PROFILE, default=False, label="Turn On the Profiler", description="Turn on the profiler for the submitted case. 
The profiler " "results will not include all import times.", isEnvironment=True, oldNames=[ ("turnOnProfiler", None), ], ), setting.Setting( CONF_COVERAGE, default=False, label="Turn On Coverage Report Generation", description="Turn on coverage report generation which tracks all the lines " "of code that execute during a run", isEnvironment=True, ), setting.Setting( CONF_COVERAGE_CONFIG_FILE, default="", label="File to Define Coverage Configuration", description="User-defined coverage configuration file", ), setting.Setting( CONF_MIN_MESH_SIZE_RATIO, default=0.15, label="Minimum Mesh Size Ratio", description="This is the minimum ratio of mesh sizes (dP1/(dP1 + dP2)) " "allowable -- only active if automaticVariableMesh flag is set to True", schema=vol.All(vol.Coerce(float), vol.Range(min=0, min_included=False)), ), setting.Setting( CONF_CYCLE_LENGTH, default=365.242199, label="Cycle Length", description="Duration of one single cycle in days. If `availabilityFactor` is below " "1, the reactor will be at power less than this. If variable, use " "`cycleLengths` setting.", oldNames=[ ("burnTime", None), ], schema=( vol.Any( vol.All(vol.Coerce(float), vol.Range(min=0, min_included=False)), None, ) ), ), setting.Setting( CONF_CYCLE_LENGTHS, default=[], label="Cycle Durations", description="List of durations of each cycle in days. The at-power " "duration will be affected by `availabilityFactor`. R is repeat. For " "example [100, 150, '9R'] is 1 100 day cycle followed by 10 150 day " "cycles. Empty list is constant duration set by `cycleLength`.", schema=vol.Any([vol.Coerce(str)], None), ), setting.Setting( CONF_AVAILABILITY_FACTOR, default=1.0, label="Plant Availability Factor", description="Availability factor of the plant. This is the fraction of the " "time that the plant is operating. 
If variable, use `availabilityFactors` setting.", oldNames=[ ("capacityFactor", None), ], schema=(vol.Any(vol.All(vol.Coerce(float), vol.Range(min=0)), None)), ), setting.Setting( CONF_AVAILABILITY_FACTORS, default=[], label="Availability Factors", description="List of availability factor of each cycle as a fraction " "(fraction of time plant is not in an outage). R is repeat. For example " "[0.5, 1.0, '9R'] is 1 50% followed by 10 100%. Empty list is " "constant duration set by `availabilityFactor`.", schema=vol.Any([vol.Coerce(str)], None), ), setting.Setting( CONF_POWER_FRACTIONS, default=[], label="Power Fractions", description="List of power fractions at each cycle (fraction of rated " "thermal power the plant achieves). R is repeat. For example [0.5, 1.0, " "'9R'] is 1 50% followed by 10 100%. Specify zeros to indicate " "decay-only cycles (i.e. for decay heat analysis). None implies " "always full rated power.", schema=vol.Any([vol.Coerce(str)], None), ), setting.Setting( CONF_BURN_STEPS, default=4, label="Burnup Steps per Cycle", description="Number of depletion substeps, n, in one cycle. 
Note: There " "will be n+1 time nodes and the burnup step time will be computed as cycle " "length/n when the simple cycles input format is used.", schema=(vol.Any(vol.All(vol.Coerce(int), vol.Range(min=0)), None)), ), setting.Setting( CONF_BETA, default=None, label="Delayed Neutron Fraction", description="Individual precursor group delayed neutron fractions", schema=vol.Any( [ vol.All( vol.Coerce(float), vol.Range(min=0, min_included=True, max=1, max_included=True), ) ], None, vol.All( vol.Coerce(float), vol.Range(min=0, min_included=True, max=1, max_included=True), ), msg="Expected NoneType, float, or list of floats.", ), oldNames=[ ("betaComponents", None), ], ), setting.Setting( CONF_DECAY_CONSTANTS, default=None, label="Decay Constants", description="Individual precursor group delayed neutron decay constants", schema=vol.Any( [vol.All(vol.Coerce(float), vol.Range(min=0, min_included=True))], None, vol.All(vol.Coerce(float), vol.Range(min=0, min_included=True)), msg="Expected NoneType, float, or list of floats.", ), ), setting.Setting( CONF_BRANCH_VERBOSITY, default="error", label="Worker Log Verbosity", description="Verbosity of the non-primary MPI nodes", options=[ "debug", "extra", "info", "important", "prompt", "warning", "error", ], isEnvironment=True, ), setting.Setting( CONF_MODULE_VERBOSITY, default={}, label="Module-Level Verbosity", description="Verbosity of any module-specific loggers that are set", isEnvironment=True, ), setting.Setting( CONF_VERSIONS, default={}, label="Versions of Code Used", description="Versions of ARMI, and any Apps or Plugins that register a version here.", ), setting.Setting( CONF_BU_GROUPS, default=[10, 20, 30], label="Burnup XS Groups", description="The range of burnups where cross-sections will be the same " "for a given cross section type (units of %FIMA)", schema=vol.Schema( [ vol.All( vol.Coerce(int), vol.Range( min=0, min_included=False, ), ) ] ), ), setting.Setting( CONF_TEMP_GROUPS, default=[], label="Temperature XS 
Groups", description="The range of fuel temperatures where cross-sections will be the same " "for a given cross section type (units of degrees C)", schema=vol.Schema([vol.All(vol.Coerce(int), vol.Range(min=0, min_included=False))]), ), setting.Setting( CONF_BURNUP_PEAKING_FACTOR, default=0.0, label="Burn-up Peaking Factor", description="The peak/avg factor for burnup and DPA. If it is not set the current flux " "peaking is used (this is typically conservatively high).", schema=vol.All(vol.Coerce(float), vol.Range(min=0)), ), setting.Setting( CONF_CIRCULAR_RING_PITCH, default=1.0, label="Circular Ring Relative Pitch", description="The relative pitch to be used to define a single circular ring in circular shuffling", ), setting.Setting( CONF_COMMENT, default="", label="Case Comments", description="A comment describing this case", ), setting.Setting( CONF_COPY_FILES_FROM, default=[], label="Copy These Files", description="A list of files that need to be copied at the start of a run.", ), setting.Setting( CONF_COPY_FILES_TO, default=[], label="Copy to These Directories", description="A list of directories to copy provided files into at the start of a run." "This list can be of length zero (copy to working dir), 1 (copy all files to the same " f"place), or it must be the same length as {CONF_COPY_FILES_FROM}", ), setting.Setting( CONF_DEBUG_MEM, default=False, label="Debug Memory", description="Turn on memory debugging options to help find problems with the code", ), setting.Setting( CONF_DEBUG_MEM_SIZE, default=False, label="Debug Memory Size", description="Show size of objects during memory debugging", ), setting.Setting( CONF_DEFAULT_SNAPSHOTS, default=False, label="Basic Reactor Snapshots", description="Generate snapshots at BOL, MOL, and EOL.", ), setting.Setting( CONF_DETAIL_ALL_ASSEMS, default=False, label="Detailed Assems - All", description="All assemblies will have 'detailed' treatment. 
Note: This " "option is interpreted differently by different modules.", ), setting.Setting( CONF_DETAIL_ASSEM_LOCATIONS_BOL, default=[], label="Detailed Assems - BOL Location", description="Assembly locations for assemblies that will have 'detailed' " "treatment. This option will track assemblies in the core at BOL. Note: " "This option is interpreted differently by different modules.", ), setting.Setting( CONF_DETAIL_ASSEM_NUMS, default=[], label="Detailed Assems - ID", description="Assembly numbers(IDs) for assemblies that will have " "'detailed' treatment. This option will track assemblies that not in the " "core at BOL. Note: This option is interpreted differently by different modules.", schema=vol.Schema([int]), ), setting.Setting( CONF_DUMP_SNAPSHOT, default=[], label="Detailed Reactor Snapshots", description="List of snapshots to perform detailed reactor analysis, " "such as reactivity coefficient generation.", ), setting.Setting( CONF_PHYSICS_FILES, default=[], label="Dump Snapshot Files", description="List of snapshots to dump reactor physics kernel input and " "output files. Can be used to perform follow-on analysis.", ), setting.Setting( CONF_EQ_DIRECT, default=False, label="Direct Eq Shuffling", description="Does the equilibrium search with repetitive shuffing but with " "direct shuffling rather than the fast way", ), setting.Setting( CONF_FLUX_RECON, default=False, label="Flux/Power Reconstruction", description="Perform detailed flux and power reconstruction", ), setting.Setting( CONF_FRESH_FEED_TYPE, default="feed fuel", label="Fresh Feed Type", description="The type of fresh fuel added to the core, used in certain pre-defined " "fuel shuffling logic sequences.", options=["feed fuel", "igniter fuel", "inner driver fuel"], ), setting.Setting( CONF_GROW_TO_FULL_CORE_AFTER_LOAD, default=False, label="Expand to Full Core on Snapshot Load", description="Grows from 1/3 to full core after loading a 1/3 " "symmetric snapshot. 
Note: This is needed when a full core model is needed " "and the database was produced using a third core model.", ), setting.Setting( CONF_START_CYCLE, default=0, label="Start Cycle", description="Cycle number to continue calculation from. Database will " "load from the time step just before. For snapshots use `dumpSnapshot`.", oldNames=[ ("loadCycle", None), ], schema=vol.All(vol.Coerce(int), vol.Range(min=0)), ), setting.Setting( CONF_LOADING_FILE, default="", label="Blueprints File", description="The blueprints/loading input file path containing component dimensions, materials, etc.", ), setting.Setting( CONF_START_NODE, default=0, label="Start Node", description="Timenode number (0 for BOC, etc.) to continue calculation from. " "Database will load from the time step just before.", oldNames=[ ("loadNode", None), ], schema=vol.All(vol.Coerce(int), vol.Range(min=0)), ), setting.Setting( CONF_LOAD_STYLE, default="fromInput", label="Load Style", description="Description of how the ARMI case will be initialized", options=["fromInput", "fromDB"], ), setting.Setting( CONF_N_CYCLES, default=1, label="Number of Cycles", description="Number of cycles that will be simulated. Fuel management " "happens at the beginning of each cycle. Can include active (full-power) " "cycles as well as post-shutdown decay-heat steps. 
For restart cases, " "this value should include both cycles from the restart plus any additional " "cycles to be run after `startCycle`.", schema=vol.All(vol.Coerce(int), vol.Range(min=1)), ), setting.Setting( CONF_TIGHT_COUPLING, default=False, label="Tight Coupling", description="Boolean to turn on/off tight coupling", ), setting.Setting( CONF_TIGHT_COUPLING_MAX_ITERS, default=4, label="Maximum number of iterations for tight coupling.", description="Maximum number of iterations for tight coupling.", ), setting.Setting( CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION, default=[], label="Cycles to skip the tight coupling interaction.", description="List of cycle numbers skip tight coupling interaction for. " "Will still update component temps, etc during these cycles, will just " "not iterate a second (or more) time.", ), tightCouplingSettings.TightCouplingSettingDef( CONF_TIGHT_COUPLING_SETTINGS, ), setting.Setting( CONF_OUTPUT_FILE_EXTENSION, default="jpg", label="Plot File Extension", description="The default extension for plots", options=["jpg", "png", "svg", "pdf"], ), setting.Setting( CONF_PLOTS, default=False, label="Plot Results", description="Generate additional plots throughout the ARMI analysis", ), setting.Setting( CONF_POWER, default=0.0, label="Reactor Thermal Power (W)", description="Nameplate thermal power of the reactor. Can be varied by setting the powerFractions setting.", schema=vol.All(vol.Coerce(float), vol.Range(min=0)), ), setting.Setting( CONF_POWER_DENSITY, default=0.0, label="Reactor Thermal Power Density (W/HMM)", description="Thermal power of the Reactor, per gram of Heavy metal " "mass. 
Ignore this setting if the `power` setting is non-zero.", schema=vol.All(vol.Coerce(float), vol.Range(min=0)), ), setting.Setting( CONF_REMOVE_PER_CYCLE, default=3, label="Remove per Cycle", description="The number of fuel assemblies removed per cycle at equilibrium.", ), setting.Setting( CONF_RUN_TYPE, default="Standard", label="Run Type", description="Type of run that this is, e.g. a normal run through all " "cycles, a snapshot-loaded reactivity coefficient run, etc.", options=["Standard", "Equilibrium", "Snapshots"], ), setting.Setting( CONF_EXPLICIT_REPEAT_SHUFFLES, default="", label="Explicit Shuffles File", description="Path to file that contains a detailed shuffling history that is to be repeated exactly.", oldNames=[("movesFile", None), ("shuffleFileName", None)], ), setting.Setting( CONF_SKIP_CYCLES, default=0, label="Number of Cycles to Skip", description="Number of cycles to be skipped during the calculation. Note: " "This is typically used when repeating only a portion of a calculation or " "repeating a run.", schema=vol.All(vol.Coerce(int), vol.Range(min=0)), ), setting.Setting( CONF_SMALL_RUN, default=False, label="Clean Up Files at EOL", description="Clean up intermediate files after the run completes (EOL)", ), setting.Setting( CONF_SORT_REACTOR, default=True, label="Do we want to automatically sort the Reactor?", description="If unsorted, ArmiObject IDs will be by the order they were added to the Reactor.", ), setting.Setting( CONF_RM_EXT_FILES_AT_BOC, default=False, label="Clean Up Files at BOC", description="Clean up files at the beginning of each cycle (BOC)", ), setting.Setting( CONF_STATIONARY_BLOCK_FLAGS, default=["GRID_PLATE"], label="stationary Block Flags", description="Blocks with these flags will not move in moves. 
Used for fuel management.", ), setting.Setting( CONF_TARGET_K, default=1.005, label="Criticality Search Target (k-effective)", description="Target criticality (k-effective) for cycle length, branch, and equilibrium search", schema=vol.All(vol.Coerce(float), vol.Range(min=0)), ), setting.Setting( CONF_TRACK_ASSEMS, default=False, label="Save Discharged Assemblies", description="Retain discharged assemblies in a spent fuel pool so their histories " "can be analyzed or the assemblies reused. Turning this off removes " "discharged assemblies to minimize memory and database size. " "Assemblies explicitly discharged to the spent fuel pool remain " "regardless, allowing selective tracking.", ), setting.Setting( CONF_VERBOSITY, default="info", label="Primary Log Verbosity", description="How verbose the output will be", options=[ "debug", "extra", "info", "important", "prompt", "warning", "error", ], isEnvironment=True, ), setting.Setting( CONF_ZONE_DEFINITIONS, default=[], label="Zone Definitions", description="Manual definitions of zones as lists of assembly locations " '(e.g. "zoneName: loc1, loc2, loc3") . Zones are groups of assemblies used ' f"by various summary and calculation routines. See also {CONF_ZONES_FILE} " "for an alternative method of specifying zones.", ), setting.Setting( CONF_ZONES_FILE, default="", label="Zones definition file", description="Input file containing the definition of Zones to be applied to the reactor. 
" f"See also {CONF_ZONE_DEFINITIONS} for an alternative method of specifying zones.", ), setting.Setting( CONF_ACCEPTABLE_BLOCK_AREA_ERROR, default=1e-05, label="Acceptable Block Area Error", description="The limit of error between a block's cross-" "sectional area and the reference block used during the assembly area " "consistency check", schema=vol.All(vol.Coerce(float), vol.Range(min=0, min_included=False)), ), setting.Setting( CONF_INDEPENDENT_VARIABLES, default=[], label="Independent Variables", description="List of (independentVarName, value) tuples to inform optimization post-processing", ), setting.Setting( CONF_T_IN, default=360.0, label="Inlet Temperature", description="The inlet temperature of the reactor in C", schema=vol.All(vol.Coerce(float), vol.Range(min=-273.15)), ), setting.Setting( CONF_T_OUT, default=510.0, label="Outlet Temperature", description="The outlet temperature of the reactor in C", schema=vol.All(vol.Coerce(float), vol.Range(min=-273.15)), ), setting.Setting( CONF_DEFERRED_INTERFACES_CYCLE, default=0, label="Deferred Interface Start Cycle", description="The supplied list of interface names in deferredInterfaceNames" " will begin normal operations on this cycle number", ), setting.Setting( CONF_DEFERRED_INTERFACE_NAMES, default=[], label="Deferred Interface Names", description="Interfaces to delay the normal operations of for special circumstance problem avoidance", ), setting.Setting( CONF_OUTPUT_CACHE_LOCATION, default="", label="Location of Output Cache", description="Location where cached calculations are stored and " "retrieved if exactly the same as the calculation requested. Empty " "string will not cache.", isEnvironment=True, ), setting.Setting( CONF_MATERIAL_NAMESPACE_ORDER, default=[], label="Material Namespace Order", description=( "Ordered list of Python namespaces for finding materials by class name. " "This allows users to choose between different implementations of reactor " "materials. 
For example, the framework comes with a basic UZr material, " "but power users will want to override it with their own UZr subclass. " "This allows users to specify to get materials out of a plugin rather " "than from the framework." ), ), setting.Setting( CONF_CYCLES, default=[], label="Cycle information", description="YAML dict defining the cycle history of the case. Options at each cycle " "include: `name`, `cumulative days`, `step days`, `availability factor`, " "`cycle length`, `burn steps`, and `power fractions`. If specified, do not use any of " "the case settings `cycleLength(s)`, `availabilityFactor(s)`, `powerFractions`, or " "`burnSteps`. Must also specify `nCycles` and `power`.", schema=vol.Schema( [ vol.All( { "name": str, "cumulative days": vol.All([vol.Any(float, int)], _isMonotonicIncreasing), "step days": [vol.Coerce(str)], "power fractions": [vol.Coerce(str)], "availability factor": vol.All(vol.Coerce(float), vol.Range(min=0, max=1)), "cycle length": vol.All(vol.Coerce(float), vol.Range(min=0)), "burn steps": vol.All(vol.Coerce(int), vol.Range(min=0)), }, _mutuallyExclusiveCyclesInputs, ) ] ), ), setting.Setting( CONF_USER_PLUGINS, default=[], label=CONF_USER_PLUGINS, description="YAML list defining the locations of UserPlugin subclasses. You can enter " "the full ARMI import path: armi.test.test_what.MyPlugin, or you can enter the full " "file path: /path/to/my/pluginz.py:MyPlugin ", schema=vol.Any([vol.Coerce(str)], None), ), setting.Setting( CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP, default=[], label="Assembly Flags to Skip Axial Expansion", description=("Assemblies that match a flag on this list will not be axially expanded."), ), ] return settings def _isMonotonicIncreasing(inputList): if isMonotonic(inputList, "<"): return inputList else: raise vol.error.Invalid(f"List must be monotonicically increasing: {inputList}") def _mutuallyExclusiveCyclesInputs(cycle): """Helper for `cycles` setting. 
There are multiple different ways to define the time nodes of the simulation, but they are exclusive, and you have to pick one. Here we verify it was done correcty. """ cycleKeys = cycle.keys() if ( sum( [ "cumulative days" in cycleKeys, "step days" in cycleKeys, "cycle length" in cycleKeys or "burn steps" in cycleKeys, ] ) != 1 ): baseErrMsg = ( "Must have exactly one of either 'cumulative days', 'step days', or 'cycle length' + " "'burn steps' in each cycle definition." ) raise vol.Invalid((baseErrMsg + f" Check cycle {cycle['name']}.") if "name" in cycleKeys else baseErrMsg) return cycle ================================================ FILE: armi/settings/fwSettings/reportSettings.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Settings related to the report generation.""" from armi.settings import setting CONF_GEN_REPORTS = "genReports" CONF_SUMMARIZE_ASSEM_DESIGN = "summarizeAssemDesign" def defineSettings(): """Define settings for the interface.""" settings = [ setting.Setting( CONF_GEN_REPORTS, default=True, label="Enable Reports", description="Employ the use of the reporting utility for ARMI, generating " "HTML and ASCII summaries of the run", oldNames=[("summarizer", None)], ), setting.Setting( CONF_SUMMARIZE_ASSEM_DESIGN, default=True, label="Summarize Assembly Design", description="Print a summary of the assembly design details at BOL", ), ] return settings ================================================ FILE: armi/settings/fwSettings/tests/__init__.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/settings/fwSettings/tests/test_fwSettings.py ================================================ # Copyright 2022 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
class TestSchema(unittest.TestCase):
    """Test that the implemented schema are doing what we think they are."""

    def setUp(self):
        # Fresh default settings object for each test.
        self.cs = caseSettings.Settings()
        # Map of setting name -> one value that should pass validation, one value
        # that should fail, and the exception type the failure is expected to raise.
        self.settings = {
            "nTasks": {
                "valid": 1,
                "invalid": -1,
                "error": vol.error.MultipleInvalid,
            },
            "axialMeshRefinementFactor": {
                "valid": 1,
                "invalid": 0,
                "error": vol.error.MultipleInvalid,
            },
            "minMeshSizeRatio": {
                "valid": 1,
                "invalid": 0,
                "error": vol.error.MultipleInvalid,
            },
            "cycleLength": {
                "valid": 1,
                "invalid": -1,
                "error": vol.error.MultipleInvalid,
            },
            "availabilityFactor": {
                "valid": 0,
                "invalid": -1,
                "error": vol.error.MultipleInvalid,
            },
            "burnSteps": {
                "valid": 0,
                "invalid": -1,
                "error": vol.error.MultipleInvalid,
            },
            # beta/decayConstants use vol.Any schemas, so they raise AnyInvalid.
            "beta": {
                "valid": [0.5, 0.5],
                "invalid": [0.5, 2],
                "error": vol.error.AnyInvalid,
            },
            "decayConstants": {
                "valid": [1, 1],
                "invalid": [-1, 1],
                "error": vol.error.AnyInvalid,
            },
            "buGroups": {
                "valid": [1, 5],
                "invalid": [-1, 200],
                "error": vol.error.MultipleInvalid,
            },
            "burnupPeakingFactor": {
                "valid": 0,
                "invalid": -1,
                "error": vol.error.MultipleInvalid,
            },
            "startCycle": {
                "valid": 1,
                "invalid": -1,
                "error": vol.error.MultipleInvalid,
            },
            "startNode": {
                "valid": 0,
                "invalid": -1,
                "error": vol.error.MultipleInvalid,
            },
            "nCycles": {"valid": 1, "invalid": -1, "error": vol.error.MultipleInvalid},
            "power": {"valid": 0, "invalid": -1, "error": vol.error.MultipleInvalid},
            "skipCycles": {
                "valid": 0,
                "invalid": -1,
                "error": vol.error.MultipleInvalid,
            },
            "targetK": {"valid": 1, "invalid": -1, "error": vol.error.MultipleInvalid},
            "acceptableBlockAreaError": {
                "valid": 1,
                "invalid": 0,
                "error": vol.error.MultipleInvalid,
            },
            # Temperatures are bounded below by absolute zero (-273.15 C).
            "Tin": {"valid": -272, "invalid": -274, "error": vol.error.MultipleInvalid},
            "Tout": {
                "valid": -272,
                "invalid": -274,
                "error": vol.error.MultipleInvalid,
            },
        }

    def test_schema(self):
        # first test that a valid case goes through without error
        for settingName, settingVal in self.settings.items():
            validOption = settingVal["valid"]
            # applying the valid value must not raise
            self.cs = self.cs.modified(newSettings={settingName: validOption})

            invalidOption = settingVal["invalid"]
            expectedError = settingVal["error"]
            # applying the invalid value must raise the expected schema error
            with self.assertRaises(expectedError):
                self.cs = self.cs.modified(newSettings={settingName: invalidOption})
""" import io import unittest import voluptuous as vol from ruamel.yaml import YAML from armi.settings.fwSettings.tightCouplingSettings import ( TightCouplingSettingDef, tightCouplingSettingsValidator, ) TIGHT_COUPLING_SETTINGS_EXAMPLE = """ globalFlux: parameter: keff convergence: 1e-05 fuelPerformance: parameter: peakFuelTemperature convergence: 1e-02 """ class TestTightCouplingSettings(unittest.TestCase): def test_validAssignments(self): """Tests that the tight coupling settings dictionary can be added to.""" tc = {} tc["globalFlux"] = {"parameter": "keff", "convergence": 1e-05} tc["thermalHydraulics"] = { "parameter": "peakCladdingTemperature", "convergence": 1e-02, } tc = tightCouplingSettingsValidator(tc) self.assertEqual(tc["globalFlux"]["parameter"], "keff") self.assertEqual(tc["globalFlux"]["convergence"], 1e-05) self.assertEqual(tc["thermalHydraulics"]["parameter"], "peakCladdingTemperature") self.assertEqual(tc["thermalHydraulics"]["convergence"], 1e-02) def test_incompleteAssignment(self): """Tests that the tight coupling settings is rendered empty if a complete dictionary is not provided.""" tc = {} tc["globalFlux"] = None tc = tightCouplingSettingsValidator(tc) self.assertNotIn("globalFlux", tc.keys()) tc = {} tc["globalFlux"] = {} tc = tightCouplingSettingsValidator(tc) self.assertNotIn("globalFlux", tc.keys()) def test_missingAssignments(self): """Tests failure if not all keys/value pairs are provided on initialization.""" # Fails because `convergence` is not assigned at the same # time as the `parameter` assignment. with self.assertRaises(vol.MultipleInvalid): tc = {} tc["globalFlux"] = {"parameter": "keff"} tc = tightCouplingSettingsValidator(tc) # Fails because `parameter` is not assigned at the same # time as the `convergence` assignment. 
with self.assertRaises(vol.MultipleInvalid): tc = {} tc["globalFlux"] = {"convergence": 1e-08} tc = tightCouplingSettingsValidator(tc) def test_invalidArgumentTypes(self): """Tests failure when the values of the parameters do not match the expected schema.""" # Fails because `parameter` value is required to be a string with self.assertRaises(vol.MultipleInvalid): tc = {} tc["globalFlux"] = {"parameter": 1.0} tc = tightCouplingSettingsValidator(tc) # Fails because `convergence` value is required to be something can be coerced into a float with self.assertRaises(vol.MultipleInvalid): tc = {} tc["globalFlux"] = {"convergence": "keff"} tc = tightCouplingSettingsValidator(tc) def test_extraAssignments(self): """ Tests failure if additional keys are supplied that do not match the expected schema or if there are any typos in the expected keys. """ # Fails because the `parameter` key is misspelled. with self.assertRaises(vol.MultipleInvalid): tc = {} tc["globalFlux"] = {"parameters": "keff", "convergence": 1e-05} tc = tightCouplingSettingsValidator(tc) # Fails because of the `extra` key. 
with self.assertRaises(vol.MultipleInvalid): tc = {} tc["globalFlux"] = { "parameter": "keff", "convergence": 1e-05, "extra": "fails", } tc = tightCouplingSettingsValidator(tc) def test_serializeSettingsException(self): """Ensure the TypeError in serializeTightCouplingSettings can be reached.""" tc = ["globalFlux"] with self.assertRaises(TypeError) as cm: tc = tightCouplingSettingsValidator(tc) the_exception = cm.exception self.assertEqual(the_exception.error_code, 3) def test_yamlIO(self): """Ensure we can read/write this custom setting object to yaml.""" yaml = YAML() inp = yaml.load(io.StringIO(TIGHT_COUPLING_SETTINGS_EXAMPLE)) tcd = TightCouplingSettingDef("TestSetting") tcd.setValue(inp) self.assertEqual(tcd.value["globalFlux"]["parameter"], "keff") outBuf = io.StringIO() output = tcd.dump() yaml.dump(output, outBuf) outBuf.seek(0) inp2 = yaml.load(outBuf) self.assertEqual(inp.keys(), inp2.keys()) ================================================ FILE: armi/settings/fwSettings/tightCouplingSettings.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The data structures and schema of the tight coupling settings. These are advanced/compound settings that are carried along in the normal cs object but aren't simple key/value pairs. 
""" from typing import Dict, Union import voluptuous as vol from armi.settings import Setting _SCHEMA = vol.Schema( { str: vol.Schema( { vol.Required("parameter"): str, vol.Required("convergence"): vol.Coerce(float), } ) } ) class TightCouplingSettings(dict): """ Dictionary with keys of Interface functions and a dictionary value. Notes ----- The dictionary value for each Interface function is required to contain a ``parameter`` and a ``convergence`` key with string and float values, respectively. No other keys are allowed. Examples -------- couplingSettings = TightCouplingSettings({'globalFlux': {'parameter': 'keff', 'convergence': 1e-05}}) """ def __repr__(self): return f"<{self.__class__.__name__} with Interface functions {self.keys()}>" def serializeTightCouplingSettings(tightCouplingSettingsDict: Union[TightCouplingSettings, Dict]) -> Dict[str, Dict]: """ Return a serialized form of the ``TightCouplingSettings`` as a dictionary. Notes ----- Attributes that are not set (i.e., set to None) will be skipped. """ if not isinstance(tightCouplingSettingsDict, dict): raise TypeError(f"Expected a dictionary for {tightCouplingSettingsDict}") output = {} for interfaceFunction, options in tightCouplingSettingsDict.items(): # Setting the value to an empty dictionary # if it is set to a None or an empty # dictionary. if not options: continue output[str(interfaceFunction)] = options return output class TightCouplingSettingDef(Setting): """ Custom setting object to manage the tight coupling settings for each interface. Notes ----- This uses the ``tightCouplingSettingsValidator`` schema to validate the inputs and will automatically coerce the value into a ``TightCouplingSettings`` dictionary. """ def __init__(self, name): description = ( "Data structure defining the tight coupling parameters and convergence criteria for each interface." 
) label = "Interface Tight Coupling Control" default = TightCouplingSettings() options = None schema = tightCouplingSettingsValidator enforcedOptions = False subLabels = None isEnvironment = False oldNames = None Setting.__init__( self, name, default, description, label, options, schema, enforcedOptions, subLabels, isEnvironment, oldNames, ) def dump(self): """Return a serialized version of the ``TightCouplingSettings`` object.""" return serializeTightCouplingSettings(self._value) def tightCouplingSettingsValidator(tightCouplingSettingsDict: Dict[str, Dict]) -> TightCouplingSettings: """Returns a ``TightCouplingSettings`` object if validation is successful.""" tightCouplingSettingsDict = serializeTightCouplingSettings(tightCouplingSettingsDict) tightCouplingSettingsDict = _SCHEMA(tightCouplingSettingsDict) vals = TightCouplingSettings() for interfaceFunction, inputParams in tightCouplingSettingsDict.items(): vals[interfaceFunction] = inputParams return vals ================================================ FILE: armi/settings/setting.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ System to handle basic configuration settings. Notes ----- The type of each Setting is derived from the type of the default value. When users set values to their settings, ARMI enforces these types with schema validation. 
This also allows for more complex schema validation for settings that are more complex dictionaries (e.g. XS, rx
coeffs).
"""

import copy
import datetime
from collections import namedtuple
from typing import List, Optional, Tuple

import voluptuous as vol

from armi import runLog
from armi.reactor.flags import Flags

# Options are used to imbue existing settings with new Options. This allows a setting like `neutronicsKernel` to
# strictly enforce options, even though the plugin that defines it does not know all possible options, which may be
# provided from other plugins.
Option = namedtuple("Option", ["option", "settingName"])
Default = namedtuple("Default", ["value", "settingName"])


class Setting:
    """
    A particular setting.

    .. impl:: The setting default is mandatory.
        :id: I_ARMI_SETTINGS_DEFAULTS
        :implements: R_ARMI_SETTINGS_DEFAULTS

        Setting objects hold all associated information of a setting in ARMI and should typically be accessed through
        the Settings methods rather than directly. Settings require a mandatory default value.

    Setting subclasses can implement custom ``load`` and ``dump`` methods that can enable serialization (to/from
    dicts) of custom objects. When you set a setting's value, the value will be unserialized into the custom object
    and when you call ``dump``, it will be serialized. Just accessing the value will return the actual object in this
    case.
    """

    def __init__(
        self,
        name,
        default,
        description,
        label=None,
        options=None,
        schema=None,
        enforcedOptions=False,
        subLabels=None,
        isEnvironment=False,
        oldNames: Optional[List[Tuple[str, Optional[datetime.date]]]] = None,
    ):
        """
        Initialize a Setting object.

        Parameters
        ----------
        name : str
            the setting's name
        default : object
            The setting's default value
        description : str
            The description of the setting
        label : str, optional
            the shorter description used for the ARMI GUI
        options : list, optional
            Legal values (useful in GUI drop-downs)
        schema : callable, optional
            A function that gets called with the configuration VALUES that build this setting. The callable will
            either raise an exception, safely modify/update, or leave unchanged the value. If left blank, a type
            check will be performed against the default.
        enforcedOptions : bool, optional
            Require that the value be one of the valid options.
        subLabels : tuple, optional
            The names of the fields in each tuple for a setting that accepts a list of tuples. For example, if a
            setting is a list of (assembly name, file name) tuples, the sublabels would be
            ("assembly name", "file name"). This is needed for building GUI widgets to input such data.
        isEnvironment : bool, optional
            Whether this should be considered an "environment" setting. These can be used by the Case system to
            propagate environment options through command-line flags.
        oldNames : list of tuple, optional
            List of previous names that this setting used to have, along with optional expiration dates. These can
            aid in automatic migration of old inputs. When provided, if it is appears that the expiration date has
            passed, old names will result in errors, requiring to user to update their input by hand to use more
            current names.
        """
        # A description is mandatory; "None" (the string) guards against templated
        # definitions that stringify a missing description.
        assert description, f"Setting {name} defined without description."
        assert description != "None", f"Setting {name} defined without description."
        self.name = name
        self.description = description or name
        self.label = label or name
        self.options = options
        self.enforcedOptions = enforcedOptions
        self.subLabels = subLabels
        self.isEnvironment = isEnvironment
        self.oldNames: List[Tuple[str, Optional[datetime.date]]] = oldNames or []
        self._default = default
        self._value = copy.deepcopy(default)  # break link from _default
        # Retain the passed schema so that we don't accidentally stomp on it in addOptions(), et.al.
        self._customSchema = schema
        self._setSchema()

    @property
    def underlyingType(self):
        """Useful in categorizing settings, e.g. for GUI."""
        return type(self._default)

    @property
    def containedType(self):
        """The subtype for lists."""
        # assume schema set to [int] or [str] or something similar
        try:
            containedSchema = self.schema.schema[0]
            if isinstance(containedSchema, vol.Coerce):
                # special case for Coerce objects, which store their underlying type as ``.type``.
                return containedSchema.type
            return containedSchema
        except TypeError:
            # cannot infer. fall back to str
            return str

    def _setSchema(self):
        """Apply or auto-derive schema of the value."""
        schema = self._customSchema
        if schema:
            # A user-supplied schema always wins.
            self.schema = schema
        elif self.options and self.enforcedOptions:
            # Value must be one of the declared options.
            self.schema = vol.Schema(vol.In(self.options))
        else:
            # Coercion is needed in some GUI instances where lists are getting set as strings.
            if isinstance(self.default, list) and self.default:
                # Non-empty default: assume the default has the desired contained type Coerce all values to the first
                # entry in the default so mixed floats and ints work. Note that this will not work for settings that
                # allow mixed types in their lists (e.g. [0, '10R']), so those all need custom schemas.
                self.schema = vol.Schema([vol.Coerce(type(self.default[0]))])
            else:
                self.schema = vol.Schema(vol.Coerce(type(self.default)))

    @property
    def default(self):
        return self._default

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, val):
        """
        Set the value directly.
        Notes
        -----
        Can't just decorate ``setValue`` with ``@value.setter`` because some callers use setting.value=val and others
        use setting.setValue(val) and the latter fails with ``TypeError: 'XSSettings' object is not callable``.
        """
        return self.setValue(val)

    def setValue(self, val):
        """
        Set value of a setting.

        This validates it against its value schema on the way in.

        Some setting values are custom serializable objects. Rather than writing them directly to YAML using YAML's
        Python object-writing features, we prefer to use our own custom serializers on subclasses.
        """
        try:
            # Schema may coerce/transform the value, not just validate it.
            val = self.schema(val)
        except vol.error.Invalid:
            runLog.error(f"Error in setting {self.name}, val: {val}.")
            raise

        # Subclasses may deserialize into a custom object here.
        self._value = self._load(val)

    def addOptions(self, options: List[Option]):
        """Extend this Setting's options with extra options."""
        try:
            self.options.extend([o.option for o in options])
        except AttributeError:
            # self.options is None (or otherwise non-extendable): give a targeted message.
            if self.options is None:
                msg = (
                    f"The Setting {self.name} has no default options, it looks like you want to add that to the "
                    + "definition."
                )
                runLog.error(msg)
                raise AttributeError(msg)
            else:
                raise
        # Rebuild the schema so the new options are enforced (if applicable).
        self._setSchema()

    def addOption(self, option: Option):
        """Extend this Setting's options with an extra option."""
        self.addOptions([option])

    def changeDefault(self, newDefault: Default):
        """Change the default of a setting, and also the current value."""
        self._default = newDefault.value
        # Assigning through the property runs schema validation on the new value.
        self.value = newDefault.value

    @staticmethod
    def _load(inputVal):
        """
        Create setting value from input value.

        In some custom settings, this can return a custom object rather than just the input value.
        """
        return inputVal

    def dump(self):
        """
        Return a serializable version of this setting's value.

        Override to define custom deserializers for custom/compound settings.
        """
        return self._value

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.name} value:{self.value} default:{self.default}>"

    def __getstate__(self):
        """
        Remove schema during pickling because it is often unpickleable.

        Notes
        -----
        Errors are often with
        ``AttributeError: Can't pickle local object '_compile_scalar.<locals>.validate_instance'``

        See Also
        --------
        armi.settings.caseSettings.Settings.__setstate__ : regenerates the schema upon load. Note that we don't do it
        at the individual setting level because it'd be too O(N^2).
        """
        state = copy.deepcopy(self.__dict__)
        for trouble in ("schema", "_customSchema"):
            if trouble in state:
                del state[trouble]
        return state

    def revertToDefault(self):
        """
        Revert a setting back to its default.

        Notes
        -----
        Skips the property setter because default val should already be validated.
        """
        self._value = copy.deepcopy(self.default)

    def isDefault(self):
        """
        Returns a boolean based on whether or not the setting equals its default value.

        It is possible for a setting to change and not be reported as such when it is changed back to its default.
        That behavior seems acceptable.
        """
        return self.value == self.default

    @property
    def offDefault(self):
        """Return True if the setting is not the default value for that setting."""
        return not self.isDefault()

    def getCustomAttributes(self):
        """Hack to work with settings writing system until old one is gone."""
        return {"value": self.value}

    def getDefaultAttributes(self):
        """
        Additional hack, residual from when settings system could write settings definitions.

        This is only needed here due to the unit tests in test_settings.
        """
        return {
            "value": self.value,
            "type": type(self.default),
            "default": self.default,
        }

    def __copy__(self):
        # Copy every constructor argument explicitly; the value is deep-copied
        # separately because the constructor would otherwise reset it to default.
        setting = Setting(
            str(self.name),
            copy.copy(self._default),
            description=None if self.description is None else str(self.description),
            label=None if self.label is None else str(self.label),
            options=copy.copy(self.options),
            # schema may already have been stripped by __getstate__ during pickling
            schema=copy.copy(self.schema) if hasattr(self, "schema") else None,
            enforcedOptions=bool(self.enforcedOptions),
            subLabels=copy.copy(self.subLabels),
            isEnvironment=bool(self.isEnvironment),
            oldNames=None if self.oldNames is None else list(self.oldNames),
        )
        setting._value = copy.deepcopy(self._value)
        return setting


class FlagListSetting(Setting):
    """Subclass of :py:class:`Setting <armi.settings.Setting>` to convert settings between flags and strings."""

    def __init__(
        self,
        name,
        default,
        description=None,
        label=None,
        oldNames: Optional[List[Tuple[str, Optional[datetime.date]]]] = None,
    ):
        Setting.__init__(
            self,
            name=name,
            default=default,
            description=description,
            label=label,
            options=None,
            # the static ``schema`` method below does the validation/conversion
            schema=self.schema,
            enforcedOptions=None,
            subLabels=None,
            isEnvironment=False,
            oldNames=oldNames,
        )

    @staticmethod
    def schema(val) -> List[Flags]:
        """
        Return a list of :py:class:`Flags <armi.reactor.flags.Flags`.

        Raises
        ------
        TypeError
            When ``val`` is not a list.
        ValueError
            When ``val`` is not an instance of str or Flags.
        """
        if not isinstance(val, list):
            raise TypeError(f"Expected `{val}` to be a list.")

        flagVals = []
        for v in val:
            if isinstance(v, str):
                # strings (e.g. from YAML input) are converted to Flags
                flagVals.append(Flags.fromString(v))
            elif isinstance(v, Flags):
                flagVals.append(v)
            else:
                raise ValueError(f"Invalid flag input `{v}` in `FlagListSetting`")
        return flagVals

    def dump(self) -> List[str]:
        """Return a list of strings converted from the flag values."""
        return [Flags.toString(v) for v in self.value]


================================================
FILE: armi/settings/settingsIO.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains classes and methods for reading and writing
:py:class:`~armi.settings.caseSettings.Settings`, and the contained :py:class:`~armi.settings.setting.Setting`.
""" import collections import datetime import os import sys from typing import Dict, Set, Tuple import ruamel.yaml.comments from ruamel.yaml import YAML from armi import context, runLog from armi.meta import __version__ as version from armi.settings.setting import Setting from armi.utils.customExceptions import ( InvalidSettingsFileError, InvalidSettingsStopProcess, SettingException, ) # Constants defining valid output styles WRITE_SHORT = "short" WRITE_MEDIUM = "medium" WRITE_FULL = "full" class Roots: """XML tree root node common strings.""" CUSTOM = "settings" VERSION = "version" class SettingRenamer: """ Utility class to help with setting rename migrations. This class stores a cache of renaming maps, derived from the ``Setting.oldNames`` values of the passed ``settings``. Expired renames are retained, so that meaningful warning messages can be generated if one attempts to use one of them. The renaming logic follows the rules described in :py:meth:`renameSetting`. """ def __init__(self, settings: Dict[str, Setting]): self._currentNames: Set[str] = set() self._activeRenames: Dict[str, str] = dict() self._expiredRenames: Set[Tuple[str, str, datetime.date]] = set() today = datetime.date.today() for name, s in settings.items(): self._currentNames.add(name) for oldName, expiry in s.oldNames: if expiry is not None: expired = expiry <= today else: expired = False if expired: self._expiredRenames.add((oldName, name, expiry)) else: if oldName in self._activeRenames: raise SettingException( "The setting rename from {0}->{1} collides with another rename {0}->{2}".format( oldName, name, self._activeRenames[oldName] ) ) self._activeRenames[oldName] = name def renameSetting(self, name) -> Tuple[str, bool]: """ Attempt to rename a candidate setting. Renaming follows these rules: - If the ``name`` corresponds to a current setting name, do not attempt to rename it. 
- If the ``name`` does not correspond to a current setting name, but is one of the active renames, return the corresponding active rename. - If the ``name`` does not correspond to a current setting name, but is one of the expired renames, produce a warning and do not rename it. Parameters ---------- name : str The candidate setting name to potentially rename. Returns ------- name : str The potentially-renamed setting renamed : bool Whether the setting was actually renamed """ if name in self._currentNames: return name, False activeRename = self._activeRenames.get(name, None) if activeRename is not None: runLog.extra(f"Invalid setting {name} found. Renaming to {activeRename}.", single=True) return activeRename, True return name, False class SettingsReader: """Abstract class for processing settings files. .. impl:: The setting use a human-readable, plain text file as input. :id: I_ARMI_SETTINGS_IO_TXT :implements: R_ARMI_SETTINGS_IO_TXT ARMI uses the YAML standard for settings files. ARMI uses industry-standard ``ruamel.yaml`` Python library to read these files. ARMI does not bend or change the YAML file format standard in any way. Parameters ---------- cs : Settings The settings object to read into """ def __init__(self, cs): self.cs = cs self.inputPath = "<stream>" self.invalidSettings = set() self.settingsAlreadyRead = set() self._renamer = SettingRenamer(dict(self.cs.items())) # The input version will be overwritten if explicitly stated in input file. Otherwise, it's # assumed to precede the version inclusion change and should be treated as alright. 
self.inputVersion = version self.liveVersion = version def __getitem__(self, key): return self.cs[key] def __getattr__(self, attr): return getattr(self.cs, attr) def __repr__(self): return f"<{self.__class__.__name__} {self.inputPath}>" def readFromFile(self, path, handleInvalids=True): """Load file and read it.""" with open(path, "r") as f: ext = os.path.splitext(path)[1].lower() assert ext.lower() in (".yaml", ".yml"), f"{ext} is the wrong extension" self.inputPath = path try: self.readFromStream(f, handleInvalids) except Exception as ee: raise InvalidSettingsFileError(path, str(ee)) def readFromStream(self, stream, handleInvalids=True): """Read from a file-like stream.""" self._readYaml(stream) if handleInvalids: self._checkInvalidSettings() def _readYaml(self, stream): """Read settings from a YAML stream.""" from armi.physics.thermalHydraulics import const # avoid circular import from armi.settings.fwSettings.globalSettings import CONF_VERSIONS yaml = YAML(typ="rt") yaml.allow_duplicate_keys = False tree = yaml.load(stream) if "settings" not in tree: raise InvalidSettingsFileError( self.inputPath, "Missing the `settings:` header required in YAML settings", ) if const.ORIFICE_SETTING_ZONE_MAP in tree: raise InvalidSettingsFileError(self.inputPath, "Appears to be an orifice_settings file") caseSettings = tree[Roots.CUSTOM] setts = tree["settings"] if CONF_VERSIONS in setts and "armi" in setts[CONF_VERSIONS]: self.inputVersion = setts[CONF_VERSIONS]["armi"] else: # Versions setting section not found; continuing with uncontrolled versions. self.inputVersion = "uncontrolled" for settingName, settingVal in caseSettings.items(): self._applySettings(settingName, settingVal) def _checkInvalidSettings(self): if not self.invalidSettings: return try: invalidNames = "\n\t".join(self.invalidSettings) proceed = prompt( "Found {} invalid settings in {}.\n\n {} \n\t".format( len(self.invalidSettings), self.inputPath, invalidNames ), "Invalid settings will be ignored. 
Continue running the case?", "YES_NO", ) except RunLogPromptUnresolvable: # proceed with invalid settings (they'll be ignored). proceed = True if not proceed: raise InvalidSettingsStopProcess(self) else: runLog.info(f"Ignoring invalid settings: {invalidNames}") def _applySettings(self, name, val): """Add a setting, if it is valid. Capture invalid settings.""" _nameToSet, _wasRenamed = self._renamer.renameSetting(name) if name not in self.cs: self.invalidSettings.add(name) else: # apply validations _settingObj = self.cs.getSetting(name) # The val is automatically coerced into the expected type when set using either the default or user-defined # schema self.cs[name] = val class SettingsWriter: """Writes settings out to files. This can write in three styles: short setting values that are not their defaults only medium preserves all settings originally in file even if they match the default value full all setting values regardless of default status """ def __init__(self, settings_instance, style="short", settingsSetByUser=[]): self.cs = settings_instance self.style = style if style not in {WRITE_SHORT, WRITE_MEDIUM, WRITE_FULL}: raise ValueError(f"Invalid supplied setting writing style {style}") # The writer should know about the old settings it is overwriting, but only sometimes (when the style is medium) self.settingsSetByUser = settingsSetByUser @staticmethod def _getTag(): tag, _attrib = Roots.CUSTOM, {Roots.VERSION: version} return tag def writeYaml(self, stream): """Write settings to YAML file.""" settingData = self._getSettingDataToWrite() settingData = self._preprocessYaml(settingData) yaml = YAML() yaml.default_flow_style = False yaml.indent(mapping=2, sequence=4, offset=2) yaml.dump(settingData, stream) def _preprocessYaml(self, settingData): """ Clean up the dict before dumping to YAML. If it has just a value attrib it flattens it for brevity. 
""" from armi.settings.fwSettings.globalSettings import CONF_VERSIONS yamlData = {} cleanedData = collections.OrderedDict() for settingObj, settingDatum in settingData.items(): if "value" in settingDatum and len(settingDatum) == 1: # ok to flatten cleanedData[settingObj.name] = settingObj.dump() else: cleanedData[settingObj.name] = settingDatum # add ARMI version to the settings YAML if CONF_VERSIONS not in cleanedData: cleanedData[CONF_VERSIONS] = {} cleanedData[CONF_VERSIONS]["armi"] = version # this gets rid of a !!omap associated with ordered dicts tag = self._getTag() yamlData.update({tag: ruamel.yaml.comments.CommentedMap(cleanedData)}) return yamlData def _getSettingDataToWrite(self): """ Make an ordered dict with all settings slated for being written. This is general so it can be dumped to whatever file format. """ settingData = collections.OrderedDict() for settingName, settingObject in iter(sorted(self.cs.items(), key=lambda name: name[0].lower())): if self.style == WRITE_SHORT and not settingObject.offDefault: continue if ( self.style == WRITE_MEDIUM and not settingObject.offDefault and settingName not in self.settingsSetByUser ): continue attribs = settingObject.getCustomAttributes().items() settingDatum = {} for attribName, attribValue in attribs: if isinstance(attribValue, type): attribValue = attribValue.__name__ settingDatum[attribName] = attribValue settingData[settingObject] = settingDatum return settingData def prompt(statement, question, *options): """Prompt the user for some information.""" if context.CURRENT_MODE == context.Mode.GUI: # avoid hard dependency on wx import wx msg = statement + "\n\n\n" + question style = wx.CENTER for opt in options: style |= getattr(wx, opt) dlg = wx.MessageDialog(None, msg, style=style) response = dlg.ShowModal() dlg.Destroy() if response == wx.ID_CANCEL: raise RunLogPromptCancel("Manual cancellation of GUI prompt") return response in [wx.ID_OK, wx.ID_YES] elif context.CURRENT_MODE == context.Mode.INTERACTIVE: 
response = "" responses = [opt for opt in options if opt in ["YES_NO", "YES", "NO", "CANCEL", "OK"]] if "YES_NO" in responses: index = responses.index("YES_NO") responses[index] = "NO" responses.insert(index, "YES") if not any(responses): raise RuntimeError(f"No suitable responses in {responses}") # highly requested shorthand responses if "YES" in responses: responses.append("Y") if "NO" in responses: responses.append("N") # Use the logger tools to handle user prompts (runLog supports this). while response not in responses: runLog.LOG.log("prompt", statement) runLog.LOG.log("prompt", "{} ({}): ".format(question, ", ".join(responses))) response = sys.stdin.readline().strip().upper() if response == "CANCEL": raise RunLogPromptCancel("Manual cancellation of interactive prompt") return response in ["YES", "Y", "OK"] else: raise RunLogPromptUnresolvable(f"Incorrect CURRENT_MODE for prompting user: {context.CURRENT_MODE}") class RunLogPromptCancel(Exception): """An error that occurs when the user submits a cancel on a runLog prompt which allows for cancellation.""" pass class RunLogPromptUnresolvable(Exception): """ An error that occurs when the current mode enum in armi.__init__ suggests the user cannot be communicated with from the current process. """ pass ================================================ FILE: armi/settings/settingsValidation.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" A system to check user settings for validity and provide users with meaningful suggestions to fix. This allows developers to define a rich set of rules and suggestions for user settings. These then pop up during initialization of a run, either on the command line or as dialogues in the GUI. They say things like: "Your ___ setting has the value ___, which is impossible. Would you like to switch to ___?" """ import itertools import os import re from armi import context, getPluginManagerOrFail, runLog from armi.settings.settingsIO import ( RunLogPromptCancel, RunLogPromptUnresolvable, prompt, ) from armi.utils import pathTools, safeCopy from armi.utils.mathematics import expandRepeatedFloats class Query: """ An individual setting validator. .. impl:: Rules to validate and customize a setting's behavior. :id: I_ARMI_SETTINGS_RULES :implements: R_ARMI_SETTINGS_RULES This class is meant to represent a generic validation test against a setting. The goal is: developers create new settings and they want to make sure those settings are used correctly. As an implementation, users pass in a ``condition`` function to this class that returns ``True`` or ``False`` based on the setting name and value. And then this class has a ``resolve`` method which tests if the condition is met. Optionally, this class also contains a ``correction`` function that allows users to automatically correct a bad setting, if the developers can find a clear path forward. """ def __init__(self, condition, statement, question, correction): """ Construct a query. Parameters ---------- condition : callable A callable that returns True or False. If True, then the query activates its question and potential correction. statement : str A statement of the problem indicated by a True condition question : str A question asking the user for confirmation of the proposed fix. correction : callable A callable that when called fixes the situation. See :py:meth:`Inspector.NO_ACTION` for no-ops. 
""" self.condition = condition self.statement = statement self.question = question self.correction = correction # True if the query is `passed` and does not result in an immediate failure self.corrected = False self._passed = False self.autoResolved = True def __repr__(self): # Add representation so that it's possible to identify which one is being referred to when there are errors. return f"<Query: {self.statement}>" def __bool__(self): try: return bool(self.condition()) except TypeError: runLog.error(f"Invalid setting validation query. Update validator for: {self})") raise def isCorrective(self): return self.correction is not Inspector.NO_ACTION def resolve(self): """Standard i/o prompt for resolution of an individual query.""" if context.MPI_RANK != 0: return if self.condition(): try: if self.isCorrective(): try: makeCorrection = prompt( "INSPECTOR: " + self.statement, self.question, "YES_NO", "NO_DEFAULT", "CANCEL", ) if makeCorrection: self.correction() self.corrected = True self._passed = True except RunLogPromptCancel: raise SystemExit("You have cancelled the submission.") else: try: continueSubmission = prompt( "INSPECTOR: " + self.statement, "Continue?", "YES_NO", "NO_DEFAULT", ) if not continueSubmission: raise SystemExit("You have cancelled the submission.") except RunLogPromptCancel: raise SystemExit("You have cancelled the submission.") except RunLogPromptUnresolvable: self.autoResolved = False self._passed = True class Inspector: """ This manages queries which assert certain states of the data model, generally presenting themselves to the user, offering information on the potential problem, a question and the action to take on an affirmative and negative answer from the user. In practice very useful for making sure setting values are as intended and without bad interplay with one another. One Inspector will contain multiple Queries and be associated directly with an :py:class:`~armi.operators.operator.Operator`. 
""" @staticmethod def NO_ACTION(): """Convenience callable used to generate Queries that can't be easily auto-resolved.""" return None def __init__(self, cs): """ Construct an inspector. Parameters ---------- cs : Settings """ self.queries = [] self.cs = cs self.geomType = None self.coreSymmetry = None self._inspectBlueprints() self._inspectSettings() # Gather and attach validators from all plugins. This runs on all registered plugins, not just active ones. pluginQueries = getPluginManagerOrFail().hook.defineSettingsValidators(inspector=self) for queries in pluginQueries: self.queries.extend(queries) def run(self, cs=None): """ Run through each query and deal with it if possible. Returns ------- correctionsMade : bool Whether or not anything was updated. Raises ------ RuntimeError When a programming error causes queries to loop. """ if context.MPI_RANK != 0: return False # the following attribute changes will alter what the queries investigate when resolved correctionsMade = False self.cs = cs or self.cs runLog.debug("{} executing queries.".format(self.__class__.__name__)) if not any(self.queries): runLog.debug("{} found no problems with the current state.".format(self.__class__.__name__)) else: for query in self.queries: query.resolve() if query.corrected: correctionsMade = True issues = [query for query in self.queries if query and (query.isCorrective() and not query._passed)] if any(issues): # something isn't resolved or was unresolved by changes raise RuntimeError( "The input inspection did not resolve all queries, " "some issues are creating cyclic resolutions: {}".format(issues) ) runLog.debug("{} has finished querying.".format(self.__class__.__name__)) if correctionsMade: # find unused file path to store original settings as to avoid overwrite strSkeleton = "{}_old".format(self.cs.path.split(".yaml")[0]) for num in itertools.count(): if num == 0: renamePath = f"{strSkeleton}.yaml" else: renamePath = f"{strSkeleton}{num}.yaml" if not 
self._csRelativePathExists(renamePath): break # preserve old file before saving settings file runLog.important(f"Preserving original settings file by renaming `{renamePath}`") safeCopy(self.cs.path, renamePath) # save settings file self.cs.writeToYamlFile(self.cs.path) return correctionsMade def addQuery(self, condition, statement, question, correction): """Convenience method, query must be resolved, else run fails.""" if not callable(correction): raise ValueError('Query for "{}" malformed. Expecting callable.'.format(statement)) self.queries.append(Query(condition, statement, question, correction)) def addQueryBadLocationWillLikelyFail(self, settingName): """Add a query indicating the current path for ``settingName`` does not exist and will likely fail.""" self.addQuery( lambda: not os.path.exists(pathTools.armiAbsPath(self.cs[settingName])), "Setting {} points to nonexistent location\n{}\nFailure extremely likely".format( settingName, self.cs[settingName] ), "", self.NO_ACTION, ) def addQueryCurrentSettingMayNotSupportFeatures(self, settingName): """Add a query that the current value for ``settingName`` may not support certain features.""" self.addQuery( lambda: self.cs[settingName] != self.cs.getSetting(settingName).default, "{} set as:\n{}\nUsing this location instead of the default location\n{}\n" "may not support certain functions.".format( settingName, self.cs[settingName], self.cs.getSetting(settingName).default, ), "Revert to default location?", lambda: self._assignCS(settingName, self.cs.getSetting(settingName).default), ) def _assignCS(self, key, value): """Lambda assignment workaround.""" # this type of assignment works, but be mindful of scoping when trying different methods runLog.extra(f"Updating setting `{key}` to `{value}`") self.cs[key] = value def _raise(self): raise KeyboardInterrupt("Input inspection has been interrupted.") def _inspectBlueprints(self): """Blueprints early error detection and old format conversions.""" from 
armi.physics.neutronics.settings import CONF_LOADING_FILE # if there is a blueprints object, we don't need to check for a file if self.cs.filelessBP: return self.addQuery( lambda: not self.cs[CONF_LOADING_FILE], "No blueprints file loaded. Run will probably fail.", "", self.NO_ACTION, ) self.addQuery( lambda: not self._csRelativePathExists(self.cs[CONF_LOADING_FILE]), "Blueprints file {} not found. Run will fail.".format(self.cs[CONF_LOADING_FILE]), "", self.NO_ACTION, ) def _csRelativePathExists(self, filename): csRelativePath = self._csRelativePath(filename) return os.path.exists(csRelativePath) and os.path.isfile(csRelativePath) def _csRelativePath(self, filename): return os.path.join(self.cs.inputDirectory, filename) def _correctCyclesToZeroBurnup(self): self._assignCS("nCycles", 1) self._assignCS("burnSteps", 0) self._assignCS("cycleLength", None) self._assignCS("cycleLengths", None) self._assignCS("availabilityFactor", None) self._assignCS("availabilityFactors", None) self._assignCS("cycles", []) def _checkForBothSimpleAndDetailedCyclesInputs(self): """ Because the only way to check if a setting has been "entered" is to check against the default, if the user specifies all the simple cycle settings exactly as the defaults, this won't be caught. But, it would be very coincidental for the user to _specify_ all the default values when performing any real analysis. Also, we must bypass the `Settings` getter and reach directly into the underlying `__settings` dict to avoid triggering an error at this stage in the run. Otherwise an error will inherently be raised if the detailed cycles input is used because the simple cycles inputs have defaults. We don't care that those defaults are there, we only have a problem with those defaults being _used_, which will be caught later on. 
""" bothCyclesInputTypesPresent = ( self.cs._Settings__settings["cycleLength"].value != self.cs._Settings__settings["cycleLength"].default or self.cs._Settings__settings["cycleLengths"].value != self.cs._Settings__settings["cycleLengths"].default or self.cs._Settings__settings["burnSteps"].value != self.cs._Settings__settings["burnSteps"].default or self.cs._Settings__settings["availabilityFactor"].value != self.cs._Settings__settings["availabilityFactor"].default or self.cs._Settings__settings["availabilityFactors"].value != self.cs._Settings__settings["availabilityFactors"].default or self.cs._Settings__settings["powerFractions"].value != self.cs._Settings__settings["powerFractions"].default ) and self.cs["cycles"] != [] return bothCyclesInputTypesPresent def _inspectSettings(self): """Check settings for inconsistencies.""" from armi import operators from armi.physics.fuelCycle.settings import ( CONF_SHUFFLE_LOGIC, CONF_SHUFFLE_SEQUENCE_FILE, ) from armi.settings.fwSettings.globalSettings import ( CONF_EXPLICIT_REPEAT_SHUFFLES, CONF_ZONE_DEFINITIONS, CONF_ZONES_FILE, ) self.addQuery( lambda: self.cs["outputFileExtension"] == "pdf" and self.cs["genReports"], "Output files of '.pdf' format are not supported by the reporting HTML generator. '.pdf' " "images will not be included.", "Switch to '.png'?", lambda: self._assignCS("outputFileExtension", "png"), ) ( self.addQuery( lambda: ( (self.cs["beta"] and isinstance(self.cs["beta"], list) and not self.cs["decayConstants"]) or (self.cs["decayConstants"] and not self.cs["beta"]) ), "Both beta components and decay constants should be provided if either are being supplied.", "", self.NO_ACTION, ), ) self.addQuery( lambda: self.cs["skipCycles"] > 0 and not self.cs["reloadDBName"], "You have chosen to do a restart case without specifying a database to load from. " "Run will load from output files, if they exist but burnup, etc. 
will not be updated.", "", self.NO_ACTION, ) self.addQuery( lambda: self.cs["runType"] != operators.RunTypes.SNAPSHOTS and self.cs["loadStyle"] == "fromDB" and self.cs["startCycle"] == 0 and self.cs["startNode"] == 0, "Starting from cycle 0, and time node 0 was chosen. Restart runs load from " "the time node just before the restart. There is no time node to load from " "before cycle 0 node 0. Either switch to the snapshot operator, start from " "a different time step or load from inputs rather than database as " "`loadStyle`.", "", self.NO_ACTION, ) self.addQuery( lambda: self.cs["runType"] == operators.RunTypes.SNAPSHOTS and not (self.cs["dumpSnapshot"] or self.cs["defaultSnapshots"]), "The Snapshots operator was specified, but no dump snapshots were chosen." "Please specify snapshot steps with the `dumpSnapshot` setting.", "", self.NO_ACTION, ) self.addQuery( lambda: self.cs.caseTitle.lower() == os.path.splitext(os.path.basename(self.cs["reloadDBName"].lower()))[0], "Snapshot DB ({0}) and main DB ({1}) cannot have the same name." "Change name of settings file and resubmit.".format(self.cs["reloadDBName"], self.cs.caseTitle), "", self.NO_ACTION, ) self.addQuery( lambda: self.cs["reloadDBName"] != "" and not os.path.exists(self.cs["reloadDBName"]), "Reload database {} does not exist. \nPlease point to an existing DB, " "or set to empty and load from input.".format(self.cs["reloadDBName"]), "", self.NO_ACTION, ) def _willBeCopiedFrom(fName): return any(fName == os.path.split(copyFile)[1] for copyFile in self.cs["copyFilesFrom"]) self.addQuery( lambda: self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES] and not self._csRelativePathExists(self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES]) and not _willBeCopiedFrom(self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES]), "The specified repeat shuffle file `{0}` does not exist, and won't be copied. 
Run will crash.".format( self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES] ), "", self.NO_ACTION, ) self.addQuery( lambda: self.cs[CONF_SHUFFLE_SEQUENCE_FILE] and not self._csRelativePathExists(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]) and not _willBeCopiedFrom(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]), "The specified shuffle sequence file `{0}` does not exist. Run will crash.".format( self.cs[CONF_SHUFFLE_SEQUENCE_FILE] ), "", self.NO_ACTION, ) self.addQuery( lambda: ( bool(self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES]) and (bool(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]) or bool(self.cs[CONF_SHUFFLE_LOGIC])) ), "explicitRepeatShuffles cannot be used together with shuffleSequenceFile or shuffleLogic. " "Please specify either explicitRepeatShuffles alone, or some combination of shuffleSequenceFile" "and shuffleLogic.", "", self.NO_ACTION, ) self.addQuery( lambda: not self.cs["power"] and not self.cs["powerDensity"], "No power or powerDensity set. You must always start by importing a base settings file.", "", self.NO_ACTION, ) self.addQuery( lambda: self.cs["power"] > 0 and self.cs["powerDensity"] > 0, "The power and powerDensity are both set, please note the power will be used as the truth.", "", self.NO_ACTION, ) self.addQuery( lambda: self.cs["outputCacheLocation"] and not os.path.exists(self.cs["outputCacheLocation"]), "`outputCacheLocation` path {} does not exist. Please specify a location that exists.".format( self.cs["outputCacheLocation"] ), "", self.NO_ACTION, ) self.addQuery( lambda: (not self.cs["tightCoupling"] and self.cs["tightCouplingMaxNumIters"] != 4), "You've requested a non default number of tight coupling iterations but left tightCoupling: False." "Do you want to set tightCoupling to True?", "", lambda: self._assignCS("tightCoupling", True), ) self.addQuery( lambda: (not self.cs["tightCoupling"] and self.cs["tightCouplingSettings"]), "You've requested non default tight coupling settings but tightCoupling: False." 
"Do you want to set tightCoupling to True?", "", lambda: self._assignCS("tightCoupling", True), ) self.addQuery( lambda: self.cs["startCycle"] and self.cs["nCycles"] < self.cs["startCycle"], "nCycles must be greater than or equal to startCycle in restart cases. nCycles" " is the _total_ number of cycles in the completed run (i.e. restarted +" " continued cycles). Please update the case settings.", "", self.NO_ACTION, ) self.addQuery( lambda: self.cs["nCycles"] in [0, None], "Cannot run 0 cycles. Set burnSteps to 0 to activate a single time-independent case.", "Set 1 cycle and 0 burnSteps for single time-independent case?", self._correctCyclesToZeroBurnup, ) self.addQuery( self._checkForBothSimpleAndDetailedCyclesInputs, "If specifying detailed cycle history with `cycles`, you may not" " also use any of the simple cycle history inputs `cycleLength(s)`," " `burnSteps`, `availabilityFactor(s)`, or `powerFractions`." " Using the detailed cycle history.", "", self.NO_ACTION, ) def _factorsAreValid(factors, maxVal=1.0): try: expandedList = expandRepeatedFloats(factors) except (ValueError, IndexError): return False return all(0.0 <= val <= maxVal for val in expandedList) and len(expandedList) == self.cs["nCycles"] if self.cs["cycles"] == []: self.addQuery( lambda: (self.cs["availabilityFactors"] and not _factorsAreValid(self.cs["availabilityFactors"])), "`availabilityFactors` was not set to a list compatible with the number of cycles. " "Please update input or use constant duration.", "Use constant availability factor specified in `availabilityFactor` setting?", lambda: self._assignCS("availabilityFactors", []), ) self.addQuery( lambda: (self.cs["powerFractions"] and not _factorsAreValid(self.cs["powerFractions"])), "`powerFractions` was not set to a compatible list. 
" "Please update input or use full power at all cycles.", "Use full power for all cycles?", lambda: self._assignCS("powerFractions", []), ) self.addQuery( lambda: (self.cs["cycleLengths"] and not _factorsAreValid(self.cs["cycleLengths"], maxVal=1e10)), "`cycleLengths` was not set to a list compatible with the number of cycles." " Please update input or use constant duration.", "Use constant cycle length specified in `cycleLength` setting?", lambda: self._assignCS("cycleLengths", []), ) self.addQuery( lambda: ( self.cs["runType"] == operators.RunTypes.STANDARD and self.cs["burnSteps"] == 0 and ( (len(self.cs["cycleLengths"]) > 1 if self.cs["cycleLengths"] is not None else False) or self.cs["nCycles"] > 1 ) ), "Cannot run multi-cycle standard cases with 0 burnSteps per cycle. Please update settings.", "", self.NO_ACTION, ) def decayCyclesHaveInputThatWillBeIgnored(): """Check if there is any decay-related input that will be ignored.""" try: powerFracs = expandRepeatedFloats(self.cs["powerFractions"]) availabilities = expandRepeatedFloats(self.cs["availabilityFactors"]) or ( [self.cs["availabilityFactor"]] * self.cs["nCycles"] ) except Exception: return True # This will be a full decay step and any power fraction will be ignored. May be ok. return any(pf > 0.0 and af == 0.0 for pf, af in zip(powerFracs, availabilities)) self.addQuery( lambda: ( self.cs["cycleLengths"] and self.cs["powerFractions"] and decayCyclesHaveInputThatWillBeIgnored() and not self.cs["cycles"] ), "At least one cycle has a non-zero power fraction but an availability of zero. 
Please " "update the input.", "", self.NO_ACTION, ) self.addQuery( lambda: self.cs["skipCycles"] > 0 and not os.path.exists(self.cs.caseTitle + ".restart.dat"), "This is a restart case, but the required restart file {0}.restart.dat is not found".format( self.cs.caseTitle ), "", self.NO_ACTION, ) self.addQuery( lambda: self.cs["deferredInterfacesCycle"] > self.cs["nCycles"], "The deferred interface activation cycle exceeds set cycle occurrence. " "Interfaces will not be activated in this run!", "", self.NO_ACTION, ) self.addQuery( lambda: (self.cs[CONF_ZONE_DEFINITIONS] and self.cs[CONF_ZONES_FILE]), f"Cannot specify both {CONF_ZONE_DEFINITIONS} and {CONF_ZONES_FILE}. Please remove one and resubmit.", "", self.NO_ACTION, ) def createQueryRevertBadPathToDefault(inspector, settingName, initialLambda=None): """ Return a query to revert a bad path to its default. Parameters ---------- inspector: Inspector the inspector who's settings are being queried settingName: str name of the setting to inspect initialLambda: None or callable function If ``None``, the callable argument for :py:meth:`addQuery` is does the setting's path exist. If more complicated callable arguments are needed, they can be passed in as the ``initialLambda`` setting. """ if initialLambda is None: initialLambda = lambda: ( not os.path.exists(pathTools.armiAbsPath(inspector.cs[settingName])) and inspector.cs.getSetting(settingName).offDefault ) # solution is to revert to default query = Query( initialLambda, "Setting {} points to a nonexistent location:\n{}".format(settingName, inspector.cs[settingName]), "Revert to default location?", inspector.cs.getSetting(settingName).revertToDefault, ) return query def validateVersion(versionThis: str, versionRequired: str) -> bool: """Helper function to allow users to verify that their version matches the settings file. Parameters ---------- versionThis: str The version of this ARMI, App, or Plugin. This MUST be in the form: 1.2.3. 
versionRequired: str The version to compare against, say in a Settings file. This must be in one of the forms: 1.2.3, 1.2, or 1. Returns ------- bool Does this version match the version in the Settings file/object? """ fullV = r"\d+\.\d+\.\d+" medV = r"\d+\.\d+" minV = r"\d+" if versionRequired == "uncontrolled": # This default flag means we don't want to check the version. return True elif re.search(fullV, versionThis) is None: raise ValueError("The input version ({0}) does not match the required format: {1}".format(versionThis, fullV)) elif re.search(fullV, versionRequired) is not None: return versionThis == versionRequired elif re.search(medV, versionRequired) is not None: return ".".join(versionThis.split(".")[:2]) == versionRequired elif re.search(minV, versionRequired) is not None: return versionThis.split(".")[0] == versionRequired else: raise ValueError("The required version is not a valid format: {}".format(versionRequired)) ================================================ FILE: armi/settings/tests/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/settings/tests/test_inspectors.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for settings validation system."""

import os
import unittest

from armi import context, operators, settings
from armi.settings import settingsValidation
from armi.settings.settingsValidation import createQueryRevertBadPathToDefault
from armi.utils import directoryChangers


class TestInspector(unittest.TestCase):
    def setUp(self):
        # run each test in its own temporary directory
        self.td = directoryChangers.TemporaryDirectoryChanger()
        self.td.__enter__()
        self.init_mode = context.CURRENT_MODE
        self.cs = settings.Settings()
        self.inspector = operators.getOperatorClassFromSettings(self.cs).inspector(self.cs)
        self.inspector.queries = []  # clear out the auto-generated ones
        # unique YAML path per test so tests cannot clobber each other's files
        self.filepathYaml = os.path.join(os.getcwd(), self._testMethodName + "_test_setting_io.yaml")

    def tearDown(self):
        # restore the global mode changed implicitly during setUp/tests
        context.Mode.setMode(self.init_mode)
        self.td.__exit__(None, None, None)

    def test_query(self):
        """A query's truthiness follows its condition, and its correction resolves it."""
        buh = {1: 2, 3: 4}

        def defdef(x, y, z):
            x[y] = z

        self.inspector.addQuery(
            lambda: buh[1] == 2,
            "beepbopboopbeep",
            "bonkbonk",
            lambda: defdef(buh, 1, 10),
        )
        query = self.inspector.queries[0]
        if query:
            query.correction()
        self.assertEqual(buh[1], 10)
        # after correction the condition no longer holds
        self.assertFalse(query)

        self.assertEqual(str(query), "<Query: beepbopboopbeep>")

    def test_overwriteSettingsCorrectiveQuery(self):
        """
        Tests the case where a corrective query is resolved.

        Checks to make sure the settings file is overwritten with the resolved setting.

        .. test:: Settings have validation and correction tools.
            :id: T_ARMI_SETTINGS_RULES0
            :tests: R_ARMI_SETTINGS_RULES
        """
        # load settings from test settings file
        self.cs["cycleLength"] = 300.0
        self.cs.writeToYamlFile(self.filepathYaml)
        self.cs.loadFromInputFile(self.filepathYaml)
        self.assertEqual(self.cs["cycleLength"], 300.0)

        # define corrective query
        def csChange(x, y, z):
            x[y] = z

        self.inspector.addQuery(
            lambda: self.inspector.cs["cycleLength"] == 300.0,
            "Changing `cycleLength` from 300.0 to 666",
            ":D",
            lambda: csChange(self.cs, "cycleLength", 666),
        )

        # redefine prompt function in order to circumvent need for user input
        def fakePrompt(*inputs):
            return True

        nominalPromptFunction = settingsValidation.prompt
        settingsValidation.prompt = fakePrompt

        try:
            # run inspector
            self.inspector.run()

            # check to see if file was overwritten correctly
            self.cs.loadFromInputFile(self.filepathYaml)
            self.assertEqual(self.cs["cycleLength"], 666)

            # check to see if original settings were saved in "_old.yaml" file
            oldFilePath = "{}_old.yaml".format(self.filepathYaml.split(".yaml")[0])
            self.assertTrue(os.path.exists(oldFilePath) and os.path.isfile(oldFilePath))
            self.csOriginal = settings.Settings()
            self.csOriginal.loadFromInputFile(oldFilePath)
            self.assertEqual(self.csOriginal["cycleLength"], 300.0)
        finally:
            # reset prompt function to nominal
            settingsValidation.prompt = nominalPromptFunction

    def test_changeOfCS(self):
        """Swapping the inspector's Settings changes what existing queries evaluate."""
        self.inspector.addQuery(
            lambda: self.inspector.cs["runType"] == "banane",
            "babababa",
            "",
            self.inspector.NO_ACTION,
        )
        query = self.inspector.queries[0]
        self.assertFalse(query)

        newCS = settings.Settings().duplicate()
        newSettings = {"runType": "banane"}
        newCS = newCS.modified(newSettings=newSettings)

        # the query's lambda closes over self.inspector.cs, so it now sees newCS
        self.inspector.cs = newCS
        self.assertTrue(query)
        self.assertIsNone(self.inspector.NO_ACTION())

    def test_nonCorrectiveQuery(self):
        """A NO_ACTION query that fires does not make Inspector.run raise."""
        self.inspector.addQuery(lambda: True, "babababa", "", self.inspector.NO_ACTION)
        self.inspector.run()

    def test_callableCorrectionCheck(self):
        """addQuery accepts callable corrections and rejects non-callables with ValueError."""
        successes = [lambda: True, lambda: False, self.inspector.NO_ACTION]
        failures = [1, "", None]

        for correction in successes:
            self.inspector.addQuery(lambda: True, "", "", correction)

        for correction in failures:
            with self.assertRaises(ValueError):
                self.inspector.addQuery(lambda: True, "", "", correction)

    def test_assignCS(self):
        """The inspector's Settings object exposes the expected setting keys."""
        keys = sorted(self.inspector.cs.keys())
        self.assertIn("nCycles", keys)

    def test_createQueryRevertBadPathToDefault(self):
        query = createQueryRevertBadPathToDefault(self.inspector, "nTasks")
        self.assertEqual(
            str(query),
            "<Query: Setting nTasks points to a nonexistent location:\n1>",
        )

    def test_correctCyclesToZeroBurnup(self):
        """_correctCyclesToZeroBurnup collapses cycle history to 1 cycle / 0 burn steps."""
        self.inspector._assignCS("nCycles", 666)
        self.inspector._assignCS("burnSteps", 666)

        self.assertEqual(self.inspector.cs["nCycles"], 666)
        self.assertEqual(self.inspector.cs["burnSteps"], 666)

        self.inspector._correctCyclesToZeroBurnup()

        self.assertEqual(self.inspector.cs["nCycles"], 1)
        self.assertEqual(self.inspector.cs["burnSteps"], 0)

    def test_checkForSimpleAndDetailedCycInps(self):
        """Detailed `cycles` input alone is fine; combined with simple inputs it is flagged."""
        self.inspector._assignCS(
            "cycles",
            [
                {"cumulative days": [1, 2, 3]},
                {"cycle length": 1},
                {"step days": [3, 3, 3]},
            ],
        )
        self.assertFalse(self.inspector._checkForBothSimpleAndDetailedCyclesInputs())

        self.inspector._assignCS(
            "cycles",
            [
                {"cumulative days": [1, 2, 3]},
                {"cycle length": 1},
                {"step days": [3, 3, 3]},
            ],
        )
        # also setting the simple `cycleLength` alongside `cycles` triggers the check
        self.inspector._assignCS("cycleLength", 666)
        self.assertTrue(self.inspector._checkForBothSimpleAndDetailedCyclesInputs())



================================================
FILE: armi/settings/tests/test_settings.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for new settings system with plugin import."""

import copy
import io
import logging
import os
import unittest

import voluptuous as vol
from ruamel.yaml import YAML

from armi import configure, getApp, getPluginManagerOrFail, plugins, settings
from armi.physics.fuelCycle import FuelHandlerPlugin
from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC
from armi.physics.neutronics.settings import CONF_NEUTRONICS_KERNEL
from armi.reactor.flags import Flags
from armi.settings import caseSettings, setting
from armi.settings.settingsValidation import Inspector, validateVersion
from armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs
from armi.utils import directoryChangers
from armi.utils.customExceptions import NonexistentSetting

THIS_DIR = os.path.dirname(__file__)


class DummySettingPlugin1(plugins.ArmiPlugin):
    # Test plugin that defines two brand-new settings.
    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define settings for the plugin."""
        return [
            setting.Setting(
                "extendableOption",
                default="DEFAULT",
                label="Neutronics Kernel",
                description="The neutronics / depletion solver for global flux solve.",
                enforcedOptions=True,
                options=["DEFAULT", "OTHER"],
            ),
            setting.Setting(
                "avocado",
                default=0,
                label="Avocados",
                description="Avocados are delicious.",
            ),
        ]


class DummySettingPlugin2(plugins.ArmiPlugin):
    # Test plugin that extends an existing setting with a new option and default.
    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define settings for the plugin."""
        return [
            setting.Option("PLUGIN", "extendableOption"),
            setting.Default("PLUGIN", "extendableOption"),
        ]


class PluginAddsOptions(plugins.ArmiPlugin):
    # Test plugin that adds options to the neutronics kernel setting.
    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define settings for the plugin."""
        return [
            setting.Option("MCNP", CONF_NEUTRONICS_KERNEL),
            setting.Option("MCNP_Slab", CONF_NEUTRONICS_KERNEL),
        ]


class TestSettings(unittest.TestCase):
    def setUp(self):
        self.cs = caseSettings.Settings()

    def test_updateEnvironmentSettingsFrom(self):
        """Environment settings are updated in bulk and coerced to their setting types."""
        envSettings = [
            "trace",
            "profile",
            "coverage",
            "branchVerbosity",
            "moduleVerbosity",
            "verbosity",
            "outputCacheLocation",
        ]
        self.assertEqual(self.cs.environmentSettings, envSettings)

        newEnv = {es: 9 for es in envSettings}
        newEnv["moduleVerbosity"] = {}
        self.cs.updateEnvironmentSettingsFrom(newEnv)
        # the int 9 is coerced to the string type of the verbosity setting
        self.assertEqual(self.cs["verbosity"], "9")

    def test_metaData(self):
        """Test we can get and set the important settings metadata.

        .. test:: Test getting and setting import settings metadata.
            :id: T_ARMI_SETTINGS_META
            :tests: R_ARMI_SETTINGS_META
        """
        # test get/set on caseTitle
        self.assertEqual(self.cs.caseTitle, "armi")
        testTitle = "test_metaData"
        self.cs.caseTitle = testTitle
        self.assertEqual(self.cs.caseTitle, testTitle)

        # test get/set on comment
        self.assertEqual(self.cs["comment"], "")
        testComment = "Comment: test_metaData"
        self.cs = self.cs.modified(newSettings={"comment": testComment})
        self.assertEqual(self.cs["comment"], testComment)

        # test get/set on version
        self.assertEqual(len(self.cs["versions"]), 0)
        self.cs = self.cs.modified(newSettings={"versions": {"something": 1.234}})
        d = self.cs["versions"]
        self.assertEqual(len(d), 1)
        self.assertEqual(d["something"], 1.234)


class TestAddingOptions(unittest.TestCase):
    def setUp(self):
        self.dc = directoryChangers.TemporaryDirectoryChanger()
        self.dc.__enter__()
        # load in the plugin with extra, added options
        self.pm = getPluginManagerOrFail()
        self.pm.register(PluginAddsOptions)

    def tearDown(self):
        # unregister to avoid leaking the test plugin into other tests
        self.dc.__exit__(None, None, None)
        self.pm.unregister(PluginAddsOptions)

    def test_addingOptions(self):
        # modify the default/text settings YAML file to include neutronicsKernel
        fin = os.path.join(TEST_ROOT, "armiRun.yaml")
        txt = open(fin, "r").read()
        txt = txt.replace("\n nCycles:", "\n neutronicsKernel: MCNP\n nCycles:")
        fout = "test_addingOptions.yaml"
        open(fout, "w").write(txt)

        # this settings file should load fine, and test some basics
        cs = settings.Settings(fout)
        self.assertEqual(cs["burnSteps"], 2)
        self.assertEqual(cs[CONF_NEUTRONICS_KERNEL], "MCNP")

    def test_illDefinedOptions(self):
        """Test an edge case where the Setting was ill-defined."""
        # enforcedOptions without any options list is the ill-defined case
        s = setting.Setting(
            "illDefinedOptions",
            default="DEFAULT",
            label="stuff",
            description="Whatever",
            enforcedOptions=True,
        )
        self.assertIsNone(s.options)

        with mockRunLogs.BufferLog() as mock:
            self.assertIs(mock.getStdout(), "")
            with self.assertRaises(AttributeError):
                s.addOptions([1, 2])
            self.assertIn("has no default options", mock.getStdout())

        with mockRunLogs.BufferLog() as mock:
            self.assertIs(mock.getStdout(), "")
            with self.assertRaises(AttributeError):
                s.addOption(3)
            self.assertIn("has no default options", mock.getStdout())


class TestSettings2(unittest.TestCase):
    def setUp(self):
        # We are going to be messing with the plugin manager, which is global ARMI
        # state, so we back it up and restore the original when we are done.
self._backupApp = copy.copy(getApp()) def tearDown(self): configure(self._backupApp, permissive=True) def test_schemaChecksType(self): newSettings = FuelHandlerPlugin.defineSettings() good_input = io.StringIO( """ assemblyRotationAlgorithm: buReducingAssemblyRotation shuffleLogic: {} """.format(__file__) ) bad_input = io.StringIO( """ assemblyRotationAlgorithm: buReducingAssemblyRotatoin """ ) yaml = YAML(typ="rt") inp = yaml.load(good_input) for inputSetting, inputVal in inp.items(): settin = [s for s in newSettings if s.name == inputSetting][0] settin.schema(inputVal) inp = yaml.load(bad_input) for inputSetting, inputVal in inp.items(): with self.assertRaises(vol.error.MultipleInvalid): settin = [s for s in newSettings if s.name == inputSetting][0] settin.schema(inputVal) def test_listsMutable(self): listSetting = setting.Setting("aList", default=[], label="Dummy list", description="whatever") listSetting.value = [1, 2, 3] self.assertEqual([1, 2, 3], listSetting.value) listSetting.value[-1] = 4 self.assertEqual([1, 2, 4], listSetting.value) def test_listCoercion(self): """Make sure list setting values get coerced right.""" listSetting = setting.Setting("aList", default=[0.2, 5], label="Dummy list", description="whatever") listSetting.value = [1, 2, 3] self.assertEqual(listSetting.value, [1.0, 2.0, 3.0]) self.assertTrue(isinstance(listSetting.value[0], float)) def test_typeDetection(self): """Ensure some of the type inference operations work.""" listSetting = setting.Setting( "aList", default=[], label="label", description="desc", schema=vol.Schema([float]), ) self.assertEqual(listSetting.containedType, float) listSetting = setting.Setting( "aList", default=[], label="label", description="desc", schema=vol.Schema([vol.Coerce(float)]), ) self.assertEqual(listSetting.containedType, float) def test_csWorks(self): """Ensure plugin settings become available and have defaults.""" cs = settings.Settings() self.assertEqual(cs["nTasks"], 1) def 
test_pluginValidatorsAreDiscovered(self): cs = caseSettings.Settings() cs = cs.modified( caseTitle="test_pluginValidatorsAreDiscovered", newSettings={ CONF_SHUFFLE_LOGIC: "nothere", "cycleLengths": [3, 4, 5, 6, 9], "powerFractions": [0.2, 0.2, 0.2, 0.2, 0.2], }, ) inspector = Inspector(cs) self.assertTrue(any(["Shuffling will not occur" in query.statement for query in inspector.queries])) def test_pluginSettings(self): """Test settings change depending on what plugins are registered. .. test:: Registering a plugin can change what settings exist. :id: T_ARMI_PLUGIN_SETTINGS :tests: R_ARMI_PLUGIN_SETTINGS """ pm = getPluginManagerOrFail() pm.register(DummySettingPlugin1) # We have a setting; this should be fine cs = caseSettings.Settings() self.assertEqual(cs["extendableOption"], "DEFAULT") self.assertEqual(cs["avocado"], 0) # We shouldn't have any settings from the other plugin, so this should be an error. with self.assertRaises(vol.error.MultipleInvalid): newSettings = {"extendableOption": "PLUGIN"} cs = cs.modified(newSettings=newSettings) pm.register(DummySettingPlugin2) cs = caseSettings.Settings() self.assertEqual(cs["extendableOption"], "PLUGIN") # Now we should have the option from plugin 2; make sure that works cs = cs.modified(newSettings=newSettings) cs["extendableOption"] = "PLUGIN" self.assertIn("extendableOption", cs.keys()) pm.unregister(DummySettingPlugin2) pm.unregister(DummySettingPlugin1) # Now try the same, but adding the plugins in a different order. This is to make # sure that it doesn't matter if the Setting or its Options come first pm.register(DummySettingPlugin2) pm.register(DummySettingPlugin1) cs = caseSettings.Settings() self.assertEqual(cs["extendableOption"], "PLUGIN") self.assertEqual(cs["avocado"], 0) def test_default(self): """ Make sure default updating mechanism works. .. test:: The setting default is mandatory. 
:id: T_ARMI_SETTINGS_DEFAULTS :tests: R_ARMI_SETTINGS_DEFAULTS """ a = setting.Setting("testsetting", 0, description="whatever") newDefault = setting.Default(5, "testsetting") a.changeDefault(newDefault) self.assertEqual(a.value, 5) def test_getSettingsSetByUser(self): cs = caseSettings.Settings() settingsList = cs.getSettingsSetByUser(ARMI_RUN_PATH) # This test is dependent on the current setup of armiRun.yaml, which includes # some default settings values for sett in ["availabilityFactor", "db"]: self.assertIn(sett, settingsList) self.assertNotIn("nTasks", settingsList) def test_setModuleVerbosities(self): # init settings and use them to set module-level logging levels cs = caseSettings.Settings() newSettings = {"moduleVerbosity": {"test_setModuleVerbosities": "debug"}} cs = cs.modified(newSettings=newSettings) # set the logger once, and check it is was set cs.setModuleVerbosities() logger = logging.getLogger("test_setModuleVerbosities") self.assertEqual(logger.level, 10) # try to set the logger again, without forcing it newSettings = {"moduleVerbosity": {"test_setModuleVerbosities": "error"}} cs = cs.modified(newSettings=newSettings) cs.setModuleVerbosities() self.assertEqual(logger.level, 10) # try to set the logger again, with force=True cs.setModuleVerbosities(force=True) self.assertEqual(logger.level, 40) def test_getFailures(self): """Make sure the correct error is thrown when getting a nonexistent setting.""" cs = caseSettings.Settings() with self.assertRaises(NonexistentSetting): cs.getSetting("missingFake") with self.assertRaises(NonexistentSetting): _ = cs["missingFake"] def test_settingIsOkayToGrab(self): cs = caseSettings.Settings() newSettings = {"cycles": [{"cumulative days": [1]}]} cs = cs.modified(newSettings=newSettings) with self.assertRaises(ValueError): _ = cs["cycleLength"] def test_modified(self): """Prove that using the modified() method does not mutate the original object.""" # init settings cs = caseSettings.Settings() # prove this 
setting doesn't exist with self.assertRaises(NonexistentSetting): cs.getSetting("extendableOption") # ensure that defaults in getSetting works val = cs.getSetting("extendableOption", 789) self.assertEqual(val, 789) # prove the new settings object has the new setting cs2 = cs.modified(newSettings={"extendableOption": "PLUGIN"}) self.assertEqual(cs2["extendableOption"], "PLUGIN") # prove modified() didn't alter the original object with self.assertRaises(NonexistentSetting): cs.getSetting("extendableOption") # prove that successive applications of "modified" don't fail cs3 = cs2.modified(newSettings={"numberofGenericParams": 7}) _cs4 = cs3.modified(newSettings={"somethingElse": 123}) def test_copySetting(self): """Ensure that when we copy a Setting() object, the result is sound. Notes ----- In particular, self.schema and self._customSchema on a Setting object are removed by Setting.__getstate__, and that has been a problem in the past. """ # get a baseline: show how the Setting object looks to start s1 = setting.Setting("testCopy", 765, description="whatever") self.assertEqual(s1.name, "testCopy") self.assertEqual(s1._value, 765) self.assertTrue(hasattr(s1, "schema")) self.assertTrue(hasattr(s1, "_customSchema")) # show that copy(Setting) is working correctly s2 = copy.copy(s1) self.assertEqual(s2._value, 765) self.assertEqual(s2.name, "testCopy") self.assertTrue(hasattr(s2, "schema")) self.assertTrue(hasattr(s2, "_customSchema")) def test_copySettingNotDefault(self): """Ensure that when we copy a Setting() object, the result is sound when the Setting value is set to a non-default value. 
""" # get a baseline: show how the Setting object looks to start s1 = setting.Setting("testCopy", 765, description="whatever") s1.value = 999 self.assertEqual(s1.name, "testCopy") self.assertEqual(s1._value, 999) self.assertTrue(hasattr(s1, "schema")) self.assertTrue(hasattr(s1, "_customSchema")) # show that copy(Setting) is working correctly s2 = copy.copy(s1) self.assertEqual(s2._value, 999) self.assertEqual(s2.name, "testCopy") self.assertTrue(hasattr(s2, "schema")) self.assertTrue(hasattr(s2, "_customSchema")) def test_empty(self): cs = caseSettings.Settings() cs = cs.modified(newSettings={"buGroups": []}) self.assertEqual(cs["buGroups"], []) class TestSettingsUtils(unittest.TestCase): """Tests for utility functions.""" def setUp(self): self.dc = directoryChangers.TemporaryDirectoryChanger() self.dc.__enter__() # Create a little case suite on the fly. Whipping it up from defaults should be # more evergreen than committing settings files as a test resource cs = caseSettings.Settings() cs.writeToYamlFile("settings1.yaml") cs.writeToYamlFile("settings2.yaml") with open("notSettings.yaml", "w") as f: f.write("some: other\nyaml: file\n") os.mkdir("subdir") cs.writeToYamlFile("subdir/settings3.yaml") cs.writeToYamlFile("subdir/skipSettings.yaml") def tearDown(self): self.dc.__exit__(None, None, None) def test_recursiveScan(self): loadedSettings = settings.recursivelyLoadSettingsFiles(".", ["*.yaml"], ignorePatterns=["skip*"]) names = {cs.caseTitle for cs in loadedSettings} self.assertIn("settings1", names) self.assertIn("settings2", names) self.assertIn("settings3", names) self.assertNotIn("skipSettings", names) loadedSettings = settings.recursivelyLoadSettingsFiles( ".", ["*.yaml"], recursive=False, ignorePatterns=["skip*"] ) names = {cs.caseTitle for cs in loadedSettings} self.assertIn("settings1", names) self.assertIn("settings2", names) self.assertNotIn("settings3", names) def test_prompt(self): selection = settings.promptForSettingsFile(1) 
self.assertEqual(selection, "settings1.yaml") class TestFlagListSetting(unittest.TestCase): def test_flagListSetting(self): """Test that a list of strings can be converted to a list of flags and back.""" flagsAsStringList = ["DUCT", "FUEL", "CLAD"] flagsAsFlagList = [Flags.DUCT, Flags.FUEL, Flags.CLAD] fs = setting.FlagListSetting(name="testFlagSetting", default=[], description="whatever") # Set the value as a list of strings first fs.value = flagsAsStringList self.assertEqual(fs.value, flagsAsFlagList) self.assertEqual(fs.dump(), flagsAsStringList) # Set the value as a list of flags fs.value = flagsAsFlagList self.assertEqual(fs.value, flagsAsFlagList) self.assertEqual(fs.dump(), flagsAsStringList) def test_invalidFlagListTypeError(self): """Test raising a TypeError when a list is not provided.""" fs = setting.FlagListSetting(name="testFlagSetting", default=[], description="whatever") with self.assertRaises(TypeError): fs.value = "DUCT" class TestSettingsValidationUtils(unittest.TestCase): def test_validateVersion(self): # controlled version, and true self.assertTrue(validateVersion("1.22.3", "1.22.3")) self.assertTrue(validateVersion("1.3.102", "1.3.102")) self.assertTrue(validateVersion("1.2.3", "1.2")) self.assertTrue(validateVersion("1.2.37", "1.2")) self.assertTrue(validateVersion("13.7.3", "13.7")) self.assertTrue(validateVersion("1.22.310", "1")) # uncontrolled version is always true self.assertTrue(validateVersion("4.2.0", "uncontrolled")) # controlled versions and false self.assertFalse(validateVersion("11.2.3", "11.2.4")) self.assertFalse(validateVersion("1.2.3", "3.2.1")) self.assertFalse(validateVersion("11.2.3", "2.2")) # examples of various errors with self.assertRaises(ValueError): validateVersion("1.2.a", "1.20.3") with self.assertRaises(ValueError): validateVersion("nope", "7") with self.assertRaises(ValueError): validateVersion("1.2.3", "zzz") ================================================ FILE: armi/settings/tests/test_settingsIO.py 
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the settingsIO."""

import datetime
import io
import os
import unittest

from armi import context, settings
from armi.cli import entryPoint
from armi.settings import setting, settingsIO
from armi.tests import TEST_ROOT
from armi.utils import directoryChangers
from armi.utils.customExceptions import (
    InvalidSettingsFileError,
    NonexistentSetting,
    SettingException,
)


class SettingsFailureTests(unittest.TestCase):
    """Tests that invalid settings input fails loudly."""

    def test_settingsObjSetting(self):
        # assigning to a setting name that was never defined must raise
        sets = settings.Settings()
        with self.assertRaises(NonexistentSetting):
            sets["idontexist"] = "this test should fail because no setting named idontexist should exist."

    def test_loadFromYamlFailsOnBadNames(self):
        ss = settings.Settings()
        # a non-string path is a TypeError; a missing file is an IOError
        with self.assertRaises(TypeError):
            ss.loadFromInputFile(None)
        with self.assertRaises(IOError):
            ss.loadFromInputFile("this-settings-file-does-not-exist.yaml")

    def test_invalidFile(self):
        # malformed settings YAML content should be rejected by the reader
        with self.assertRaises(InvalidSettingsFileError):
            cs = settings.caseSettings.Settings()
            reader = settingsIO.SettingsReader(cs)
            reader.readFromStream(io.StringIO("useless:\n should_fail"))


class SettingsReaderTests(unittest.TestCase):
    """Tests of the SettingsReader helper."""

    def setUp(self):
        self.cs = settings.caseSettings.Settings()

    def test_basicSettingsReader(self):
        reader = settingsIO.SettingsReader(self.cs)
        # the reader exposes default setting values by subscript
        self.assertEqual(reader["nTasks"], 1)
        self.assertEqual(reader["nCycles"], 1)
        self.assertFalse(getattr(reader, "filelessBP"))
        self.assertEqual(getattr(reader, "path"), "")

    def test_readFromFile(self):
        """Read settings from a (human-readable) YAML file.

        .. test:: Settings can be input from a human-readable text file.
            :id: T_ARMI_SETTINGS_IO_TXT0
            :tests: R_ARMI_SETTINGS_IO_TXT
        """
        with directoryChangers.TemporaryDirectoryChanger():
            inPath = os.path.join(TEST_ROOT, "armiRun.yaml")
            outPath = "test_readFromFile.yaml"
            txt = open(inPath, "r").read()
            # duplicate the branchVerbosity entry to corrupt the settings file
            verb = "branchVerbosity:"
            txt0, txt1 = txt.split(verb)
            newTxt = f"{txt0}{verb} fake\n {verb}{txt1}"
            open(outPath, "w").write(newTxt)
            with self.assertRaises(InvalidSettingsFileError):
                settings.caseSettings.Settings(outPath)


class SettingsRenameTests(unittest.TestCase):
    """Tests for resolving old (renamed) setting names to current ones."""

    testSettings = [
        setting.Setting(
            "testSetting1",
            default=None,
            # second old name carries today's date, i.e. it is already expired
            oldNames=[("oSetting1", None), ("osetting1", datetime.date.today())],
            description="Just a unit test setting.",
        ),
        setting.Setting(
            "testSetting2",
            default=None,
            oldNames=[("oSetting2", None)],
            description="Just a unit test setting.",
        ),
        setting.Setting(
            "testSetting3",
            default=None,
            description="Just a unit test setting.",
        ),
    ]

    def test_rename(self):
        renamer = settingsIO.SettingRenamer({setting.name: setting for setting in self.testSettings})
        self.assertEqual(renamer.renameSetting("testSetting1"), ("testSetting1", False))
        self.assertEqual(renamer.renameSetting("oSetting1"), ("testSetting1", True))
        # this one is expired
        self.assertEqual(renamer.renameSetting("osetting1"), ("osetting1", False))
        self.assertEqual(renamer.renameSetting("oSetting2"), ("testSetting2", True))
        self.assertEqual(renamer.renameSetting("testSetting2"), ("testSetting2", False))
        self.assertEqual(renamer.renameSetting("testSetting3"), ("testSetting3", False))
        # No rename; let it through
        self.assertEqual(renamer.renameSetting("boo!"), ("boo!", False))

    def test_collidingRenames(self):
        # two settings claiming the same old name ("oSetting1") must be an error
        settings = {
            setting.name: setting
            for setting in self.testSettings
            + [
                setting.Setting(
                    "someOtherSetting",
                    default=None,
                    oldNames=[("oSetting1", None)],
                    description="Just a unit test setting.",
                )
            ]
        }
        with self.assertRaises(SettingException):
            _ = settingsIO.SettingRenamer(settings)


class SettingsWriterTests(unittest.TestCase):
    """Tests for writing settings files in the various output styles."""

    def setUp(self):
        self.td = directoryChangers.TemporaryDirectoryChanger()
        self.td.__enter__()
        # remember the global mode so tearDown can restore it
        self.init_mode = context.CURRENT_MODE
        self.filepathYaml = os.path.join(os.getcwd(), self._testMethodName + "test_setting_io.yaml")
        self.cs = settings.Settings()
        # set one non-default value so the short/medium/full styles differ
        self.cs = self.cs.modified(newSettings={"nCycles": 55})

    def tearDown(self):
        context.Mode.setMode(self.init_mode)
        self.td.__exit__(None, None, None)

    def test_writeShort(self):
        """Setting output as a sparse file."""
        self.cs.writeToYamlFile(self.filepathYaml, style="short")
        self.cs.loadFromInputFile(self.filepathYaml)
        txt = open(self.filepathYaml, "r").read()
        self.assertIn("nCycles: 55", txt)
        # default-valued settings are omitted in the short style
        self.assertNotIn("nTasks", txt)

    def test_writeMedium(self):
        """Setting output as a sparse file that only includes defaults if they are user-specified."""
        with open(self.filepathYaml, "w") as stream:
            # Specify a setting that is also a default
            self.cs.writeToYamlStream(stream, "medium", ["nTasks"])
        txt = open(self.filepathYaml, "r").read()
        self.assertIn("nCycles: 55", txt)
        self.assertIn("nTasks: 1", txt)

    def test_writeFull(self):
        """Setting output as a full, all defaults included file.

        .. test:: Settings can be output to a human-readable text file.
            :id: T_ARMI_SETTINGS_IO_TXT1
            :tests: R_ARMI_SETTINGS_IO_TXT
        """
        self.cs.writeToYamlFile(self.filepathYaml, style="full")
        txt = open(self.filepathYaml, "r").read()
        self.assertIn("nCycles: 55", txt)
        # check a default setting
        self.assertIn("nTasks: 1", txt)

    def test_writeYaml(self):
        # round trip: write, re-load, and confirm the modified value survives
        self.cs.writeToYamlFile(self.filepathYaml)
        self.cs.loadFromInputFile(self.filepathYaml)
        self.assertEqual(self.cs["nCycles"], 55)

    def test_errorSettingsWriter(self):
        # an unknown style name must be rejected
        with self.assertRaises(ValueError):
            _ = settingsIO.SettingsWriter(self.cs, "wrong")


# Minimal EntryPoint subclass for exercising CLI settings options
class MockEntryPoint(entryPoint.EntryPoint):
    name = "dummy"


class SettingArgsTests(unittest.TestCase):
    """Tests for settings driven from command-line arguments."""

    def setUp(self):
        self.cs = None

    def test_commandLineSetting(self):
        ep = MockEntryPoint()
        self.cs = cs = ep.cs
        self.assertEqual(cs["nCycles"], 1)
        # expose nCycles as a CLI flag, then parse a new value for it
        ep.createOptionFromSetting("nCycles")
        ep.parse_args(["--nCycles", "5"])
        self.assertEqual(cs["nCycles"], 5)

    def test_cannotLoadSettingsAfterParsingCLI(self):
        self.test_commandLineSetting()
        # once CLI args have been applied, re-loading from a file is forbidden
        with self.assertRaises(RuntimeError):
            self.cs.loadFromInputFile("somefile.yaml")


================================================
FILE: armi/testing/__init__.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Importable testing utilities.

This is a very limited set of ARMI testing tools, meant to be importable as part of the ARMI API.
The goal is to provide a small set of high quality tools to help downstream ARMI developers write
tests.

Notes
-----
This will not be a catch-all for random unit test functions. Be very sparing here.
"""

import os
import pickle

from armi import runLog
from armi.reactor import geometry, grids, reactors

# canonical locations of the shared test inputs
TEST_ROOT = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "tests"))
TESTING_ROOT = os.path.dirname(os.path.abspath(__file__))
ARMI_RUN_PATH = os.path.join(TEST_ROOT, "armiRun.yaml")
COMPXS_PATH = os.path.join(TESTING_ROOT, "resources", "COMPXS.ascii")
ISOAA_PATH = os.path.join(TEST_ROOT, "ISOAA")
_TEST_REACTORS = {}  # dictionary of pickled string of test reactors (for fast caching)


def loadTestReactor(inputFilePath=TEST_ROOT, customSettings=None, inputFileName="armiRun.yaml", useCache=True):
    """
    Loads a test reactor. Can be used in other test modules.

    Parameters
    ----------
    inputFilePath : str, default=TEST_ROOT
        Path to the directory of the input file.
    customSettings : dict with str keys and values of any type, default=None
        For each key in customSettings, the cs which is loaded from the armiRun.yaml will be
        overwritten to the value given in customSettings for that key.
    inputFileName : str, default="armiRun.yaml"
        Name of the input file to run.
    useCache : bool, default=True
        Look for a copy of this Reactor in the cache, if not in the cache, put it there.
        (Set to False when you are sure there will only be one test using this test reactor.)

    Notes
    -----
    If the armiRun.yaml test reactor had 3 rings instead of 9, most unit tests that use it would go
    ~4 times faster. The problem is it would break a LOT of downstream tests that import this
    method. It is still worth it though.

    Returns
    -------
    o : Operator
    r : Reactor
    """
    # local import to avoid a circular import at module load time
    from armi import operators, settings

    global _TEST_REACTORS
    fName = os.path.abspath(os.path.join(inputFilePath, inputFileName))
    customSettings = customSettings or {}
    # cache key combines the input path with the settings overrides
    reactorHash = hash(fName + str(customSettings))
    if useCache and reactorHash in _TEST_REACTORS:
        # return test reactor from cache
        o, r = pickle.loads(_TEST_REACTORS[reactorHash])
        o.reattach(r, o.cs)
        return o, r

    # Overwrite settings if desired
    cs = settings.Settings(fName=fName)
    if customSettings:
        cs = cs.modified(newSettings=customSettings)

    # keep test output quiet unless the caller explicitly set a verbosity
    if "verbosity" not in customSettings:
        runLog.setVerbosity("error")

    o = operators.factory(cs)
    r = reactors.loadFromCs(cs)
    o.initializeInterfaces(r)
    o.r.core.regenAssemblyLists()

    if useCache:
        # cache it for fast load for other future tests
        # protocol=2 allows for classes with __slots__ but not __getstate__ to be pickled
        _TEST_REACTORS[reactorHash] = pickle.dumps((o, o.r), protocol=2)

    return o, o.r


def reduceTestReactorRings(r, cs, maxNumRings):
    """Helper method for the test reactor above.

    The goal is to reduce the size of the reactor for tests that don't need such a large reactor,
    and would run much faster with a smaller one.
    """
    maxRings = r.core.getNumRings()
    if maxNumRings > maxRings:
        # nothing to remove; requested size is at least as big as the reactor
        runLog.info(f"The test reactor has a maximum of {maxRings} rings.")
        return
    elif maxNumRings <= 1:
        raise ValueError("The test reactor must have multiple rings.")

    # reducing the size of the test reactor, by removing the outer rings
    for ring in range(maxRings, maxNumRings, -1):
        r.core.removeAssembliesInRing(ring, cs)


def getEmptyHexReactor():
    """Make an empty hex reactor for use in tests."""
    # local import to avoid a circular import at module load time
    from armi.reactor import blueprints

    bp = blueprints.Blueprints()
    reactor = reactors.Reactor("Reactor", bp)
    reactor.add(reactors.Core("Core"))
    reactor.core.spatialGrid = grids.HexGrid.fromPitch(1.0)
    reactor.core.spatialGrid.symmetry = geometry.SymmetryType(
        geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC
    )
    reactor.core.spatialGrid.geomType = geometry.HEX
    reactor.core.spatialGrid.armiObject = reactor.core
    return reactor


def getEmptyCartesianReactor(pitch=(10.0, 16.0), throughCenterAssembly=True):
    """Return an empty Cartesian reactor for use in tests."""
    # local import to avoid a circular import at module load time
    from armi.reactor import blueprints

    bp = blueprints.Blueprints()
    reactor = reactors.Reactor("Reactor", bp)
    reactor.add(reactors.Core("Core"))
    reactor.core.spatialGrid = grids.CartesianGrid.fromRectangle(*pitch)
    reactor.core.spatialGrid.symmetry = geometry.SymmetryType(
        geometry.DomainType.QUARTER_CORE,
        geometry.BoundaryType.REFLECTIVE,
        throughCenterAssembly=throughCenterAssembly,
    )
    reactor.core.spatialGrid.geomType = geometry.CARTESIAN
    reactor.core.spatialGrid.armiObject = reactor.core
    return reactor


================================================
FILE: armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml
================================================
# The comments in this file are important, as they are used
# to bring in sections of this file into the tutorial in the docs.
# start-block-clad blocks: fuel: &block_fuel clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.6962 od: 0.808 mult: 271 # end-block-clad wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.8888 id: 0.0 od: 0.0808 mult: 271 # end-block-wire fuel: shape: Circle material: UZr Tinput: 25.0 Thot: 500.0 id: 0.0 mult: 271 od: 0.6029 # end-block-fuel bond: shape: Circle material: Sodium Tinput: 447.0 Thot: 447.0 id: fuel.od mult: fuel.mult od: clad.id # end-block-bond duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 # end-block-duct intercoolant: shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: duct.op op: 16.142 mult: 1.0 # end-block-intercoolant coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-coolant radial shield: &block_shield control: shape: Circle material: B4C Tinput: 597.0 Thot: 597.0 id: 0.0 od: 0.6962 mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-radialshield reflector: &block_reflector reflector: shape: Circle material: HT9 Tinput: 450.0 Thot: 450.0 id: 0.0 od: 0.6962 mult: 271 wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.777 id: 0.0 od: 0.0808 mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-reflector control: &block_control duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: 
duct.op op: 16.142 mult: 1.0 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-control plenum: &block_plenum clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.6962 od: 0.808 mult: 271 wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.88888 id: 0.0 od: 0.0808 mult: 271 gap: shape: Circle material: Void Tinput: 450.0 Thot: 450.0 id: 0.0 od: clad.id mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-plenum assemblies: heights: &heights - 15.0 - 20.32 - 20.32 - 20.32 - 20.32 - 20.32 - 191.14 axial mesh points: &mesh - 1 - 2 - 2 - 2 - 2 - 2 - 8 # end-assemblies-common inner fuel: specifier: IC blocks: &fuel_blocks - *block_reflector - *block_fuel - *block_fuel - *block_fuel - *block_fuel - *block_fuel - *block_plenum height: *heights axial mesh points: *mesh material modifications: U235_wt_frac: - '' - 0.127 - 0.127 - 0.127 - 0.127 - 0.127 - '' xs types: &IC_xs - A - A - A - A - A - A - A # end-assemblies-ic middle core fuel: specifier: MC blocks: *fuel_blocks height: *heights axial mesh points: *mesh material modifications: U235_wt_frac: - '' - 0.153 - 0.153 - 0.153 - 0.153 - 0.153 - '' xs types: - B - B - B - B - B - B - B # end-assemblies-mc outer core fuel: specifier: OC blocks: *fuel_blocks height: *heights axial mesh points: *mesh material modifications: U235_wt_frac: - '' - 0.180 - 0.180 - 0.180 - 0.180 - 0.180 - '' xs types: - C - C - C - C - C - C - C # end-assemblies-oc radial reflector: specifier: RR blocks: [*block_reflector] height: [307.74] axial mesh points: [1] xs types: [A] # end-assemblies-rr radial shield: specifier: SH blocks: [*block_shield] height: [307.74] axial mesh points: [1] xs types: [A] # end-assemblies-sh control: 
specifier: PC blocks: [*block_control] height: [307.74] axial mesh points: [1] xs types: [A] ultimate shutdown: specifier: US blocks: [*block_control] height: [307.74] axial mesh points: [1] xs types: [A] # end-assemblies-section systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 grids: core: !include anl-afci-177-coreMap.yaml # end-systems-section ================================================ FILE: armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml ================================================ geom: hex symmetry: third periodic lattice map: | - SH SH SH - SH SH SH SH SH RR RR RR SH RR RR RR RR SH RR RR RR RR RR SH RR OC OC RR RR SH OC OC OC RR RR SH OC OC OC OC RR RR OC MC OC OC RR SH MC MC PC OC RR SH MC MC MC OC OC RR MC MC MC OC RR SH PC MC MC OC RR SH MC MC MC MC OC RR IC MC MC OC RR SH IC US MC OC RR IC IC MC OC RR SH IC MC MC OC RR IC IC MC PC RR SH ================================================ FILE: armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from armi.physics.fuelCycle.fuelHandlers import FuelHandler from armi.utils import units class SampleShuffler(FuelHandler): def chooseSwaps(self, shuffleParameters): cycleSeconds = self.r.p.cycleLength * self.r.p.availabilityFactor * units.SECONDS_PER_DAY for a in self.r.core: peakFlux = a.getMaxParam("fastFlux") if peakFlux * cycleSeconds > 4.0e23: newAssem = self.r.core.createAssemblyOfType(a.getType()) self.dischargeSwap(newAssem, a) def getFactorList(self, cycle, cs=None): """Parameters here can be used to adjust shuffling philosophy vs. cycle.""" return {}, [] ================================================ FILE: armi/testing/reactors/anl-afci-177/anl-afci-177.yaml ================================================ # This file is part of the walthrough_inputs tutorial in ARMI, which # uses .. literalinclude to bring in sections of this file. Thus, # the comments and order are important. These will get wiped out # if you load and re-write a settings file via the ARMI gui, unfortunately. 
# begin-settings settings: availabilityFactor: 0.9 power: 1000000000.0 cycleLength: 411.11 # end-section-1 loadingFile: anl-afci-177-blueprints.yaml shuffleLogic: anl-afci-177-fuelManagement.py fuelHandlerName: SampleShuffler # end-section-2 nCycles: 10 burnSteps: 2 # end-section-3 buGroups: - 100 comment: ANL-AFCI-177 CR 1.0 metal core but with HALEU instead of TRU genXS: Neutron nTasks: 1 versions: armi: uncontrolled ================================================ FILE: armi/testing/reactors/c5g7/c5g7-blueprints.yaml ================================================ # Simple description of the C5G7 benchmark problem # General description from: https://www.oecd-nea.org/upload/docs/application/pdf/2019-12/nsc-doc2003-16.pdf # Composition/dimensions description from: https://www.oecd-nea.org/upload/docs/application/pdf/2020-01/nsc-doc96-02-rev2.pdf # start-custom-isotopics custom isotopics: # NEA/NSC/DOC(96)2 Table 2 - Isotopic Distributions for each medium mox low: # 4.3% input format: number densities U235: 5.00E-5 U238: 2.21E-2 PU238: 1.50E-5 PU239: 5.80E-4 PU240: 2.40E-4 PU241: 9.80E-5 PU242: 5.40E-5 AM241: 1.30E-5 O: 4.63E-2 mox medium: # 7.0% input format: number densities U235: 5.00E-5 U238: 2.21E-2 PU238: 2.40E-5 PU239: 9.30E-4 PU240: 3.90E-4 PU241: 1.52E-4 PU242: 8.40E-5 AM241: 2.00E-5 O: 4.63E-2 mox high: # 8.7% input format: number densities U235: 5.00E-5 U238: 2.21E-2 PU238: 3.00E-5 PU239: 1.16E-3 PU240: 4.90E-4 PU241: 1.90E-4 PU242: 1.05E-4 AM241: 2.50E-5 O: 4.63E-2 UO2: input format: number densities U235: 8.65e-4 U238: 2.225E-2 O: 4.622E-2 moderator: input format: number densities H: 6.70e-2 O: 3.35E-2 B: 2.78E-5 Zr clad: input format: number densities ZR: 4.30E-2 Al clad: input format: number densities AL27: 6.00e-2 fission chamber: # NEA/NSC/DOC(96)2 Documents: # "Central guide tube contains: moderator (as defined in Table 2) # and 1.0E-8 at/(b cm) of U-235" input format: number densities H: 6.70e-2 O: 3.35E-2 B: 2.78E-5 U235: 1.0e-8 # 
end-custom-isotopics blocks: uo2: &block_uo2 # NEA/NSC/DOC(96)2 Table 1 - Cell geometries grid name: UO2 grid fuel: shape: Circle material: UO2 isotopics: UO2 Tinput: 20.0 Thot: 20.0 od: .8190 latticeIDs: [U] gap 1: &fuel_gap_1 shape: Circle material: Void Tinput: 20.0 Thot: 20.0 id: fuel.od od: zirconium clad.id latticeIDs: [U] zirconium clad: &clad_Zr shape: Circle material: Custom isotopics: Zr clad Tinput: 20.0 Thot: 20.0 id: .8360 od: .9500 latticeIDs: [U] gap 2: &fuel_gap_2 shape: Circle material: Void Tinput: 20.0 Thot: 20.0 id: zirconium clad.od od: aluminum clad.id latticeIDs: [U] aluminum clad: &clad_Al # NEA/NSC/DOC(96)2 Documents: # "This clad is used to simulate hot conditions at room temperature # (decrease the moderation ratio)" shape: Circle material: Custom isotopics: Al clad Tinput: 20.0 Thot: 20.0 id: .9700 od: 1.0800 latticeIDs: [U] moderator: &moderator shape: DerivedShape material: SaturatedWater isotopics: moderator Tinput: 450.0 Thot: 450.0 # Moderator within the guide tube inner moderator guide tube: &guide_tube_moderator shape: Circle material: SaturatedWater isotopics: moderator Tinput: 20.0 Thot: 20.0 od: guide tube.id latticeIDs: [GT] guide tube: &guide_tube shape: Circle material: Custom isotopics: Al clad Tinput: 20.0 Thot: 20.0 id: .6800 od: 1.0800 latticeIDs: [GT] fission chamber guide tube: &fission_chamber_guide_tube <<: *guide_tube # Avoid giving this the same flag as "guide tube" by implementing # a custom flag. This is done to distinguish the "fission chamber guide tube" # from the regular "guide tube". This demonstrates the use of setting # flags directly rather than relying on them to be implied based on the # name. 
flags: fission chamber structure latticeIDs: [FC] fission chamber: &fission_chamber shape: Circle material: Custom isotopics: fission chamber Tinput: 20.0 Thot: 20.0 od: .8190 # No documentation fission chamber dims of composition latticeIDs: [FC] inner moderator FC: &fission_chamber_mod # No documentation of this either, but assuming fission chamber # has same od as fuel, so there needs to be something in the gap. shape: Circle material: Void Tinput: 20.0 Thot: 20.0 id: fission chamber.od od: guide tube.id latticeIDs: [FC] pitch: &pitch # dummy component for assembly sizing shape: Square material: Void Tinput: 20.0 Thot: 20.0 widthInner: 21.42 widthOuter: 21.42 mult: 1.0 # end-block-uo2 mox: &block_mox grid name: MOX grid mox low fuel: shape: Circle material: UO2 isotopics: mox low Tinput: 20.0 Thot: 20.0 od: .8190 latticeIDs: [ML] mox medium fuel: shape: Circle material: UO2 isotopics: mox medium Tinput: 20.0 Thot: 20.0 od: .8190 latticeIDs: [MM] mox high fuel: shape: Circle material: UO2 isotopics: mox high Tinput: 20.0 Thot: 20.0 od: .8190 latticeIDs: [MH] void 1: <<: *fuel_gap_1 id: mox low fuel.od latticeIDs: [ML, MM, MH] zirconium clad: <<: *clad_Zr latticeIDs: [ML, MM, MH] void 2: <<: *fuel_gap_2 latticeIDs: [ML, MM, MH] aluminum clad: # See Aluminum Clad note above about why there are 2 clads. 
<<: *clad_Al latticeIDs: [ML, MM, MH] moderator: *moderator inner moderator GT: *guide_tube_moderator guide tube: *guide_tube fission chamber guide tube: *fission_chamber_guide_tube fission chamber: *fission_chamber moderator fission chamber: *fission_chamber_mod pitch: *pitch # end-block-mox moderator: &block_mod moderator: shape: Square material: SaturatedWater isotopics: moderator Tinput: 20.0 Thot: 20.0 widthOuter: 21.42 mult: 1.0 # end-block-mod assemblies: heights: &heights - 64.26 - 64.26 - 64.26 - 21.42 axial mesh points: &mesh - 3 - 3 - 3 - 2 UO2: flags: fuel specifier: UO2 blocks: - *block_uo2 - *block_uo2 - *block_uo2 - *block_mod height: *heights axial mesh points: *mesh xs types: [A, A, A, A] mox: flags: fuel specifier: MOX blocks: - *block_mox - *block_mox - *block_mox - *block_mod height: *heights axial mesh points: *mesh xs types: [A, A, A, A] mod: specifier: MOD blocks: - *block_mod - *block_mod - *block_mod - *block_mod height: *heights axial mesh points: *mesh xs types: [A, A, A, A] # end-assemblies systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 # end-systems grids: core: symmetry: quarter reflective geom: cartesian lattice pitch: x: 21.42 y: 21.42 lattice map: | MOD MOD MOD MOX UO2 MOD UO2 MOX MOD # end-grid-core UO2 grid: symmetry: full geom: cartesian lattice pitch: x: 1.26 y: 1.26 lattice map: | U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U GT U U GT U U GT U U U U U U U U GT U U U U U U U U U GT U U U U U U U U U U U U U U U U U U U U U U GT U U GT U U GT U U GT U U GT U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U GT U U GT U U FC U U GT U U GT U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U GT U U GT U U GT U U GT U U GT U U U U U U U U U U U U U U U U U U U U U U GT U U U U U U U U U GT U U U U U U U U GT U U GT U U GT U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U U # end-grid-UO2 MOX grid: symmetry: full 
geom: cartesian lattice pitch: x: 1.26 y: 1.26 lattice map: | ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML MM MM MM MM MM MM MM MM MM MM MM MM MM MM MM ML ML MM MM MM MM GT MM MM GT MM MM GT MM MM MM MM ML ML MM MM GT MM MH MH MH MH MH MH MH MM GT MM MM ML ML MM MM MM MH MH MH MH MH MH MH MH MH MM MM MM ML ML MM GT MH MH GT MH MH GT MH MH GT MH MH GT MM ML ML MM MM MH MH MH MH MH MH MH MH MH MH MH MM MM ML ML MM MM MH MH MH MH MH MH MH MH MH MH MH MM MM ML ML MM GT MH MH GT MH MH FC MH MH GT MH MH GT MM ML ML MM MM MH MH MH MH MH MH MH MH MH MH MH MM MM ML ML MM MM MH MH MH MH MH MH MH MH MH MH MH MM MM ML ML MM GT MH MH GT MH MH GT MH MH GT MH MH GT MM ML ML MM MM MM MH MH MH MH MH MH MH MH MH MM MM MM ML ML MM MM GT MM MH MH MH MH MH MH MH MM GT MM MM ML ML MM MM MM MM GT MM MM GT MM MM GT MM MM MM MM ML ML MM MM MM MM MM MM MM MM MM MM MM MM MM MM MM ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML # end-grid-MOX nuclide flags: H: {burn: false, xs: true} O: burn: false xs: true expandTo: ["O16", "O17"] # O18 is not in many nuclear data sets. 
B: {burn: false, xs: true} AL: {burn: false, xs: true} ZR: {burn: false, xs: true} U235: {burn: false, xs: true} U238: {burn: false, xs: true} PU238: {burn: false, xs: true} PU239: {burn: false, xs: true} PU240: {burn: false, xs: true} PU241: {burn: false, xs: true} PU242: {burn: false, xs: true} AM241: {burn: false, xs: true} # end-nucflags ================================================ FILE: armi/testing/reactors/c5g7/c5g7-settings.yaml ================================================ settings: # global availabilityFactor: 0.9 buGroups: - 100 burnSteps: 2 comment: C5G7 LWR Benchmark inputs cycleLength: 411.11 loadingFile: c5g7-blueprints.yaml nCycles: 10 nTasks: 1 power: 1000000000.0 versions: armi: uncontrolled # database db: true # neutronics genXS: Neutron # report genReports: false ================================================ FILE: armi/testing/reactors/godiva/godiva-blueprints.yaml ================================================ nuclide flags: PU237: {burn: false, xs: true, expandTo: []} PU240: {burn: false, xs: true, expandTo: []} PU241: {burn: false, xs: true, expandTo: []} AR: {burn: false, xs: true, expandTo: []} PA233: {burn: false, xs: true, expandTo: []} NP238: {burn: false, xs: true, expandTo: []} AR36: {burn: false, xs: true, expandTo: []} TH230: {burn: false, xs: true, expandTo: []} AR38: {burn: false, xs: true, expandTo: []} U238: {burn: false, xs: true, expandTo: []} U239: {burn: false, xs: true, expandTo: []} C: {burn: false, xs: true, expandTo: []} LFP35: {burn: false, xs: true, expandTo: []} U233: {burn: false, xs: true, expandTo: []} U234: {burn: false, xs: true, expandTo: []} U235: {burn: false, xs: true, expandTo: []} U236: {burn: false, xs: true, expandTo: []} U237: {burn: false, xs: true, expandTo: []} PU239: {burn: false, xs: true, expandTo: []} PU238: {burn: false, xs: true, expandTo: []} TH234: {burn: false, xs: true, expandTo: []} TH232: {burn: false, xs: true, expandTo: []} AR40: {burn: false, xs: true, expandTo: []} LFP39: 
{burn: false, xs: true, expandTo: []} DUMP2: {burn: false, xs: true, expandTo: []} LFP41: {burn: false, xs: true, expandTo: []} LFP40: {burn: false, xs: true, expandTo: []} PU242: {burn: false, xs: true, expandTo: []} PU236: {burn: false, xs: true, expandTo: []} U232: {burn: false, xs: true, expandTo: []} DUMP1: {burn: false, xs: true, expandTo: []} LFP38: {burn: false, xs: true, expandTo: []} AM243: {burn: false, xs: true, expandTo: []} PA231: {burn: false, xs: true, expandTo: []} CM244: {burn: false, xs: true, expandTo: []} CM242: {burn: false, xs: true, expandTo: []} AM242: {burn: false, xs: true, expandTo: []} CM245: {burn: false, xs: true, expandTo: []} CM243: {burn: false, xs: true, expandTo: []} CM246: {burn: false, xs: true, expandTo: []} CM247: {burn: false, xs: true, expandTo: []} O: {burn: false, xs: true, expandTo: [O16]} N: {burn: false, xs: true, expandTo: [N14]} ZR: {burn: false, xs: true, expandTo: []} custom isotopics: {} blocks: {} assemblies: heights: - 3.5 - 3.5 - 3.5 - 3.5 - 3.5 axial mesh points: - 5 - 5 - 5 - 5 - 5 assembly1_1: specifier: assembly1_1 blocks: - name: block1_1_1 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 3.001 inner_radius: 0.0 mult: 0.9226919412612915 compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 3.001 inner_radius: 0.0 mult: 0.0773080587387085 - name: block1_1_2 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 3.001 inner_radius: 0.0 mult: 1.0 - name: block1_1_3 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 3.001 inner_radius: 0.0 mult: 1.0 - name: block1_1_4 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 
0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 3.001 inner_radius: 0.0 mult: 1.0 - name: block1_1_5 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 3.001 inner_radius: 0.0 mult: 0.9271114468574524 compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 3.001 inner_radius: 0.0 mult: 0.07288855314254761 height: - 3.5 - 3.5 - 3.5 - 3.5 - 3.5 axial mesh points: - 5 - 5 - 5 - 5 - 5 radial mesh points: 2 azimuthal mesh points: 7 material modifications: U235_wt_frac: - 0.9371 - 0.9371 - 0.9371 - 0.9371 - 0.9371 ZR_wt_frac: - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 xs types: - A - A - A - A - A assembly2_1: specifier: assembly2_1 blocks: - name: block2_1_1 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 6.002 inner_radius: 3.001 mult: 0.5954532027244568 compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 6.002 inner_radius: 3.001 mult: 0.4045467972755432 - name: block2_1_2 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 6.002 inner_radius: 3.001 mult: 1.0 - name: block2_1_3 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 6.002 inner_radius: 3.001 mult: 1.0 - name: block2_1_4 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 6.002 inner_radius: 3.001 mult: 1.0 - name: block2_1_5 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 6.002 
inner_radius: 3.001 mult: 0.5924441814422607 compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 6.002 inner_radius: 3.001 mult: 0.40755581855773926 height: - 3.5 - 3.5 - 3.5 - 3.5 - 3.5 axial mesh points: - 5 - 5 - 5 - 5 - 5 radial mesh points: 2 azimuthal mesh points: 7 material modifications: U235_wt_frac: - 0.9371 - 0.9371 - 0.9371 - 0.9371 - 0.9371 ZR_wt_frac: - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 xs types: - A - A - A - A - A assembly3_1: specifier: assembly3_1 blocks: - name: block3_1_1 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.046154800802469254 compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.9538451991975307 - name: block3_1_2 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.6035306453704834 compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.3964693546295166 - name: block3_1_3 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.8756284713745117 compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.12437152862548828 - name: block3_1_4 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.5993080139160156 
compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.4006919860839844 - name: block3_1_5 godiva: shape: RadialSegment material: UZr Tinput: 26.85 Thot: 26.85 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.04680449143052101 compliment: shape: RadialSegment material: Air Tinput: 0.0 Thot: 0.0 outer_theta: 0.7853981633974483 height: 3.5 inner_theta: 0.0 outer_radius: 9.0 inner_radius: 6.002 mult: 0.953195508569479 height: - 3.5 - 3.5 - 3.5 - 3.5 - 3.5 axial mesh points: - 5 - 5 - 5 - 5 - 5 radial mesh points: 2 azimuthal mesh points: 7 material modifications: U235_wt_frac: - 0.9371 - 0.9371 - 0.9371 - 0.9371 - 0.9371 ZR_wt_frac: - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 xs types: - A - A - A - A - A systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 grids: core: geom: thetarz lattice map: grid bounds: r: - 0.0 - 3.001 - 6.002 - 9.0 theta: - 0.0 - 0.7853981633974483 z: - -8.75 - -5.25 - -1.7500000000000002 - 1.7500000000000002 - 5.25 - 8.75 symmetry: eighth periodic grid contents: ? - 0 - 0 : assembly1_1 ? - 0 - 1 : assembly2_1 ? 
- 0 - 2 : assembly3_1 ================================================ FILE: armi/testing/reactors/godiva/godiva.armi.unittest.yaml ================================================ settings: acceptableBlockAreaError: 0.0001 burnSteps: 0 comment: Bare, Highly Enriched Uranium Sphere crossSectionControl: AA: geometry: 0D validBlockTypes: - fuel blockRepresentation: FluxWeightedAverage criticalBuckling: true genReports: false genXS: Neutron groupStructure: ARMI45 loadingFile: godiva-blueprints.yaml neutronicsKernel: DIF3D-FD neutronicsOutputsToSave: All neutronicsType: both nTasks: 36 outers: 200 power: 0.001 verbosity: debug versions: armi: uncontrolled ================================================ FILE: armi/testing/reactors/smallHexReactor/smallHexReactor-bp.yaml ================================================ # A small, hex-based, full-core reactor nuclide flags: U234: burn: true xs: true expandTo: U235: burn: true xs: true expandTo: U236: burn: true xs: true expandTo: U238: burn: true xs: true expandTo: NP237: burn: true xs: true expandTo: NP238: burn: true xs: true expandTo: PU236: burn: true xs: true expandTo: PU238: burn: true xs: true expandTo: PU239: burn: true xs: true expandTo: PU240: burn: true xs: true expandTo: PU241: burn: true xs: true expandTo: PU242: burn: true xs: true expandTo: AM241: burn: true xs: true expandTo: AM242: burn: true xs: true expandTo: AM243: burn: true xs: true expandTo: CM242: burn: true xs: true expandTo: CM243: burn: true xs: true expandTo: CM244: burn: true xs: true expandTo: CM245: burn: true xs: true expandTo: CM246: burn: true xs: true expandTo: CM247: burn: true xs: true expandTo: LFP35: burn: true xs: true expandTo: LFP38: burn: true xs: true expandTo: LFP39: burn: true xs: true expandTo: LFP40: burn: true xs: true expandTo: LFP41: burn: true xs: true expandTo: DUMP1: burn: true xs: true expandTo: DUMP2: burn: true xs: true expandTo: B10: burn: false xs: true expandTo: B11: burn: false xs: true expandTo: ZR: burn: 
false xs: true expandTo: C: burn: false xs: true expandTo: SI: burn: false xs: true expandTo: V: burn: false xs: true expandTo: CR: burn: false xs: true expandTo: MN: burn: false xs: true expandTo: FE: burn: false xs: true expandTo: NI: burn: false xs: true expandTo: MO: burn: false xs: true expandTo: W: burn: false xs: true expandTo: NA: burn: false xs: true expandTo: HE: burn: false xs: true expandTo: N: burn: false xs: true expandTo: - N14 - N15 S: burn: false xs: true expandTo: - S32 - S33 - S34 - S36 P: burn: false xs: true expandTo: - P31 NB: burn: false xs: true expandTo: - NB93 CO: burn: false xs: true expandTo: - CO59 CU: burn: false xs: true expandTo: - CU63 - CU65 SN: burn: false xs: true expandTo: - SN112 - SN114 - SN115 - SN116 - SN117 - SN118 - SN119 - SN120 - SN122 - SN124 - SN126 BI: burn: false xs: true expandTo: - BI209 AL: burn: false xs: true expandTo: - AL27 PB: burn: false xs: true expandTo: - PB204 - PB206 - PB207 - PB208 O: burn: false xs: true expandTo: - O16 AS: burn: false xs: true expandTo: - AS75 TA: burn: false xs: true expandTo: [] TI: burn: false xs: true expandTo: - TI46 - TI47 - TI48 - TI49 - TI50 BE: burn: false xs: true expandTo: - BE9 SB: burn: false xs: true expandTo: - SB121 - SB123 Y: burn: false xs: true expandTo: RU: burn: false xs: true expandTo: - RU96 - RU98 - RU99 - RU100 - RU101 - RU102 - RU104 PD: burn: false xs: true expandTo: - PD102 - PD104 - PD105 - PD106 - PD108 - PD110 RH: burn: false xs: true expandTo: - RH103 B: burn: true xs: true expandTo: - B10 - B11 blocks: fuel: &block_fuel clad1: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.6962 od: 0.808 latticeIDs: - 1 # end-block-clad wire1: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.8888 id: 0.0 od: 0.0808 latticeIDs: - 1 # end-block-wire fuel1: shape: Circle material: UO2 Tinput: 25.0 Thot: 500.0 id: 0.0 od: 0.6029 latticeIDs: - 1 # end-block-fuel bond1: shape: Circle material: Sodium Tinput: 447.0 Thot: 447.0 
id: fuel1.od od: clad1.id latticeIDs: - 1 # end-block-bond duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 # end-block-duct intercoolant: shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: duct.op op: 16.142 mult: 1.0 # end-block-intercoolant coolant: shape: DerivedShape material: Sodium Tinput: 200.0 Thot: 450.0 # end-block-coolant radial shield: &block_shield control: shape: Circle material: B4C Tinput: 597.0 Thot: 597.0 id: 0.0 od: 0.6962 mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-radialshield reflector: &block_reflector reflector: shape: Circle material: HT9 Tinput: 450.0 Thot: 450.0 id: 0.0 od: 0.6962 mult: 271 wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.777 id: 0.0 od: 0.0808 mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 plenum: &block_plenum clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.6962 od: 0.808 mult: 271 wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.88888 id: 0.0 od: 0.0808 mult: 271 gap: shape: Circle material: Void Tinput: 450.0 Thot: 450.0 id: 0.0 od: clad.id mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-plenum assemblies: heights: &heights - 15.0 - 20.32 - 20.32 - 20.32 
- 20.32 - 20.32 - 191.14 axial mesh points: &mesh - 1 - 2 - 2 - 2 - 2 - 2 - 8 # end-assemblies-common inner fuel: specifier: IC blocks: &fuel_blocks - *block_reflector - *block_fuel - *block_fuel - *block_fuel - *block_fuel - *block_fuel - *block_plenum height: *heights axial mesh points: *mesh xs types: &IC_xs - A - A - A - A - A - A - A # end-assemblies-ic middle core fuel: specifier: MC blocks: *fuel_blocks height: *heights axial mesh points: *mesh material modifications: TD_frac: - '' - 0.153 - 0.153 - 0.153 - 0.153 - 0.153 - '' xs types: - B - B - B - B - B - B - B # end-assemblies-mc # end-assemblies-section systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 grids: core: geom: hex_corners_up symmetry: full lattice map: | - MC MC MC - MC IC IC IC IC IC IC IC IC IC IC IC IC IC IC IC # end-systems-section ================================================ FILE: armi/testing/reactors/smallHexReactor/smallHexReactor.yaml ================================================ # A simple test reactor, not physicially functional # # * pin-type reactor with hex assemblies # * sodium-cooled, fast # * full core symmetry # * The core grid is corners up # * The symmetric core positions have different assembly types # * The different assembly types have different amounts of molesHmBOL because they had different TD_frac material modifications # * There are 3 rings (necessary for checking problems with symmetry because the assemblies in the 2nd ring don't actually fall along the symmetry line that is checked) settings: # global availabilityFactor: 0.9 buGroups: - 100 burnSteps: 2 comment: Small, full core test reactor. 
cycleLength: 400.0 loadingFile: smallHexReactor-bp.yaml nCycles: 1 nTasks: 1 power: 1000000000.0 verbosity: warning versions: armi: uncontrolled # neutronics genXS: Neutron ================================================ FILE: armi/testing/reactors/thirdSmallHexReactor/thirdSmallHexReactor-bp.yaml ================================================ # A small, hex-based, full-core reactor nuclide flags: U234: burn: true xs: true expandTo: U235: burn: true xs: true expandTo: U236: burn: true xs: true expandTo: U238: burn: true xs: true expandTo: NP237: burn: true xs: true expandTo: NP238: burn: true xs: true expandTo: PU236: burn: true xs: true expandTo: PU238: burn: true xs: true expandTo: PU239: burn: true xs: true expandTo: PU240: burn: true xs: true expandTo: PU241: burn: true xs: true expandTo: PU242: burn: true xs: true expandTo: AM241: burn: true xs: true expandTo: AM242: burn: true xs: true expandTo: AM243: burn: true xs: true expandTo: CM242: burn: true xs: true expandTo: CM243: burn: true xs: true expandTo: CM244: burn: true xs: true expandTo: CM245: burn: true xs: true expandTo: CM246: burn: true xs: true expandTo: CM247: burn: true xs: true expandTo: LFP35: burn: true xs: true expandTo: LFP38: burn: true xs: true expandTo: LFP39: burn: true xs: true expandTo: LFP40: burn: true xs: true expandTo: LFP41: burn: true xs: true expandTo: DUMP1: burn: true xs: true expandTo: DUMP2: burn: true xs: true expandTo: B10: burn: false xs: true expandTo: B11: burn: false xs: true expandTo: ZR: burn: false xs: true expandTo: C: burn: false xs: true expandTo: SI: burn: false xs: true expandTo: V: burn: false xs: true expandTo: CR: burn: false xs: true expandTo: MN: burn: false xs: true expandTo: FE: burn: false xs: true expandTo: NI: burn: false xs: true expandTo: MO: burn: false xs: true expandTo: W: burn: false xs: true expandTo: NA: burn: false xs: true expandTo: HE: burn: false xs: true expandTo: N: burn: false xs: true expandTo: - N14 - N15 S: burn: false xs: true 
expandTo: - S32 - S33 - S34 - S36 P: burn: false xs: true expandTo: - P31 NB: burn: false xs: true expandTo: - NB93 CO: burn: false xs: true expandTo: - CO59 CU: burn: false xs: true expandTo: - CU63 - CU65 SN: burn: false xs: true expandTo: - SN112 - SN114 - SN115 - SN116 - SN117 - SN118 - SN119 - SN120 - SN122 - SN124 - SN126 BI: burn: false xs: true expandTo: - BI209 AL: burn: false xs: true expandTo: - AL27 PB: burn: false xs: true expandTo: - PB204 - PB206 - PB207 - PB208 O: burn: false xs: true expandTo: - O16 AS: burn: false xs: true expandTo: - AS75 TA: burn: false xs: true expandTo: [] TI: burn: false xs: true expandTo: - TI46 - TI47 - TI48 - TI49 - TI50 BE: burn: false xs: true expandTo: - BE9 SB: burn: false xs: true expandTo: - SB121 - SB123 Y: burn: false xs: true expandTo: RU: burn: false xs: true expandTo: - RU96 - RU98 - RU99 - RU100 - RU101 - RU102 - RU104 PD: burn: false xs: true expandTo: - PD102 - PD104 - PD105 - PD106 - PD108 - PD110 RH: burn: false xs: true expandTo: - RH103 B: burn: true xs: true expandTo: - B10 - B11 blocks: fuel: &block_fuel clad1: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.6962 od: 0.808 latticeIDs: - 1 # end-block-clad wire1: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.8888 id: 0.0 od: 0.0808 latticeIDs: - 1 # end-block-wire fuel1: shape: Circle material: UO2 Tinput: 25.0 Thot: 500.0 id: 0.0 od: 0.6029 latticeIDs: - 1 # end-block-fuel bond1: shape: Circle material: Sodium Tinput: 447.0 Thot: 447.0 id: fuel1.od od: clad1.id latticeIDs: - 1 # end-block-bond duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 # end-block-duct intercoolant: shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: duct.op op: 16.142 mult: 1.0 # end-block-intercoolant coolant: shape: DerivedShape material: Sodium Tinput: 200.0 Thot: 450.0 # end-block-coolant radial shield: &block_shield control: shape: Circle material: B4C Tinput: 597.0 Thot: 
597.0 id: 0.0 od: 0.6962 mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-radialshield reflector: &block_reflector reflector: shape: Circle material: HT9 Tinput: 450.0 Thot: 450.0 id: 0.0 od: 0.6962 mult: 271 wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.777 id: 0.0 od: 0.0808 mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 plenum: &block_plenum clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.6962 od: 0.808 mult: 271 wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 0.88888 id: 0.0 od: 0.0808 mult: 271 gap: shape: Circle material: Void Tinput: 450.0 Thot: 450.0 id: 0.0 od: clad.id mult: 271 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.922 op: 15.710 mult: 1.0 intercoolant: shape: Hexagon material: Sodium Tinput: 447.0 Thot: 447.0 ip: duct.op mult: 1.0 op: 16.142 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 # end-block-plenum assemblies: heights: &heights - 15.0 - 20.32 - 191.14 axial mesh points: &mesh - 1 - 2 - 8 # end-assemblies-common inner fuel: specifier: IC blocks: &fuel_blocks - *block_reflector - *block_fuel - *block_plenum height: *heights axial mesh points: *mesh xs types: &IC_xs - A - A - A # end-assemblies-ic middle core fuel: specifier: MC blocks: *fuel_blocks height: *heights axial mesh points: *mesh material modifications: TD_frac: - '' - 0.153 - '' xs types: - B - B - B # end-assemblies-mc # end-assemblies-section systems: core: grid 
name: core origin: x: 0.0 y: 0.0 z: 0.0 grids: core: geom: hex_corners_up symmetry: third periodic lattice map: | MC MC IC IC IC IC IC # end-systems-section ================================================ FILE: armi/testing/reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml ================================================ # A simple test reactor, not physicially functional # # * pin-type reactor with hex assemblies # * sodium-cooled, fast # * third-core symmetry # * The core grid is corners up # * The symmetric core positions have different assembly types # * The different assembly types have different amounts of molesHmBOL because they had different TD_frac material modifications # * There are 3 rings (necessary for checking problems with symmetry because the assemblies in the 2nd ring don't actually fall along the symmetry line that is checked) settings: # global availabilityFactor: 0.9 buGroups: - 100 burnSteps: 2 comment: Small, third-core test reactor. cycleLength: 400.0 loadingFile: thirdSmallHexReactor-bp.yaml nCycles: 1 nTasks: 1 power: 1000000000.0 startCycle: 1 startNode: 2 verbosity: error versions: armi: uncontrolled # neutronics genXS: Neutron genReports: false summarizeAssemDesign: false ================================================ FILE: armi/testing/resources/armiRun-SHUFFLES.yaml ================================================ sequence: 1: &cycle_1 - &shuffle_1_9_45 cascade: ["igniter fuel", "009-045", "008-004", "007-001", "006-005"] fuelEnrichment: [0, 0.12, 0.14, 0.15, 0] - extraRotations: {"009-045": 60} - &shuffle_1_4_4 cascade: ["middle fuel", "004-004", "005-005", "006-006", "Delete"] fuelEnrichment: [0, 0.12, 0.14, 0.15, 0] 2: - *shuffle_1_9_45 - *shuffle_1_4_4 - extraRotations: {"009-045": 60} - cascade: ["SFP", "005-003", "SFP"] ringPosCycle: [6, 5, 0] 3: - *shuffle_1_9_45 - swap: ["009-045", "008-004"] - swap: ["007-001", "006-005"] - cascade: ["SFP", "002-002", "SFP"] ringPosCycle: [5, 3, 1] 
================================================ FILE: armi/testing/singleMixedAssembly.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io from armi.reactor.blueprints import Blueprints from armi.settings import Settings BLOCK_DEFINITIONS_2PIN = """ blocks: grid plate: &block_grid_plate grid: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 15.277 mult: 1.0 op: 16.577 coolant: &component_coolant shape: DerivedShape material: Sodium Tinput: 25.0 Thot: 450.0 intercoolant: shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: grid.op mult: 1.0 op: 19.0 duct: &block_duct coolant: *component_coolant duct: &component_duct shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 18.0 mult: 1.0 op: 18.5 intercoolant: &component_intercoolant shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: duct.op mult: 1.0 op: 19.0 axial shield twoPin: &block_fuel_multiPin_axial_shield grid name: twoPin shield: &component_shield_shield1 shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.86602 latticeIDs: [1] bond: &component_shield_bond1 shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: shield.od od: clad.id latticeIDs: [1] clad: &component_shield_clad1 shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 od: 1.09 latticeIDs: [1] wire: &component_shield_wire1 shape: Helix material: HT9 Tinput: 25.0 Thot: 470.0 axialPitch: 30.15 helixDiameter: 1.19056 
id: 0.0 od: 0.10056 latticeIDs: [1] shield test: <<: *component_shield_shield1 latticeIDs: [2] bond test: <<: *component_shield_bond1 id: shield test.od od: clad test.id latticeIDs: [2] clad test: <<: *component_shield_clad1 latticeIDs: [2] coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: shield fuel twoPin: &block_fuel_multiPin grid name: twoPin fuel: &component_fuelmultiPin shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.86602 latticeIDs: [1] bond: &component_fuelmultiPin_bond shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: fuel.od od: clad.id latticeIDs: [1] clad: &component_fuelmultiPin_clad1 shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 od: 1.09 latticeIDs: [1] wire: &component_fuelmultiPin_wire1 shape: Helix material: HT9 Tinput: 25.0 Thot: 470.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 od: 0.10056 latticeIDs: [1] fuel test: &component_fuelmultiPin_fuel2 <<: *component_fuelmultiPin latticeIDs: [2] bond test: &component_fuelmultiPin_bond2 <<: *component_fuelmultiPin_bond id: fuel test.od od: clad test.id latticeIDs: [2] clad test: &component_fuelmultiPin_clad2 <<: *component_fuelmultiPin_clad1 latticeIDs: [2] coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: fuel plenum 2pin: &block_plenum_multiPin grid name: twoPin gap: &component_plenummultiPin_gap1 shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.0 od: clad.id latticeIDs: [1] clad: *component_fuelmultiPin_clad1 wire: *component_fuelmultiPin_wire1 gap test: <<: *component_plenummultiPin_gap1 od: clad test.id latticeIDs: [2] clad test: *component_fuelmultiPin_clad2 coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: clad test mixed fuel plenum 2pin: &block_mixed_multiPin grid name: twoPin gap: *component_plenummultiPin_gap1 clad: 
*component_fuelmultiPin_clad1 wire: *component_fuelmultiPin_wire1 fuel test: *component_fuelmultiPin_fuel2 bond test: *component_fuelmultiPin_bond2 clad test: *component_fuelmultiPin_clad2 coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: fuel test aclp plenum 2pin: &block_aclp_multiPin <<: *block_plenum_multiPin SodiumBlock: &block_dummy flags: dummy coolant: shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: 0.0 mult: 1.0 op: 19.0 """ BLOCK_DEFINITIONS_3PIN = """ blocks: grid plate: &block_grid_plate grid: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 15.277 mult: 1.0 op: 16.577 coolant: &component_coolant shape: DerivedShape material: Sodium Tinput: 25.0 Thot: 450.0 intercoolant: shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: grid.op mult: 1.0 op: 19.0 duct: &block_duct coolant: *component_coolant duct: &component_duct shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 18.0 mult: 1.0 op: 18.5 intercoolant: &component_intercoolant shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: duct.op mult: 1.0 op: 19.0 axial shield threePin: &block_fuel_multiPin_axial_shield grid name: threePin shield: &component_shield_shield1 shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.86602 latticeIDs: [1] bond: &component_shield_bond1 shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: shield.od od: clad.id latticeIDs: [1] clad: &component_shield_clad1 shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 od: 1.09 latticeIDs: [1] wire: &component_shield_wire1 shape: Helix material: HT9 Tinput: 25.0 Thot: 470.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 od: 0.10056 latticeIDs: [1] shield test: <<: *component_shield_shield1 latticeIDs: [2] bond test: <<: *component_shield_bond1 id: shield test.od od: clad test.id latticeIDs: [2] clad test: <<: *component_shield_clad1 latticeIDs: [2] annular void: &shield_annular_void shape: 
Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.0 od: annular shield test.id latticeIDs: [3] annular shield test: shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.600 od: 0.950 latticeIDs: [3] gap1: shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: annular shield test.od od: liner.id latticeIDs: [3] liner: shape: Circle material: Zr Tinput: 25.0 Thot: 600.0 id: 0.950 od: 1.000 latticeIDs: [3] gap2: shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: liner.od od: annular clad test.id latticeIDs: [3] annular clad test: shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 1.000 od: 1.090 latticeIDs: [3] coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: shield fuel threePin: &block_fuel_multiPin grid name: threePin fuel: &component_fuelmultiPin shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.86602 latticeIDs: [1] bond: &component_fuelmultiPin_bond shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: fuel.od od: clad.id latticeIDs: [1] clad: &component_fuelmultiPin_clad1 shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 od: 1.09 latticeIDs: [1] wire: &component_fuelmultiPin_wire1 shape: Helix material: HT9 Tinput: 25.0 Thot: 470.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 od: 0.10056 latticeIDs: [1] fuel test: &component_fuelmultiPin_fuel2 <<: *component_fuelmultiPin latticeIDs: [2] bond test: &component_fuelmultiPin_bond2 <<: *component_fuelmultiPin_bond id: fuel test.od od: clad test.id latticeIDs: [2] clad test: &component_fuelmultiPin_clad2 <<: *component_fuelmultiPin_clad1 latticeIDs: [2] annular void: &fuel_annular_void <<: *shield_annular_void od: annular fuel test.id annular fuel test: &fuel_annular_test shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.600 od: 0.950 latticeIDs: [3] gap1: &annular_test_gap1 shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: annular fuel test.od od: liner.id 
latticeIDs: [3] liner: &liner shape: Circle material: Zr Tinput: 25.0 Thot: 600.0 id: 0.950 od: 1.000 latticeIDs: [3] gap2: &annular_test_gap2 shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: liner.od od: annular clad test.id latticeIDs: [3] annular clad test: &annular_clad_test shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 1.000 od: 1.090 latticeIDs: [3] coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: fuel plenum 3pin: &block_plenum_multiPin grid name: threePin gap: &component_plenummultiPin_gap1 shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.0 od: clad.id latticeIDs: [1] clad: *component_fuelmultiPin_clad1 wire: *component_fuelmultiPin_wire1 gap test: <<: *component_plenummultiPin_gap1 od: clad test.id latticeIDs: [2] clad test: *component_fuelmultiPin_clad2 annular void: <<: *fuel_annular_void od: liner.id latticeIDs: [3] liner: *liner gap2: *annular_test_gap2 annular clad test: *annular_clad_test coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: clad test mixed fuel plenum 3pin: &block_mixed_multiPin grid name: threePin gap: *component_plenummultiPin_gap1 clad: *component_fuelmultiPin_clad1 wire: *component_fuelmultiPin_wire1 fuel test: *component_fuelmultiPin_fuel2 bond test: *component_fuelmultiPin_bond2 clad test: *component_fuelmultiPin_clad2 annular void: *fuel_annular_void annular fuel test: *fuel_annular_test gap1: *annular_test_gap1 liner: *liner gap2: *annular_test_gap2 annular clad test: *annular_clad_test coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: fuel test aclp plenum 3pin: &block_aclp_multiPin <<: *block_plenum_multiPin SodiumBlock: &block_dummy flags: dummy coolant: shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: 0.0 mult: 1.0 op: 19.0 """ REGULAR_ASSEMBLY_DEF = """ assemblies: 
multi pin fuel: specifier: LA blocks: [*block_grid_plate, *block_fuel_multiPin_axial_shield, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_mixed_multiPin, *block_aclp_multiPin, *block_plenum_multiPin, *block_duct, *block_dummy] height: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] axial mesh points: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] material modifications: U235_wt_frac: ['', '', 0.2, 0.2, 0.2, 0.2, '', '', '', ''] ZR_wt_frac: ['', '', 0.07, 0.07, 0.07, 0.07, '', '', '', ''] xs types: [A, A, B, C, C, D, A, A, A, A] """ # noqa: E501 GRID_DEFINITION = """ grids: core: geom: hex symmetry: third periodic lattice map: LA twoPin: geom: hex_corners_up symmetry: full lattice map: | - 2 1 2 1 2 1 2 threePin: geom: hex_corners_up symmetry: full lattice map: | - 2 1 3 1 3 1 2 """ def buildMixedPinAssembly( blockDefs: str = BLOCK_DEFINITIONS_2PIN, assemDef: str = REGULAR_ASSEMBLY_DEF, gridDef: str = GRID_DEFINITION, ): """Builds a hex-shaped mixed-pin assembly for a sodium fast reactor. This assembly consists of 2 pin types arranged as specified in the lattice map. """ completeBlueprints = blockDefs + assemDef + gridDef cs = Settings() with io.StringIO(completeBlueprints) as stream: blueprints = Blueprints.load(stream) blueprints._prepConstruction(cs) return list(blueprints.assemblies.values())[0] def buildMixedThreePinAssembly( blockDefs: str = BLOCK_DEFINITIONS_3PIN, assemDef: str = REGULAR_ASSEMBLY_DEF, gridDef: str = GRID_DEFINITION, ): """Builds a hex-shaped mixed-pin assembly for a sodium fast reactor. This assembly consists of 3 pin types arranged as specified in the lattice map. 
""" completeBlueprints = blockDefs + assemDef + gridDef cs = Settings() with io.StringIO(completeBlueprints) as stream: blueprints = Blueprints.load(stream) blueprints._prepConstruction(cs) return list(blueprints.assemblies.values())[0] ================================================ FILE: armi/testing/symmetryTesting.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing utilities for symmetry. Symmetry factor usage can be difficult to verify across multiple plugins, and plugins may write one-off fixes for situations involving the symmetry factor. The utilities provided here are an attempt to catch symmetry factor issues at the unit test level, rather than during integration tests. The goal of this utility is to test symmetry intent, not functionality. This means individual implementations of symmetry-aware operations are still responsible for testing the implemetation. This module serves as a check that the parameters that are expected to change with symmetry do indeed change. This might be obvious, but this test CANNOT detect errors where the parameter is not either: 1) Labeled as a symmetry-aware parameter in the parameter definition. 2) Labeled as a symmetry-aware parameter in the test. Failing to do at least one of the above will result in passing symmetry tests. The tests here use the `growToFullCore` since that should be one of the most mature symmetry-aware operations. 
This module provides the `BasicArmiSymmetryTestHelper` which is meant to be inherited into a downstream unit test. The test helper uses the `SymmetryFactorTester` to handle the bookkeeping tasks associated with testing symmetry. """ import unittest from contextlib import contextmanager from typing import TYPE_CHECKING, Any, Iterable, Union from armi.testing import loadTestReactor if TYPE_CHECKING: from armi.reactor import Core, parameters from armi.reactor.assemblies import Assembly from armi.reactor.blocks import Block class BasicArmiSymmetryTestHelper(unittest.TestCase): """ Customizable test runner for symmetry-intent audit. This class is meant to be customized in a plugin to check the plugin-specific symmetry-aware parameters. To use the test fixture, make a subclass test and assign the `*ParamsToTest` and `expectedSymmetric*` attributes in the `setUp` method of the subclass. The subclass must have `super.setUp()` in it's `setUp` method at some point after the necessary plugin attributes are assigned. It should generally not be necessary for the plugin to implement any further unit tests, the parent class contains a test method that should adequately verify the the expected symmetric parameters are indeed expanded. Attributes ---------- coreParamsToTest : Iterable[str] | armi.reactor.parameters.parameterDefinitionCollection, optional Core parameters that should be initialized and tested. assemblyParamsToTest : Iterable[str] | armi.reactor.parameters.parameterDefinitionCollection, optional Assembly parameters that should be initialized and tested. blockParamsToTest : Iterable[str] | armi.reactor.parameters.parameterDefinitionCollection, optional Block parameters that should be initialized and tested. expectedSymmetricCoreParams : Iterable[str], optional Core parameters that are expected to change with symmetry. expectedSymmetricAssemblyParams : Iterable[str], optional Assembly parameters that are expected to change with symmetry. 
expectedSymmetricBlockParams : Iterable[str], optional Block Parameters that are expected to change with symmetry. parameterOverrides : dict[str: Any], optional Dictionary of specific values to assign to a particular parameter. Useful for parameters that have validators. paramsToIgnore : Iterable[str], optional Parameter names to ignore the comparison results for. customSettings : dict[str: Any] Dictionary of custom settings that is passed to the test reactor builder. Useful for disabling features that require additional input and are not useful for the symmetry audit. Example ------- class MySymmetryTest(symmetryTesting.BasicArmiSymmetryTestHelper): def setUp(): # Tests are configured using attributes. Attributes must be set prior to calling super.setUp() # Note that it is not required to set any attributes, all have empty defaults # Repeat for self.coreParamsToTest and self.assemblyParamsToTest as necessary: self.blockParamsToTest = [p if isinstance(p, str) else p.name for p in getPluginBlockParameterDefinitions()] # Repeat for self.expectedSymmetricCoreParams and self.expectedSymmetricAssemblyParams as necessary: self.expectedSymmetricBlockParams = ["mySymmetricBlockParam1", "mySymmetricBlockParam2"] # Set specific parameter overrides if the parameters need a specific value (usually due to input validators) self.parameterOverrides = {"parameterName1": value1, "parameterName2": value2} # Set specific parameters to ignore in comparison. self.paramsToIgnore = ["myIgnoredParameter"] # Finish setting up the tests by calling the parent's `setUp` method. 
super.setUp() """ def __init__(self, methodName="runTest"): self.coreParamsToTest = [] self.assemblyParamsToTest = [] self.blockParamsToTest = [] self.expectedSymmetricCoreParams = [] self.expectedSymmetricAssemblyParams = [] self.expectedSymmetricBlockParams = [] self.parameterOverrides = {} self.paramsToIgnore = [] self.customSettings = {} super().__init__(methodName) def setUp(self): self._preprocessPluginParams() self.symTester = SymmetryFactorTester(self) def _preprocessPluginParams(self): """Parameters can be provided as string names or whole parameter objects, need to convert to string name.""" self.coreParamsToTest = [p if isinstance(p, str) else p.name for p in self.coreParamsToTest] self.assemblyParamsToTest = [p if isinstance(p, str) else p.name for p in self.assemblyParamsToTest] self.blockParamsToTest = [p if isinstance(p, str) else p.name for p in self.blockParamsToTest] self.expectedSymmetricCoreParams = [ p if isinstance(p, str) else p.name for p in self.expectedSymmetricCoreParams ] self.expectedSymmetricAssemblyParams = [ p if isinstance(p, str) else p.name for p in self.expectedSymmetricAssemblyParams ] self.expectedSymmetricBlockParams = [ p if isinstance(p, str) else p.name for p in self.expectedSymmetricBlockParams ] def test_defaultSymmetry(self): self.symTester.runSymmetryFactorTests( expectedCoreParams=self.expectedSymmetricCoreParams, expectedAssemblyParams=self.expectedSymmetricAssemblyParams, expectedBlockParams=self.expectedSymmetricBlockParams, ) class SymmetryFactorTester: """ A test runner for symmetry factors. This class does the actual symmetry testing, but there is a lot of bookkeeping that isn't important to expose in the test helper class so putting it here helps keep the BasicArmiSymmetryTestHelper clean. 
""" def __init__(self, armiSymmetryTester: BasicArmiSymmetryTestHelper): self.o, self.r = loadTestReactor(customSettings=armiSymmetryTester.customSettings) self.core = self.r.core # there is exactly one assembly with 3-symmetry in the test core self.partialAssembly = [a for a in self.r.core.getAssemblies() if a.getSymmetryFactor() == 3][0] self.partialBlock = self.partialAssembly.getBlocks()[0] # expectedSymmetry describes the ratio of (post-expansion / pre-expansion) values self.expectedSymmetryRatio = 3 self.defaultParameterValue = 2 # some parameters have validation on their inputs and need specific settings self.parameterOverrides = armiSymmetryTester.parameterOverrides self.testObject = armiSymmetryTester self.coreParamsToTest = armiSymmetryTester.coreParamsToTest self.assemblyParamsToTest = armiSymmetryTester.assemblyParamsToTest self.blockParamsToTest = armiSymmetryTester.blockParamsToTest self._initializeCore() self._initializeAssembly() self._initializeBlock() # Some parameters change because of symmetry but are not "volume integrated" # so this marks them for skipping in the compare. # Also allows plugins the flexibility to skip some parameters if needed. self.paramsToIgnore = armiSymmetryTester.paramsToIgnore @staticmethod def _getParameters(obj: object, paramList: Iterable[str]): return {param: obj.p[param] for param in paramList} @staticmethod def _getParamNamesFromDefs(pdefs: "parameters.ParameterDefinitionCollection"): return set([p.name for p in pdefs]) def _initializeCore(self): self._initializeParameters(self.coreParamsToTest, self.core) def _initializeAssembly(self): self._initializeParameters(self.assemblyParamsToTest, self.partialAssembly) def _initializeBlock(self): self._initializeParameters(self.blockParamsToTest, self.partialBlock) def _initializeParameters(self, parameterNames, obj: Union["Core", "Assembly", "Block"]): """ Load values into each parameter. 
The values generally do not need to be the correct types (see Notes) because this test fixture is for auditing intent, not capability. The capability of the expansion functions to expand different types correctly should be part of the tests for those functions. Parameters ---------- parameterNames : Iterable[str] Iterable of string parameter names to initialize on the object. obj : armi.reactor.Core | armi.reactor.assemblies.Assembly | armi.reactor.blocks.Block The object on which to initialize parameter values. Notes ----- Some parameters are specifically adjusted here because inspecting their types does not yield usable results for setting the values. Current specific settings are: xsType: must be an iterable of strings. xsTypeNum: must be an integer corresponding to an ASCII character in the range of what is acceptable for xsType. notes: must be a string with length less than 1000 characters. """ for p in parameterNames: name = str(p) if name in self.parameterOverrides.keys(): obj.p[name] = self.parameterOverrides[name] else: obj.p[name] = self.defaultParameterValue def _compareParameters( self, referenceParameters: dict[str:Any], perturbedParameters: dict[str:Any], expectedParameters: Iterable[str], scopeName: str, ): """ Run the comparison of reference parameters vs the perturbed parameters. Tests: 1. Parameters that change after core expansion are in the list of parameters expected to change. 2. All parameters in the list of parameters expected to change do indeed change by the expected ratio. 
""" for paramName, perturbedValue in perturbedParameters.items(): referenceValue = referenceParameters[paramName] if referenceValue != perturbedValue and paramName not in self.paramsToIgnore: self.testObject.assertIn( paramName, expectedParameters, f"The value of {paramName} on the {scopeName} changed from {referenceValue} to {perturbedValue} but" " is not specified in the parameters expected to change.", ) if paramName in expectedParameters: ratio = perturbedParameters[paramName] / referenceParameters[paramName] self.testObject.assertEqual( ratio, self.expectedSymmetryRatio, f"The after-to-before expansion ratio of parameter '{paramName}' was expected to be " f"{self.expectedSymmetryRatio} but was instead {ratio} for the {scopeName}.", ) @contextmanager def _checkCore(self, expectedParams: Iterable[str]): coreReferenceParameters = self._getParameters(self.core, self.coreParamsToTest) yield # yield to allow the core to be expanded corePerturbedParameters = self._getParameters(self.core, self.coreParamsToTest) self._compareParameters(coreReferenceParameters, corePerturbedParameters, expectedParams, "core") @contextmanager def _checkAssembly(self, expectedParams: Iterable[str]): assemblyReferenceParameters = self._getParameters(self.partialAssembly, self.assemblyParamsToTest) yield # yield to allow the core to be expanded assemblyPerturbedParameters = self._getParameters(self.partialAssembly, self.assemblyParamsToTest) self._compareParameters(assemblyReferenceParameters, assemblyPerturbedParameters, expectedParams, "assembly") @contextmanager def _checkBlock(self, expectedParams: Iterable[str]): blockReferenceParameters = self._getParameters(self.partialBlock, self.blockParamsToTest) yield # yield to allow the core to be expanded blockPerturbedParameters = self._getParameters(self.partialBlock, self.blockParamsToTest) self._compareParameters(blockReferenceParameters, blockPerturbedParameters, expectedParams, "block") def runSymmetryFactorTests( self, 
expectedCoreParams: Iterable[str] = [], expectedAssemblyParams: Iterable[str] = [], expectedBlockParams: Iterable[str] = [], ): """ Runs tests on how symmetry factors apply to parameters during partial-to-full core coversions and vice-versa. This method provides a convenient way for plugins to test that symmetry factors are applied correctly to flagged parameters when the core is converted. Parameters ---------- testObject : unittest.TestCase The TestCase object is injected to give this fixture the ability to do unittest asserts without causing the fixture itself to be run as a unit test. coreParams : Iterable[str], optional Dictionary of core parameters that the user expects to be symmetry aware. assemblyParams : Iterable[str], optional Dictionary of assembly parameters that the user expects to be symmetry aware. blockParams : Iterable[str], optional Dictionary of block parameters that the user expects to be symmetry aware. """ with ( self._checkCore(expectedCoreParams), self._checkAssembly(expectedAssemblyParams), self._checkBlock(expectedBlockParams), ): converter = self.r.core.growToFullCore(self.o.cs) self.expectedSymmetryRatio = 1 / 3 with ( self._checkCore(expectedCoreParams), self._checkAssembly(expectedAssemblyParams), self._checkBlock(expectedBlockParams), ): converter.restorePreviousGeometry() ================================================ FILE: armi/testing/tests/__init__.py ================================================ # Copyright 2026 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. ================================================ FILE: armi/testing/tests/test_symmetryTesting.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the symmetry testing fixture.""" from armi.testing import symmetryTesting class SymmetryTestFixtureTester(symmetryTesting.BasicArmiSymmetryTestHelper): """Run the basic symmetry test helper with some input known to raise errors.""" def setUp(self): self.blockParamsToTest = ["zbottom", "massHmBOL"] self.expectedSymmetricBlockParams = ["massHmBOL"] self.parameterOverrides = {"xsType": ["A"], "xsTypeNum": 65, "notes": ""} return super().setUp() def test_errorWhenExpandedButNotRequested(self): if ( len( self.expectedSymmetricCoreParams + self.expectedSymmetricAssemblyParams + self.expectedSymmetricBlockParams ) > 0 ): with self.assertRaises(AssertionError) as err: self.symTester.runSymmetryFactorTests() self.assertIn(f"The value of {self.expectedSymmetricBlockParams} on the", err.msg) def test_errorWhenRequestedButNotExpanded(self): with self.assertRaises(AssertionError) as err: targetParam = self.blockParamsToTest[0] self.symTester.runSymmetryFactorTests(expectedBlockParams=targetParam) self.assertIn(f"The after-to-before expansion ratio of parameter '{targetParam}'", err.msg) ================================================ FILE: 
armi/tests/1DslabXSByCompTest.yaml ================================================ nuclide flags: NA23: {burn: false, xs: true} FE: {burn: false, xs: true} U235: {burn: false, xs: true} U238: {burn: false, xs: true} PU239: {burn: false, xs: true} PU240: {burn: false, xs: true} PU241: {burn: false, xs: true} custom isotopics: eUranium: input format: number densities U235: 0.025 U238: 0.02 PuUranium: input format: number densities PU239: 0.02 PU240: 0.0075 PU241: 0.0025 U238: 0.015 depletedUranium: input format: number densities U238: 0.045 sodium: input format: number densities NA23: 0.02 structuralSteel: input format: number densities FE: 0.07 eUraniumHalf: input format: number densities U235: 0.0125 U238: 0.01 blocks: eu fuel block: &block_eufuelblock depleted_uranium: &component_eufuelblock_depleted_uranium shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: depletedUranium lengthOuter: 1.0 mult: 1.0 widthOuter: 1.0 enriched_uranium fuel: shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: eUranium lengthOuter: 1.0 mult: 1.0 widthOuter: 2.0 sodium: &component_eufuelblock_sodium shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: sodium lengthOuter: 1.0 mult: 1.0 widthOuter: 1.0 iron: &component_eufuelblock_iron shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: structuralSteel lengthOuter: 1.0 mult: 1.0 widthOuter: 4.0 latticeboundarycell: &component_eufuelblock_latticeboundarycell shape: Rectangle material: Void Tinput: 20.0 Thot: 20.0 lengthInner: 1.0 lengthOuter: 1.0 mult: 1.0 widthInner: 8.0 widthOuter: 8.0 reversedeu fuel block: &block_reversedeufuelblock iron: *component_eufuelblock_iron sodium: *component_eufuelblock_sodium enriched_uranium fuel: shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: eUraniumHalf lengthOuter: 1.0 mult: 1.0 widthOuter: 2.0 depleted_uranium: *component_eufuelblock_depleted_uranium latticeboundarycell: 
*component_eufuelblock_latticeboundarycell inheritseublocks: &block_inheritseublocks sodium: shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: sodium lengthOuter: 1.0 mult: 1.0 widthOuter: 0.5 pu(fuel): shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: PuUranium lengthOuter: 1.0 mult: 1.0 widthOuter: 3.0 iron: shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: structuralSteel lengthOuter: 1.0 mult: 1.0 widthOuter: 1.0 pu(fuel)2: shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: PuUranium lengthOuter: 1.0 mult: 1.0 widthOuter: 0.5 iron2: shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: structuralSteel lengthOuter: 1.0 mult: 1.0 widthOuter: 3.0 latticeboundarycell: *component_eufuelblock_latticeboundarycell blanket fuel block: &block_blanketfuelblock depleted_uranium fuel 1: *component_eufuelblock_depleted_uranium sodium: shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: sodium lengthOuter: 1.0 mult: 1.0 widthOuter: 6.0 depleted_uranium(fuel)2: *component_eufuelblock_depleted_uranium latticeboundarycell: *component_eufuelblock_latticeboundarycell reflectorblockinheritsblanket: &block_reflectorblockinheritsblanket iron: shape: SolidRectangle material: Custom Tinput: 20.0 Thot: 20.0 isotopics: structuralSteel lengthOuter: 1.0 mult: 1.0 widthOuter: 8.0 latticeboundarycell: *component_eufuelblock_latticeboundarycell assemblies: heights: &standard_heights [10.0, 30.0, 30.0, 15.0, 15.0, 30.0, 30.0, 10.0] axial mesh points: &standard_axial_mesh_points [1, 2, 2, 1, 1, 2, 2, 1] feed fuel: specifier: D1 blocks: [ *block_reflectorblockinheritsblanket, *block_blanketfuelblock, *block_eufuelblock, *block_inheritseublocks, *block_reversedeufuelblock, *block_eufuelblock, *block_blanketfuelblock, *block_reflectorblockinheritsblanket, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: &feed_fuel_xs_types [AZ, 
AZ, AA, AA, AA, AA, AZ, AZ] drawerset2: specifier: D2 blocks: [ *block_reflectorblockinheritsblanket, *block_blanketfuelblock, *block_inheritseublocks, *block_eufuelblock, *block_reversedeufuelblock, *block_reversedeufuelblock, *block_blanketfuelblock, *block_reflectorblockinheritsblanket, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: *feed_fuel_xs_types NotInCoreButGetBlocks: specifier: NotInCore blocks: [ *block_reflectorblockinheritsblanket, *block_blanketfuelblock, *block_eufuelblock, *block_inheritseublocks, *block_reversedeufuelblock, *block_eufuelblock, *block_blanketfuelblock, *block_reflectorblockinheritsblanket, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: [AZ, AZ, AC, AC, AC, AC, AZ, AZ] ================================================ FILE: armi/tests/__init__.py ================================================ # Copyright 2021 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ General framework-wide testing functions and files. This package contains some input files that can be used across a wide variety of unit tests in other lower-level subpackages. 
""" import datetime import itertools import os import unittest from typing import Optional from armi import runLog from armi.testing import ( # noqa: F401 ARMI_RUN_PATH, COMPXS_PATH, ISOAA_PATH, getEmptyCartesianReactor, getEmptyHexReactor, ) TEST_ROOT = os.path.dirname(os.path.abspath(__file__)) TESTING_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "testing") class Fixture: """ Fixture for presenting a consistent data source for testing. A Fixture is a class that wraps a function which generates resources needed by one or more tests that does not need to be updated every time tests are run. Do not use this class directly, instead use the :code:`@fixture` and :code:`@requires_fixture` decorators. """ def __init__(self, refDirectory, targets, dependencies, function): def resolvePath(relativePath): absolutePath = os.path.abspath(relativePath) if absolutePath != relativePath: absolutePath = os.path.join(refDirectory, relativePath) return absolutePath self.targets = [resolvePath(t) for t in targets] self.dependencies = [resolvePath(d) for d in dependencies] self._function = function self._isUpToDate = None self.__name__ = function.__name__ self.__doc__ = function.__doc__ self._error = None self._success = False self.status = None def __repr__(self): return f"{self._function.__module__}.{self.__name__}" def __call__(self): if self._error is not None: raise self._error elif not self._success: missingDependencies = [d for d in self.dependencies if not os.path.exists(d)] if any(missingDependencies): self._error = EnvironmentError( "Missing dependencies:\n {}".format("\n ".join(missingDependencies)) ) raise self._error # at this point we need to update because # 1) there are missing targets that need to be generated, or # 2) targets are older than the dependencies. 
missingTargets = [t for t in self.targets if not os.path.exists(t)] needToUpdate = any(missingTargets) if any(missingTargets): runLog.important("Fixture is missing targets {}\n {}".format(self, "\n ".join(missingTargets))) if not needToUpdate: # this doesn't need to run if there are any missing targets. oldestTarget = sorted((os.path.getmtime(t), t) for t in self.targets)[0] newestDependency = sorted((os.path.getmtime(d), d) for d in self.dependencies)[-1] needToUpdate = newestDependency[0] > oldestTarget[0] if needToUpdate: targetTime = datetime.datetime.fromtimestamp(oldestTarget[0]) dependencyTime = datetime.datetime.fromtimestamp(newestDependency[0]) runLog.important( "Fixture is out of date {}\noldest target: {} {}\nnewest dependency: {} {}".format( self, targetTime, oldestTarget[1], dependencyTime, newestDependency[1], ) ) if needToUpdate: runLog.important(f"Running test fixture: {self}") try: self._function() except Exception as ee: self._error = ee raise else: runLog.important(f"Skipping test fixture: {self}") runLog.important(f"Fixture is up to date: {self}") self._success = True def fixture(refDirectory=None, targets=None, dependencies=None): """ Decorator to run function based on targets and dependencies similar to GNU Make. Parameters ---------- refDirectory : str String reference directory for all targets/dependencies. This makes it possible to simplify file paths. If ``os.path.abspath(<path>) == <path>``, then refDirectory is not used. targets : iterable(str) List of targets that the function generates. dependencies : iterable(str) List of dependencies that the ``targets`` require. """ def _decorator(makeFunction): return Fixture(refDirectory, targets, dependencies, makeFunction) return _decorator def requires_fixture(fixtureFunction): """ Decorator to require a fixture to have been completed. 
Parameters ---------- fixtureFunction : function without any parameters Fixture function is a function that has been decorated with fixture and is called prior to running the decorated function. Notes ----- This cannot be used on classes. """ def _decorator(func): def _callWrapper(*args, **kwargs): fixtureFunction() func(*args, **kwargs) return _callWrapper return _decorator class ArmiTestHelper(unittest.TestCase): """Class containing common testing methods shared by many tests.""" def compareFilesLineByLine(self, expectedFilePath, actualFilePath, falseNegList=None, eps=None): """ Compare the contents of two files line by line. .. warning:: The file located at actualFilePath will be deleted if they do match. Some tests write text files that should be compared line-by-line with reference files. This method performs the comparison. This class of test is not ideal but does cover a lot of functionality quickly. To assist in the maintenance burden, the following standards are expected and enforced: * The reference file compared against will be called either ``[name]-ref.[ext]`` or ``[name].expected``. * The file that the test creates will be called ``[name]-test.[ext]`` or ``[name]``. Parameters ---------- expectedFilePath: str Path to the reference or expected file actualFilePath: str Path to the file that will be compared to ``expectedFilePath`` falseNegList: None or Iterable Optional argument. If two lines are not equal, then check if any values from ``falseNegList`` are in this line. If so, do not fail the test. eps: float, optional If provided, try to determine if the only difference between compared lines is in the value of something that can be parsed into a float, and the relative difference between the two floats is below the passed eps. 
""" if falseNegList is None: falseNegList = [] elif isinstance(falseNegList, str): falseNegList = [falseNegList] with open(expectedFilePath, "r") as expected, open(actualFilePath, "r") as actual: for lineIndex, (expectedLine, actualLine) in enumerate(itertools.zip_longest(expected, actual)): if expectedLine is None: raise AssertionError("The test-generated file is longer than expected file") if actualLine is None: raise AssertionError("The test-generated file is shorter than expected file") if not self.compareLines(actualLine, expectedLine, eps): if any(falseNeg in line for falseNeg in falseNegList for line in (actualLine, expectedLine)): pass else: raise AssertionError( "Error on line {}:\nE>{}\nA<{}".format( lineIndex, expectedLine.rstrip(), actualLine.rstrip() ) ) os.remove(actualFilePath) @staticmethod def compareLines(actual: str, expected: str, eps: Optional[float] = None): """ Impl of line comparison for compareFilesLineByLine. If rstripped lines are equal -> Good. Otherwise, split on whitespace and try to parse element pairs as floats. If they are both parsable, compare with relative eps, if provided. A side effect of the epsilon comparison is that differing whitespace between words is treated as irrelevant. 
""" actual = actual.rstrip() expected = expected.rstrip() if actual == expected: return True if eps is None: # no more in-depth comparison is allowed return False actualWords = actual.split() expectedWords = expected.split() if len(actualWords) != len(expectedWords): # different number of words can't possibly be the same enough return False for actualWord, expectedWord in zip(actualWords, expectedWords): actualVal = ArmiTestHelper._tryFloat(actualWord) expectedVal = ArmiTestHelper._tryFloat(expectedWord) if (actualVal is None) ^ (expectedVal is None): # could not coerce both words into a float, so they cannot possibly match return False if actualVal is not None: # we have two floats and can compare them if actualVal == expectedVal == 0: continue elif abs(actualVal - expectedVal) / expectedVal > eps: return False else: # strings, compare directly if actualWord != expectedWord: return False # The lines should match. return True @staticmethod def _tryFloat(val: str) -> Optional[float]: try: return float(val) except ValueError: return None ================================================ FILE: armi/tests/armiRun.yaml ================================================ settings: # global availabilityFactor: 1 beta: 0.003454 branchVerbosity: debug buGroups: - 100 burnSteps: 2 comment: Simple test input. 
cycleLength: 2000.0 detailAssemLocationsBOL: - 002-001 freshFeedType: igniter fuel loadingFile: refSmallReactor.yaml moduleVerbosity: armi.reactor.reactors: info nCycles: 6 outputFileExtension: png power: 100000000.0 rmExternalFilesAtEOL: true startCycle: 1 startNode: 2 targetK: 1.002 verbosity: extra versions: armi: uncontrolled # cross section crossSectionControl: DA: geometry: 0D blockRepresentation: Median criticalBuckling: true externalDriver: true useHomogenizedBlockComposition: false numInternalRings: 1 numExternalRings: 1 UA: geometry: 1D cylinder blockRepresentation: ComponentAverage1DCylinder validBlockTypes: - fuel externalDriver: false mergeIntoClad: - gap2 - inner liner - gap3 - outer liner - gap4 mergeIntoFuel: - gap1 numInternalRings: 1 numExternalRings: 1 XA: xsFileLocation: - ISOXA YA: geometry: 0D fluxFileLocation: rzmflxYA ZA: geometry: 1D cylinder blockRepresentation: ComponentAverage1DCylinder validBlockTypes: - fuel externalDriver: false mergeIntoClad: - gap numInternalRings: 1 numExternalRings: 1 # database db: false # fuel cycle fuelHandlerName: EquilibriumShuffler jumpRingNum: 9 shuffleLogic: refSmallReactorShuffleLogic.py # neutronics epsFSAvg: 1e-06 epsFSPoint: 1e-06 loadPadElevation: 200.0 # report genReports: false summarizeAssemDesign: false ================================================ FILE: armi/tests/detailedAxialExpansion/armiRun.yaml ================================================ settings: # global beta: 0.003454 branchVerbosity: debug buGroups: - 100 burnSteps: 2 comment: Simple test input with detailed axial expansion. 
cycleLength: 2000.0 detailAssemLocationsBOL: - 002-001 detailedAxialExpansion: true freshFeedType: igniter fuel loadingFile: refSmallReactor.yaml moduleVerbosity: armi.reactor.reactors: info nCycles: 6 outputFileExtension: png power: 100000000.0 startNode: 1 targetK: 1.002 verbosity: extra versions: armi: uncontrolled # cross section crossSectionControl: DA: geometry: 0D blockRepresentation: Median criticalBuckling: true externalDriver: true useHomogenizedBlockComposition: false numInternalRings: 1 numExternalRings: 1 XA: xsFileLocation: - ISOXA # database db: false # fuel cycle fuelHandlerName: EquilibriumShuffler jumpRingNum: 9 # fuel performance axialExpansion: true # neutronics epsFSAvg: 1e-06 epsFSPoint: 1e-06 loadPadElevation: 162.5 # report genReports: false summarizeAssemDesign: false ================================================ FILE: armi/tests/detailedAxialExpansion/refSmallCoreGrid.yaml ================================================ core: geom: hex symmetry: third periodic lattice map: | SH AF MC SH LA PC IC twoPin: geom: hex_corners_up symmetry: full lattice map: | - - - 1 1 1 1 - - 1 1 2 1 1 - 1 1 2 2 1 1 1 1 2 2 2 1 1 1 1 2 2 1 1 1 1 2 1 1 1 1 1 1 ================================================ FILE: armi/tests/detailedAxialExpansion/refSmallReactor.yaml ================================================ !include refSmallReactorBase.yaml systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 sfp: type: sfp grid name: sfp origin: x: 5000.0 y: 5000.0 z: 6000.0 grids: !include refSmallCoreGrid.yaml sfp: symmetry: full geom: cartesian lattice pitch: x: 50.0 y: 50.0 grid contents: [0,0]: MC [1,0]: MC [0,1]: MC [1,1]: MC ================================================ FILE: armi/tests/detailedAxialExpansion/refSmallReactorBase.yaml ================================================ custom isotopics: MOX: input format: number densities AM241: 2.3606e-05 PU238: 3.7387e-06 PU239: 0.00286038 PU240: 0.000712945 PU241: 9.82312e-05 PU242: 2.02221e-05 
U235: 0.00405533 U238: 0.0134125 PuUZr: input format: mass fractions density: 9.491820414019937 PU239: 0.1 U235: 0.15 U238: 0.65 ZR: 0.1 blocks: ## ------------------------------------------------------------------------------------ ## universal blocks grid plate: &block_grid_plate grid: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 15.277 mult: 1.0 op: 16.577 coolant: &component_coolant shape: DerivedShape material: Sodium Tinput: 25.0 Thot: 450.0 intercoolant: shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: grid.op mult: 1.0 op: 19.0 duct: &block_duct coolant: *component_coolant duct: &component_duct shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 18.0 mult: 1.0 op: 18.5 intercoolant: &component_intercoolant shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: duct.op mult: 1.0 op: 19.0 SodiumBlock: &block_dummy flags: dummy coolant: shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: 0.0 mult: 1.0 op: 19.0 ## ------------------------------------------------------------------------------------ ## fuel blocks axial shield: &block_fuel_axial_shield shield: shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 169.0 od: 0.86602 bond: shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: shield.od mult: shield.mult od: clad.id clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: shield.mult od: 1.09 wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 470.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 mult: shield.mult od: 0.10056 coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial shield twoPin: &block_fuel_multiPin_axial_shield grid name: twoPin shield: &component_shield_shield1 shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.86602 latticeIDs: [1] bond: &component_shield_bond1 shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: shield.od od: clad.id latticeIDs: [1] clad: &component_shield_clad1 shape: 
Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 od: 1.09 latticeIDs: [1] wire: &component_shield_wire1 shape: Helix material: HT9 Tinput: 25.0 Thot: 470.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 od: 0.10056 latticeIDs: [1] shield test: <<: *component_shield_shield1 latticeIDs: [2] bond test: <<: *component_shield_bond1 id: shield test.od od: clad test.id latticeIDs: [2] clad test: <<: *component_shield_clad1 latticeIDs: [2] wire test: <<: *component_shield_wire1 latticeIDs: [2] coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: shield fuel: &block_fuelPin fuel: &component_fuel_fuel shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 169.0 od: 0.86602 bond: shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: fuel.od mult: fuel.mult od: clad.id clad: &component_fuel_clad shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: fuel.mult od: 1.09 wire: &component_fuel_wire shape: Helix material: HT9 Tinput: 25.0 Thot: 470.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 mult: clad.mult od: 0.10056 coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant fuel lined clad: &block_fuelPin_linedClad fuel: <<: *component_fuel_fuel material: Custom isotopics: MOX bond: shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: fuel.od mult: fuel.mult od: liner.id liner: &component_fuel_liner shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 0.90 mergeWith: clad mult: 169.0 od: clad.id clad: *component_fuel_clad wire: *component_fuel_wire coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant annular fuel lined clad: &block_fuelAnnular_linedClad gap1: shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.0 mult: fuel.mult od: fuel.id fuel: shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.600 mult: 169.0 od: 0.86602 flags: annular fuel depletable gap: shape: Circle 
material: Void Tinput: 25.0 Thot: 600.0 id: fuel.od mult: fuel.mult od: liner.id liner: *component_fuel_liner clad: *component_fuel_clad wire: *component_fuel_wire coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant fuel twoPin: &block_fuel_multiPin grid name: twoPin fuel: &component_fuelmultiPin shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.86602 latticeIDs: [1] bond: &component_fuelmultiPin_bond shape: Circle material: Sodium Tinput: 25.0 Thot: 470.0 id: fuel.od od: clad.id latticeIDs: [1] clad: &component_fuelmultiPin_clad1 shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 od: 1.09 latticeIDs: [1] wire: &component_fuelmultiPin_wire1 shape: Helix material: HT9 Tinput: 25.0 Thot: 470.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 od: 0.10056 latticeIDs: [1] fuel test: &component_fuelmultiPin_fuel2 <<: *component_fuelmultiPin latticeIDs: [2] bond test: &component_fuelmultiPin_bond2 <<: *component_fuelmultiPin_bond id: fuel test.od od: clad test.id latticeIDs: [2] clad test: &component_fuelmultiPin_clad2 <<: *component_fuelmultiPin_clad1 latticeIDs: [2] wire test: &component_fuelmultiPin_wire2 <<: *component_fuelmultiPin_wire1 latticeIDs: [2] coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: fuel plenum: &block_plenum gap: shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.0 mult: clad.mult od: clad.id clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: 169.0 od: 1.09 wire: *component_fuel_wire coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: clad aclp plenum: &block_aclp <<: *block_plenum plenum 2pin: &block_plenum_multiPin grid name: twoPin gap: &component_plenummultiPin_gap1 shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.0 od: clad.id latticeIDs: [1] clad: *component_fuelmultiPin_clad1 wire: 
*component_fuelmultiPin_wire1 gap test: <<: *component_plenummultiPin_gap1 od: clad test.id latticeIDs: [2] clad test: *component_fuelmultiPin_clad2 wire test: *component_fuelmultiPin_wire2 coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: clad test mixed fuel plenum 2pin: &block_mixed_multiPin grid name: twoPin gap: *component_plenummultiPin_gap1 clad: *component_fuelmultiPin_clad1 wire: *component_fuelmultiPin_wire1 fuel test: *component_fuelmultiPin_fuel2 bond test: *component_fuelmultiPin_bond2 clad test: *component_fuelmultiPin_clad2 wire test: *component_fuelmultiPin_wire2 coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant axial expansion target component: fuel test aclp plenum 2pin: &block_aclp_multiPin <<: *block_plenum_multiPin ## ------------------------------------------------------------------------------------ ## control moveable duct: &block_ctrl_duct coolant: *component_coolant duct: &component_control_duct shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 15.277 mult: 1.0 op: 16.28228 intercoolant: &component_control_intercoolant shape: Hexagon material: Sodium Tinput: 25.0 Thot: 450.0 ip: duct.op mult: 1.0 op: 19.0 moveable control: &block_control control: shape: Circle material: B4C Tinput: 25.0 Thot: 600.0 id: 0.0 mult: clad.mult od: gap.id gap: &component_control_gap shape: Circle material: Void Tinput: 25.0 Thot: 450.0 id: 1.286 mult: clad.mult od: clad.id clad: &component_control_clad shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 1.358 mult: 61.0 od: 1.686 wire: &component_control_wire shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 50.0 helixDiameter: 1.771 id: 0.0 mult: clad.mult od: 0.085 innerDuct: &component_control_innerDuct shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 14.268 mult: 1.0 op: 14.582 duct: *component_control_duct coolant: *component_coolant intercoolant: 
*component_control_intercoolant moveable plenum: &block_control_plenum gap: *component_control_gap clad: *component_control_clad wire: *component_control_wire coolant: *component_coolant innderDuct: *component_control_innerDuct duct: *component_control_duct intercoolant: *component_control_intercoolant ## ------------------------------------------------------------------------------------ ## radial shield radial shield: &block_radial_shield shield: shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 169.0 od: 0.90362 gap: &component_radial_shield_gap shape: Circle material: Void Tinput: 25.0 Thot: 450.0 id: 0.90362 mult: 169.0 od: clad.id clad: &component_radial_shield_clad shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.90562 mult: 169.0 od: 1.05036 wire: &component_radial_shield_wire shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.15 helixDiameter: 16.85056 id: 0.0 mult: 169.0 od: 0.10056 duct: *component_duct intercoolant: *component_intercoolant coolant: *component_coolant radial shield plenum: &block_shield_plenum gap: *component_radial_shield_gap clad: *component_radial_shield_clad wire: *component_radial_shield_wire coolant: *component_coolant duct: *component_duct intercoolant: *component_intercoolant radial shield aclp: &block_shield_aclp <<: *block_shield_plenum axial expansion target component: clad # not necessary, but useful for testing coverage assemblies: heights: &highOffset_height [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] igniter fuel: specifier: IC blocks: [*block_grid_plate, *block_fuel_axial_shield, *block_fuelPin, *block_fuelPin, *block_fuelPin, *block_plenum, *block_aclp, *block_plenum, *block_duct, *block_dummy] height: *highOffset_height axial mesh points: *standard_axial_mesh_points material modifications: U235_wt_frac: ['', '', 0.11, 0.11, 0.11, '', '', '', '', ''] ZR_wt_frac: ['', '', 0.06, 0.06, 0.06, '', 
'', '', '', ''] xs types: &igniter_fuel_xs_types [A, A, B, C, C, D, A, A, A, A] middle fuel: specifier: MC blocks: [*block_grid_plate, *block_fuel_axial_shield, *block_fuelPin_linedClad, *block_fuelPin_linedClad, *block_fuelPin_linedClad, *block_plenum, *block_aclp, *block_plenum, *block_duct, *block_dummy] height: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] axial mesh points: *standard_axial_mesh_points xs types: *igniter_fuel_xs_types annular fuel: specifier: AF blocks: [*block_grid_plate, *block_fuel_axial_shield, *block_fuelAnnular_linedClad, *block_fuelAnnular_linedClad, *block_fuelAnnular_linedClad, *block_plenum, *block_aclp, *block_plenum, *block_duct, *block_dummy] height: *highOffset_height axial mesh points: *standard_axial_mesh_points xs types: *igniter_fuel_xs_types multi pin fuel: specifier: LA blocks: [*block_grid_plate, *block_fuel_multiPin_axial_shield, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_mixed_multiPin, *block_aclp_multiPin, *block_plenum_multiPin, *block_duct, *block_dummy] height: *highOffset_height axial mesh points: *standard_axial_mesh_points material modifications: U235_wt_frac: ['', '', 0.2, 0.2, 0.2, 0.2, '', '', '', ''] ZR_wt_frac: ['', '', 0.07, 0.07, 0.07, 0.07, '', '', '', ''] xs types: *igniter_fuel_xs_types control: specifier: PC blocks: [*block_grid_plate, *block_ctrl_duct, *block_ctrl_duct, *block_control, *block_control, *block_control, *block_control_plenum, *block_ctrl_duct, *block_ctrl_duct, *block_dummy] height: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] axial mesh points: *standard_axial_mesh_points xs types: *igniter_fuel_xs_types radial shield: specifier: SH blocks: [*block_grid_plate, *block_radial_shield, *block_radial_shield, *block_radial_shield, *block_radial_shield, *block_shield_plenum, *block_shield_aclp, *block_shield_plenum, *block_duct, *block_dummy] height: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] axial mesh points: *standard_axial_mesh_points xs 
types: *igniter_fuel_xs_types


================================================
FILE: armi/tests/mockRunLogs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains subclasses of the armi.runLog._RunLog class that can be used to determine
whether or not one of the specific methods were called. These should only be used in testing.
"""

import io
import sys
from logging import LogRecord

from armi import runLog


class BufferLog(runLog._RunLog):
    """Log which captures the output in attributes instead of emitting them.

    Used mostly in testing to ensure certain things get output, or to prevent any output from
    showing.
    """

    def __init__(self, *args, **kwargs):
        super(BufferLog, self).__init__(*args, **kwargs)
        # handle on the real global log, set by __enter__ so __exit__ can restore it
        self.originalLog = None
        # everything logged via log() is appended to this string instead of being emitted
        self._outputStream = ""
        self._errStream = io.StringIO()
        self._deduplication = runLog.DeduplicationFilter()
        # redirect the process-wide stderr into our buffer so tests can inspect it
        # NOTE(review): sys.stderr is never restored by __exit__ — presumably acceptable
        # for test-only use, but confirm if this class outlives a test.
        sys.stderr = self._errStream
        self.setVerbosity(0)

    def __enter__(self):
        # swap ourselves in as the global ARMI log for the duration of the with-block
        self.originalLog = runLog.LOG
        runLog.LOG = self
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        # restore the global log captured in __enter__
        runLog.LOG = self.originalLog

    def log(self, msgType, msg, single=False, label=None):
        """
        Add formatting to a message and handle its singleness, if applicable.

        This is a wrapper around logger.log() that does most of the work and is used by all
        message passers (e.g. info, warning, etc.).

        Instead of emitting the message, it is appended to the internal output buffer
        (see :meth:`getStdout`).
        """
        # the message label is only used to determine unique for single-print warnings
        if label is None:
            label = msg

        # Skip writing the message if it is below the set verbosity
        msgVerbosity = self.logLevels[msgType][0]
        if msgVerbosity < self._verbosity:
            return

        # Skip writing the message if it is single-print warning
        record = LogRecord("BufferLog", msgVerbosity, "pathname", 1, msg, {}, ())
        record.label = label
        record.single = single
        if single and not self._deduplication.filter(record):
            return

        # Do the actual logging, but add that custom indenting first
        msg = self.logLevels[msgType][1] + str(msg) + "\n"
        self._outputStream += msg

    def clearSingleLogs(self):
        """Reset the single warned list so we get messages again."""
        self._deduplication.singleMessageLabels.clear()

    def getStdout(self):
        """Return everything captured by log() so far, as one string."""
        return self._outputStream

    def emptyStdout(self):
        """Discard everything captured so far, so the buffer starts fresh."""
        self._outputStream = ""

    def getStderrValue(self):
        """Return everything written to (the redirected) sys.stderr so far."""
        return self._errStream.getvalue()


class LogCounter(BufferLog):
    """This mock log is used to count the number of times a method was called.

    It can be used in testing to make sure a warning was issued, without checking the content of the message.
""" def __init__(self, *args, **kwargs): BufferLog.__init__(self) self.messageCounts = {msgType: 0 for msgType in self.logLevels.keys()} def log(self, msgType, *args, **kwargs): self.messageCounts[msgType] += 1 ================================================ FILE: armi/tests/refSmallCartesian.yaml ================================================ custom isotopics: MOX: input format: number densities AM241: 2.3606e-05 PU238: 3.7387e-06 PU239: 0.00286038 PU240: 0.000712945 PU241: 9.82312e-05 PU242: 2.02221e-05 U235: 0.00405533 U238: 0.0134125 blocks: fuel: &block_fuel fuel: shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 64.0 od: 0.7 clad: &component_fuel_clad shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: fuel.mult od: 1.15 bond: &component_fuel_bond shape: Circle material: Sodium Tinput: 450.0 Thot: 450.0 id: fuel.od mult: fuel.mult od: clad.id wire: &component_fuel_wire shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.15 helixDiameter: 1.2 id: 0.0 mult: fuel.mult od: 0.100 coolant: &component_fuel_coolant shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 duct: &component_fuel_duct shape: Rectangle material: HT9 Tinput: 25.0 Thot: 450.0 lengthInner: 9.0 lengthOuter: 9.5 mult: 1.0 widthInner: 9.0 widthOuter: 9.5 intercoolant: &component_fuel_intercoolant shape: Rectangle material: Sodium Tinput: 450.0 Thot: 450.0 lengthInner: 9.5 lengthOuter: 10.0 mult: 1.0 widthInner: 9.5 widthOuter: 10.0 control: &block_control control: shape: Circle material: B4C Tinput: 600.0 Thot: 600.0 id: 0.0 mult: 25.0 od: 1.3 innerduct: shape: Rectangle material: HT9 Tinput: 450.0 Thot: 450.0 lengthInner: 8.0 lengthOuter: 8.5 mult: 1.0 widthInner: 8.0 widthOuter: 8.5 duct: shape: Rectangle material: HT9 Tinput: 450.0 Thot: 450.0 lengthInner: 8.7 lengthOuter: 9.0 mult: 1.0 widthInner: 8.7 widthOuter: 9.0 clad: shape: Circle material: HT9 Tinput: 450.0 Thot: 450.0 id: 1.35 mult: control.mult od: 1.7 wire: shape: Helix 
material: HT9 Tinput: 450.0 Thot: 450.0 axialPitch: 50.0 helixDiameter: 1.7 id: 0.0 mult: control.mult od: 0.085 intercoolant: *component_fuel_intercoolant gap: shape: Circle material: Void Tinput: 450.0 Thot: 450.0 id: control.od mult: control.mult od: clad.id coolant: *component_fuel_coolant duct: &block_duct duct: &component_duct_duct shape: Rectangle material: HT9 Tinput: 450.0 Thot: 450.0 lengthInner: 9.0 lengthOuter: 9.5 mult: 1.0 widthInner: 9.0 widthOuter: 9.5 coolant: *component_fuel_coolant intercoolant: *component_fuel_intercoolant grid plate: &block_grid_plate grid: shape: Rectangle material: HT9 Tinput: 450.0 Thot: 450.0 lengthInner: 0.0 lengthOuter: 9.5 mult: 1.0 widthInner: 0.0 widthOuter: 9.5 coolant: *component_fuel_coolant intercoolant: *component_fuel_intercoolant axial shield: &block_axial_shield shield: shape: Circle material: HT9 Tinput: 600.0 Thot: 600.0 id: 0.0 mult: 64.0 od: 0.90 clad: shape: Circle material: HT9 Tinput: 450.0 Thot: 450.0 id: 0.905 mult: shield.mult od: 1.050 gap: shape: Circle material: Void Tinput: 450.0 Thot: 450.0 id: shield.od mult: shield.mult od: clad.id duct: *component_duct_duct intercoolant: *component_fuel_intercoolant coolant: *component_fuel_coolant wire: shape: Helix material: HT9 Tinput: 450.0 Thot: 450.0 axialPitch: 30.15 helixDiameter: 10.10 id: 0.0 mult: shield.mult od: 0.100 plenum: &block_plenum clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: 64.0 od: 1.09 gap: shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.0 mult: clad.mult od: clad.id wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30. 
helixDiameter: 1.2 id: 0.0 mult: clad.mult od: 0.1 coolant: *component_fuel_coolant duct: *component_fuel_duct intercoolant: *component_fuel_intercoolant fuel2: &block_fuel2 fuel: shape: Circle material: Custom Tinput: 25.0 Thot: 600.0 id: 0.0 isotopics: MOX mult: 64.0 od: 0.87 clad: *component_fuel_clad bond: *component_fuel_bond wire: *component_fuel_wire coolant: *component_fuel_coolant duct: *component_fuel_duct intercoolant: *component_fuel_intercoolant assemblies: heights: &standard_heights [25.0, 25.0, 25.0, 25.0, 75.0] axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 4] igniter fuel: specifier: IC blocks: &igniter_fuel_blocks [ *block_grid_plate, *block_fuel, *block_fuel, *block_fuel, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points hotChannelFactors: TWRPclad material modifications: U235_wt_frac: &igniter_fuel_u235_wt_frac ["", 0.11, 0.11, 0.11, ""] ZR_wt_frac: &igniter_fuel_zr_wt_frac ["", 0.06, 0.06, 0.06, ""] xs types: &igniter_fuel_xs_types [A, A, A, A, A] middle fuel: specifier: MC blocks: [ *block_grid_plate, *block_fuel2, *block_fuel2, *block_fuel2, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: *igniter_fuel_xs_types feed fuel: specifier: OC blocks: *igniter_fuel_blocks height: *standard_heights axial mesh points: *standard_axial_mesh_points material modifications: U235_wt_frac: *igniter_fuel_u235_wt_frac ZR_wt_frac: *igniter_fuel_zr_wt_frac xs types: *igniter_fuel_xs_types primary control: specifier: PC blocks: [ *block_grid_plate, *block_duct, *block_duct, *block_control, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: *igniter_fuel_xs_types radial shield: specifier: SH blocks: [ *block_grid_plate, *block_axial_shield, *block_axial_shield, *block_axial_shield, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: *igniter_fuel_xs_types systems: 
core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 grids: core: geom: cartesian symmetry: full lattice pitch: x: 10.0 y: 10.0 lattice map: | SH SH SH SH SH SH SH SH SH SH SH SH SH SH OC OC OC OC OC OC OC OC OC OC OC SH SH OC MC MC MC MC MC MC MC MC MC OC SH SH OC MC IC IC IC IC IC IC IC MC OC SH SH OC MC IC IC IC IC IC IC IC MC OC SH SH OC MC IC IC IC IC IC IC IC MC OC SH SH OC MC IC IC IC IC IC IC IC MC OC SH SH OC MC IC IC IC IC IC IC IC MC OC SH SH OC MC IC IC IC IC IC IC IC MC OC SH SH OC MC IC IC IC IC IC IC IC MC OC SH SH OC MC MC MC MC MC MC MC MC MC OC SH SH OC OC OC OC OC OC OC OC OC OC OC SH SH SH SH SH SH SH SH SH SH SH SH SH SH ================================================ FILE: armi/tests/refSmallCoreGrid.yaml ================================================ core: geom: hex lattice map: | - - SH - SH SH - SH OC SH SH OC OC SH OC IC OC SH OC IC IC OC SH IC IC IC OC SH IC IC PC OC SH IC PC IC IC OC SH LA IC IC IC OC IC IC IC IC SH IC LB IC IC OC IC IC PC IC SH LA IC IC OC IC IC IC IC SH IC IC IC OC IC IC IC PC SH symmetry: third periodic ================================================ FILE: armi/tests/refSmallReactor.yaml ================================================ !include refSmallReactorBase.yaml systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 Spent Fuel Pool: type: sfp grid name: sfp origin: x: 5000.0 y: 5000.0 z: 6000.0 grids: !include refSmallCoreGrid.yaml !include refSmallSfpGrid.yaml ================================================ FILE: armi/tests/refSmallReactorBase.yaml ================================================ custom isotopics: MOX: input format: number densities AM241: 2.3606e-05 PU238: 3.7387e-06 PU239: 0.00286038 PU240: 0.000712945 PU241: 9.82312e-05 PU242: 2.02221e-05 U235: 0.00405533 U238: 0.0134125 PuUZr: input format: mass fractions PU239: 0.1 U235: 0.15 U238: 0.65 ZR: 0.1 blocks: fuel: &block_fuel fuel: &component_fuel_fuel shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 169.0 od: 0.86602 
clad: &component_fuel_clad shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: fuel.mult od: 1.09 bond: &component_fuel_bond shape: Circle material: Sodium Tinput: 450.0 Thot: 450.0 id: fuel.od mult: fuel.mult od: clad.id wire: &component_fuel_wire shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 mult: fuel.mult od: 0.10056 coolant: &component_fuel_coolant shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 duct: &component_fuel_duct shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 16.0 mult: 1.0 op: 16.6 intercoolant: &component_fuel_intercoolant shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: duct.op mult: 1.0 op: 16.75 moveable control: &block_control control: shape: Circle material: B4C Tinput: 600.0 Thot: 600.0 id: 0.0 mult: 61.0 od: 1.286 innerDuct: shape: Hexagon material: HT9 Tinput: 450.0 Thot: 450.0 ip: 14.268 mult: 1.0 op: 14.582 duct: &component_control_duct shape: Hexagon material: HT9 Tinput: 450.0 Thot: 450.0 ip: 15.277 mult: 1.0 op: 16.28228 clad: shape: Circle material: HT9 Tinput: 450.0 Thot: 450.0 id: 1.358 mult: control.mult od: 1.686 wire: shape: Helix material: HT9 Tinput: 450.0 Thot: 450.0 axialPitch: 50.0 helixDiameter: 1.771 id: 0.0 mult: control.mult od: 0.085 intercoolant: *component_fuel_intercoolant gap: shape: Circle material: Void Tinput: 450.0 Thot: 450.0 id: control.od mult: control.mult od: clad.id coolant: *component_fuel_coolant duct: &block_duct duct: *component_control_duct coolant: *component_fuel_coolant intercoolant: *component_fuel_intercoolant grid plate: &block_grid_plate grid: &component_grid_plate_grid shape: Hexagon material: HT9 Tinput: 450.0 Thot: 450.0 ip: 15.277 mult: 1.0 op: 16.577 coolant: *component_fuel_coolant intercoolant: shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: grid.op mult: 1.0 op: 16.75 grid plate broken: grid: *component_grid_plate_grid coolant: *component_fuel_coolant intercoolant: 
shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: grid.op mult: 1.0 op: 0.0 axial shield: &block_axial_shield shield: shape: Circle material: HT9 Tinput: 600.0 Thot: 600.0 id: 0.0 mult: 169.0 od: 0.90362 clad: shape: Circle material: HT9 Tinput: 450.0 Thot: 450.0 id: 0.90562 mult: shield.mult od: 1.05036 gap: shape: Circle material: Void Tinput: 450.0 Thot: 450.0 id: shield.od mult: shield.mult od: clad.id duct: *component_control_duct intercoolant: *component_fuel_intercoolant coolant: *component_fuel_coolant wire: shape: Helix material: HT9 Tinput: 450.0 Thot: 450.0 axialPitch: 30.15 helixDiameter: 16.85056 id: 0.0 mult: shield.mult od: 0.10056 moveable plenum: &block_plenum clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: 169.0 od: 1.09 gap: shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.0 mult: clad.mult od: clad.id wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.15 helixDiameter: 1.19056 id: 0.0 mult: clad.mult od: 0.10056 coolant: *component_fuel_coolant duct: *component_fuel_duct intercoolant: *component_fuel_intercoolant fuel2: &block_fuel2 fuel: shape: Circle material: Custom Tinput: 25.0 Thot: 600.0 id: 0.0 isotopics: MOX mult: 169.0 od: 0.86602 bond: &component_fuel_bond2 shape: Circle material: Sodium Tinput: 450.0 Thot: 450.0 id: fuel.od mult: fuel.mult od: liner1.id clad: *component_fuel_clad liner1: &component_fuel2_liner1 shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.99 mergeWith: clad mult: 169.0 od: 1.0 liner2: &component_fuel2_liner2 shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.98 mergeWith: clad mult: 169.0 od: 0.99 wire: *component_fuel_wire coolant: *component_fuel_coolant duct: *component_fuel_duct intercoolant: *component_fuel_intercoolant lta1 fuel: &block_lta1_fuel fuel: *component_fuel_fuel clad: *component_fuel_clad liner1: *component_fuel2_liner1 liner2: *component_fuel2_liner2 bond: *component_fuel_bond wire: *component_fuel_wire 
coolant: *component_fuel_coolant duct: *component_fuel_duct intercoolant: *component_fuel_intercoolant lta2 fuel: &block_lta2_fuel fuel: shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 isotopics: PuUZr mult: 169.0 od: 0.86602 clad: *component_fuel_clad liner1: *component_fuel2_liner1 liner2: *component_fuel2_liner2 bond: *component_fuel_bond wire: *component_fuel_wire coolant: *component_fuel_coolant duct: *component_fuel_duct intercoolant: *component_fuel_intercoolant annular fuel gap: &block_fuel3 gap1: shape: Circle material: Void Tinput: 20.0 Thot: 430.0 id: 0.0 mult: fuel.mult od: fuel.id fuel: shape: Circle material: UZr Tinput: 20.0 Thot: 600.0 id: 0.600 mult: 169.0 od: 0.878 flags: annular fuel depletable gap2: shape: Circle material: Void Tinput: 20.0 Thot: 430.0 id: fuel.od mult: fuel.mult od: inner liner.id inner liner: shape: Circle material: HT9 Tinput: 20.0 Thot: 430.0 id: 0.878 mult: fuel.mult od: 0.898 gap3: shape: Circle material: Void Tinput: 20.0 Thot: 430.0 id: inner liner.od mult: fuel.mult od: outer liner.id outer liner: shape: Circle material: Zr Tinput: 20.0 Thot: 430.0 id: 0.898 mult: fuel.mult od: 0.900 gap4: shape: Circle material: Void Tinput: 20.0 Thot: 430.0 id: outer liner.od mult: fuel.mult od: clad.id clad: shape: Circle material: HT9 Tinput: 20.0 Thot: 430.0 id: 0.900 mult: fuel.mult od: 1.000 wire: *component_fuel_wire coolant: *component_fuel_coolant duct: *component_fuel_duct intercoolant: *component_fuel_intercoolant assemblies: heights: &standard_heights [25.0, 25.0, 25.0, 25.0, 75.0] axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 4] igniter fuel: specifier: IC blocks: &igniter_fuel_blocks [ *block_grid_plate, *block_fuel, *block_fuel, *block_fuel, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points material modifications: U235_wt_frac: &igniter_fuel_u235_wt_frac ["", 0.11, 0.11, 0.11, ""] ZR_wt_frac: &igniter_fuel_zr_wt_frac ["", 0.06, 0.06, 0.06, ""] xs types: 
&igniter_fuel_xs_types [A, A, A, A, A] nozzleType: Inner middle fuel: specifier: MC blocks: [ *block_grid_plate, *block_fuel2, *block_fuel2, *block_fuel2, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: &middle_fuel_xs_types [Z, Z, Z, Z, Z] annular fuel: specifier: AF blocks: [ *block_grid_plate, *block_fuel3, *block_fuel3, *block_fuel3, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: &annular_fuel_xs_types [U, U, U, U, U] lta fuel: specifier: LA blocks: [ *block_grid_plate, *block_lta1_fuel, *block_lta1_fuel, *block_lta1_fuel, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points material modifications: U235_wt_frac: &lta_fuel_u235_wt_frac ["", 0.2, 0.2, 0.2, ""] ZR_wt_frac: &lta_fuel_zr_wt_frac ["", 0.07, 0.07, 0.06, ""] xs types: *igniter_fuel_xs_types nozzleType: lta lta fuel b: specifier: LB blocks: [ *block_grid_plate, *block_lta2_fuel, *block_lta2_fuel, *block_lta2_fuel, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points material modifications: U235_wt_frac: *lta_fuel_u235_wt_frac ZR_wt_frac: *lta_fuel_zr_wt_frac xs types: *igniter_fuel_xs_types nozzleType: lta feed fuel: specifier: OC blocks: *igniter_fuel_blocks height: *standard_heights axial mesh points: *standard_axial_mesh_points material modifications: U235_wt_frac: *igniter_fuel_u235_wt_frac ZR_wt_frac: *igniter_fuel_zr_wt_frac xs types: *igniter_fuel_xs_types nozzleType: Outer primary control: specifier: PC blocks: [ *block_grid_plate, *block_duct, *block_control, *block_plenum, *block_duct, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: *igniter_fuel_xs_types radial shield: specifier: SH blocks: [ *block_grid_plate, *block_axial_shield, *block_axial_shield, *block_axial_shield, *block_plenum, ] height: *standard_heights axial mesh points: *standard_axial_mesh_points xs types: 
*igniter_fuel_xs_types ================================================ FILE: armi/tests/refSmallReactorShuffleLogic.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from armi.physics.fuelCycle.fuelHandlers import FuelHandler class EquilibriumShuffler(FuelHandler): """Convergent divergent equilibrium shuffler.""" def chooseSwaps(self, factorList): if self.cycle == 0: # no fuel shuffling at cycle 0 return cycleMoves = [ [(2, 1), (3, 3), (4, 2), (5, 1), (6, 7)], [(2, 2), (3, 2), (4, 1), (5, 4), (6, 4)], [(2, 1), (3, 1), (4, 3), (5, 2), (6, 7)], ] cascade = [] for ring, pos in cycleMoves[self.cycle - 1]: loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(ring, pos) a = self.r.core.childrenByLocator[loc] if not a: raise RuntimeError("No assembly in {0} {1}".format(ring, pos)) cascade.append(a) self.swapCascade(cascade) fresh = self.r.blueprints.constructAssem(self.cs, name="igniter fuel") self.dischargeSwap(fresh, cascade[0]) if self.cycle > 1: # do a swap where the assembly comes from the sfp if self.r.excore.get("sfp") is None: raise RuntimeError("No SFP found.") incoming = self.r.excore["sfp"].getChildren().pop() if not incoming: raise RuntimeError(f"No assembly in SFP {self.r.excore['sfp'].getChildren()}") outLoc = self.r.core.spatialGrid.getLocatorFromRingAndPos(5, 1 + self.cycle) self.dischargeSwap(incoming, self.r.core.childrenByLocator[outLoc]) def getFactorList(cycle, cs=None, 
fallBack=False): # prefer to keep these 0 through 1 since this is what the branch search can do. defaultFactorList = {} factorSearchFlags = [] defaultFactorList["divergentConvergent"] = 1 return defaultFactorList, factorSearchFlags ================================================ FILE: armi/tests/refSmallSfpGrid.yaml ================================================ sfp: symmetry: full geom: cartesian lattice pitch: x: 50.0 y: 50.0 grid contents: [0, 0]: MC [1, 0]: MC [0, 1]: MC [1, 1]: MC ================================================ FILE: armi/tests/refTestCartesian.yaml ================================================ settings: # global beta: 0.003454 buGroups: - 100 burnSteps: 0 comment: Full-core Cartesian input file with a 10x10 cm square pitch. cycleLength: 2000.0 freshFeedType: igniter fuel loadingFile: refSmallCartesian.yaml outputFileExtension: png power: 400000000.0 startNode: 1 targetK: 1.002 versions: armi: uncontrolled # fuel cycle jumpRingNum: 9 # neutronics epsFSAvg: 1e-06 epsFSPoint: 1e-06 loadPadElevation: 200.0 # report summarizeAssemDesign: false ================================================ FILE: armi/tests/smallestTestReactor/armiRunSmallest.yaml ================================================ # This is a non-physical test reactor. # This is designed to speed up testing of code that only technically needs a full reactor object. # This is a single-hex-assembly reactor, with only one block. settings: # global availabilityFactor: 1 beta: 0.003454 branchVerbosity: debug buGroups: - 100 burnSteps: 2 comment: Simple test input. 
cycleLength: 2000.0 detailAssemLocationsBOL: - 002-001 freshFeedType: igniter fuel loadingFile: refSmallestReactor.yaml moduleVerbosity: armi.reactor.reactors: info nCycles: 2 outputFileExtension: png power: 1000000.0 rmExternalFilesAtEOL: true startCycle: 1 startNode: 2 targetK: 1.002 verbosity: extra versions: armi: uncontrolled # cross section crossSectionControl: DA: geometry: 0D blockRepresentation: Median criticalBuckling: true externalDriver: true useHomogenizedBlockComposition: false numInternalRings: 1 numExternalRings: 1 XA: xsFileLocation: - ISOXA YA: geometry: 0D fluxFileLocation: rzmflxYA ZA: geometry: 1D cylinder blockRepresentation: ComponentAverage1DCylinder validBlockTypes: - fuel externalDriver: false mergeIntoClad: - gap numInternalRings: 1 numExternalRings: 1 # database db: false # fuel cycle fuelHandlerName: EquilibriumShuffler jumpRingNum: 9 # neutronics epsFSAvg: 1e-06 epsFSPoint: 1e-06 loadPadElevation: 200.0 # report genReports: false summarizeAssemDesign: false ================================================ FILE: armi/tests/smallestTestReactor/refOneBlockReactor.yaml ================================================ blocks: fuel: &block_fuel fuel: &component_fuel_fuel shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 169.0 od: 0.86 clad: &component_fuel_clad shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: fuel.mult od: 1.09 bond: &component_fuel_bond shape: Circle material: Sodium Tinput: 450.0 Thot: 450.0 id: fuel.od mult: fuel.mult od: clad.id wire: &component_fuel_wire shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30 helixDiameter: 1.20 id: 0.0 mult: fuel.mult od: 0.10056 coolant: &component_fuel_coolant shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 duct: &component_fuel_duct shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 16.0 mult: 1.0 op: 16.7 intercoolant: &component_fuel_intercoolant shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: 
duct.op mult: 1.0 op: 16.8 assemblies: heights: &standard_heights [25.0] axial mesh points: &standard_axial_mesh_points [1] igniter fuel: specifier: IC blocks: &igniter_fuel_blocks [*block_fuel] height: *standard_heights axial mesh points: *standard_axial_mesh_points material modifications: U235_wt_frac: &igniter_fuel_u235_wt_frac [0.11] ZR_wt_frac: &igniter_fuel_zr_wt_frac [0.06] xs types: &igniter_fuel_xs_types [A] ================================================ FILE: armi/tests/smallestTestReactor/refSmallestReactor.yaml ================================================ !include refOneBlockReactor.yaml systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 Spent Fuel Pool: type: sfp grid name: sfp origin: x: 5000.0 y: 5000.0 z: 6000.0 grids: core: geom: hex_corners_up lattice map: | IC symmetry: full sfp: geom: cartesian symmetry: full lattice pitch: x: 32.0 y: 32.0 ================================================ FILE: armi/tests/test_apps.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for the App class.""" import copy import unittest from armi import ( configure, context, getApp, getDefaultPluginManager, isStableReleaseVersion, meta, plugins, ) from armi.__main__ import main from armi.reactor.flags import Flags class TestPlugin1(plugins.ArmiPlugin): """This should be fine on its own.""" @staticmethod @plugins.HOOKIMPL def defineParameterRenames(): return {"oldType": "type"} class TestPlugin2(plugins.ArmiPlugin): """This should lead to an error if it coexists with Plugin1.""" @staticmethod @plugins.HOOKIMPL def defineParameterRenames(): return {"oldType": "type"} class TestPlugin3(plugins.ArmiPlugin): """This should lead to errors, since it collides with the framework `type` param.""" @staticmethod @plugins.HOOKIMPL def defineParameterRenames(): return {"type": "newType"} class TestPlugin4(plugins.ArmiPlugin): """This should be fine on its own, and safe to merge with TestPlugin1.""" @staticmethod @plugins.HOOKIMPL def defineParameterRenames(): return {"arealPD": "arealPowerDensity"} class TestApps(unittest.TestCase): """Test the base apps.App interfaces.""" def setUp(self): """ Manipulate the standard App. We can't just configure our own, since the pytest environment bleeds between tests. 
""" self._backupApp = copy.deepcopy(getApp()) def tearDown(self): """Restore the App to its original state.""" import armi armi._app = self._backupApp context.APP_NAME = "armi" def test_getParamRenames(self): # a basic test of the method app = getApp() app.pluginManager.register(TestPlugin1) app.pluginManager.register(TestPlugin4) app._paramRenames = None # need to implement better cache invalidation rules renames = app.getParamRenames() self.assertIn("oldType", renames) self.assertEqual(renames["oldType"], "type") self.assertIn("arealPD", renames) self.assertEqual(renames["arealPD"], "arealPowerDensity") # test an invalid param manager situation app._paramRenames[0][1] = -3 renames = app.getParamRenames() self.assertIn("oldType", renames) self.assertEqual(renames["oldType"], "type") self.assertIn("arealPD", renames) self.assertEqual(renames["arealPD"], "arealPowerDensity") # test the exceptions that get raised app.pluginManager.register(TestPlugin2) app._paramRenames = None # need to implement better cache invalidation rules with self.assertRaisesRegex( plugins.PluginError, ".*parameter renames are already defined by another plugin.*", ): app.getParamRenames() app.pluginManager.unregister(TestPlugin2) app.pluginManager.register(TestPlugin3) with self.assertRaisesRegex(plugins.PluginError, ".*currently-defined parameters.*"): app.getParamRenames() def test_registerPluginFlags(self): # set up the app, pm, and register some plugins app = getApp() # validate our flags have been registered self.assertEqual(Flags.fromString("FUEL"), Flags.FUEL) self.assertEqual(Flags.fromString("PRIMARY"), Flags.PRIMARY) # validate we can only register the flags once for _ in range(3): with self.assertRaises(RuntimeError): app.registerPluginFlags() def test_getParamRenamesInvalids(self): # a basic test of the method app = getApp() app.pluginManager.register(TestPlugin1) app.pluginManager.register(TestPlugin4) app._paramRenames = None # need to implement better cache invalidation rules 
renames = app.getParamRenames() self.assertIn("oldType", renames) self.assertEqual(renames["oldType"], "type") self.assertIn("arealPD", renames) self.assertEqual(renames["arealPD"], "arealPowerDensity") # test the strange, invalid case self.assertIsNotNone(app._paramRenames) app._pm._counter = -1 renames = app.getParamRenames() self.assertIn("oldType", renames) self.assertEqual(renames["oldType"], "type") self.assertIn("arealPD", renames) self.assertEqual(renames["arealPD"], "arealPowerDensity") def test_version(self): app = getApp() ver = app.version self.assertEqual(ver, meta.__version__) def test_getSettings(self): app = getApp() settings = app.getSettings() self.assertGreater(len(settings), 100) self.assertEqual(settings["nTasks"].value, 1) self.assertEqual(settings["nCycles"].value, 1) def test_splashText(self): app = getApp() splash = app.splashText self.assertIn("========", splash) self.assertIn("Advanced", splash) self.assertIn("version", splash) self.assertIn(meta.__version__, splash) def test_splashTextDifferentApp(self): import armi app = getApp() name = "DifferentApp" app.name = name armi._app = app context.APP_NAME = name splash = app.splashText self.assertIn("========", splash) self.assertIn("Advanced", splash) self.assertIn("version", splash) self.assertIn(meta.__version__, splash) self.assertIn("DifferentApp", splash) def test_isStableReleaseVersion(self): self.assertTrue(isStableReleaseVersion(None)) self.assertTrue(isStableReleaseVersion("0.1.2")) self.assertFalse(isStableReleaseVersion("1.2.3-asda132a")) def test_disableFutureConfigures(self): import armi # save off, in in case of poorly parallelized tests old = armi._ignoreConfigures # test it works (should be False to start) armi.disableFutureConfigures() self.assertTrue(armi._ignoreConfigures) # reset, in case of poorly parallelized tests armi._ignoreConfigures = old class TestArmiHighLevel(unittest.TestCase): """Tests for functions in the ARMI __init__ module.""" def 
test_getDefaultPluginManager(self): """Test the default plugin manager. .. test:: The default application consists of a list of default plugins. :id: T_ARMI_APP_PLUGINS :tests: R_ARMI_APP_PLUGINS """ pm = getDefaultPluginManager() pm2 = getDefaultPluginManager() self.assertNotEqual(pm, pm2) pluginsList = "".join([str(p) for p in pm.get_plugins()]) self.assertIn("BookkeepingPlugin", pluginsList) self.assertIn("EntryPointsPlugin", pluginsList) self.assertIn("NeutronicsPlugin", pluginsList) self.assertIn("ReactorPlugin", pluginsList) def test_overConfigured(self): with self.assertRaises(RuntimeError): configure() def test_main(self): with self.assertRaises(SystemExit): main() ================================================ FILE: armi/tests/test_armiTestHelper.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests to demonstrate the test helper is functional.""" import os from armi.tests import ArmiTestHelper THIS_DIR = os.path.dirname(__file__) class TestArmiTestHelper(ArmiTestHelper): def setUp(self): self.goodFilePath = os.path.join(THIS_DIR, "goodFile" + self._testMethodName) self.badFilePath = os.path.join(THIS_DIR, "badFile" + self._testMethodName) self.BLOCK_TEXT = ( "TerraPower aims to develop a sustainable and economic nuclear energy technology using:\n" "Next-generation safe, affordable, clean and secure technologies\n" "Advanced materials for more durable metallic fuels\n" "World-class leadership for dynamic reactor engineering and innovation\n" "Supercomputing for reliable and comprehensive modeling\n" ) self.BAD_TEXT = self.BLOCK_TEXT.replace("class", "NEGATIVE") for path, text in zip([self.goodFilePath, self.badFilePath], (self.BLOCK_TEXT, self.BAD_TEXT)): with open(path, "w") as fileObj: fileObj.write(text) def tearDown(self): for path in [self.goodFilePath, self.badFilePath]: if os.path.exists(path): os.remove(path) def test_compareFilesSucess(self): self.compareFilesLineByLine(self.goodFilePath, self.goodFilePath) def test_compareFilesFail(self): self.assertRaises( AssertionError, self.compareFilesLineByLine, self.goodFilePath, self.badFilePath, ) def test_compareFilesSucceedFalseNegative(self): self.compareFilesLineByLine(self.goodFilePath, self.badFilePath, falseNegList=["NEGATIVE"]) ================================================ FILE: armi/tests/test_cartesian.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Cartesian reactors.""" import unittest from armi.reactor import geometry from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors from armi.tests import TEST_ROOT from armi.utils import directoryChangers class CartesianReactorTests(unittest.TestCase): @classmethod def setUpClass(cls): # prepare the input files. This is important so the unit tests run from wherever # they need to run from. cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT) cls.directoryChanger.open() @classmethod def tearDownClass(cls): cls.directoryChanger.close() def setUp(self): """Use the related setup in the testFuelHandlers module.""" self.o, self.r = test_reactors.loadTestReactor( self.directoryChanger.destination, inputFileName="refTestCartesian.yaml" ) def test_custom(self): """Test Custom material with custom density.""" fuel = self.r.core.getFirstAssembly(Flags.MIDDLE | Flags.FUEL).getFirstBlock(Flags.FUEL) custom = fuel.getComponent(Flags.FUEL) self.assertEqual(self.r.core.geomType, geometry.GeomType.CARTESIAN) # from blueprints input file self.assertAlmostEqual(custom.getNumberDensity("U238"), 0.0134125) ================================================ FILE: armi/tests/test_context.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Serial tests for the Context module.""" import unittest from armi import context class TestContextSerial(unittest.TestCase): """Serial tests for the Context module.""" @unittest.skipIf(context.MPI_SIZE > 1, "Serial test only") def test_rank(self): self.assertEqual(context.MPI_RANK, 0) self.assertEqual(context.MPI_SIZE, 1) @unittest.skipIf(context.MPI_SIZE > 1, "Serial test only") def test_nonNoneData(self): self.assertGreater(len(context.APP_DATA), 0) self.assertGreater(len(context.DOC), 0) self.assertGreater(len(context.getFastPath()), 0) self.assertGreater(len(context.PROJECT_ROOT), 0) self.assertGreater(len(context.RES), 0) self.assertGreater(len(context.ROOT), 0) self.assertGreater(len(context.USER), 0) ================================================ FILE: armi/tests/test_interfaces.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests the Interface.""" import unittest from armi import interfaces, settings class DummyInterface(interfaces.Interface): name = "Dummy" purpose = "dummyAction" class TestCodeInterface(unittest.TestCase): """Test Code interface.""" def setUp(self): self.cs = settings.Settings() def test_isRequestedDetailPoint(self): """Tests notification of detail points.""" newSettings = {"dumpSnapshot": ["000001", "995190"]} cs = self.cs.modified(newSettings=newSettings) i = DummyInterface(None, cs) self.assertEqual(i.isRequestedDetailPoint(0, 1), True) self.assertEqual(i.isRequestedDetailPoint(995, 190), True) self.assertEqual(i.isRequestedDetailPoint(5, 10), False) def test_enabled(self): """Test turning interfaces on and off.""" i = DummyInterface(None, self.cs) self.assertEqual(i.enabled(), True) i.enabled(False) self.assertEqual(i.enabled(), False) i.enabled(True) self.assertEqual(i.enabled(), True) def test_nameContains(self): i = DummyInterface(None, self.cs) self.assertFalse(i.nameContains("nope")) self.assertTrue(i.nameContains("Dum")) def test_distributable(self): i = DummyInterface(None, self.cs) self.assertEqual(i.distributable(), 1) def test_preDistributeState(self): i = DummyInterface(None, self.cs) self.assertEqual(i.preDistributeState(), {}) def test_duplicate(self): i = DummyInterface(None, self.cs) iDup = i.duplicate() self.assertEqual(type(i), type(iDup)) self.assertEqual(i.enabled(), iDup.enabled()) class TestTightCoupler(unittest.TestCase): """Test the tight coupler class.""" def setUp(self): cs = settings.Settings() cs["tightCoupling"] = True cs["tightCouplingSettings"] = {"dummyAction": {"parameter": "nothing", "convergence": 1.0e-5}} self.interface = DummyInterface(None, cs) def test_couplerActive(self): self.assertIsNotNone(self.interface.coupler) def test_storePreviousIterationValue(self): self.interface.coupler.storePreviousIterationValue(1.0) self.assertEqual(self.interface.coupler._previousIterationValue, 1.0) def 
test_storePreviousIterationValueException(self): with self.assertRaises(TypeError) as cm: self.interface.coupler.storePreviousIterationValue({5.0}) the_exception = cm.exception self.assertEqual(the_exception.error_code, 3) def test_isConvergedValueError(self): with self.assertRaises(ValueError) as cm: self.interface.coupler.isConverged(1.0) the_exception = cm.exception self.assertEqual(the_exception.error_code, 3) def test_isConverged(self): """Ensure TightCoupler.isConverged() works with float, 1D list, and ragged 2D list. .. test:: The tight coupling logic is based around a convergence criteria. :id: T_ARMI_OPERATOR_PHYSICS1 :tests: R_ARMI_OPERATOR_PHYSICS Notes ----- 2D lists can end up being ragged as assemblies can have different number of blocks. Ragged lists are easier to manage with lists as opposed to numpy.arrays, namely, their dimension is preserved. """ # show a situation where it doesn't converge previousValues = { "float": 1.0, "list1D": [1.0, 2.0], "list2D": [[1, 2, 3], [1, 2]], } updatedValues = { "float": 5.0, "list1D": [5.0, 6.0], "list2D": [[5, 6, 7], [5, 6]], } for previous, current in zip(previousValues.values(), updatedValues.values()): self.interface.coupler.storePreviousIterationValue(previous) self.assertFalse(self.interface.coupler.isConverged(current)) # show a situation where it DOES converge previousValues = updatedValues for previous, current in zip(previousValues.values(), updatedValues.values()): self.interface.coupler.storePreviousIterationValue(previous) self.assertTrue(self.interface.coupler.isConverged(current)) def test_isConvergedRuntimeError(self): """Test to ensure 3D arrays do not work.""" previous = [[[1, 2, 3]], [[1, 2, 3]], [[1, 2, 3]]] updatedValues = [[[5, 6, 7]], [[5, 6, 7]], [[5, 6, 7]]] self.interface.coupler.storePreviousIterationValue(previous) with self.assertRaises(RuntimeError) as cm: self.interface.coupler.isConverged(updatedValues) the_exception = cm.exception self.assertEqual(the_exception.error_code, 3) def 
test_getListDimension(self): a = [1, 2, 3] self.assertEqual(interfaces.TightCoupler.getListDimension(a), 1) a = [[1, 2, 3]] self.assertEqual(interfaces.TightCoupler.getListDimension(a), 2) a = [[[1, 2, 3]]] self.assertEqual(interfaces.TightCoupler.getListDimension(a), 3) ================================================ FILE: armi/tests/test_lwrInputs.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for C5G7 input files.""" import os import unittest from logging import WARNING from armi import runLog from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors from armi.testing import TESTING_ROOT from armi.tests import mockRunLogs from armi.utils import directoryChangers TEST_INPUT_TITLE = "c5g7-settings.yaml" class C5G7ReactorTests(unittest.TestCase): def setUp(self): self.td = directoryChangers.TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_loadC5G7(self): """ Load the C5G7 case from input and check basic counts. (Also, check that we are getting warnings when reading the YAML). 
""" with mockRunLogs.BufferLog() as mock: # we should start with a clean slate self.assertEqual("", mock.getStdout()) runLog.LOG.startLog("test_loadC5G7") runLog.LOG.setVerbosity(WARNING) # load the reactor _o, r = test_reactors.loadTestReactor( os.path.join(TESTING_ROOT, "reactors", "c5g7"), inputFileName=TEST_INPUT_TITLE, ) # test warnings are being logged for malformed isotopics info in the settings file streamVal = mock.getStdout() self.assertIn("Case Information", streamVal, msg=streamVal) self.assertIn("Input File", streamVal, msg=streamVal) # test that there are 100 of each high, medium, and low MOX pins b = r.core.getFirstBlock(Flags.MOX) fuelPinsHigh = b.getComponent(Flags.HIGH | Flags.MOX) self.assertEqual(fuelPinsHigh.getDimension("mult"), 100) # test the Guide Tube dimensions gt = b.getComponent(Flags.GUIDE_TUBE) self.assertEqual(gt.getDimension("mult"), 24) ================================================ FILE: armi/tests/test_mpiActions.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for MPI actions.""" import unittest from collections import defaultdict from unittest.mock import patch from armi import context from armi.mpiActions import ( DistributeStateAction, DistributionAction, MpiAction, _disableForExclusiveTasks, _makeQueue, runActions, runBatchedActions, ) from armi.reactor.tests import test_reactors from armi.tests import mockRunLogs from armi.utils import iterables class MockMpiComm: """Mock MPI Communication library.""" def allgather(self, name): return ["1", "2", "3", "4"] def bcast(self, data, root=0): return defaultdict(int) def Get_rank(self): return 1 def Get_size(self): return 4 def scatter(self, actions, root=0): return None def Split(self, num): return self class MockMpiAction(MpiAction): """Mock MPI Action, to simplify tests.""" runActionExclusive = False def __init__(self, broadcastResult: int = 3, invokeResult: int = 7): self.broadcastResult = broadcastResult self.invokeResult = invokeResult def broadcast(self, obj=None): return self.broadcastResult def invoke(self, o, r, cs): return self.invokeResult @unittest.skipUnless(context.MPI_RANK == 0, "test only on root node") class MpiIterTests(unittest.TestCase): def setUp(self): """Save MPI size on entry.""" self._mpiSize = context.MPI_SIZE self.action = MpiAction() def tearDown(self): """Restore MPI rank and size on exit.""" context.MPI_SIZE = self._mpiSize context.MPI_RANK = 0 def test_parallel(self): self.action.serial = False self.assertTrue(self.action.parallel) self.action.serial = True self.assertFalse(self.action.parallel) def test_serialGather(self): self.action.serial = True self.assertEqual(len(self.action.gather()), 1) def test_mpiIter(self): allObjs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] distObjs = [[0, 1, 2], [3, 4, 5], [6, 7], [8, 9], [10, 11]] context.MPI_SIZE = 5 for rank in range(context.MPI_SIZE): context.MPI_RANK = rank myObjs = list(self.action.mpiIter(allObjs)) self.assertEqual(myObjs, distObjs[rank]) def _distributeObjects(self, allObjs, 
numProcs): context.MPI_SIZE = numProcs objs = [] for context.MPI_RANK in range(context.MPI_SIZE): objs.append(list(self.action.mpiIter(allObjs))) return objs def test_perfectBalancing(self): """Test load balancing when numProcs divides numObjects. In this case, all processes should get the same number of objects. """ numObjs, numProcs = 25, 5 allObjs = list(range(numObjs)) objs = self._distributeObjects(allObjs, numProcs) counts = [len(o) for o in objs] imbalance = max(counts) - min(counts) # ensure we haven't missed any objects self.assertEqual(iterables.flatten(objs), allObjs) # check imbalance self.assertEqual(imbalance, 0) def test_excessProcesses(self): """Test load balancing when numProcs exceeds numObjects. In this case, some processes should receive a single object and the rest should receive no objects. """ numObjs, numProcs = 5, 25 allObjs = list(range(numObjs)) objs = self._distributeObjects(allObjs, numProcs) counts = [len(o) for o in objs] imbalance = max(counts) - min(counts) # ensure we haven't missed any objects self.assertEqual(iterables.flatten(objs), allObjs) # check imbalance self.assertLessEqual(imbalance, 1) def test_typicalBalancing(self): """Test load balancing for typical case (numProcs < numObjs). In this case, the total imbalance should be 1 (except for the perfectly balanced case). 
""" numObjs, numProcs = 25, 6 allObjs = list(range(numObjs)) objs = self._distributeObjects(allObjs, numProcs) # typical case (more objects than processes) counts = [len(o) for o in objs] imbalance = max(counts) - min(counts) self.assertLessEqual(imbalance, 1) self.assertEqual(iterables.flatten(objs), allObjs) @patch("armi.context.MPI_COMM", MockMpiComm()) @patch("armi.context.MPI_SIZE", 4) def test_runActionsDistributionAction(self): o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") act = DistributionAction([self.action]) results = runActions(o, r, o.cs, [act]) self.assertEqual(len(results), 1) self.assertIsNone(results[0]) o.cs["verbosity"] = "debug" res = act.invokeHook() self.assertIsNone(res) @patch("armi.context.MPI_COMM", MockMpiComm()) @patch("armi.context.MPI_SIZE", 4) @patch("armi.context.MPI_NODENAMES", ["node0", "node0", "node1", "node1"]) @patch("armi.context.MPI_DISTRIBUTABLE", True) def test_runBatchedActions(self): o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") actionsByNode = { "node0": [MockMpiAction(invokeResult=1)], "node1": [MockMpiAction(invokeResult=5), MockMpiAction(invokeResult=11)], } # run in serial with mockRunLogs.BufferLog() as mock: results = runBatchedActions(o, r, o.cs, actionsByNode, serial=True) self.assertIn("Running 3 MPI actions in serial", mock.getStdout()) self.assertEqual(len(results), 3) self.assertListEqual(results, [1, 5, 11]) # run in parallel with mockRunLogs.BufferLog() as mock: results = runBatchedActions(o, r, o.cs, actionsByNode) self.assertIn("Running 3 MPI actions in parallel over 2 nodes.", mock.getStdout()) self.assertEqual(len(results), 1) self.assertIsNone(results[0]) @patch("armi.context.MPI_COMM", MockMpiComm()) @patch("armi.context.MPI_SIZE", 4) @patch("armi.context.MPI_NODENAMES", ["node0", "node0", "node1", "node1"]) @patch("armi.context.MPI_DISTRIBUTABLE", True) def test_runBatchedActionsOverload(self): """Test 
that an error is thrown if the number of tasks exceeds number of ranks.""" o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") actionsByNode = { "node0": [MockMpiAction()], "node1": [MockMpiAction(), MockMpiAction(), MockMpiAction()], } # run in parallel with mockRunLogs.BufferLog() as mock: with self.assertRaises(ValueError): runBatchedActions(o, r, o.cs, actionsByNode) self.assertIn("There are more actions (3) than ranks available (2) on node1!", mock.getStdout()) @patch("armi.context.MPI_COMM", MockMpiComm()) @patch("armi.context.MPI_SIZE", 4) def test_runActionsDistributeStateAction(self): o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") act = DistributeStateAction([self.action]) results = runActions(o, r, o.cs, [act]) self.assertEqual(len(results), 1) self.assertIsNone(results[0]) @patch("armi.context.MPI_COMM", MockMpiComm()) @patch("armi.context.MPI_SIZE", 4) @patch("armi.context.MPI_DISTRIBUTABLE", True) def test_runActionsDistStateActionParallel(self): o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") act = DistributeStateAction([self.action]) results = runActions(o, r, o.cs, [act]) self.assertEqual(len(results), 1) self.assertIsNone(results[0]) def test_invokeAsMaster(self): """Verify that calling invokeAsMaster calls invoke.""" self.assertEqual(7, MockMpiAction.invokeAsMaster(1, 2, 3)) class QueueActionsTests(unittest.TestCase): def test_disableForExclusiveTasks(self): num = 5 actionsThisRound = [MpiAction() for _ in range(num - 1)] actionsThisRound.append(None) useForComputation = [True] * num exclusiveIndices = [1, 3] for i in exclusiveIndices: actionsThisRound[i].runActionExclusive = True useForComputation = _disableForExclusiveTasks(actionsThisRound, useForComputation) for i in range(num): if i in exclusiveIndices: # won't be used for computation in future round self.assertFalse(useForComputation[i]) else: 
self.assertTrue(useForComputation[i]) def test_makeQueue(self): num = 5 actions = [MpiAction() for _ in range(num)] for i, action in enumerate(actions): action.runActionExclusive = True action.priority = 10 - i # make it reverse so it actually has to sort useForComputation = [True] * (num - 1) queue, numBatches = _makeQueue(actions, useForComputation) self.assertEqual(numBatches, 2) self.assertEqual(len(queue), len(actions)) lastPriority = -999 for action in queue: # check that when more exclusive than cpus they go to non-exclusive self.assertFalse(action.runActionExclusive) self.assertGreaterEqual(action.priority, lastPriority) lastPriority = action.priority exclusiveIndices = [1, 3] for i in exclusiveIndices: actions[i].runActionExclusive = True useForComputation = [True] * (num - 2) queue, numBatches = _makeQueue(actions, useForComputation) # 3 batches since 2 are exclusive and 3 left over tasks self.assertEqual(numBatches, 3) # check that they remain exclusive for i in exclusiveIndices: self.assertTrue(actions[i].runActionExclusive) lastPriority = -999 foundFirstNonExclusive = False for action in queue: if not action.runActionExclusive: foundFirstNonExclusive = True # priority order resets for non-exclusive lastPriority = -999 if foundFirstNonExclusive: # all after the first nonExclusive should be non-exclusive self.assertFalse(action.runActionExclusive) self.assertGreaterEqual(action.priority, lastPriority) lastPriority = action.priority ================================================ FILE: armi/tests/test_mpiFeatures.py ================================================ # Copyright 2021 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests for featurest that need MPI, and thus require special testing. These tests will be generally ignored by pytest if you are trying to run them in an environment without MPI installed. To run these tests from the commandline, install MPI, mpi4py, and do: mpiexec -n 2 python -m pytest armi/tests/test_mpiFeatures.py or mpiexec.exe -n 2 python -m pytest armi/tests/test_mpiFeatures.py """ import os import shutil import unittest from unittest.mock import patch from armi import context, mpiActions, settings from armi.interfaces import Interface from armi.mpiActions import DistributeStateAction from armi.operators import OperatorMPI from armi.physics.neutronics.const import CONF_CROSS_SECTION from armi.reactor import blueprints, reactors from armi.reactor.parameters import parameterDefinitions from armi.reactor.tests import test_reactors from armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs from armi.utils import pathTools from armi.utils.directoryChangers import TemporaryDirectoryChanger # determine if this is a parallel run, and MPI is installed MPI_EXE = None if shutil.which("mpiexec.exe") is not None: MPI_EXE = "mpiexec.exe" elif shutil.which("mpiexec") is not None: MPI_EXE = "mpiexec" MPI_COMM = context.MPI_COMM class FailingInterface1(Interface): """utility classes to make sure the logging system fails properly.""" name = "failer" def interactEveryNode(self, cycle, node): raise RuntimeError("Failing interface failure") class FailingInterface2(Interface): """utility class to make sure the logging system fails properly.""" name = "failer" def 
interactEveryNode(self, cycle, node): raise RuntimeError("Failing interface critical failure") class FailingInterface3(Interface): """fails on worker operate.""" name = "failer" def fail(self): raise RuntimeError("Failing interface critical worker failure") def interactEveryNode(self, c, n): context.MPI_COMM.bcast("fail", root=0) def workerOperate(self, cmd): if cmd == "fail": self.fail() return True return False class MockInterface(Interface): name = "mockInterface" def interactInit(self): pass class MpiOperatorTests(unittest.TestCase): """Testing the MPI parallelization operator.""" def setUp(self): self.old_op, self.r = test_reactors.loadTestReactor( TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml" ) self.o = OperatorMPI(cs=self.old_op.cs) self.o.r = self.r @patch("armi.operators.Operator.operate") @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_basicOperatorMPI(self, mockOpMpi): """Test we can drive a parallel operator. .. test:: Run a parallel operator. :id: T_ARMI_OPERATOR_MPI0 :tests: R_ARMI_OPERATOR_MPI """ with mockRunLogs.BufferLog() as mock: self.o.operate() self.assertIn("OperatorMPI.operate", mock.getStdout()) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_primaryException(self): """Test a custom interface that only fails on the main process. .. test:: Run a parallel operator that fails online on the main process. 
:id: T_ARMI_OPERATOR_MPI1 :tests: R_ARMI_OPERATOR_MPI """ self.o.removeAllInterfaces() failer = FailingInterface1(self.o.r, self.o.cs) self.o.addInterface(failer) if context.MPI_RANK == 0: self.assertRaises(RuntimeError, self.o.operate) else: self.o.operate() @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_primaryCritical(self): self.o.removeAllInterfaces() failer = FailingInterface2(self.o.r, self.o.cs) self.o.addInterface(failer) if context.MPI_RANK == 0: self.assertRaises(Exception, self.o.operate) else: self.o.operate() @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_finalizeInteract(self): """Test to make sure workers are reset after interface interactions.""" # Add a random number of interfaces interface = MockInterface(self.o.r, self.o.cs) self.o.addInterface(interface) with mockRunLogs.BufferLog() as mock: if context.MPI_RANK == 0: self.o.interactAllInit() context.MPI_COMM.bcast("quit", root=0) context.MPI_COMM.bcast("finished", root=0) else: self.o.workerOperate() logMessage = "Workers have been reset." if context.MPI_RANK == 0 else "Workers are being reset." 
numCalls = len([line for line in mock.getStdout().splitlines() if logMessage in line]) self.assertGreaterEqual(numCalls, 1) # these two must be defined up here so that they can be pickled class BcastAction1(mpiActions.MpiAction): def invokeHook(self): nItems = 50 results = [None] * nItems for objIndex in range(nItems): if objIndex % context.MPI_SIZE == context.MPI_RANK: results[objIndex] = objIndex allResults = self.gather(results) if allResults: return [allResults[ai % context.MPI_SIZE][ai] for ai in range(nItems)] else: return [] class BcastAction2(mpiActions.MpiAction): def invokeHook(self): results = [] for num in self.mpiIter(range(50)): results.append(num) allResults = self.gather(results) if allResults: return self.mpiFlatten(allResults) else: return [] class MpiDistributeStateTests(unittest.TestCase): def setUp(self): self.cs = settings.Settings(fName=ARMI_RUN_PATH) bp = blueprints.loadFromCs(self.cs) self.o = OperatorMPI(self.cs) self.o.r = reactors.factory(self.cs, bp) self.action = DistributeStateAction() self.action.o = self.o self.action.r = self.o.r @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_distributeSettings(self): """Under normal circumstances, we would not test "private" methods; however, distributeState is quite complicated. """ self.action._distributeSettings() if context.MPI_RANK == 0: self.assertEqual(self.cs, self.action.o.cs) else: self.assertNotEqual(self.cs, self.action.o.cs) original = {ss.name: ss.value for ss in self.cs.values()} current = {ss.name: ss.value for ss in self.action.o.cs.values()} # remove values that are *expected to be* different... # CONF_CROSS_SECTION is removed because unittest is being mean about # comparing dicts... 
for key in ["stationaryBlockFlags", "verbosity", CONF_CROSS_SECTION]: if key in original: del original[key] if key in current: del current[key] for key in original.keys(): self.assertEqual(original[key], current[key]) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_distributeReactor(self): """Under normal circumstances, we would not test "private" methods; however, distributeState is quite complicated. """ original_reactor = self.action.r self.action._distributeReactor(self.cs) if context.MPI_RANK == 0: self.assertEqual(original_reactor, self.action.r) else: self.assertNotEqual(original_reactor, self.action.r) self.assertIsNone(self.action.r.core.lib) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_distributeInterfaces(self): """Under normal circumstances, we would not test "private" methods; however, distributeState is quite complicated. """ original_interfaces = self.o.interfaces self.action._distributeInterfaces() if context.MPI_RANK == 0: self.assertEqual(original_interfaces, self.o.interfaces) else: self.assertEqual(original_interfaces, self.o.interfaces) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_distributeState(self): original_reactor = self.o.r original_lib = self.o.r.core.lib original_interfaces = self.o.interfaces original_bolassems = self.o.r.blueprints.assemblies self.action.invokeHook() if context.MPI_RANK == 0: self.assertEqual(self.cs, self.o.cs) self.assertEqual(original_reactor, self.o.r) self.assertEqual(original_interfaces, self.o.interfaces) self.assertDictEqual(original_bolassems, self.o.r.blueprints.assemblies) self.assertEqual(original_lib, self.o.r.core.lib) else: self.assertNotEqual(self.cs, self.o.cs) self.assertNotEqual(original_reactor, self.o.r) self.assertNotEqual(original_bolassems, self.o.r.blueprints.assemblies) self.assertEqual(original_interfaces, self.o.interfaces) self.assertEqual(original_lib, 
self.o.r.core.lib) for pDef in parameterDefinitions.ALL_DEFINITIONS: self.assertFalse(pDef.assigned & parameterDefinitions.SINCE_LAST_DISTRIBUTE_STATE) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_compileResults(self): action1 = BcastAction1() context.MPI_COMM.bcast(action1) results1 = action1.invoke(None, None, None) action2 = BcastAction2() context.MPI_COMM.bcast(action2) results2 = action2.invoke(None, None, None) self.assertEqual(results1, results2) class MpiPathToolsTests(unittest.TestCase): @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_cleanPathMpi(self): """Simple tests of cleanPath(), in the MPI scenario.""" with TemporaryDirectoryChanger(): # TEST 0: File is not safe to delete, due to not being a temp dir or under FAST_PATH filePath0 = "test0_cleanPathNoMpi" open(filePath0, "w").write("something") self.assertTrue(os.path.exists(filePath0)) with self.assertRaises(Exception): pathTools.cleanPath(filePath0, mpiRank=context.MPI_RANK) MPI_COMM.barrier() # TEST 1: Delete a single file under FAST_PATH filePath1 = os.path.join(context.getFastPath(), "test1_cleanPathNoMpi") open(filePath1, "w").write("something") self.assertTrue(os.path.exists(filePath1)) pathTools.cleanPath(filePath1, mpiRank=context.MPI_RANK) MPI_COMM.barrier() self.assertFalse(os.path.exists(filePath1)) # TEST 2: Delete an empty directory under FAST_PATH dir2 = os.path.join(context.getFastPath(), "gimmeonereason") os.mkdir(dir2) self.assertTrue(os.path.exists(dir2)) pathTools.cleanPath(dir2, mpiRank=context.MPI_RANK) MPI_COMM.barrier() self.assertFalse(os.path.exists(dir2)) # TEST 3: Delete an empty directory with forceClean=True dir3 = "tostayhere" os.mkdir(dir3) self.assertTrue(os.path.exists(dir3)) pathTools.cleanPath(dir3, mpiRank=context.MPI_RANK, forceClean=True) MPI_COMM.barrier() self.assertFalse(os.path.exists(dir3)) # TEST 3: Delete a directory with two files inside with forceClean=True dir4 = 
"andilldirrightbackaround" os.mkdir(dir4) open(os.path.join(dir4, "file1.txt"), "w").write("something1") open(os.path.join(dir4, "file2.txt"), "w").write("something2") self.assertTrue(os.path.exists(dir4)) self.assertTrue(os.path.exists(os.path.join(dir4, "file1.txt"))) self.assertTrue(os.path.exists(os.path.join(dir4, "file2.txt"))) pathTools.cleanPath(dir4, mpiRank=context.MPI_RANK, forceClean=True) MPI_COMM.barrier() self.assertFalse(os.path.exists(dir4)) class TestContextMpi(unittest.TestCase): """Parallel tests for the Context module.""" @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_rank(self): self.assertGreater(context.MPI_RANK, -1) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_nonNoneData(self): self.assertGreater(len(context.APP_DATA), 0) self.assertGreater(len(context.DOC), 0) self.assertGreater(len(context.getFastPath()), 0) self.assertGreater(len(context.PROJECT_ROOT), 0) self.assertGreater(len(context.RES), 0) self.assertGreater(len(context.ROOT), 0) self.assertGreater(len(context.USER), 0) ================================================ FILE: armi/tests/test_mpiParameters.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests of the MPI portion of the Parameters class.""" import shutil import unittest from armi import context from armi.reactor import composites, parameters # determine if this is a parallel run, and MPI is installed MPI_EXE = None if shutil.which("mpiexec.exe") is not None: MPI_EXE = "mpiexec.exe" elif shutil.which("mpiexec") is not None: MPI_EXE = "mpiexec" class MockSyncPC(parameters.ParameterCollection): pDefs = parameters.ParameterDefinitionCollection() with pDefs.createBuilder(default=0.0, location=parameters.ParamLocation.AVERAGE) as pb: pb.defParam("param1", "units", "p1 description", categories=["cat1"]) pb.defParam("param2", "units", "p2 description", categories=["cat2"]) pb.defParam("param3", "units", "p3 description", categories=["cat3"]) def makeComp(name): """Helper method for MPI sync tests: mock up a Composite with a minimal param collections.""" c = composites.Composite(name) c.p = MockSyncPC() return c class SynchronizationTests(unittest.TestCase): """Some tests that must be run with mpirun instead of the standard unittest system.""" def setUp(self): self.r = makeComp("reactor") self.r.core = makeComp("core") self.r.add(self.r.core) for ai in range(context.MPI_SIZE * 3): a = makeComp("assembly{}".format(ai)) self.r.core.add(a) for bi in range(3): a.add(makeComp("block{}-{}".format(ai, bi))) self.comps = [self.r.core] + self.r.core.getChildren(deep=True) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_noConflicts(self): """Make sure sync works across processes. .. test:: Synchronize a reactor's state across processes. 
:id: T_ARMI_CMP_MPI0 :tests: R_ARMI_CMP_MPI """ _syncCount = self.r.syncMpiState() for ci, comp in enumerate(self.comps): if ci % context.MPI_SIZE == context.MPI_RANK: comp.p.param1 = (context.MPI_RANK + 1) * 30.0 else: self.assertNotEqual((context.MPI_RANK + 1) * 30.0, comp.p.param1) syncCount = self.r.syncMpiState() self.assertEqual(len(self.comps), syncCount) for ci, comp in enumerate(self.comps): self.assertEqual((ci % context.MPI_SIZE + 1) * 30.0, comp.p.param1) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_withConflicts(self): """Test conflicts arise correctly if we force a conflict. .. test:: Raise errors when there are conflicts across processes. :id: T_ARMI_CMP_MPI1 :tests: R_ARMI_CMP_MPI """ self.r.core.p.param1 = (context.MPI_RANK + 1) * 99.0 with self.assertRaises(ValueError): self.r.syncMpiState() @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_withConflictsButSameValue(self): """Test that conflicts are ignored if the values are the same. .. test:: Don't raise errors when multiple processes make the same changes. 
:id: T_ARMI_CMP_MPI2 :tests: R_ARMI_CMP_MPI """ self.r.core.p.param1 = (context.MPI_SIZE + 1) * 99.0 self.r.syncMpiState() self.assertEqual((context.MPI_SIZE + 1) * 99.0, self.r.core.p.param1) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_conflictsMaintainWithStateRetainer(self): """Test that the state retainer fails correctly when it should.""" with self.r.retainState(parameters.inCategory("cat2")): for _, comp in enumerate(self.comps): comp.p.param2 = 99 * context.MPI_RANK with self.assertRaises(ValueError): self.r.syncMpiState() ================================================ FILE: armi/tests/test_plugins.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Provides functionality for testing implementations of plugins.""" import unittest from copy import deepcopy from typing import Optional import yamlize from armi import ( context, getApp, getPluginManagerOrFail, interfaces, plugins, settings, utils, ) from armi.bookkeeping.db import loadOperator from armi.bookkeeping.db.databaseInterface import DatabaseInterface from armi.physics.neutronics import NeutronicsPlugin from armi.reactor.blocks import Block from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger from armi.reactor.flags import Flags from armi.testing import loadTestReactor from armi.tests import TEST_ROOT from armi.utils.directoryChangers import TemporaryDirectoryChanger class PluginFlags1(plugins.ArmiPlugin): """Simple Plugin that defines a single, new flag.""" @staticmethod @plugins.HOOKIMPL def defineFlags(): """Function to provide new Flags definitions.""" return {"SUPER_FLAG": utils.flags.auto()} class SillyAxialExpansionChanger(AxialExpansionChanger): """Fake, test-specific axial expansion changer that a plugin will register.""" class SillyAxialPlugin(plugins.ArmiPlugin): """Trivial plugin that implements the axial expansion hook.""" @staticmethod @plugins.HOOKIMPL def getAxialExpansionChanger() -> type[SillyAxialExpansionChanger]: return SillyAxialExpansionChanger class BeforeReactorPlugin(plugins.ArmiPlugin): """Trivial plugin that implements the before reactor construction hook.""" @staticmethod @plugins.HOOKIMPL def beforeReactorConstruction(cs) -> None: cs.beforeReactorConstructionFlag = True class TestPluginRegistration(unittest.TestCase): def setUp(self): """ Manipulate the standard App. We can't just configure our own, since the pytest environment bleeds between tests. 
""" self.app = getApp() self._backupApp = deepcopy(self.app) def tearDown(self): """Restore the App to its original state.""" import armi armi._app = self._backupApp context.APP_NAME = "armi" def test_defineFlags(self): """Define a new flag using the plugin defineFlags() method. .. test:: Define a new, unique flag through the plugin pathway. :id: T_ARMI_FLAG_EXTEND1 :tests: R_ARMI_FLAG_EXTEND .. test:: Load a plugin into an app and show it is loaded. :id: T_ARMI_PLUGIN_REGISTER :tests: R_ARMI_PLUGIN """ app = getApp() # show the new plugin isn't loaded yet pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertNotIn("PluginFlags1", pluginNames) # show the flag doesn't exist yet with self.assertRaises(AttributeError): Flags.SUPER_FLAG # load the plugin app.pluginManager.register(PluginFlags1) # show the new plugin is loaded now pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertIn("PluginFlags1", pluginNames) # force-register new flags from the new plugin app._pluginFlagsRegistered = False app.registerPluginFlags() # show the flag exists now self.assertEqual(type(Flags.SUPER_FLAG._value), int) def test_axialExpansionHook(self): """Test that plugins can override the axial expansion of assemblies via a hook.""" pm = self.app.pluginManager first = pm.hook.getAxialExpansionChanger() # By default, make sure we get the armi-shipped expansion class self.assertIs(first, AxialExpansionChanger) pm.register(SillyAxialPlugin) try: second = pm.hook.getAxialExpansionChanger() # Registering a plugin that implements the hook means we get that plugin's axial expander self.assertIs(second, SillyAxialExpansionChanger) finally: pm.unregister(SillyAxialPlugin) def test_beforeReactorConstructionHook(self): """Test that plugin hook successfully injects code before reactor initialization.""" pm = getPluginManagerOrFail() pm.register(BeforeReactorPlugin) try: o, r = loadTestReactor(TEST_ROOT, 
inputFileName="smallestTestReactor/armiRunSmallest.yaml", useCache=False) self.assertTrue(o.cs.beforeReactorConstructionFlag) # Check that hook is called for database loading with TemporaryDirectoryChanger(): dbi = DatabaseInterface(r, o.cs) dbi.initDB(fName=self._testMethodName + ".h5") db = dbi.database db.writeToDB(r) db.close() o = loadOperator(self._testMethodName + ".h5", 0, 0, callReactorConstructionHook=True) self.assertTrue(o.cs.beforeReactorConstructionFlag) finally: pm.unregister(BeforeReactorPlugin) class TestPluginBasics(unittest.TestCase): def test_defineParameters(self): """Test that the default ARMI plugins are correctly defining parameters. .. test:: ARMI plugins define parameters, which appear on a new Block. :id: T_ARMI_PLUGIN_PARAMS :tests: R_ARMI_PLUGIN_PARAMS """ # create a block b = Block("fuel", height=10.0) # unless a plugin has registered a param, it doesn't exist with self.assertRaises(AttributeError): b.p.fakeParam # Check the default values of parameters defined by the neutronics plugin self.assertIsNone(b.p.axMesh) self.assertEqual(b.p.flux, 0) self.assertEqual(b.p.power, 0) self.assertEqual(b.p.pdens, 0) # Check the default values of parameters defined by the fuel performance plugin self.assertEqual(b.p.gasPorosity, 0) self.assertEqual(b.p.liquidPorosity, 0) def test_exposeInterfaces(self): """Make sure that the exposeInterfaces hook is properly implemented. .. test:: Plugins can add interfaces to the interface stack. 
:id: T_ARMI_PLUGIN_INTERFACES0 :tests: R_ARMI_PLUGIN_INTERFACES """ plugin = NeutronicsPlugin() cs = settings.Settings() results = plugin.exposeInterfaces(cs) # each plugin should return a list self.assertIsInstance(results, list) self.assertGreater(len(results), 0) for result in results: # Make sure all elements in the list satisfy the constraints of the hookspec self.assertIsInstance(result, tuple) self.assertEqual(len(result), 3) order, interface, kwargs = result self.assertIsInstance(order, (int, float)) self.assertTrue(issubclass(interface, interfaces.Interface)) self.assertIsInstance(kwargs, dict) def test_pluginsExposeInterfaces(self): """Make sure that plugins properly expose their interfaces, by checking some known examples. .. test:: Check that some known plugins correctly add interfaces to the stack. :id: T_ARMI_PLUGIN_INTERFACES1 :tests: R_ARMI_PLUGIN_INTERFACES """ # generate a test operator, with a full set of interfaces from plugsin o = loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml")[0] pm = getPluginManagerOrFail() # test the plugins were generated plugins = pm.get_plugins() self.assertGreater(len(plugins), 0) # test interfaces were generated from those plugins ints = o.interfaces self.assertGreater(len(ints), 0) # test that certain plugins exist and correctly registered their interfaces pluginStrings = " ".join([str(p) for p in plugins]) interfaceStrings = " ".join([str(i) for i in ints]) # Test that the BookkeepingPlugin registered the DatabaseInterface self.assertIn("BookkeepingPlugin", pluginStrings) self.assertIn("DatabaseInterface", interfaceStrings) # Test that the BookkeepingPlugin registered the history interface self.assertIn("BookkeepingPlugin", pluginStrings) self.assertIn("history", interfaceStrings) # Test that the EntryPointsPlugin registered the main interface self.assertIn("EntryPointsPlugin", pluginStrings) self.assertIn("main", interfaceStrings) # Test that the FuelHandlerPlugin registered the 
fuelHandler interface self.assertIn("FuelHandlerPlugin", pluginStrings) self.assertIn("fuelHandler", interfaceStrings) class TestPlugin(unittest.TestCase): """This contains some sanity tests that can be used by implementing plugins.""" plugin: Optional[plugins.ArmiPlugin] = None def test_defineBlueprintsSections(self): """Make sure that the defineBlueprintsSections hook is properly implemented.""" if self.plugin is None: return if not hasattr(self.plugin, "defineBlueprintsSections"): return results = self.plugin.defineBlueprintsSections() if results is None: return # each plugin should return a list self.assertIsInstance(results, (list, type(None))) for result in results: self.assertIsInstance(result, tuple) self.assertEqual(len(result), 3) self.assertIsInstance(result[0], str) self.assertIsInstance(result[1], yamlize.Attribute) self.assertTrue(callable(result[2])) def test_exposeInterfaces(self): """Make sure that the exposeInterfaces hook is properly implemented.""" if self.plugin is None: return cs = settings.Settings() results = self.plugin.exposeInterfaces(cs) if results is None or not results: return # each plugin should return a list self.assertIsInstance(results, list) for result in results: # Make sure all elements in the list satisfy the constraints of the hookspec self.assertIsInstance(result, tuple) self.assertEqual(len(result), 3) order, interface, kwargs = result self.assertIsInstance(order, (int, float)) self.assertTrue(issubclass(interface, interfaces.Interface)) self.assertIsInstance(kwargs, dict) ================================================ FILE: armi/tests/test_runLog.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the runLog tooling."""

import logging
import os
import unittest
from io import StringIO
from pathlib import Path
from shutil import rmtree

from armi import runLog
from armi.tests import mockRunLogs
from armi.utils.directoryChangers import TemporaryDirectoryChanger


class TestRunLog(unittest.TestCase):
    def test_setVerbosityFromInteger(self):
        """Test that the log verbosity can be set with an integer.

        .. test:: The run log verbosity can be configured with an integer.
            :id: T_ARMI_LOG0
            :tests: R_ARMI_LOG
        """
        log = runLog._RunLog(1)
        expectedStrVerbosity = "debug"
        verbosityRank = log.getLogVerbosityRank(expectedStrVerbosity)
        runLog.setVerbosity(verbosityRank)
        self.assertEqual(verbosityRank, runLog.getVerbosity())
        self.assertEqual(verbosityRank, logging.DEBUG)

    def test_setVerbosityFromString(self):
        """
        Test that the log verbosity can be set with a string.

        .. test:: The run log verbosity can be configured with a string.
            :id: T_ARMI_LOG1
            :tests: R_ARMI_LOG
        """
        log = runLog._RunLog(1)
        expectedStrVerbosity = "error"
        verbosityRank = log.getLogVerbosityRank(expectedStrVerbosity)
        runLog.setVerbosity(expectedStrVerbosity)
        self.assertEqual(verbosityRank, runLog.getVerbosity())
        self.assertEqual(verbosityRank, logging.ERROR)

    def test_verbosityOutOfRange(self):
        """Test that the log verbosity setting resets to a canonical value when it is out of range."""
        # below the minimum rank clamps to the lowest known level
        runLog.setVerbosity(-50)
        self.assertEqual(runLog.LOG.logger.level, min([v[0] for v in runLog.LOG.logLevels.values()]))

        # above the maximum rank clamps to the highest known level
        runLog.setVerbosity(5000)
        self.assertEqual(runLog.LOG.logger.level, max([v[0] for v in runLog.LOG.logLevels.values()]))

    def test_invalidSetVerbosityByString(self):
        """Test that the log verbosity setting fails if the integer is invalid."""
        with self.assertRaises(KeyError):
            runLog.setVerbosity("taco")

        with self.assertRaises(TypeError):
            runLog.setVerbosity(["debug"])

    def test_parentRunLogging(self):
        """A basic test of the logging of the parent runLog."""
        # init the _RunLog object
        log = runLog.LOG = runLog._RunLog(0)
        log.startLog("test_parentRunLogging")
        runLog.createLogDir(0)
        log.setVerbosity(logging.INFO)

        # divert the logging to a stream, to make testing easier
        stream = StringIO()
        handler = logging.StreamHandler(stream)
        log.logger.handlers = [handler]

        # log some things
        log.log("debug", "You shouldn't see this.", single=False, label=None)
        log.log("warning", "Hello, ", single=False, label=None)
        log.log("error", "world!", single=False, label=None)
        log.logger.flush()
        log.logger.close()
        runLog.close(99)

        # test what was logged
        streamVal = stream.getvalue()
        self.assertIn("Hello", streamVal, msg=streamVal)
        self.assertIn("world", streamVal, msg=streamVal)

    def test_getWhiteSpace(self):
        log = runLog._RunLog(0)
        space0 = len(log.getWhiteSpace(0))
        space1 = len(log.getWhiteSpace(1))
        space9 = len(log.getWhiteSpace(9))

        self.assertGreater(space1, space0)
        # whitespace saturates: deep indent levels share the same width
        self.assertEqual(space1, space9)

    def test_warningReport(self):
        """A simple test of the warning tracking and reporting logic.

        .. test:: Generate a warning report after a simulation is complete.
            :id: T_ARMI_LOG2
            :tests: R_ARMI_LOG
        """
        # create the logger and do some logging
        log = runLog.LOG = runLog._RunLog(321)
        log.startLog("test_warningReport")
        runLog.createLogDir(0)

        # divert the logging to a stream, to make testing easier
        stream = StringIO()
        handler = logging.StreamHandler(stream)
        log.logger.handlers = [handler]

        # log some things
        log.setVerbosity(logging.INFO)
        log.log("warning", "test_warningReport", single=True, label=None)
        log.log("debug", "invisible due to log level", single=False, label=None)
        log.log("warning", "test_warningReport", single=True, label=None)
        log.log("warning", "simple_warning", single=False, label=None)
        log.log("error", "high level something", single=False, label=None)

        # test that the logging found some duplicate outputs
        dupsFilter = log.getDuplicatesFilter()
        self.assertIsNotNone(dupsFilter)
        warnings = dupsFilter.warningCounts
        self.assertGreater(len(warnings), 0)

        # run the warning report
        log.warningReport()
        runLog.close(1)
        runLog.close(0)

        # test what was logged
        streamVal = stream.getvalue()
        self.assertIn("Final Warning Count", streamVal, msg=streamVal)
        self.assertIn("simple_warning", streamVal, msg=streamVal)
        self.assertIn("test_warningReport", streamVal, msg=streamVal)
        self.assertIn("Total Number of Warnings", streamVal, msg=streamVal)
        self.assertNotIn("invisible", streamVal, msg=streamVal)
        self.assertEqual(streamVal.count("test_warningReport"), 2, msg=streamVal)

        # bonus check: edge case in duplicates filter
        backupLog, log.logger = log.logger, None
        self.assertIsNone(log.getDuplicatesFilter())
        log.logger = backupLog

    def test_warningReportInvalid(self):
        """A test of warningReport in an invalid situation.

        .. test:: Test an important edge case for a warning report.
            :id: T_ARMI_LOG3
            :tests: R_ARMI_LOG
        """
        # create the logger and do some logging
        testName = "test_warningReportInvalid"
        log = runLog.LOG = runLog._RunLog(323)
        log.startLog(testName)
        runLog.createLogDir(0)

        # divert the logging to a stream, to make testing easier
        stream = StringIO()
        handler = logging.StreamHandler(stream)
        log.logger.handlers = [handler]

        # log some things
        log.setVerbosity(logging.INFO)
        log.log("warning", testName, single=True, label=None)
        log.log("debug", "invisible due to log level", single=False, label=None)
        log.log("warning", testName, single=True, label=None)
        log.log("error", "high level something", single=False, label=None)

        # test that the logging found some duplicate outputs
        def returnNone(*args, **kwargs):
            return None

        # force the "no duplicates filter" path to be exercised
        log.logger.getDuplicatesFilter = returnNone
        self.assertIsNone(log.logger.getDuplicatesFilter())

        # run the warning report
        log.warningReport()
        runLog.close(1)
        runLog.close(0)

        # test what was logged
        streamVal = stream.getvalue()
        self.assertIn(testName, streamVal, msg=streamVal)
        self.assertIn("None Found", streamVal, msg=streamVal)
        self.assertNotIn("invisible", streamVal, msg=streamVal)
        self.assertEqual(streamVal.count(testName), 1, msg=streamVal)

    def test_closeLogging(self):
        """A basic test of the close() functionality."""

        def validate_loggers(log):
            """Little test helper, to make sure our loggers still look right."""
            handlers = [str(h) for h in log.logger.handlers]
            self.assertEqual(len(handlers), 1, msg=",".join(handlers))

            stderrHandlers = [str(h) for h in log.stderrLogger.handlers]
            self.assertEqual(len(stderrHandlers), 1, msg=",".join(stderrHandlers))

        # init logger
        log = runLog.LOG = runLog._RunLog(777)
        validate_loggers(log)

        # start the logging for real
        log.startLog("test_closeLogging")
        runLog.createLogDir()
        validate_loggers(log)

        # close() and test that we have correctly nullified our loggers
        runLog.close(1)
        validate_loggers(log)

        # in a real run, the parent process would close() after all the children
        runLog.close(0)

    def test_setVerbosity(self):
        """Let's test the setVerbosity() method carefully.

        .. test:: The run log has configurable verbosity.
            :id: T_ARMI_LOG4
            :tests: R_ARMI_LOG

        .. test:: The run log can log to stream.
            :id: T_ARMI_LOG_IO0
            :tests: R_ARMI_LOG_IO
        """
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_setVerbosity")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)
            runLog.info("hi")
            self.assertIn("hi", mock.getStdout())
            mock.emptyStdout()

            runLog.debug("invisible")
            self.assertEqual("", mock.getStdout())

            # setVerbosity() to WARNING, and verify it is working
            runLog.LOG.setVerbosity(logging.WARNING)
            runLog.info("still invisible")
            self.assertEqual("", mock.getStdout())
            runLog.warning("visible")
            self.assertIn("visible", mock.getStdout())
            mock.emptyStdout()

            # setVerbosity() to DEBUG, and verify it is working
            runLog.LOG.setVerbosity(logging.DEBUG)
            runLog.debug("Visible")
            self.assertIn("Visible", mock.getStdout())
            mock.emptyStdout()

            # setVerbosity() to ERROR, and verify it is working
            runLog.LOG.setVerbosity(logging.ERROR)
            runLog.warning("Still Invisible")
            self.assertEqual("", mock.getStdout())
            runLog.error("Visible!")
            self.assertIn("Visible!", mock.getStdout())

            # we shouldn't be able to setVerbosity() to a non-canonical value (logging module defense)
            self.assertEqual(runLog.LOG.getVerbosity(), logging.ERROR)
            runLog.LOG.setVerbosity(logging.WARNING + 1)
            self.assertEqual(runLog.LOG.getVerbosity(), logging.WARNING)

    def test_setVerbosityBeforeStartLog(self):
        """The user/dev may accidentally call ``setVerbosity()`` before ``startLog()``, this should be mostly
        supportable. This is just an edge case.

        .. test:: Test that we support the user setting log verbosity BEFORE the logging starts.
            :id: T_ARMI_LOG5
            :tests: R_ARMI_LOG
        """
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate, before debug logging
            self.assertEqual("", mock.getStdout())
            runLog.LOG.setVerbosity(logging.DEBUG)
            runLog.LOG.startLog("test_setVerbosityBeforeStartLog")

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.DEBUG)
            runLog.debug("hi")
            self.assertIn("hi", mock.getStdout())
            mock.emptyStdout()

            # we should start with a clean slate, before info logging
            self.assertEqual("", mock.getStdout())
            runLog.LOG.setVerbosity(logging.INFO)
            runLog.LOG.startLog("test_setVerbosityBeforeStartLog2")

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)
            runLog.debug("nope")
            runLog.info("hi")
            self.assertIn("hi", mock.getStdout())
            self.assertNotIn("nope", mock.getStdout())
            mock.emptyStdout()

    def test_callingStartLogMultipleTimes(self):
        """Calling startLog() multiple times will lead to multiple output files, but logging should still work."""
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_callingStartLogMultipleTimes1")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)
            runLog.info("hi1")
            self.assertIn("hi1", mock.getStdout())
            mock.emptyStdout()

            # call startLog() again
            runLog.LOG.startLog("test_callingStartLogMultipleTimes2")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)
            runLog.info("hi2")
            self.assertIn("hi2", mock.getStdout())
            mock.emptyStdout()

            # call startLog() again
            runLog.LOG.startLog("test_callingStartLogMultipleTimes3")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)
            runLog.info("hi3")
            self.assertIn("hi3", mock.getStdout())
            mock.emptyStdout()

            # call startLog() again, with a duplicate logger name
            runLog.LOG.startLog("test_callingStartLogMultipleTimes3")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)
            runLog.info("hi333")
            self.assertIn("hi333", mock.getStdout())
            mock.emptyStdout()

    def test_deduplicationFilter(self):
        """Test that the logic to only print a log message once works correctly."""
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_deduplicationFilter")
            runLog.LOG.setVerbosity(logging.INFO)

            # same label repeated: only the first message should be emitted
            msgInfo = "singleInfoMessage"
            for i in range(4):
                runLog.info(f"{msgInfo}: {i}", single=True, label=msgInfo)

            msgWarn = "singleWarnMessage"
            for j in range(4):
                runLog.warning(f"{msgWarn}: {j}", single=True, label=msgWarn)

            logs = mock.getStdout()
            self.assertEqual(logs.count(msgInfo), 1)
            self.assertEqual(logs.count(msgWarn), 1)

    def test_concatenateLogs(self):
        """
        Simple test of the concat logs function.

        .. test:: The run log combines logs from different processes.
            :id: T_ARMI_LOG_MPI
            :tests: R_ARMI_LOG_MPI

        .. test:: The run log can log to file.
            :id: T_ARMI_LOG_IO1
            :tests: R_ARMI_LOG_IO
        """
        with TemporaryDirectoryChanger():
            # create the log dir
            logDir = "test_concatenateLogs"
            if os.path.exists(logDir):
                rmtree(logDir)
            runLog.createLogDir(logDir)

            # create a stdout file
            stdoutFile1 = os.path.join(logDir, "{}.runLogTest.0000.stdout".format(runLog.STDOUT_LOGGER_NAME))
            with open(stdoutFile1, "w") as f:
                f.write("hello world\n")

            stdoutFile2 = os.path.join(logDir, "{}.runLogTest.0001.stdout".format(runLog.STDOUT_LOGGER_NAME))
            with open(stdoutFile2, "w") as f:
                f.write("hello other world\n")

            # verify behavior for a corner case (empty case title in the file name)
            stdoutFile3 = os.path.join(logDir, "{}..0000.stdout".format(runLog.STDOUT_LOGGER_NAME))
            with open(stdoutFile3, "w") as f:
                f.write("hello world again\n")

            self.assertTrue(os.path.exists(stdoutFile1))
            self.assertTrue(os.path.exists(stdoutFile2))
            self.assertTrue(os.path.exists(stdoutFile3))

            # create a stderr file
            stderrFile = os.path.join(logDir, "{}.runLogTest.0000.stderr".format(runLog.STDOUT_LOGGER_NAME))
            with open(stderrFile, "w") as f:
                f.write("goodbye cruel world\n")

            self.assertTrue(os.path.exists(stderrFile))

            # concat logs
            runLog.concatenateLogs(logDir=logDir)

            # verify output
            combinedLogFile = os.path.join(logDir, "runLogTest-mpi.log")
            self.assertTrue(os.path.exists(combinedLogFile))
            self.assertFalse(os.path.exists(stdoutFile1))
            self.assertFalse(os.path.exists(stdoutFile2))
            self.assertFalse(os.path.exists(stdoutFile3))
            self.assertFalse(os.path.exists(stderrFile))

            # verify behavior for a corner case
            stdoutFile3 = os.path.join(logDir, "{}..0000.stdout".format(runLog.STDOUT_LOGGER_NAME))
            with open(stdoutFile3, "w") as f:
                f.write("hello world again\n")

            # concat logs
            runLog.concatenateLogs(logDir=logDir)

            # verify output
            combinedLogFile = os.path.join(logDir, "armi-workers-mpi.log")
            self.assertTrue(os.path.exists(combinedLogFile))
            self.assertFalse(os.path.exists(stdoutFile3))

    def test_createLogDir(self):
        """Test the createLogDir() method.

        .. test:: Test that log directories can be created for logging output files.
            :id: T_ARMI_LOG6
            :tests: R_ARMI_LOG
        """
        with TemporaryDirectoryChanger():
            logDir = "test_createLogDir"
            self.assertFalse(os.path.exists(logDir))
            # calling repeatedly must be idempotent (no error on existing dir)
            for _ in range(10):
                runLog.createLogDir(logDir)
                self.assertTrue(os.path.exists(logDir))


class TestRunLogEnvEdits(unittest.TestCase):
    """Tests that will use monkeypatch to alter an environment variable."""

    def setUp(self):
        # We cannot import pytest at the top of the file right now. The ARMI unit tests are currently imported at
        # runtime, and until that is changed, we don't want pytest to be a runtime dependency. For now, hide the import
        # down here. Once the testing module is complete and ARMI's unit tests aren't all imported, the pytest import
        # can move up to where it belongs.
        import pytest

        self.monkeypatch = pytest.MonkeyPatch()

    def tearDown(self):
        self.monkeypatch.undo()

    def test_createLogDirNonDefault(self):
        """Test the scenario where a user sets the environment variable that edits the log dir location."""
        with TemporaryDirectoryChanger() as td:
            self.monkeypatch.setenv("ARMI_TEMP_ROOT_PATH", str(Path(td.destination) / "logzGoHere"))
            runLog.createLogDir()

            # assert the env variable-edits logs path exists
            p = Path(td.destination) / "logzGoHere" / "logs"
            self.assertTrue(p.exists())

            # assert the default logs path doesn't exist
            p = Path(os.getcwd()) / "logs"
            self.assertFalse(p.exists())

    def test_getLogDir(self):
        """Test getLogDir with and without an environment variable edit."""
        default = Path(runLog.getLogDir())
        self.assertEqual(default, Path(os.getcwd()) / "logs")

        root = Path("somewhere") / "else"
        self.monkeypatch.setenv("ARMI_TEMP_ROOT_PATH", str(root))
        altered = Path(runLog.getLogDir())
        self.assertEqual(altered, root / "logs")


class TestRunLogger(unittest.TestCase):
    def setUp(self):
        self.rl = runLog.RunLogger("ARMI|things_and_stuff|0")

    def test_getDuplicatesFilter(self):
        df = self.rl.getDuplicatesFilter()
        self.assertEqual(type(df), runLog.DeduplicationFilter)

        # with no filters installed, there is no duplicates filter to find
        self.rl.filters = []
        self.assertIsNone(self.rl.getDuplicatesFilter())

    def test_allowStopDuplicates(self):
        # the usual case, where the DeduplicateFilter already exists
        self.assertEqual(len(self.rl.filters), 1)
        self.rl.allowStopDuplicates()
        self.assertEqual(len(self.rl.filters), 1)

        # the unusual case, where the DeduplicateFilter isn't there
        self.rl.filters = []
        self.assertEqual(len(self.rl.filters), 0)
        self.rl.allowStopDuplicates()
        self.assertEqual(len(self.rl.filters), 1)

    def test_write(self):
        """Test that we can write text to the logger output stream.

        .. test:: Write logging text to the logging stream and/or file.
            :id: T_ARMI_LOG7
            :tests: R_ARMI_LOG
        """
        # divert the logging to a stream, to make testing easier
        stream = StringIO()
        handler = logging.StreamHandler(stream)
        self.rl.handlers = [handler]

        # log some things
        testName = "test_write"
        self.rl.write(testName)

        # test what was logged
        streamVal = stream.getvalue()
        self.assertIn(testName, streamVal, msg=streamVal)



================================================
FILE: armi/tests/test_symmetry.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Audit symmetry-aware parameters in baseline ARMI.
See Also
--------
armi.testing.symmetryTesting
"""

from armi.reactor.assemblyParameters import getAssemblyParameterDefinitions
from armi.reactor.blockParameters import getBlockParameterDefinitions
from armi.reactor.reactorParameters import defineCoreParameters
from armi.testing import symmetryTesting


class ArmiSymmetryTest(symmetryTesting.BasicArmiSymmetryTestHelper):
    """Run symmetry intentionality tests for ARMI."""

    def setUp(self):
        # parameter definitions the helper will audit
        self.coreParamsToTest = defineCoreParameters()
        self.assemblyParamsToTest = getAssemblyParameterDefinitions()
        self.blockParamsToTest = getBlockParameterDefinitions()
        # parameters expected to scale with symmetry
        self.expectedSymmetricBlockParams = [
            "molesHmNow",
            "molesHmBOL",
            "massHmBOL",
            "initialB10ComponentVol",
            "kgFis",
            "kgHM",
        ]
        self.expectedSymmetricAssemblyParams = ["THmassFlowRate"]
        self.parameterOverrides = {"xsType": ["A"], "xsTypeNum": 65, "notes": ""}
        self.paramsToIgnore = ["maxAssemNum"]
        super().setUp()



================================================
FILE: armi/tests/test_tests.py
================================================
# Copyright 2021 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the test helpers."""

import unittest

from armi import tests


class TestCompareFiles(unittest.TestCase):
    def test_compareFileLine(self):
        expected = "oh look, a number! 3.14 and some text and another number 1.5 and another 0.0"

        # any line compared with itself should pass
        self.assertTrue(tests.ArmiTestHelper.compareLines(expected, expected))
        self.assertTrue(tests.ArmiTestHelper.compareLines(expected, expected, eps=0.01))

        # if we vary the numbers a tiny bit, the epsilon parameter should correctly control the comparison
        actual = "oh look, a number! 3.15 and some text and another number 1.6 and another 0.0 "
        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.04))
        self.assertTrue(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.07))

        # if we add an extra, non-number word, the comparison should fail
        actual = "oh look, a number! 3.15 and some text and another number 1.6 extra and another 0.0"
        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.04))

        # if we replace a number with not a number, the comparison should fail
        actual = "oh look, a number! notANumber and some text and another number 1.5 and another 0.0"
        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.04))

    def test_onlySomeMatch(self):
        # only the first number in the line matches, so the line should fail
        expected = "oh look, a number! 3.14 and some text and another number 1.5 and another 0.0"
        actual = "oh look, a number! 3.14 and some text and another number 2.2 and another 9.9"
        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.01))

        # only the second number in the line matches, so the line should fail
        expected = "oh look, a number! 3.14 and some text and another number 1.5 and another 0.0"
        actual = "oh look, a number! 7.7 and some text and another number 1.5 and another 9.9"
        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.01))

        # only the last number in the line matches, so the line should fail
        expected = "oh look, a number! 3.14 and some text and another number 1.5 and another 0.0"
        actual = "oh look, a number! 7.7 and some text and another number 8.5 and another 0.0"
        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.01))

    def test_strangeCases(self):
        # comparing the same string should return True, even if there are no numbers
        expected = "There are no numbers"
        self.assertTrue(tests.ArmiTestHelper.compareLines(expected, expected))

        # comparing different strings should return False, even if there are no numbers
        actual = "There are SOME numbers"
        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual))

        # comparing empty strings should return True
        self.assertTrue(tests.ArmiTestHelper.compareLines("", ""))

        # comparing equal strings of whitespace should return True
        whiteSpace3 = "   "
        self.assertTrue(tests.ArmiTestHelper.compareLines(whiteSpace3, str(whiteSpace3)))



================================================
FILE: armi/tests/test_user_plugins.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the UserPlugin class.""" import copy import os import unittest from armi import context, getApp, interfaces, plugins, utils from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors from armi.settings import caseSettings from armi.tests import TEST_ROOT from armi.utils import directoryChangers class UserPluginFlags(plugins.UserPlugin): """Simple UserPlugin that defines a single, new flag.""" @staticmethod @plugins.HOOKIMPL def defineFlags(): """Function to provide new Flags definitions.""" return {"SPECIAL": utils.flags.auto()} class UserPluginFlags2(plugins.UserPlugin): """Simple UserPlugin that defines a single, new flag.""" @staticmethod @plugins.HOOKIMPL def defineFlags(): """Function to provide new Flags definitions.""" return {"FLAG2": utils.flags.auto()} class UserPluginFlags3(plugins.UserPlugin): """Simple UserPlugin that defines a single, new flag.""" @staticmethod @plugins.HOOKIMPL def defineFlags(): """Function to provide new Flags definitions.""" return {"FLAG3": utils.flags.auto()} # text-file version of a stand-alone Python file for a simple User Plugin upFlags4 = """ from armi import plugins from armi import utils class UserPluginFlags4(plugins.UserPlugin): @staticmethod @plugins.HOOKIMPL def defineFlags(): return {"FLAG4": utils.flags.auto()} """ class UserPluginBadDefinesSettings(plugins.UserPlugin): """This is invalid/bad because it implements defineSettings().""" @staticmethod @plugins.HOOKIMPL def defineSettings(): """Define settings for the plugin.""" return [1, 2, 3] class UserPluginBadDefineParameterRenames(plugins.UserPlugin): """This is invalid/bad because it implements defineParameterRenames().""" @staticmethod @plugins.HOOKIMPL def defineParameterRenames(): """Return a mapping from old parameter names to new parameter names.""" return {"oldType": "type"} class UserPluginOnProcessCoreLoading(plugins.UserPlugin): """ This plugin flex-tests the onProcessCoreLoading() hook, and arbitrarily adds "1" to the 
height of every block, after the DB is loaded. """ @staticmethod @plugins.HOOKIMPL def onProcessCoreLoading(core, cs, dbLoad): """Function to call whenever a Core object is newly built.""" blocks = core.getBlocks(Flags.FUEL) for b in blocks: b.p.height += 1.0 class UpInterface(interfaces.Interface): """ A mostly meaningless little test interface, just to prove that we can affect the reactor state from an interface inside a UserPlugin. """ name = "UpInterface" def interactEveryNode(self, cycle, node): """Logic to be carried out at every time node in the simulation.""" self.r.core.p.power += 100 class UserPluginWithInterface(plugins.UserPlugin): """A little test UserPlugin, just to show how to add an Interface through a UserPlugin.""" @staticmethod @plugins.HOOKIMPL def exposeInterfaces(cs): """Function for exposing interface(s) to other code.""" return [interfaces.InterfaceInfo(interfaces.STACK_ORDER.PREPROCESSING, UpInterface, {"enabled": True})] class TestUserPlugins(unittest.TestCase): def setUp(self): """ Manipulate the standard App. We can't just configure our own, since the pytest environment bleeds between tests. 
""" self._backupApp = copy.deepcopy(getApp()) def tearDown(self): """Restore the App to its original state.""" import armi armi._app = self._backupApp context.APP_NAME = "armi" def test_userPluginsFlags(self): # a basic test that a UserPlugin is loaded app = getApp() pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertNotIn("UserPluginFlags", pluginNames) app.pluginManager.register(UserPluginFlags) pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertIn("UserPluginFlags", pluginNames) # we shouldn't be able to register the same plugin twice with self.assertRaises(ValueError): app.pluginManager.register(UserPluginFlags) def test_validateUserPluginLimitations(self): # this should NOT raise any errors _up = UserPluginFlags() # this should raise an error because it has a defineSettings() method with self.assertRaises(AssertionError): _bad0 = UserPluginBadDefinesSettings() def test_registerUserPlugins(self): app = getApp() pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertNotIn("UserPluginFlags2", pluginNames) plugins = ["armi.tests.test_user_plugins.UserPluginFlags2"] app.registerUserPlugins(plugins) pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertIn("UserPluginFlags2", pluginNames) self.assertIn("FLAG2", dir(Flags)) def test_registerUserPluginsAbsPath(self): app = getApp() with directoryChangers.TemporaryDirectoryChanger(): # write a simple UserPlugin to a simple Python file with open("plugin4.py", "w") as f: f.write(upFlags4) # register that plugin using an absolute path cwd = os.getcwd() plugins = [os.path.join(cwd, "plugin4.py") + ":UserPluginFlags4"] app.registerUserPlugins(plugins) pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertIn("UserPluginFlags4", pluginNames) self.assertIn("FLAG4", dir(Flags)) def test_registerUserPluginsFromSettings(self): app = getApp() cs = caseSettings.Settings().modified( 
caseTitle="test_registerUserPluginsFromSettings", newSettings={ "userPlugins": ["armi.tests.test_user_plugins.UserPluginFlags3"], }, ) pNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertNotIn("UserPluginFlags3", pNames) cs.registerUserPlugins() pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertIn("UserPluginFlags3", pluginNames) self.assertIn("FLAG3", dir(Flags)) def test_userPluginOnProcessCoreLoading(self): """ Test that a UserPlugin can affect the Reactor state, by implementing onProcessCoreLoading() to arbitrarily increase the height of all the blocks by 1.0. """ # register the plugin app = getApp() name = "UserPluginOnProcessCoreLoading" pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertNotIn(name, pluginNames) app.pluginManager.register(UserPluginOnProcessCoreLoading) # validate the plugins was registered pluginz = app.pluginManager.list_name_plugin() pluginNames = [p[0] for p in pluginz] self.assertIn(name, pluginNames) # grab the loaded plugin plug0 = [p[1] for p in pluginz if p[0] == name][0] # load a reactor and grab the fuel assemblies o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml") fuels = r.core.getBlocks(Flags.FUEL) # prove that our plugin affects the core in the desired way heights = [float(f.p.height) for f in fuels] plug0.onProcessCoreLoading(core=r.core, cs=o.cs, dbLoad=False) for i, height in enumerate(heights): self.assertEqual(fuels[i].p.height, height + 1.0) def test_userPluginWithInterfaces(self): """Test that UserPlugins can correctly inject an interface into the stack.""" # register the plugin app = getApp() pNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertNotIn("UserPluginWithInterface", pNames) # register custom UserPlugin, that has an plugins = ["armi.tests.test_user_plugins.UserPluginWithInterface"] app.registerUserPlugins(plugins) pluginNames = [p[0] for p in 
app.pluginManager.list_name_plugin()] self.assertIn("UserPluginWithInterface", pluginNames) # load a reactor and grab the fuel assemblieapps o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml") _fuels = r.core.getAssemblies(Flags.FUEL) # This is here because we have multiple tests altering the App() o.interfaces = [] o.initializeInterfaces(r) app.pluginManager.hook.exposeInterfaces(cs=o.cs) # This test is not set up for a full run through all the interfaces, for # instance, there is not database prepped. So let's skip some interfaces. for skipIt in ["fuelhandler", "history"]: for i, interf in enumerate(o.interfaces): if skipIt in str(interf).lower(): o.interfaces = o.interfaces[:i] + o.interfaces[i + 1 :] break # test that the core power goes up power0 = float(r.core.p.power) o.cs["nCycles"] = 2 o.operate() self.assertGreater(r.core.p.power, power0) def test_registerRepeatedUserPlugins(self): app = getApp() # Test plugin registration with two userPlugins with the same name with directoryChangers.TemporaryDirectoryChanger(): # write a simple UserPlugin to a simple Python file with open("plugin4.py", "w") as f: f.write(upFlags4) # register that plugin using an absolute path cwd = os.getcwd() plugins = [os.path.join(cwd, "plugin4.py") + ":UserPluginFlags4"] * 2 app.registerUserPlugins(plugins) pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertEqual(pluginNames.count("UserPluginFlags4"), 1) # Repeat test for other type of path cs = caseSettings.Settings().modified( caseTitle="test_registerUserPluginsFromSettings", newSettings={ "userPlugins": [ "armi.tests.test_user_plugins.UserPluginFlags3", "armi.tests.test_user_plugins.UserPluginFlags3", ], }, ) cs.registerUserPlugins() pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()] self.assertEqual(pluginNames.count("UserPluginFlags3"), 1) ================================================ FILE: armi/tests/tutorials/data_model.ipynb 
================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Hands-on at the ARMI Terminal\n", "\n", "This tutorial will walk you through some exploration with ARMI on the command\n", "line with the goal of exposing you to some of the capabilities\n", "and organization of information in the ARMI system.\n", "\n", "## Initializing and Exploring the ARMI Model\n", "First we need to get some inputs. We built some from scratch in\n", "[Building input files for a fast reactor](walkthrough_inputs.html)\n", "and we pick those up and use them `here <https://github.com/terrapower/armi/tree/main/armi/testing/reactors/anl-afci-177>`_ as well:\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can load these inputs using armi's ``init`` function. This will build an **Operator**, a **Reactor**, and an **Interface Stack** full of various interfaces." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# you can only configure an app once\n", "import armi\n", "\n", "if not armi.isConfigured():\n", " armi.configure(armi.apps.App())" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "# Depending on which test runs this, the test reactor will be in a different place.\n", "filePath1 = \"../../testing/reactors/anl-afci-177/anl-afci-177.yaml\"\n", "filePath2 = \"../anl-afci-177/anl-afci-177.yaml\"\n", "\n", "if os.path.exists(filePath1):\n", " filePath = filePath1\n", "else:\n", " filePath = filePath2\n", "\n", "o = armi.init(fName=filePath)\n", "o.r.core.sortAssemsByRing() # makes innermost assemblies appear first" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "You have now created an ``operator`` object, which contains a ``Reactor`` object (called ``o.r``) that\n", "represents the beginning-of-life (BOL) state of the nuclear reactor defined in the 
inputs. The reactor looks\n", "like this:\n", "\n", ".. figure:: /.static/armi_reactor_objects.png\n", " :align: center\n", "\n", " **Figure 1.** The primary data containers in ARMI\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "The data hierarchy in an ARMI model often is made up of:\n", "\n", "* :py:mod:`Reactors <armi.reactor.reactors>`, often named ``reactor`` or ``r`` contain a Core and possibly other equipment. They represent collections of assemblies. \n", "* :py:mod:`Assemblies <armi.reactor.assemblies>`, often named ``assembly`` or ``a``, are individual pieces that collect into a System.\n", "* :py:mod:`Blocks <armi.reactor.blocks>`, often called ``block`` or ``b`` are divisions of the assemblies into sections one on top of the other.\n", "* :py:mod:`Components <armi.reactor.components>` The geometrically defined objects (Circles, Hexagons, Helices, Dodecagons) and their dimensions.\n", "* :py:mod:`Materials <armi.materials>` are objects which have material properties like linear expansion coefficients, thermal conductivities, isotopic mass fractions, and densities.\n", "\n", "Each of these objects house more than the listed objects, they also are responsible for a variety of state information\n", "like the reactor's overall keff, flux, height, temperature, etc. In this section, we will explore these objects,\n", "see how to access them, and how to view their contained information.\n", "\n", "Exploring it a little, we can list all the assemblies in the reactor with:\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "core = o.r.core\n", "core.getAssemblies()[:25] # only print the first 25" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ ".. admonition:: Quiz Question 1 \n", " \n", " How many assemblies does the model have? (see answers at bottom)\n", " \n", " \n", ".. 
tip::\n", " A reactor is made up of assemblies, which are made up of blocks, and so on. Each composite ARMI\n", " object has a ``getChildren`` method that will retrieve a list of its contents. For clarity,\n", " reactors have a ``getAssemblies()`` method and assemblies have a ``getBlocks()`` method,\n", " but these do exactly the same thing as ``getChildren()`` in both cases.\n", "\n", " Reactor, assembly, blocks, etc. objects act like lists as well, so you can get the fifth\n", " assembly out of a reactor just like you'd get the fifth item out of any other list\n", " (don't forget that Python uses `zero-based numbering <http://en.wikipedia.org/wiki/Zero-based_numbering>`_)::\n", "\n", " >>> fifthAssem = core[4]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can drill down the hierarchy for a particular assembly:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "core = o.r[0]\n", "print(core)\n", "assem = core[1]\n", "print(assem)\n", "block = assem[5]\n", "print(block)\n", "print(f\"Block's parent is: {block.parent}\")\n", "components = block.getChildren()\n", "print(components)\n", "material = components[0].material\n", "print(material)" ] }, { "cell_type": "markdown", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "## Exploring the *state* of the reactor\n", "State can be explored using a variety of framework methods, as well as looking at state *parameters*. Let's first try out some methods to find out how much U-235 is in the model and what the average uranium enrichment is:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "u235 = core.getMass(\"U235\")\n", "u238 = core.getMass(\"U238\")\n", "print(f\"The core contains {u235} grams of U-235\")\n", "print(f\"The average fissile enrichment is {u235 / (u235 + u238)}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "That's how much U-235 is in the 1/3 core. 
If we want the total mass (including all nuclides), we can just leave the argument out:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "core.getMass() / 1.0e6" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "core.getMass?" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Furthermore, you can get a list of available methods by pressing the tab key. Try `core.` followed by `[Tab]`. Try out some options!" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "Use tab completion to explore other methods of ARMI reactors assemblies and blocks. You can\n", "view a summary of the methods of any object in the :ref:`API documentation <modindex>`.\n", "For a good example, see :py:class:`the API docs for a block <armi.reactor.blocks.Block>`." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Next, lets find out what the number density of U235 is in a particular fuel block. We'll use the *FLAGS* system to select a particular type of block (in this case, a fuel block):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from armi.reactor.flags import Flags\n", "\n", "b = core.getFirstBlock(Flags.FUEL)\n", "print(f\"U-235 ndens: {b.getNumberDensity('U235'):.4e} (atoms/bn-cm)\")\n", "print(f\"Block name: {b.getName()}\")\n", "print(f\"Block type: {b.getType()}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can find lots of other details about this block with:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "b.printContents(includeNuclides=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Modifying the state of the reactor\n", "Each object in the Reactor model has a bunch of *state parameters* contained in its special `.p` attribute, called its *Parameter Collection*. 
The state parameters are defined both by the ARMI framework and the collection of plugins. For instance, you can look at the core's keff parameters or each individual block's power and multi-group flux parameters like this:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(b.p.power)\n", "print(core.p.keff)\n", "print(b.p.mgFlux)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "As you might expect, the values are zero because we have not performed any physics calculations yet. We could run a physics plugin at this point to add physics state, but for this tutorial, we'll just apply dummy data. Here's a fake physics kernel that just sets a power distribution based on spatial location of each block (e.g. a spherical distribution):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "midplane = core[0].getHeight() / 2.0\n", "center = np.array([0, 0, midplane])\n", "peakPower = 1e6\n", "mgFluxBase = np.arange(5)\n", "\n", "\n", "def setFakePower(core):\n", " for a in core:\n", " sf = a.getSymmetryFactor()\n", " for b in a:\n", " vol = b.getVolume()\n", " coords = b.spatialLocator.getGlobalCoordinates()\n", " r = np.linalg.norm(abs(coords - center))\n", " fuelFlag = 10 if b.isFuel() else 1.0\n", " # Use the symmetry factor to account for the central assembly being split\n", " b.p.power = peakPower / r**2 * fuelFlag / sf\n", " b.p.pdens = b.p.power / vol\n", " b.p.mgFlux = mgFluxBase * b.p.pdens\n", " if b.isFuel():\n", " print(b.p.power, b.getLocation())\n", "\n", "\n", "setFakePower(core)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(b.p.power)\n", "print(b.p.pdens)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "\n", "a = b.parent\n", "z = [b.spatialLocator.getGlobalCoordinates()[2] for 
b in a]\n", "power = a.getChildParamValues(\"power\")\n", "plt.plot(z, power, \".-\")\n", "plt.title(\"Fake power distribution on reactor\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can take a look at the spatial distribution as well:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from armi.utils import plotting\n", "\n", "# Note, if you were plotting outside jupyter, you could click\n", "# on different depths at the bottom to view different axial planes.\n", "plotting.plotBlockDepthMap(core, \"power\", depthIndex=5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Modifying number densities\n", "Analysts frequently want to modify number densities. For example, if you needed to compute a coolant density coefficient, you could simply reduce the amount of coolant in the core. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "sodiumBefore = core.getMass(\"NA\")\n", "print(f\"Before: {sodiumBefore / 1e6:.2f} MT Sodium\")\n", "for b in core.getBlocks(): # loop through all blocks\n", " refDens = b.getNumberDensity(\"NA23\")\n", " b.setNumberDensity(\"NA23\", refDens * 0.98) # reduce Na density by 2%\n", "sodiumAfter = core.getMass(\"NA\")\n", "print(f\"After: {sodiumAfter / 1e6:.2f} MT Sodium\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "If you analyze the keff with a physics plugin before and after, the change in the `core.p.keff` param would determine your density coefficient of reactivity. \n", "\n", "## Saving state to disk\n", "During analysis, it's often useful to save the reactor state to disk in a database. The ARMI database package handles this, and writes it out to an [HDF-formatted](https://en.wikipedia.org/wiki/Hierarchical_Data_Format) file. 
This is typically done automatically at each point in time in a normal simulation, and can also be done manually, like this:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "dbi = o.getInterface(\"database\")\n", "dbi.initDB()\n", "dbi.database.writeToDB(o.r)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Fuel management\n", "One plugin that comes with the framework is the Fuel Handler. It attaches the Fuel Handler interface, which we can grab now to move fuel around. In a typical ARMI run, the detailed fuel management choices are specified by the user-input custom shuffle logic file. In this particular example, we will simply swap the 10 highest-power fuel assemblies with the 10 lowest-power ones. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from armi.physics.fuelCycle import fuelHandlers\n", "\n", "fh = fuelHandlers.fuelHandlerFactory(o)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "moved = []\n", "for n in range(10):\n", " high = fh.findAssembly(param=\"power\", compareTo=1.0e6, blockLevelMax=True, exclusions=moved)\n", " low = fh.findAssembly(param=\"power\", compareTo=0.0, blockLevelMax=True, exclusions=moved)\n", " fh.swapAssemblies(high, low)\n", " moved.extend([high, low])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plotting.plotBlockDepthMap(core, \"power\", depthIndex=5)\n", "# You can also plot total assembly params, which are the sum of block params\n", "plotting.plotFaceMap(core, \"power\", vals=\"sum\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can write this new state to DB as well, since we've shuffled the fuel" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "o.r.p.timeNode += 1\n", "dbi.database.writeToDB(o.r)\n", "dbi.database.close()" ] }, { 
"cell_type": "markdown", "metadata": {}, "source": [ "## Loading from the database\n", "Once you have a database, you can use it to load a Reactor object from any of the states that were written to it. First, create a Database object, then open it and call its `load()` method." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from armi.bookkeeping import db\n", "\n", "databaseLocation = \"../tutorials/anl-afci-177.h5\"\n", "cycle, timeNode = 0, 1\n", "dbo = db.databaseFactory(databaseLocation, \"r\")\n", "with dbo:\n", " # Load a new reactor object from the requested cycle and time node\n", " r = dbo.load(cycle, timeNode)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can see that the time node is what we expect (node 1), and there is some fission product mass since we loaded from a cycle after a depletion step." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(r.p.timeNode)\n", "print(o.r.getFissileMass())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Having a Reactor object by itself can be very useful for all sorts of post-processing tasks. However, sometimes we may wish initialize more ARMI components to do more advanced tasks and interactive follow-on analysis. Lucky for us, the database stores the settings that were used to run the case in the first place. We can get them like this:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "with dbo:\n", " cs = dbo.loadCS()\n", " print(cs[\"neutronicsKernel\"])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "With this `Settings` object, we could create a brand new `Case` and `Operator` and do all sorts of magic. This way of interacting with ARMI is rather advanced, and beyond the scope of this tutorial." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "That's just a brief exploration of the data model. 
Hopefully it helped orient you to the underlying ARMI structure." ] } ], "metadata": { "celltoolbar": "Raw Cell Format", "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.2" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: armi/tests/tutorials/param_sweep.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Parameter sweeps\n", "Parameter sweeps allow you to quickly and easily build a series of related cases that all change one or more aspects of the input model or modeling approximations. Because ARMI automates full-scope engineering analysis, ARMI-driven parameter sweeps are extremely useful for design exploration, sensitivity studies, and statistical analysis. \n", "\n", "To get started with a parameter sweep, you first need some inputs. " ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "* :download:`Blueprints <anl-afci-177-blueprints.yaml>`\n", "* :download:`Settings <anl-afci-177.yaml>`\n", "* :download:`Core map <anl-afci-177-coreMap.yaml>`\n", "* :download:`Fuel management <anl-afci-177-fuelManagement.py>`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Next, you need an app and a `Case` object as the starting point. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# you can only configure an app once\n", "import armi\n", "\n", "if not armi.isConfigured():\n", " armi.configure(armi.apps.App())" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "from armi import cases, settings\n", "from armi.cases import suiteBuilder\n", "from armi.cases.inputModifiers import inputModifiers\n", "\n", "fPath = \"../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\"\n", "if not os.path.exists(fPath):\n", " fPath = \"../../testing/reactors/anl-afci-177/anl-afci-177.yaml\"\n", "\n", "cs = settings.Settings(fPath)\n", "case = cases.Case(cs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Next, you make a SuiteBuilder, which is the thing that will perturb the input files to generate a suite of related cases from the base case. There are two basic choices, the `FullFactorialSuiteBuilder` which will expand each degree of freedom in every combination (a full multi-dimensional matrix), and the `SeparateEffectsSuiteBuilder` builder, which varies each degree of freedom in isolation. We'll make a FullFactorial case for this demo.\n", "\n", "Once you have a `SuiteBuilder`, you start adding one or more degrees of freedom, each of which will adjust one aspect of the input definitions (modeling options, reactor design, etc.).\n" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ ".. note:: You may also find the :py:mod:`more detailed API documentation useful<armi.cases.suiteBuilder>`. 
" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## A simple one-dimensional parameter sweep" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "builder = suiteBuilder.SeparateEffectsSuiteBuilder(case)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Each degree of freedom is defined by an `InputModifier` and a range of values. ARMI contains a few basic `InputModifier` for simple things (like changing settings), and for design-specific param sweeps you can make your own design-specific modifiers. \n", "\n", "The simplest form of parameter sweep just adjusts settings. For example, we could adjust the reactor power from 10 MW to 100 MW in a few steps. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "powers = np.linspace(10, 100, 4)\n", "print(f\"Building power modifiers with powers: {powers}\")\n", "powerModifications = [inputModifiers.SettingsModifier(\"power\", mw * 1e6) for mw in powers]\n", "builder.addDegreeOfFreedom(powerModifications)\n", "print(f\"There are {len(builder.modifierSets)} cases in this suite so far.\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now we can build the suite. The `Suite` object itself can write input files or just run on the local computer with `suite.run`.\n", "\n", "The suite will generate copies of the base case with the power modified across the defined range. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "suite = builder.buildSuite()\n", "suite.echoConfiguration()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "On the other hand, if you want to write inputs and then submit them all to a high-performance computer, you can do that too with `suite.writeInputs()`" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "suite.writeInputs()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can now see that perturbed input files have been produced in the `case-suite` folder." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!grep -R \"power:\" case-suite/*" ] }, { "cell_type": "raw", "metadata": { "raw_mimetype": "text/restructuredtext" }, "source": [ "To submit this suite to a computer cluster, one would run a series of ``python -m armi run`` commands from the ``case-suite`` folder. On a HPC, one would submit these commands to the HPC using the queuing system. \n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Modifying the reactor design\n", "Modifying settings is one thing, but the real power of parameter sweeps comes from programmatically perturbing the reactor component designs themselves. We accomplish this by modifying ARMI Blueprint objects as derived from the base input. 
\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class CladThicknessModifier(inputModifiers.InputModifier):\n", " \"\"\"Modifier that adjusts the cladding outer diameter.\"\"\"\n", "\n", " def __call__(self, cs, bp):\n", " for blockDesign in bp.blockDesigns:\n", " for componentDesign in blockDesign:\n", " if componentDesign.name == \"clad\":\n", " # by default, values passed to a modifier end up in the\n", " # independentVariable dict\n", " componentDesign.od = self.independentVariable[\"cladThickness\"]\n", " return cs, bp\n", "\n", "\n", "cladThicknesses = np.linspace(0.8, 0.9, 5)\n", "builder = suiteBuilder.SeparateEffectsSuiteBuilder(case)\n", "cladModifications = [CladThicknessModifier({\"cladThickness\": float(od)}) for od in cladThicknesses]\n", "builder.addDegreeOfFreedom(cladModifications)\n", "suite = builder.buildSuite()\n", "suite.echoConfiguration()\n", "suite.writeInputs()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now we can inspect the input files and see that the cladding outer diameter definition has indeed been modified" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!grep -R \"clad:\" -A6 case-suite/* | grep \"od:\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## A full factorial parameter sweep\n", "Of course, one can use factorial sweeps as well. Below we add two degrees of freedom, one of length 5 and another of length 20. This suite has 100 cases total with all combinations of each setting." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "builder = suiteBuilder.FullFactorialSuiteBuilder(case)\n", "powers = np.linspace(10, 100, 5)\n", "powerModifications = [inputModifiers.SettingsModifier(\"power\", mw * 1e6) for mw in powers]\n", "builder.addDegreeOfFreedom(powerModifications)\n", "\n", "cycleLengths = np.linspace(200, 1000, 20)\n", "cycleLengthMods = [inputModifiers.SettingsModifier(\"cycleLength\", cL) for cL in cycleLengths]\n", "builder.addDegreeOfFreedom(cycleLengthMods)\n", "print(f\"There are {len(builder.modifierSets)} cases in this suite.\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Post-processing the results of the sweep\n", "After all the runs have completed in a parameter sweep, you will want to post-process them to come to some kind of useful conclusion. Because post-processing is very design-specific, you need to make a simple post-processing script. The ARMI framework has useful functions that will assist you in this task. 
\n", "\n", "First, we assume you're in a new shell and we discover all the cases that ran:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def loadSuite():\n", " print(\"Loading suite results...\")\n", " import os\n\n", " fPath = \"../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\"\n", " if not os.path.exists(fPath):\n", " fPath = \"../../testing/reactors/anl-afci-177/anl-afci-177.yaml\"\n", " cs = settings.Settings(fPath)\n", " suite = cases.CaseSuite(cs)\n", " suite.discover(patterns=[\"anl-afci-177-????.yaml\"])\n", " suite = sorted(suite, key=lambda c: c.cs.inputDirectory)\n", " return suite\n", "\n", "\n", "suite = loadSuite()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "At this point, you have two options based on your needs:\n\n", "- Read the ARMI HDF5 output databases directly (useful if you just need to pull certain scalar parameters directly out of the database)\n", "- Have ARMI load HDF5 output databases into full ARMI reactor objects and use the ARMI API to extract data (useful if you want to loop over certain parts of the plant to sum things up)\n", "\n", "Directly reading the database will be inherently less stable (e.g. in case the underlying DB format changes), but can be very fast. Loading ARMI reactors for each case is slower, but should also be more powerful and more stable.\n", "\n", "After you extract the data, you can plot it or make tables or anything else you need. We often pass it to non-parametric regression systems like the [Alternating Conditional Expectation](https://github.com/partofthething/ace) (ACE) and then on to a multi-objective optimization system (like [Physical Programming](https://github.com/partofthething/physprog)). 
" ] } ], "metadata": { "celltoolbar": "Raw Cell Format", "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.4" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: armi/tests/tutorials/pin-rotations.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "403d57f2", "metadata": {}, "source": [ "# Pin data, selection, and rotation\n", "\n", "This tutorial is here to help make sense of how ARMI stores data on a `Block` for things that exist within the `Block`. For example, the parameter `Block.p.linPowByPin` is a `(N, )` vector of linear pin powers with one entry per pin. You may be wondering\n", "\n", "1. Where do those powers exist in the block?\n", "2. What component produces those powers? `linPowByPin` is a `Block` parameter, not a `Component` parameter.\n", "3. What happens when the block is rotated?\n", "\n", "By the end of this tutorial, these questions should be answered." 
] }, { "cell_type": "code", "execution_count": 1, "id": "8feec552", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", " +===================================================+\n", " | _ ____ __ __ ___ |\n", " | / \\ | _ \\ | \\/ | |_ _| |\n", " | / _ \\ | |_) | | |\\/| | | | |\n", " | / ___ \\ | _ < | | | | | | |\n", " | /_/ \\_\\ |_| \\_\\ |_| |_| |___| |\n", " | Advanced Reactor Modeling Interface |\n", " | |\n", " | version 0.5.1 |\n", " | |\n", " +===================================================+\n" ] } ], "source": [ "import armi\n", "\n", "if not armi.isConfigured():\n", " armi.configure()" ] }, { "cell_type": "markdown", "id": "e5806ba5", "metadata": {}, "source": [ "## Single pin demonstration\n", "This tutorial uses the same `anl-afci-177/anl-afci-177.yaml` inputs that exist for the fast reactor example. We'll start by initializing the reactor and grabbing a fuel block, it doens't really matter what one. This reactor has a single fuel pin type which means we won't immediately see interesting behavior, but it makes for easier discussion on the fundamentals. Towards the end, we'll look at demonstrative assembly with multiple fuel pin types per block, a more realistic scenario." 
] }, { "cell_type": "code", "execution_count": 2, "id": "bba88e02", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "=========== Settings Validation Checks ===========\n", "=========== Case Information ===========\n", "[info] --------------------- --------------------------------------------------------------------------------\n", " Case Title: anl-afci-177\n", " Case Description: ANL-AFCI-177 CR 1.0 metal core but with HALEU instead of TRU\n", " Run Type: Standard - Operator\n", " Current User: aeinstein\n", " ARMI Location: C:\\Users\\aeinstein\\codes\\armi\\armi\n", " Working Directory: c:\\Users\\aeinstein\\codes\\armi\\armi\\testing\\reactors\\anl-afci-177\n", " Python Interpreter: 3.13.3 (tags/v3.13.3:6280bb5, Apr 8 2025, 14:47:33) [MSC v.1943 64 bit (AMD64)]\n", " Python Executable: c:\\Users\\aeinstein\\codes\\armi\\.venv\\armi\\Scripts\\python.exe\n", " Master Machine: TP011870\n", " Number of Processors: 1\n", " Date and Time: Tue Aug 12 15:18:24 2025\n", " --------------------- --------------------------------------------------------------------------------\n", "=========== Input File Information ===========\n", "[info] -------------------------------------------------------------------- ------------------------------ ------------\n", " Input Type Path SHA-1 Hash\n", " -------------------------------------------------------------------- ------------------------------ ------------\n", " Case Settings anl-afci-177.yaml 93a5105368\n", " Blueprints anl-afci-177-blueprints.yaml b7b2c74028\n", " Included blueprints anl-afci-177-coreMap.yaml 35ab97dadc\n", " <Setting shuffleLogic value:anl-afci-177-fuelManagement.py default:> anl-afci-177-fuelManagement.py baedb35785\n", " -------------------------------------------------------------------- ------------------------------ ------------\n", "=========== Machine Information ===========\n", "[info] --------- ---------------------- -------\n", " Machine Number of Processors 
Ranks\n", " --------- ---------------------- -------\n", " local 1 0\n", " --------- ---------------------- -------\n", "=========== System Information ===========\n", "[info] OS Name: Microsoft Windows 10 Enterprise\n", " OS Version: 10.0.19045 N/A Build 19045\n", " Processor(s): 1 Processor(s) Installed.\n", " [01]: Intel64 Family 6 Model 186 Stepping 2 GenuineIntel ~1425 Mhz\n", "=========== Reactor Cycle Information ===========\n", "[info] --------------------------- -----------------------------------------------------------------\n", " Reactor Thermal Power (MW): 1000.0\n", " Number of Cycles: 10\n", " Cycle Lengths: 411.11, 411.11, 411.11, 411.11, 411.11, 411.11, 411.11, 411.11,\n", " 411.11, 411.11\n", " Availability Factors: 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9\n", " Power Fractions: [1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0,\n", " 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]\n", " Step Lengths (days): [184.9995, 184.9995], [184.9995, 184.9995], [184.9995, 184.9995],\n", " [184.9995, 184.9995], [184.9995, 184.9995], [184.9995, 184.9995],\n", " [184.9995, 184.9995], [184.9995, 184.9995], [184.9995, 184.9995],\n", " [184.9995, 184.9995]\n", " --------------------------- -----------------------------------------------------------------\n", "=========== Constructing Reactor and Verifying Inputs ===========\n", "[info] Constructing the `core`\n", "=========== Adding Composites to <Core: core id:1902501464784> ===========\n", "[info] Will expand HE, NA, AL, SI, V, CR, MN, FE, CO, NI, ZR, NB, MO, W elementals to have natural isotopics\n", "[info] Constructing assembly `inner fuel`\n", "[warn] Some component was missing in <reflector block-bol-000 at ExCore XS: A ENV GP: A> so pin-to-duct gap not calculated\n", "[warn] The gap between wire wrap and clad in block <plenum block-bol-006 at ExCore XS: A ENV GP: A> was 3.999999999998449e-05 cm. 
Expected 0.0.\n", "[info] Constructing assembly `middle core fuel`\n", "[warn] Some component was missing in <reflector block-bol-000 at ExCore XS: B ENV GP: A> so pin-to-duct gap not calculated\n", "[warn] The gap between wire wrap and clad in block <plenum block-bol-006 at ExCore XS: B ENV GP: A> was 3.999999999998449e-05 cm. Expected 0.0.\n", "[info] Constructing assembly `outer core fuel`\n", "[warn] Some component was missing in <reflector block-bol-000 at ExCore XS: C ENV GP: A> so pin-to-duct gap not calculated\n", "[warn] The gap between wire wrap and clad in block <plenum block-bol-006 at ExCore XS: C ENV GP: A> was 3.999999999998449e-05 cm. Expected 0.0.\n", "[info] Constructing assembly `radial reflector`\n", "[warn] Some component was missing in <reflector block-bol-000 at ExCore XS: A ENV GP: A> so pin-to-duct gap not calculated\n", "[info] Constructing assembly `radial shield`\n", "[warn] Temperature 597.0 out of range (25 to 500) for B4C linear expansion percent\n", "[info] Constructing assembly `control`\n", "[info] Constructing assembly `ultimate shutdown`\n", "=========== Verifying Assembly Configurations ===========\n", "=========== Applying Geometry Modifications ===========\n", "[info] Resetting the state of the converted reactor core model in <EdgeAssemblyChanger>\n", "[info] Updating spatial grid pitch data for hex geometry\n", "=========== Summarizing Source of Material Data for <Core: core id:1902501464784> ===========\n", "[info] --------------- -----------------\n", " Material Name Source Location\n", " --------------- -----------------\n", " B4C ARMI\n", " HT9 ARMI\n", " Sodium ARMI\n", " UZr ARMI\n", " Void ARMI\n", " --------------- -----------------\n", "=========== Initializing Mesh, Assembly Zones, and Nuclide Categories ===========\n", "[info] Nuclide categorization for cross section temperature assignments:\n", " ------------------ -----------------------------------------------------\n", " Nuclide Category Nuclides\n", " 
------------------ -----------------------------------------------------\n", " Fuel PU236, LFP40, HE4, LFP41, LFP38, DUMP1, AM241, NP237,\n", " AM243, U236, PU242, U234, NB93, DUMP2, CM245, LFP39,\n", " NP238, U238, ZR90, PU240, U235, ZR91, PU239,\n", " CO59, PU238, CM243, CM244, PU241, ZR92, AM242M,\n", " ZR96, CM242, CM246, LFP35, ZR94, CM247, AL27\n", " Coolant NA23\n", " Structure SI29, NI64, MO98, CR54, MO97, FE58, W182, B11, CR53,\n", " NI61, MO92, MO94, FE54, SI28, NI62, CR52, MN55,\n", " MO96, CR50, MO100, V51, MO95, SI30, NI60, V50,\n", " NI58, FE56, C, W183, B10, W184, FE57, W186\n", " ------------------ -----------------------------------------------------\n", "[info] Constructing the `Spent Fuel Pool`\n", "[warn] Changing the name of the Spent Fuel Pool to 'sfp'.\n", "=========== Creating Interfaces ===========\n", "=========== Interface Stack Summary ===========\n", "[info] ------- ------------------------ --------------- ---------- --------- ----------- ------------\n", " Index Type Name Function Enabled EOL order BOL forced\n", " ------- ------------------------ --------------- ---------- --------- ----------- ------------\n", " 01 Main main Yes Reversed No\n", " 02 FissionProductModel fissionProducts Yes Normal No\n", " 03 FuelHandler fuelHandler Yes Normal No\n", " 04 CrossSectionGroupManager xsGroups Yes Normal No\n", " 05 HistoryTracker history Yes Normal No\n", " 06 Report report Yes Normal No\n", " 07 Database database Yes Normal No\n", " 08 MemoryProfiler memoryProfiler Yes Normal No\n", " 09 Snapshot snapshot Yes Normal No\n", " ------- ------------------------ --------------- ---------- --------- ----------- ------------\n", "=========== Triggering Init Event ===========\n", "=========== 01 - main Init ===========\n", "=========== 02 - fissionProducts Init ===========\n", "=========== 03 - fuelHandler Init ===========\n", "=========== 04 - xsGroups Init ===========\n", "=========== 05 - history Init ===========\n", "=========== 06 - report 
Init ===========\n", "=========== 07 - database Init ===========\n", "=========== 08 - memoryProfiler Init ===========\n", "=========== 09 - snapshot Init ===========\n", "=========== Completed Init Event ===========\n" ] } ], "source": [ "o = armi.init(fName=\"../../testing/reactors/anl-afci-177/anl-afci-177.yaml\")\n", "o.r.core.sortAssemsByRing()" ] }, { "cell_type": "code", "execution_count": 3, "id": "af28a55b", "metadata": {}, "outputs": [], "source": [ "from armi.reactor.blocks import HexBlock\n", "from armi.reactor.flags import Flags" ] }, { "cell_type": "code", "execution_count": 4, "id": "76832152", "metadata": {}, "outputs": [], "source": [ "import numpy as np" ] }, { "cell_type": "code", "execution_count": 5, "id": "15633a30", "metadata": {}, "outputs": [], "source": [ "fuelBlock = o.r.core.getFirstBlock(Flags.FUEL)" ] }, { "cell_type": "markdown", "id": "82fbb27c", "metadata": {}, "source": [ "Next, assign _some_ power profile to the block. We'll pick a 2D function `p(x, y) = x + y` for each pin centered at `(x, y)`. This way, the rotation of the block will be visible.\n", "\n", "This introduces the first big point: pin-related data assigned as a block parameter **must** be ordered according to `Block.getPinLocations()`. That is the key connection between how data are ordered, where data exist in space, and what components are associated with those data."
] }, { "cell_type": "code", "execution_count": 6, "id": "e3795419", "metadata": {}, "outputs": [], "source": [ "def setPinPow(b: HexBlock):\n", " \"\"\"Fake a pin power p(x, y) = x + y.\"\"\"\n", " pinPow = np.empty(b.getNumPins(), dtype=float)\n", " for ix, loc in enumerate(b.getPinLocations()):\n", " x, y, _z = loc.getLocalCoordinates()\n", " pinPow[ix] = x + y\n", " b.p.linPowByPin = pinPow" ] }, { "cell_type": "code", "execution_count": 7, "id": "e99ee9c3", "metadata": {}, "outputs": [], "source": [ "setPinPow(fuelBlock)" ] }, { "cell_type": "code", "execution_count": 8, "id": "dd71cf83", "metadata": {}, "outputs": [], "source": [ "from matplotlib import pyplot" ] }, { "cell_type": "markdown", "id": "6bcb5620", "metadata": {}, "source": [ "To demonstrate this, we'll make a plot of the block-level pin powers by iterating jointly over the locations in `Block.getPinLocations` and scalar pin values in `Block.p.linPowByPin`. It's not immediately useful because the function\n", "above already set that for us. But this will be helpful to show off rotation too." 
] }, { "cell_type": "code", "execution_count": 9, "id": "4d0d56e7", "metadata": {}, "outputs": [], "source": [ "def plotPinPow(b: HexBlock, **kwargs):\n", " pinPows = b.p.linPowByPin\n", " xs: list[float] = []\n", " ys: list[float] = []\n", " ps: list[float] = []\n", " for ix, loc in enumerate(b.getPinLocations()):\n", " x, y, _z = loc.getLocalCoordinates()\n", " xs.append(x)\n", " ys.append(y)\n", " ps.append(pinPows[ix])\n", " # finely tuned scatter plot size to make nice images here\n", " kwargs.setdefault(\"s\", 150)\n", " return pyplot.scatter(xs, ys, c=ps, **kwargs)" ] }, { "cell_type": "code", "execution_count": 10, "id": "a3936a90", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "<matplotlib.collections.PathCollection at 0x1baf72d38c0>" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnWecHFeVt59b1XFy1ARJMxrlnJMl5yDnnLMNmLywsOwC+7KkhTVpWWBhwSY454izLQdJtmRbOecwGk3OM51D1X0/tEbI0lR1ddfYyKYe/wbsuafvv7unu+6pc889R0gpJQ4ODg4ODg4OfweUv/cTcHBwcHBwcPjHxXFEHBwcHBwcHP5uOI6Ig4ODg4ODw98NxxFxcHBwcHBw+LvhOCIODg4ODg4OfzccR8TBwcHBwcHh74bjiDg4ODg4ODj83XAcEQcHBwcHB4e/G66/9xMwQ9d1mpubyc/PRwjx9346Dg4ODg4ODhaQUhIIBKiurkZRzGMeJ7Qj0tzczMiRI//eT8PBwcHBwcEhCw4dOsSIESNMbU5oRyQ/Px9IvZCCgoK/87NxcHBwcHBwsEJ/fz8jR448so6bcUI7IgPbMQUFBY4j4uDg4ODg8DHDSlqFk6zq4ODg4ODg8HfDcUQcHBwcHBwc/m44joiDg4ODg4PD3w3HEXFwcHBwcHD4u3FCJ6s6OBzLoVA3TzdsoCHYTSgZI9flpSavhCtrZjMit3hINFojvTzftIaGcAehZAy/6mF4TikXVc+hJrd8SDS6Yr0sbXuPQ+EWQskoPtXDMG8pZ1UsoDa3akg0+hJ9vNP5DofChwhrYbyKlxJPCYvLFlOTUzMkGqFkP+t63qIpcoCoFsKteClwFTOr+DRG5owdEo2oFmBX32u0RXcS04KowkOuq4TxBWdR6Z8yJDWGEnqQhsBLdEc3Edf7UYUHr1rCiLxzKfPNGRINTQ/THf4rgehqknofQrhwq6UU51xIgXcRQti/L5QyQiT8PLHYSnS9ByFUFKUUn/98vN4zhkgjDtFXkbF3QO8FoYBSjPCeCd7TEcJZVhwyQ0gp5d/7SRjR399PYWEhfX19zqmZf3Debd/HPXtXsapjH4oQSAk6EgWBEKBLyeJhY7l97C
IWlo/OSmND934eql/Bqs5dDCw7OhKBQBECTerMLRnDDbWnclL5hKw0dvYf4JnGN3i3a/PxGgg0dKYUjOHS4WdwUtmMrDTqQ/W80voKa7rXIEl9veVhDYFAR2d07miWVCxhfsn8rBbZ1mgDy9ufZVPvSiQ6II78v4KCjka1r47F5Rcyq+hUlCwWwJ5YAxu6H2dX/1I0mUQcpSFQkGiUeOqYUXIFkwrPQxFqxhqhRCO7e++nIfAcmowPqpHnHsXYwusZVXA5inBnrBFLNtMW+CMdwcfQZRhQAQ0Qh/89iVetYVj+bQzLvxFFeDPW0LQ2gsG7CIceQsrAoBqKWk1e7qfIyb0NRcnJWEPq3cjQPRB+FGTfURr87d+VckTOTZBzM0LJy1jD4ZNDJuu344g4nNBIKbl799v8ZuebqEKgmXxcB8b/edJZfGbcyRktsI8efIdf73oBVShoUje0UxDoSG4bfSafHXNORhqvtqzkd3sfO+IMpNO4pPp0Pj368owW8Xe73uVP+/8EYKqRWnAlp5adyi2jbkHNYBHf1reahxt+iZS6JY0ZhYu5euSXcSnWF/GDwTW83PRdNJlEHlnsBlcBSV3eIpZUfwe34rOs0RlZx6qWr6DJmAUNGOZfwILKX+BWci1rBGMb2d1xK5oeBAsaeZ65jBv2J1xKoWWNRGIbXZ03oOvdaTQAFNzuqZSUPoiqllnWkMl9yO7bQe+wpIFrNKL4Lwi10rKGwyeLTNZvJ0fE4YTmj3tSTghg6oQcPf6rHW/w570rLWs80bCSX+964fAcxgsrpKIXAPfuf5O7975mWeP11vf47d5HkUjTxftojeeal/Gn/U9b1ljTvYa799+NjrmDAByJlLzd+Tb31t+L1fuRnf3refDgz9Fk0rLG5r5VPHboN+hp3tsBGkMbeaHx30nKeBoHIaUCUB98j1eavo8u09mn6I5u4Z3mL5CUUYsako7I6sOOS9ySRji+g13t16PpAdIv3imNYHw9u9pvQtMjljSSiX10dlxh0QkB0A87Lleh6wFLGlJrQnbdYNEJSWmQPIDsvhGp91jScPjHxnFEHE5Y3uvYz693vJnVY/9n++us6axPa7ett4Ff7nw+K417D7zFyo6dae3qQ838756Hs9J4vnk5K9rXpbVrj7Zz1/67Mp5fInmn8x1WdK5Ia9uf6ObBg78g0xCqRLKl711Wdr6Y1jaq9fNi03cAHTJQkugcDK1hbdeDaW2TeoRVLf+EjnZYx7pGV3QD27t+m9ZWlzF2d9yGLuMZaYBGOL6Nhp4fpn8+UqOr62akDGPNQfibRjK5j97ef7WgIZE9nwfZn7EGWjPSgoaDg+OIOJyw3Lf3XdQskwRVoXDv3lVp7R5teAc1ywQ+BcFD9cvT2r3QvALI7nUIBE83vpHW7s32Ny1HNQbj5ZaX0z5+dffraDJJJg7C0azoeC5txGJH7ysk9MiRaEpmSDZ1P42mm0csDgVfIq73kZmD8DeN/f1PkNTDplY94VdIaG1ktngPoNMVeoKkZh5NiEXfRNPqs9TQiEZeQNOazc0SayC5K2sN4iuQyQNZPNbhHwnHEXE4IWkK9/B2+5602zFGaFJnedtumsO9hjbdsQBvtW1Nux1jhI5kQ88B6oPthjahZIQ3295Pu41hhESyL3SIPYEGQ5u4Hmd5x/KsNQDaYm3sCuwyHNdkknc7Xz2cyJkdgWQPOwMbDMel1Nnc8wzZOjoAMT3A3sDbJhqSvb0Pk61jCKDJKA2Bl0xt2gL3YefyKtHoCD1hahMK/YVUkmj2hEIPmT+P0IM2NVRk+BEbj3f4R8BxRBxOSJ5p2Ihi88ikEIJnGzYajr/cssFWFAFSkZfnmtYYjq/oWEdSJu1poPBaq3F0Z13POqJ61JaGgsKyjmWG47sCGwlpfbY0BAqru4zzaprCmwgk22xrbO19znC8N7adQGI/dpwdEBzoN3YSIol9hOLryS7iMoCkPfCA4aimNROLrSC7SMUAOu
HQ/cbPQO+F2Gs2NTSIPIG0+R1w+GTjOCIOJyQNoW7snucSwKGwcXi7Mdxluz6EJnWaI92G4y2RzqyOlX5AA53mSIfheHu0HdXmnbGOTmu01XC8K9aCsHm5kOh0xIy3AvoSabYJLGr0xZsMx0PJRtsaIAkljOeJJY2jV5kQ15oMHeVksgF7zlQKXe9CSoPEWK0Ze87UYWQIdHtOrMMnG8cRcTghCSfjR06PZIsuJeFkzHA8osVsR0QAgknjaERUM9bPhFDS+BRFTI/Z2Wk4QkQz1ojrUcQQiMRMIjcJPWLb2RmYx4h0uR1WSUrj16HroSHRAB0pB//8SDlUGibPdwg1hnQuh08cjiPicEKS6/Kg2Fz4FKGQ4zIuDpWjeu1v/wB5LuPaFX6Xdyh8BHJdfsMxr5J5AazB8KvGGh7FZys/ZACfYqzhVvxDouE20XBlUchr0HmEsYaSQZ0Rc1SEQXEzIYauWJhiVHhMDNXrAJziZg4mOI6IwwnJqLwy23f5UkpG5ZYajtfklmedDDuAIhRqcowLQw33V5C0WNvCUAOFkTkVhuOVvkq0IdAY7h9uOF7uHZ7lSZYPagzzjTAcL/aMtDU/pHJEir3G5evz3KNsa4Agz22s4XPVDYmG11VjuHXoco1iKMJgijIMIQwcaXU4Q9IFROSDsF6gzeEfD8cRcTghubxm5lBsgafmMeC8qllZHw8eQJM6F4+YZzh+ctksvIrHloaOzrmViwzHZxfPJke1d6evo3N6+emG4+PzZ5DvstfLR0dnQekSw/Eq/zQK3NXYWWAlOtOKLjEcL/JOoNAzAXuXPsnowmsMR33uUeR5F2D3RMuwvJsNx1S1Aq/vLJsaCrm5txmOCqUQfOfb1FAh5zqEzTwph082jiPicEJS6S/ktMrxtuqInFU1kWF+49LCRZ5czq6ckX0dEaEwr2QsI00iIjkuH2dVLEDJ8qumIBifX0tdnnEkwa24Ob389Kw1BIIqXxVj84yb1ClC5aTS82zliRS5yxiXZ9w/RwjBjOIrsp4fwKcWUpd/sqnNmMLrsZOE6RI5jMg719SmIv8W7Jw2Ebgpy7vS1CY393ZbGiDIyb3B3CLnBpsaOsJ/rY3HO/wj4DgiDicst49dhJ7l1okudW4de1Jau+tqT846YVWXOjeOOjWt3UXVp5Jt4EVHcuWIs9PanTHsjIz6xRyNRHJB1QVpTxDNKzkLt+LJ2hk5tfzStH1zJhYuwavkZp20OrPkatQ03V9H5p2LVy3NUkMwuvA6XCZ5KABF/iV41OFkF00QlOddn7bfjNd7Gi7XuCw1FPz+K1DVYeZm7tngmp61Bt6zEa6h6fTs8MnFcUQcTljmlNbyb1PN7zyN+Pa085lVkv4COKFgON+cnN1d+OfGnsuCsvFp7UbmVPL18bdkpXHViHNYVDYzrV2Zt4wvjv3ikQ67mXBm+ZksLl2c1i7fXcQto76VhYZgdtFpnFR6XlpLr5rHRSPvRAglQ0dBMDrvZOaUXJfWUlV8LK76HYrwkNklUGGYfwGTSz6f3lK4GT/sPhThJ7NFXCHPM4eRxf+e1lIIhZLSBxFKQYYaKm73FAqL7rSgIRDFvwOlJGMN1FGIwp9k8BiHf1QcR8ThhOaWMSfxrannpZqZp7mbVkXqnM2/TzufG0cvsKxxyYh5fGvyFSgISxoAXxx3HrfWnW5Z49Rhc/iXCbeiHP7HjIHxa0eexy2jLrasMbNoJl8e+2VUoVrWWFKxhBtrb7RcT2Vs3jRuq/t3XMKdVmPAkZhXfCZXjvyiZY0q/xQuHflzXIo3rTMyMD4u/wzOrf4OwuI2W5F3AqdU/xG3kodIu8Cm5qzKOZWFlb9EEda6CPvdY5lU8QQupZj0i3jqvSnwLWb8sHtRDE7LHIvLNZKysmdR1ArSX84FIHB75lBa9hiKxRNEQq1AlDx6OHnVmgauiYiShxBKviUNh39shByKQgofEpm0EXb4ZLOhu4H7977L6607Qabu1H
QpUYRIba0IOKdqEreMOYmZJdmdvtjR18ijB9/hjbbNR+Y+WkMCp5RP4trak5ldMjorjQPBRv7atIzlHWvRpH5EYyDCoKMzt3gylww/g1nFE7PSaIo0sbRtKas6V5GQCVShokv9AxqTCyazpGIJM4qMczbM6Iy1sLLzRdZ2v0VCxlBQkXxQoy53EovLLmRKwYKsCsf1x1vZ1PMU23tfIiEjhx0GnYHFUKJR6ZvM9JIrGJd/RlYakWQ7+/oe4UD/kyT0IALXB16HRKPIM5kxRddRk3dBVkmXCa2DtsB9tAcfRNN7SZ1E0TmyaJPE755IRf5tlOZeadnRORpN6yIcupdQ6D50vXNQDZdrLLm5nyIn93rDY8FmSL0fwg8jww+C3n6UBqT+JklQaxA5N0POtcancRz+Ichk/XYcEYcTgpiWRCDwqOYX+o5ogGcbNnIo1E0wGSPP5aUmr5RLR86g3Gd+9xXXUmWmPap5DkF3LMjLLes5GGonlIyR4/Iy3F/CBdVzGOYz37dP6El0KfGq5otJfyLEW+2raQi3EE5G8alehnmLObNiARU+4yPHAEldQ5c6njQa4WSYVV2raIw0Ek6G8apeSjwlnFR6EpW+yjQaSXR03MJtusDHtAgbe9+mKXKAqBbCpXgodJcwq+hU06O6AJrU0GQSt/CYaiT0CHv636ItuouYFkAVHnJdJYwvOIsy3xhTDV1qaDKBS3hNNTQ9RlNoKV3RjST0AApuvK4yRuQtodg72YJGHJfwmWroMk5P+BUCsffR9D4EHlxqCSU5F5DrmWX6WCk1dBlDEf40dgmi0deIxd5JlWjHhaKU4PNfgMczP81jdaSMItJqaBBbjoy/fbhiqgJKEcJ7FngWptVARiGNhsPHH8cRcTjhkVKyqvUg9+9ex/KmfcT0VGa+T3Vxzohx3DxhDnPLR9i6WEkpWdvVwEP71vBW626ihx0Rr+Li1Mqx3Dh6HgvLR9nW2Np7iMcb3uPNtu1EtQQAbkVlYelYrqk9iYVlY9ImaaZjd+AQzzW9w4qOjUS0VHdZl1CZWTSWS4efwrzSSVmf/hngYKiRpW3LWNW15kiVVVWoTMgfw7mVZzKneEbWCbEDtEWbWNm5lLU9bxPRUtU2FVRqc8dyStm5TCucj0uxV7uiO9bExp6X2Nq3lKgWAECgUuEbw5ySS5hQcAoum0eqg4lmdvc9y77+54ke7pKbqmEylgmFVzMq/xxcir2IQCzZSnPgMVqDjxPXOkmdZ1fJcY9meMHNVORejGqzeFpSa6Mv9DB9oQfRtNbDGgpu12iK8m4lP+dqVMXetVdqXSQjj6OFH0JqzYc1BEKtQc25CVfOVQilyJaGw4mH44g4nNAsb97Hd1e/RkOwF1WI44qKqUJBkzpjC0v58YLzmT8s862W9zvq+f6GF9kf7Doy3wc1Uro1uSV8f+b5LK4wv7MejC09Dfxo27PsDbQZaKR+V+Ur4l8nX8RpFZMy1tgbaOKXux5lT7BxUA0FBR2dMk8hnxt7KacPm5WxRlOkhbv23cee4P4j8w2mUegu4PqaKzit3LimiRGdsVYebbiLfaEdg2oIBBJJrprH+VXXsLjMuN6IEf2JDl5p/hX1ofUIlOOqtA5oeJVcFpXfwNySyzN2QiPJLt5ru5PG8DuDaqS2KHRcIoepJTcztfhWy3krAyS0HnZ3fZfO8GuktlWO1RCARBF+RhTcxqiir2S8ZaTp/XT0/DuByF9JOQaDaYDAQ2HebZQV/jsiwy0jqYdJ9P8ALfI0qSPAxy41A++9CzXnOtwF/y+rLSOHExPHEXE4YXli32a+9e5LSAt1OhUEihD86uRLuLDW+iL+UuM2vrHmGaSUafvVpHbQBf815xIur7WeL7GibQf/tuERNKlb7onzzckXc03tQssa63t2890tf0pt91jUuGP0xVxTc6ZljV2Bvfxkx2+I6/HjnAMjrhh+EVePNC4adiyHwvv5/b4fE9MiljVOK7+AS6tvtu
wodEYP8ujBbxHR+i2XiZ9edB7nVv2TZUehP36IpU1fIpLsQlqsrTEqbwmLK7+LkuZI8QDRZDObWm8mmmzGav2OUv+ZTB72m8OngNKT1Npp7LiaRHK/RQ2B33sy1WX3Hj4FlB6p9xLrugmZ3IG1mi0Kwj0Lb8m9CKcc/CeCTNZv59SMw0fG0kN7+Oa7L6JbLBauI0lKna++81dWtdZb0ljVvp9/WfO0ZQdBHtb59rq/8lbLbksam3oO8q8bHiYhtYwa8/10+/O81rLZku3eQCP/seWPxDNwQgD+uP95Xml535JtU6SFn+z4DTE9ZtlBAHi66QVebnnDkm1XrJ0/7Psx0QycEIDlHS/xetuzlmwDiU4eO/jtjJwQgM29r7Ci/V5LttFkD683fTkjJwSgPriUNR2/tFSrJqn1s7n1dmIZOCEAXZG32NX575Y0dD1MU+eNGTghAJJIbCWtXV9M5Yeks5YxYt2fRiZ3Yr1wnI5MbCDe8wWkTFh8jMMnBccRcfhIiGlJvvHuC1k9Vkr4l5XPo+nmF7WkrvOva54l2xjft9b+9UhCq/FzkXx385NZFVoTwA+3PGPaEXhA4xc7HyGpa1n1d/n17ifoT6Tvdvqn/Q8Q1+NZaTxw8HG64z1p7Z5uvIeoFsmqmd1LrY/REWtJa7es7U+Etb6sNN7veoK2yN60dhu77iac7MzICUkh2d33NO3RTWktD/b9nkiyISuN9tBzdEeWp7XsCf6BeGIHmVdL1QlFXyUYSf8d1kIPIhMbstLQ4++gRZ7K8HEOH3ccR8ThI+HFgzvoj0ezah+jI2mNBFnWvM/UblnrbjpjwawWVgn0JiK81rzT1G5N134aw90ZRSmO1ohocV5pNl+UdgUa2BdqzkoDUv1vXm1dbWrTGG5mZ2BvRlGKY3mz7W3T8e5YO9sDG7LWUFBY1fm6qU0o2cvO/rez7torUNnQ86KpTVwLsj/wYhYOwt80dvU+aWqj6VFaAo+RfTl1lebAg6YWUiboC95L9uXtFXqD96TR0EmG781yfgBBMnRv1tWOHT6eOI6Iw0fCfbvWodjoU6IKwf271pnaPLBvta0mdgqCB/aZL+CPHXzP1ukUATxy8F3TC+1zTe/Y0pBInm18G10aLzhL25Zn3ZtmQGNp2zKSunEE6d2uN7Iu1Q6pWiTvdr1JXDeOIG3pfRU73RElGtv63iCqBQ1t9gdeRrOxXSDRaAi+RSTZZWjTEX4JTRo/h/RodEfeJpI4ZGgRii5F0zttaOhE46uJJXYZW8RXIrVGsv+bSGRyFzKxMcvHO3wccRwRhw+d1nCAzV0tWd/hA2hSsqLlAMHE4ItSfzzKex31x53AyQQdycbuRtojgUHHE3qSFe07jju5kgkS2B9s51B48EVJSsnyjo22NADaYz3sCzYZjq/qWm0rGgLQnwyyO2gcpVrXszLrSMUAMT3CnsBWw/HtfcuyioAdjSYT7A+uNRyvDyzFbitoicahkHEEqSP0MvYvx+LwSZvBCYSfx25HYFAJhp83HNUiL5EqdGYHF1rUPErl8MnCcUQcPnS6ounzFazSE4sM+vvu+NBpdMfDg/6+LxGx5Ux9UGPw5xvV48RNogyZ0Bsf/A5blzqh5OCvMVP6E4M7bQAhzXgsE4LJfsOxcLLX9vwCYTqPWSTDuoZKTDPOqYlp7djpCJzSUEho3YbjmtaOvU66KRVNN34/pN4F2P38SqRu/DocPnl86I5IU1MTN910E6Wlpfj9fqZNm8batcZ3Hw6fPJJpkkwzIaEPfiFNl8iaCcm/o4bZdkrGGiYnHOxGEQbQTDSG6rWYatheWAEEujRePLPNDclIw2QsE6SJE2A2NlQa9p2QlAJD9H44fDywG0Mzpaenh8WLF3PGGWfw8ssvU15ezp49eyguLv4wZR1OMAo8Q9dzotAzeB2DAoPfD6mGe+g0jObyq94jhbfsku8avKmZIhS8ioeYHretkasaN07zqX6CSftHMf2qcfVQn5
J3pHpqtkh0fKpxewCPUkCIVpsaGh6TCqVuxf41USJxKcYtCFSlhIFiaHZQhbGGUApJbf/Ycd4UMHkdDp88PtSIyE9/+lNGjhzJPffcw/z586mrq2PJkiWMGZN5FUuHjy8j84oo99krRS2AuvwSSryDL+Bl3lxqcottpMOmqPDlU50z+EXQ7/IwPr/SVtItpJyQUbnlg44pQmFqYZ1tDZ/iYUzecMPxyQUTbCWrQqrE/Ji8OsPxcXlTbWsIBHW5EwzHa/NmWeiem54ROVMNx6py5tlKuk0hqfDPNBwt8i/E/uVYo9A3z3DU7z3J5vwASdN5FM8C7G//JA/P4/CPwofqiDz33HPMnTuXq6++mmHDhjFr1iz++Mc/GtrHYjH6+/s/8OPw8celKNw8YY7txfW2iXMNK20KIbhpzHxb8ysIbhozz/TEynW1J9nKE1EQXFUz37Tx3qXDT7GloaJwXtUC/C7jctlLKs+wlayqoLCodD75buMqmCeXnWtbY2rhXIo8JYY2s4ovtLV1IlCozZ1JidfYaRtfeLnNpFuFUu9kSnzGDlVV3tU25gcQ+N2jKfTONbQoyL0GQeadfY/GpVaT4zvDcFz1XQLCOEpmCVGM6su8xL/Dx5cP1RHZv38/v//97xk3bhyvvvoqX/jCF/jKV77CfffdN6j9nXfeSWFh4ZGfkSOza+fucOJx3dgZ2Gm26VFdXF5nfNcKcHnNDNxK9nfHihBcWWveq2VJ9XRy1OwbpkngipHGd60Ai8umUejOPoKkoXNR9WJTm+mFkyk1WeDToaOzpNJ4QQKoyx1PhXcEZOmA6uicUnauqc0w32iq/BMRWWpIdGaXmJerz/eMoMo/30ZURGdi0TWmFl5XBWU555D9qRbJiPxbTUviq0oReTlX2NBQKMr7lGlJfKHkoPqvtaXhyr0JYbFcvcMngw/VEdF1ndmzZ/Nf//VfzJo1i89+9rPccccd/OEPfxjU/tvf/jZ9fX1Hfg4dMj4T7/Dxotyfx2cnW++zcixfnXYy+R7zhlgFHh9fmnhq1hqfGb+I0jRbSH7Vw5fGZ3e3JoBraxdS5TfPB3ApKp8ZfXGWGoIllfOoza0wtVOEwk21V2WtMa94FmPyRpnbCcGlw2/MykUQKEzIn87YvClpbU8bdjvZODsCheH+yYzJSx9Jm1n2ucOOSGY6ApVi73hq89L3/6kt+vLhnjSZa/hddVTkXZrWtqTgnxDCT+aXfhWXWk1B7o1pLV15nwGRn5UGSimunFsyfJzDx50P1RGpqqpi8uTJH/jdpEmTaGhoGNTe6/VSUFDwgR+HTw7fmHkaF2XQvG6A68fO5PNTrDkxn5twMlePyrwD7cUjpvLVyeZ3+ANcW7uQG0eZRxyORQCnDpvE1yaeb8n+vKoF3FSbmcMjEMwsGss/jze/+x5gYelcbqzJzBkRCMbm1fHlcZ+2ZD+pYBZXjrg9Qw2Fan8Nt436mqWmdzW507mg+usMtDC0qlHsGc4VNd9HsdC5tsw3hZMrf3g48mLtsilQyXEN46zq/0FV0t/h53kmMKX8d4dzXqxruNVSplfeg6qk3xLxuOqoLrvv8BaN1aiFiqoUMrzsUVQLSaSKWo235B4Qvow0EDl4S+5HqGUWH+PwSeFDdUQWL17Mrl0frMK3e/duamtrP0xZhxMURQh+tfgSPj0pdQdqVgVVFalL/lenncyPF5xnuQurEIL/nHURX5x4CsKCBsCnx53Ez+ZdjpKBxtcmns+Xxy9JdQg2WfwG8k2uqlnAz2ZdjyuDraNb687nS2MvR0GYbj2oh7/GZ1fM5cfTP4tbsX4Y7qLqJXx29C2oQjXVGEg6nV8yi+9M/joeCwvrAIvLlnBz7VdwCZcljYn50/mnsd/Hp1o/pTSl6CwuH/ld3GlC+gOJrSNzpnFT3S/xm5yWOZba/DM5s/p/cCsDz8sgX+mwRql3IheM/At+V6lljZKcU5leeR8uZeB5GV2iUxo57nHMrnoKn6
vaskaO9yRGDHsG9chJHXMNt6uOkcNexuMebVlD8czEW/oUKANJ2eYaQq3GW/YsinuiZQ2HTw5CfohF/desWcOiRYv4wQ9+wDXXXMPq1au54447uPvuu7nxxvQhvkzaCDt8vDjQ383Dezbw6N6NBBMfPEZa6PFx4/jZXD92JiPysj/G1xjq5bED63j0wDr6E9EPjOW5vFxTN5vr6+ZQk5d9rkRbpI+nD63hyYb36U18sEiYX/Vw2Yi5XFkzn7q8wU/JWKE71s/LLe/x1+Z36Il/8KiqV3GzpHI+F1cvpi6vKmuN/kSAZe0rebXtreOa2bmEi1PKFnJO5WnU5WZ/ExFKBlnTvZy3O1+hO97xgTFVqMwqWszJZUuoyRlj2fE8lpgWYlvfG6zr/is98eYPjAkUJhScwuySixjun5K1RkIPcyDwKjt7H6cvfuCYUYWa3FOZUHQ1Ff7ZWWtoeoT20Is09d9PKHFs/yNBqf8Mqgtuoti3yDRnwwxdRgmGX6Q3+Gdix5VUF+R4T6Uw71Pk+s5EWIgaDYaUcbToa6n+MYnjWzQonpNw5d6K4j0LIT7UahIOHzGZrN8fqiMC8MILL/Dtb3+bPXv2UFdXx9e//nXuuOMOS491HJGPF1JKtnW209DfSyiRIN/tYUxxCeNKjEOt0WSCjV0t9MUiCCEo8vqZUVqF1+RUyY7uduoDPQQTcfLcHkblFzOpZJihfVxLsrmnid54BCmhyOtnWnE1PtX4BMHuvg72BzoJJeLkuDzU5BUzuajCcGFJ6Brb+hrpjYfQpaTQncPkwuH4XcZ36PXBDvYG2gklY/hVD9U5RUwpHG6ooekauwKH6E0E0aROvsvPuPyR5LqM67Q0hjvZH2whlIziVT1U+IqYXFBjqKFLnf3BevqSAZJ6klxXLnW5NeQa1CQBaI92cSDUSEiL4BFuSryFTMwfjWKwQOpSpzFygECil6RM4ldzGO4fRa7LODrRE+/mYPgAES2MS7gochczJm+8oYaUkrboPkLJbpIyhlfJo9xXR66ryFAjkOimKbKHqBZCFS7yXEXU5E5GNVggpZT0xPcSSXaQ1KN41DwKPaPJcRl/3iPJHtqj24nrARRc+FxFVPpnoArjz2IovptoshldRlCVAnLdY/C6Kg3t41ovPbGNJLQ+hHDhUYsp8c5BVYxzrOKJPSSSh9BlCEXJx+Mai9s1wtBe0/sIxdaR1HsQKKhKMXneBSiKcRRLT+5HJg+BDIKSh1DrUFw1hvZSD5KIr0bqKcdYKMW4PfMRivFJLYcThxPKEbGD44h8PAgl4vx19w7u3bKe3d3Hl3+eVVHFbdNmc96YcaYOhhnRZILnDuzgvp3r2dbddtz41JIKbps0h4tGTcTnyu6IYkxL8nLjDh7Ys4ZN3c3HjY8vKOeWcfO4pHYqOSYOhhkJPcmbrTt45MB7bOg5PldqVG4ZN9Qt5MLhM8hzZ1cILqlrrOrczlONK9nQc3wvmGp/KVeOWMx5VXPJz7JImyZ1NvRs56WW5Wzo3X7ceJmnmAuqTuOsipMoMDnia4YudXYGtrGsfSlb+jYeV+St0F3E6eXnsLjsNArc2UXOpJTUh7awuvsldva/f9wx3Vy1kHmlFzC7+BwK3Na3WI7VaItuYXvvMxwIvHXccWOvUsCkosuYWHgJeW7zJGMzjb74Vg72PUJz6KXjqp+6lHxq8q+iJv8actzZn0YMx7fSFbifntDTSD7Y90kRuZTkXUdp3s343NnXikomdhINPUAs/BjwwUgmwo/Xfw2+3JtxOds4JzSOI+LwkbGupYlPvfQMfbGoYc1GRQh0KanKy+eBi65ibElmF/QtXa3c/voTdEbDKIhBa2wM/L7Ml8t951zNlJLMLui7+zq4fcXDtEUChhoDr6/I4+ePp1zLrFLjO8bBaAh18YX376Mx3GOqAZDj8vKruTcwv8z6vjxAW7SHf9nwJxrC7Sgog9bxGNDwKm5+OO0WFpZldkHvjvfxo+3/x4
FQo6FGSkfgEipfG38bJ5VllkAcTAb43d5fciC0N62GIhRurv0MC0tPzkgjqoV4tOFO6kNb0mikoi4XVn+OuSXnZaSR0CO82fJ9DoVWIVANa54IFCSSBeVfZGrRtRlt6Wh6jE0d/05r+NU0GioSnXFFX2Js0ecy0tBlnMbub9ETegLzyqmpsfKCL1JV+M2Mto2k1Aj1fY9Y+F5LGt6c28gt/EHW20YOHy6OI+LwkbCqsYFbX3gSTUp0Cx8jVQh8LjdPXn49k8qs5Uysa2/ihtceJaFrljXcisoj517PrHJrCXzbe1q57q37iWkJS917FQSqonDPqdezcNgoSxoHgh3cvPJuwsm4pc66CgIhBL+eeyOnVIy3pNEc6eYLa/6X/mTYkoY4/L/fn3ojZ1TMsKTRHevl3zb/nJ54v6ViZQPO2z+NvYkzK6xV9gwk+vnZrh/QFevMqCDadSNv4fRh51iyjWhB/rL/23TGGjMqVnZ2xS2cXH6lJduEHuHFQ1+hK7Y7I42ZJbcwt8za9rUm46xuuYOe2AYyaZpXm38jk0u/ZckZkTLBgY5PEYguJ5Py8MW51zCy5BcWNTSCPV8kHn0pAw2Bx3cBecW/zzpPxuHDI5P12/nrOWTF/t5uPvPSM5adEABNSqLJBDc//wSd4fTdchuDfdz++hOWnZABjbiucdvrT9AcSl+ZtzMa5LYVD1t2QgB0JJqu89l3Hqc+kL5LaH88wufeu9eyEzKgoUudf1n3CHv60/c5CSdj/MuGu+lPWHNCYOByL/nPbY+wvW/wI/VHk9AT/GD77yw7IX/TgN/tfYitfbvT2mtS43d7/ztjJwTg0UP3s6VvY/rnJCWPNfwkYycE4PW2+9na944ljbdafpCxEwKwsft+dve9aMl2S8d36YmtJ9POvQcDD3Gw/yFLtk0938vYCQHoCT1Oe///WrINB35KPPpihhqSePRFwoGfZPS8HE48HEfEISv+sH41MS1p2UEYQJOS7miEB7dtSmv75+1rCCXjGWvoUhJMxLhne/ouzw/sWUtvPGLZCTmigSSmJfjTrnfT2j7VsJb2aMCygzCABJJS5097V6S1fa11HU2RLrQMFyRJKhfj3gNL09qu7NxAQ7g5q7LtEnj44Atp7bb0baQ+vD8rDYHg2abHSRfkPRDaQn1oS9Zl219vvT9tZ+GO6A4aQiuz1ljTeZdpt16AQHwfzaEXyLaJ3e6e36LpUVObePIQXcEHs9Zo6/8Nmm7elFDXOokG78pqfoBo8G50rTPrxzv8/XEcEYeM6YtGeWb39owX7wF0KXlgywYSmnGPkHAizmN7NmetoUnJI3s2ETHp/hrXNB7aty5jR+dojafrN9MfN76Ya1Lnkfr3su6mq0mdpS3b6IoFDW2klDx56J2sO/noSN7r2klLxDy681LLMhul1CU7AvtoCB+fBHw0b7W/lnWjPImkKXKI+vDxCbpHs7rrBVvN+HoTbRwIbTa12dH7jK1mfBGth4bgKlObhsBjtjSSMkhL6FVTm67gQ9hZJqSM0xN6ytQmFn4Uex2BtcNzOHxccRwRh4x5YtdWkrqdJmDQFY2wtH6v4fhzB3YQttlCPpiI8/yBHYbjS5t20huP2NJI6BrP1BsvSqva99IWtde8UUrJMw3H12AYYHPvARrCHbYu5QqC55reMxw/EDzEnuDBrB2qlIbCKy1vG463RVvZFdhuu1HesvbXDcf7E13sCqy2pSFQWN31kuF4VOtjX+B12834tvUaL+BJPUxj4GlbGqCYbs/oMn44GmKvm25n4B7DKJWUGpHQPWS6tXTMLERC9yKl3a6/Dn8vHEfEIWPWtTTZnsOlKKxtMb47XtveaFoV1QqqUFjXYfxc13U24rKZ5CYQrOtqNBzf2HPQtoaOZH33QcPxLX31tu7wBzQ2DnLUd4AdgX02eyenmtht7dtjOL4/ZDyWicbuwLEFwP5GY3i3LWcKUo3yDoa3GY53RnejY76tYkWjLbrFcD
wQ34MmzbdV0qPTF99uuAUUTxxA0/tsakhiyf3ocnBnXNdakfrxx/EzVtFb0YdgHoe/D44j4pAxPdGozUs5SInplkZ/PJb1tswAutTNNRJR24uSjqQvZhxVCSTsLhYpeuNhw7FAMmK5PL0Z/Unj1xFKRgyLh2VCMGn8OsLJcNZbP0cT0Yw1onr6JGkrxE004rrxNlom6DKBpscHHUumybvIBKO5NAPnITuNwR0aKe06OkfNpfcO2VwOHy2OI+KQMR7V/rl9IcBt0nfFrSi2FyVx+CivES4l826qg2H2fmTSW8ZUw+y9Eir29tiPnmdwXMLFUBz0N3s/XIpq2zGEVLl447GhKSOumMwzVBqAYY2MoSyHLgyqugqGTkPBqMhgdsUHB0OQXZFBh78/jiPikDHlObm2t02klJT5jcuGl/ntaygISn0mGt5cW/NDavun1Gc8T6k3N+tk2AEUBOU+4/LnxZ5829EjgaDUa3zWv9CTbyuvYoBij3EF1AJX9n2FPjCPSZXVPJMS75mQ4zJ+r/xq9r2Ljsaj5Bt2Bvaq2VV5PRYFDy4x+OfXpWbfH+lYFVUtHnxkCDvtiiF6Txw+ehxHxCFjLho7wfbCp0nJRWMnGGuMmkQyw+Oux5KUOheNmmQ4fsHIyRkfqT0WTepcOHKy4fg5VVMHraCaCTqSc6unGY6fNmyq7biORHJ2pXH103nF03DZrGApgNPK5xqOTyqYhsekH4o1DcGCkkWG47U5U/Cr9nqVCBSmF55uOF7mm0iOzUVcoDK2wLg4W557LLmuWuxE9AQqVbnGna09rhH4PdOxt0yoFPjPQRGDtytQlGJcnkVg4/QPqLg8i1GUwZ0dhxMfxxFxyJjTauqoyrPePv1YFCGYW1nNhFLji/XcYcMZW1ia9WVWABOKypltUl11cnElM0qqUbJUEcCI3CIWVxiXYa/JLWVh2RhbORwlnlxOrzAuw17mLeTk8qmoNr7OuaqPM4cZV1fNd+dyavk8W0mxbsXNaeULDMd9qo/FZafZ0hAIFpWdZjjuUtzMLTn/SNn2bJDozC0513BcESpTiq/AjpMg0ZhUeJnhuBCC2sKbsp5/QKO28HpTm7K827F3okWjLP82Uwtf7u3YO5mj4c+93cbjHf7eOI6IQ8YoQnD7tNnZ162QklunzTa1EUJw+yTju+d0SOD2SXPSlpe+ddx8WxGLW8bOS+tkXF+3MOvtGQXBtaMWpM01uXLk4oyLmR2tcfHwBXhNuhEDnF95atbbMwoKp5cvINdl3mTvtPKzbGnMKV6QtgHenOJzyTanRqAwLm8uRR7jbs8A4wsuRMnyLl+gUOmfTrG3ztRueN7FKMJLdg6PQr5nAoWeqaZWRbkXoYrCrDU8rlryvItNrTy+cxDKMLJbjhSEMgy37+wsHutwouA4Ig5Zcev0WcytGp5xHociBBeMGc+FJtsyA1w7bjonV4/KOJqgCMHpw0dz1Vjj7YwBLqqZwpLhEzKOiqhCMLe8hhvHzklre9qwCVw8YmbGl3JVCCYWVnHrGPMLOcDMotFcPnxRFhoKNbnDuLUu/YV8bH4tVwxfkqFCykEo95ZwY+3FaW0rfdVcWn11VhoF7kKuGnlDWtsiTznnVX0mYw2Bgl/N48Lqz6W19buKObniG1lpuJUcTqn4Zlpbt5LHjPIfk7lTpaAKLzPK/yuto64IHzVlvyFzR0QgcFFb+r9pNYRwkV/828MamegIQCG/+LdDmrzr8NHjOCIOWeFVXfzp/MuZUlZh2VEQwGkjR/HLsy6w9BiXovD70y9jbvlwy5cngWB+xUh+d9qlh0/FmKMIwS8XXMbJlaMtaygIphZXcdfia/Cq6S+AQgi+N/1Szqg0zlcZTGNMXgW/m38LfjX9aQAhBF+ZcCnnVJpHmj6ooTDcX8p/z7yDXNfge/jHcmPtxZxbcUpGGmXeYn4w9Z8ocFvLzTiv8mKWVFyYkUa+u5B/Hv9tCt1Flh6zoPQizhiW3mkZYM
AJuWXUD9NGQwYYX3ghC8q/nJGGW8nhvOH/TaGnxtJjqnLPZWrp97C6iAtUVOFjbsX/UeBJfzMAUOA/k5rS/yG1XFj5ligI4WFU+Z/I8Vrruuz2LiKv+A+AC2vLkgK4yCv+PW6vcU6Qw8cDp/uugy2iyQT/tWo5j+3YQvxwyfajP1AD7e5z3R4+NX02X523yJKDcDQxLcnP16/gwV0biGnJ4zQGOrz6VRc3T5zNN2admvER46Su8+tty7lvz2rCycSROY/WgNSR4+tGz+bfpp+Jz5XZ0UNd6vxxz3Lu37+SYDJ25L05GoHAJRQuHjGTf51yPjmuzJI3pZQ82rCcB+vfTNUXMdBQhODsipl8Zfxl5LvNt0sG03i5dTmPNbxMfzJoqCGAk8pmccfoayh0Z55TtLJzGc81P0VfohcF5bgtm4Hj3TOKZnPdyFsp8mSerLi5dzlvtN1PX6ITgXJcb5iB343Lm8OF1Z+37IQczYHAclZ3/h+BRDMC9bhqqAO/q/bPYXHFv1DoGZmxRnt4BTu6fkYoWW+qUeydw9Sy75DvGZexRiC6kuae7xFN7CKVXHpsXkfqdzme2Qwv+RE5nvQRyWNJxNcS6vsPtMQWUw3VPY3cwv/E7cl++9bhwyWT9dtxRByGhL5YlKd3beehbRtp7O8npiXxudyMLirmlqmzuGTcRPxuezUDgokYz+zbzoO7NtAQ6CGqJfGpLmoLirlpwiwuGz2ZPLe9UxfhZJznG7bx0N61HAh0E9USeFUXI3KLuGHMHC6vnUa+x1r0wIioluC15q08Vv8++4IdRLU4HsVFpb+QK2vmcdnIWRR6jI8dWyGuJ1nRvoWnG1eyL9hKVIvhVlyUeQu4qHoBF1bPp9hj7/RIUtdY3b2Jl1pWsC/YQFSP4RYuijwFnDXsJM6pXEyJyXFdK2hSY2vfJt5qX0p9aC8xPYYqVPJdBZxUegonl59BicfesU1d6uwLbmB114s0hHcQ1yMoQiVHLWB60enMLTmPYk+FLQ0pJc3hdWzvfZqWyHoSegSBglctYEzBOUwqvDQrB+RYjZ7oOur7H6Yz+i5JPXQ4ylJAVd551ORfS75njG2NcHw9nYH7CETeRJOp4m2qUkhRzgWU5t2M32N8iswqyfgmIqH7SERfQ8pUwTUh8nH7luDPvRWXxzix2uHEwHFEHGwTSyZ5aeduVh1soC8SQ1EExX4/54wbw2mjR6GmiWpIKdPuDcc1jVf37WH5wXp6oxGEEBT7fJxVN4az6sakjZxY0UjoGksP7uXNQ/vpOVwBtdjr47QRdZxbOz5t5MSKhqbrLGvex9Km3XRHw0gkhR4/J1fWcX7NxLTbN1Y0dKmzsn0fS5t30BMPk9Q1Cj1+5pfVcf7wKfhd5ts3VjSklKzr3s8brZvpjodI6kny3X5mFI/i3KqZaaMzVjW29e9nefs6euL9JGSSPFcOE/NHcWbFvLTJrFY19oX2827ne/QmeknoCXJcOdTl1nFy2SJyXeb1Y6xoADSG97O+Zzl9iS7iehy/mkO1v465JaeTl6YmilWNzuh+tve9RiDZTlKP4lFyKfXWMaXoPHJd5vVKBi7t6XT64weoDzxHKNFMUo/gVnMpcNcxquBSclzmDphVjWjiAB3BJ4kmD6LpIVQlD7+7jvK8a/C6RgyJRjJ5iFD4ERLJ/Ug9gFDycKm15OZej9tlnvjrMPQ4johD1nSGQvxlzXoe2biFQCyGKsSRmiGqoqDpOpX5edw8eyY3z55JjifzKEdPJMKfN67joS2b6I1FP6ghFDSpU5aTw83TZnL7zNnkezKPcvTHY9yzbS33bd9AVzR8ZN6URkqvxOvnpkkz+fTUuRR5M9ueAAgl4ty3ey0P7F5LWyQ4qEaB28cN42bxqYnzKTMpfGZEVEvwyP41PLT/fZojfahCQZepjZABjVyXl6tqZ3P72JMY5s/8e5LQkzxzaDWPHVxJU6T7A69jYNvFp3q4ePgcbhh1ClX+zLdANK
nxasu7/LVpOY2Rtg+8joFtF4/i5qyK+Vwx4kyq/ZnX4dClzjudq3i1dSmNkUYUFOThfwa2cVShclLpQi6oOpdqv/HRbiOklGzofYd3Ol6gMbIfBfXwltHfNAQKM4oWcfqwS6ny12alsTewgvXdT9Ea3X54W2VAQyG1aSgYm38Kc0quocJvLdfjWJpDK9jV+wCd0fUf0OBw6rYEqnNOZULxLZT5sotA9EaW09L/J/qj75DaVpGkjgMP5JvoFPpOo7rwcxT4FmalEY2tIhD4P6KxNw/Pe6yGhtd7Kvl5n8fvOyMrDYfMcRwRh6zY3dHJbY89TVc4nLZgmSIEE8rL+MvVl1OeZ32BPdDbw83PPklrMGBJo66omPsvvZLqfOt//8ZAHze98jgHA71pj80qQjA8r4AHz7uGUQXWF9j2SJBb33qEPb2daY//qkJQ5svl/jOvZ1yh9QW2Jxbi8+8+zNbeprTnIlQhKHT7uXvRzUwuqrKsEUhE+LcND7Cx5wBgfv5CFQp+1cP/zLmNaUXWF9iIFuPO7X9hXc+O43JvBtNwCzffnXIHM4rHW9aI6wn+sO9u1vWsRyBMS8UrKKhC5Z/GfZEZRdMtayT1BE82/oH1PSssaQghuKHmn5lWZH2B1aXGsrbfsqX3+UFzVo5GHF7Yz6n6BpMKrZ9mklJnc9dv2N33AKnF2lxDojO77FuMKbwqAw1JU9+vaer7NYPnehxNarym6NtUFtxhKVI0oBEI/oG+/h9a1ijI/wYF+V+3rOGQPZms386pGQcADvb0cv3Dj1tyQiBVC2R3Ryc3PvIE/VFrjd1aggGueepRS07IgEZ9bw/XPPUYXRHjJmNH0xkJcfWLj9BgwQkZ0GgO9nPVCw/TGrLWSKwvHuG61x9gb196JwRSVWQ7oyGuXfoAh4K9ljRCyRi3r7yP7X3Nlg5nalLSm4hw6zv3sC/QYUkjqiX42rp72NRTjyT9IVBN6oSTMb685s/s6jfunHw0SV3jh1vvZkNPqiOuFY2YHue7W3/P9r79ljR0qfO7vb9nfc+GwxrmKjo6SZnkV7v/l2192y1rPH7od2zoeduyhiZ1Hjz4S7b1rbGkIaXkrdbfsKX3hcMa5vVUJBoSnddafsau/jctaQBs7vr1YSck9UzTaYBkfeed7O9/2rJGU99vDjshkL5YWWq8ofdOWgN/tqwRDN512AmxrtEf+AX9gV9a1nD4aHAcEQd0KbnjyWcJxuIZlW7XpORgTy/fenlpWlspJZ9/8a/0RCIZa7QGA3z11Rct2X/5redpC1tzdI7W6I6G+dwbz2IlQPiv777AoWBvxhqBRIxPL3/cksZ/bnqRvf0dGWnoUhLVEnz+3YdI6ukrVf7vrpfY3teYUUE3HUlCT/K1dfcQ0xJp7R88+CJb+vZmpCGR6FLnB9vuImzSEXiAF1teZmPvpowa5g1s2fx6z2/pT6R3QN/tfJWNvSszbMqX0njo4C/pjXemtd7R9xpb+14km2JrrzX/lJ54Y1q7xuAb7O57MOP5AdZ13ElvbFdau97Icpr6fpWVRkPPfxGIrk1rF4utprf/B1lp9Ad+QST6VlaPdfhwcBwRB945cJD93T1Z9Y/RpGTp7r009pm3817f2szm9rasNVYeamBPd5ep3Y7udt5tachaY2NHC5s6W03tDgZ6eKNpT9Yae/s6WdVWb2rXEQ3wQuOWrCq+alLSFO5lWetuU7v+RITnGtdkpaEj6Y4HebNti6ldVIvxfNPbWXXU1ZEEkxHebDdflJJ6kldaX8t4fki5CXE9ztsdb5s/F6mzrOOvWWlAKsrzXpe5sy6lZG33Y2RbFl4i2dyT/jnu6r2fbC/7AsGevkfT2rX0/4nse8cotAb+ktYqELzbhoZKIHhXlo91+DBwHBEHHli/0VanW0UIHt1ovig9sNmehioED27ZaGrz4A67Ggr3b19vavPI3g22+saoQvDA7nWmNk/Wr8+2AvkRjYf2rza1ealpHUmZfX
8PBcHjB1eZ2ixvX09Uj2WtIYDnmpabRpDW9WwgmAxmrSGRLG17E92k+eHuwCb6EuZOsLmGzntdr5HUjSNIzZGt9MQbyPYPL9HZ2vsyCd04gtQb20V3bCvZ9o6RaDQEXiau9RvaRBP1hxNTs/1saXSHXyWebDO20FqJRF+2pRGLLSeZrM/y8Q5DjeOI/IPTEQyxbN8BW910NSl5ZONmwwWjPxbjxb27bWs8sX3rkaJpxxJNJnliz1abGjrP7d9BMD744qlLySN7N9h+Ha837qEzGjK0ebR+ra3+N5qUvN95gKZQj6HN04fez3p+SEUsdvQ3sS9gHEF6ueWdIydJskECTZF2dgbqDW3eal9mSwOgJ9HDtn7jXJH3u1631YgPIKwF2dFv7IBu633pcPJp9iRllD39KwzHD/T/1baGTpKG4MuG4x3BJ7DXSffwPKGnDMdC4cdtzw8qwdDDQzCPw1DgOCL/4DT29dtsUp+iLxojGI8POtYaDJDU7XTwTBFJJuk2SFrtjISOVF21Q0LXaQ0PfofdH48SSGR/hz+AjqQpNPhWVkLX6IhaS5pNx6GwsSPSHOkZkr97U6TbRKMzq22ZY2mJGOdXtEbbhkSjPWqc4NsRa8q6Ed8ACgpdceO7/J74oeOqoWauodKXaDEcDyQabGsIVIIJ41yUaLIee916ARRiyYOGo6lIht2lS5LUjDUcPlocR+QfnHBicOchG0LxwUPPoUT6pEbrGoM/33ByCDUM3pNQcgjfKwON8FBqGMyV1DVb2zIf1DB2zGL60LyWiGZ8KitmY+tnAAWFqG6mYe1UmDmCmMm2SdxkLBONuG58uiyhZ7+F9TckSd04mqfpIWztKwKgH57HYFSGsO/s6Eh9aBx+B/s4jsg/ODnu9A3VrJLnGXyuXJul3T+oMXhxs9yhfB0Gcxn9PjuNwV9HTpoKqRlpGFRCdSkqbmE/fA6Qa1Jt1WehWZ8VclTjkvo+1V5Jf0gdtfWrxgXtvErmxe6OR5rO41HslfS3Mo9bsVfSP4XApRjXDVKVPOwuKwIF1URDEbm2NUBBKE5tqhMFxxH5B6emqNBW8uUAJTl+cg2qrFbl5+POsNHdYOS63ZT4B7+Yl/lzyMmwCd1geBSVytzBG7Tlu30U2ewzA6nk3hG5g5cAdysqVVlURz0WAdTkGpcAH5Fjrz/LADU5ZcYa/grb+RsAw3OMG81V+6pt528AVPqMS5lX+EbY1tDRKfcaV3It8dYOSf5Gsce4XHqBp862hkQj321czM7vHk22J3/+pqHjc402HHe5xmA/IiJwm2g4fLQ4jsg/OKW5OZw9bgyqYu/UzA0zpxtWK8z3eLlk/CTbp2aumzIdt0FvGK/q4prx02xrXDF2imF0RRGCG8bNtnlqRuH8kRMp8RnfuV5XN+9wke1sNQSLh42hKse438mVNQttLRcKgmlFNYzKM3YSLqg+2Vb+hkBQm1PFuLwaQ5szhp1uO3+jzFPKxHzjMukLS8+xrZHnKmRiwSzD8WlFF9rO3/AoOYzNP8VwvK7gMvt5KMJDTd65huPledcwFE5Ced6VhqO5Oddg19kBndyc623O4TBUOI6IAzfPnoGm29vXvXaGecvvm6fPtH3a5Iap5uW4b5xoX+OmSTNNba4fO8tSQTJjDZ2bxs02tbmidpatEtSalNwwer6pzfnVs3Ar5s34zNCRXF1zkqnNKeWzTLdV0iGRXDz8VNP3YlbxDApc2UeQBIKzK85CEcaXwjF5UynxGDtc6TUUTio9F1UYv98VvomUeuvIdoEVKEwtuhCXYrxVVegZQ6lvBtnXEVEZlX8hbnXwiCGA1zWCQt+p2KnxUZJzAW7VONKmquX4/RfZ0vB5z8Tlstft2GHocBwRBxbWjGRieVlW0QRFCC6YOJ6qAuOLE8CMikrmVFVnpaEKwRm1dYwuNu82Or64jNOG12WtMb9iBNPKKk3thucWcn7NxKyiIqoQTC6uYP4w4zt8gFJvHpfVzM
gqKqIKhdrcEk6pGGdql+vyccXIBVltnahCYZi3kNMrppraeRQ3lw4/PeP5IRVxKXDncvqwuWmei8r5VcZ36GYIBD7Vxylli82fi1A4fdhlWWu4hIv5pWeZ2wnB3JLryC7RUyCEwvSii9NaTiy6DTsRi7EF16a1qSr4LNnX+NCpKvhUWqv8vM+R/evQyM//QpaPdfgwcBwRB4QQ3H3VpRT6fRkt4qoQjCsr5Ufnnm3J/vfnX8Kw3NyMNUYWFvI/Sy6wZP+bMy5iZH5hxhoVOXn8/qxLLdn/ZMGFjC3IzHFThaDYm8MfT7vaUrTj/02/gClFmTluqhDkujzctegmVJM7/AG+NP48ZhaPysjhURB4FBe/mns7HgsRletrz2Vu8eSMHB4FgUtx8cOpX8BvIRn1vMolzC+Zm5GGQKAIha+P/yp57vRJnAtKzmZucaadWwUguLXu3yh0mzvRABMLz2Jm8RVZaEjOr/4OhZ703YSrc09lUvFnMtRIMW/Y9yn0jk1rV+hfxMiib2WlMarkP8nzzkxr5/XMorjozqw0Cgv+A5/X3Pl0+GhxHBEHAKoLCnj8xmupzM9Pe7cvDv9Mq6rkweuvIs9r7XREeW4uj195PTWFRZY1xpeW8fgV11HosxbiL/L6efzCGxhfXGZpWVIQjCoo5qmLbqTMb62LcJ7by8Nn3cjUksojzzOdRnVOAY+fczNVOda2EXyqmz8uuonZJanoSVoNISjz5vHgKZ8yTVI9Grfi4r9n38qCsnFHnqepBoJCdw5/mP85RucZJ3cejSpU/n3ypzipNLWtls5ZUFDIcfn4r+lfZly+eeToyGOEwudG38Gi0oWWNTyKh3+d8HXG55tHjgYQQnDlyM+xsHTJYQ3zS6eCgku4uK3um4zPn2FJA+DUYZ9nTsk1ljQEKgoqF1R/l7H5J1vWmFL8eSYXf+7IHOk0BArzhv2A2nxrNwOQiorUFH378H+l20JRAcGokh9RkX+jZY283FspLvwJqW+HFQ0oLPgu+XlONOREQ0g7G94fMpm0EXYYGnojUR5cv5EH12+iMxzGpShHutgqQpDUdeqKi7h5ziyunTEVryvzPIP+WIyHt27i3k0baAsFB9UYWVDArdNnc8PU6fizOP4bTsR5cOdG7tu+nsZgPy6hHKlWqiBISp2q3HxumzybGyfOJN/gWLAZMS3Jo3s3cN/utdQHegbVKPflccv4Odw0fjaFnsyPgcb1JM8c3MgD+95jf7ATl1CQMpUCqoiURoknh2vr5nHj6PmUeK05U0eT1DVebt7AYwdXsjfYinpYA1ILsCZ18l1+rhi5gKtrT6LMm/l3UZc6y9rX8XzzcnYHGg5rpPJAlMMaOaqP86oWcUn1aZT7ijPWkFKyunstS9teZ09w75GTLhKJgkBDx6t4ObX8ZJZUnMMwX3lWGtv71/J2x4vsD237gIZAQUfDLTzMLTmdk8svND0pY0Z9cDUbup+mIbz2sEMikOgoKOjoqMLFxIKzmVVyJaXeUVlptIVXs6fvYVrCAxVwUxoC5fD/q9Tkncu4ohso9k7MSqM/uprW/r/QExnotaOQ2rZRSW2tCEpyLqCq4FOWIiGDEYuvJxC8m0jkBVJbW8rhuZXD/y3x+84jP+8OvN6FWWk4ZE4m67fjiPyDoek6saSG3+0y3SJI6jpv7t3PyvoG+qJRFCEo8fs5Z/xY5o8cbvpYXUqiiWRaDU3XWX6wnmUHD9AXiyKAIp+fs+vGsGhkjWnURJeSaDKJz+VKa/dOUz1vHNpHTyyClFDs83P6iLpUPonJsWIpJZFkAp/LbaohpeT99gZea9xNTyyMLiVFHj+LK0dx5vBxuIZIY0P3IZY2b6c7FkaTOgUeH/PL6jiraiJuxfiOUEpJREvgVV2mWzZSSrb3NfJG2xZ64kESuka+28/MolGcUTnVdCtGSklUT+BRzDUA9gUPsbx9Pb3xAAmZJNflZ2L+KE4pn4XXpPaIlJKYnsAtVFST1w
vQGG5kVdf79CV6iesJctQc6nJHcVLpArwm2z1SSuJ6ApeioqaptdIebWJ9zwr6E93E9Rh+NZdq/yhmFZ+CTzU+FSWlJCETqEIxTWAF6I03s6NvKYFkO0k9glfJo9Rbx8TCs/GZJI0CJPU4QghUYe7IhxItHAy8QCjZTFIP41byKPDUUZt/IV7V3CHU9BgIJa1GPNlKR+gpYokGNBlEVfLwueooz7sSt2ruEOoyVRhPEeaRV03rIBR+nGRyP7oMoIg8XK5acnKuwaVWmT5WynjK0RP2j+c7pHAcEYcP0NYf5Ik1W3hi7RY6A6Ejd9N1ZcXcuHAmF8+cRK7F7RUjOoMhnti4lUc3bKG1P3BEo6a4kBvnzODy6ZMpsLi9YkR3JMIT27by8ObNNPb3IUkFZUcUFHLTjBlcNXkKxQZ1RqzSF4vy5K5tPLBtAwf7e49oVOXlc+OkGVw7aZrlLRwjAvEYzxzYyv271rO/vxtdSgQwzJ/H9eNmcv24GVTkmC8y6Qgn4zx/aCsP7lvD3v4OtMMapd5crq6bxbV1s6k2Od5rhaiW4LWWLTxa/x67Ay1HolpF7hwuGTmbq2rmMyLH2jaREXE9yYr2TTzb9A67A4fQDjeny3flsKRyLhcPX8RIkzojVkjqGmt6NvJKy1vsCe4/opGj+jmlbD7nVJ7GyJzsIhsDaFJjW996VnS8yv7QTrTDlW29ip/ZxSdxctk5jMgZZUtDlxr7g+tY1/08B8Ob0GSq5YFb+JhYcDKzSy6i2j/eloaUOi3htezse5Lm8Gq0w06CKnzU5J7KxKIrKfdNtXXqS0pJb3Q1jYGH6IosQ5ep6rmK8FLqP50R+TdS5JtvWyMaX0tP8F6CkVeQMlXZVuAlx3c6xfm3keM9BWEh18phcE5IR+QnP/kJ3/72t/nqV7/Kr371K0uPcRwRe/RFovzgr2/w2rY9AEcWigFSaW7gc7u46aRZfOWsRbjUzL54wVicH776Js9v3Yk00ABwqyo3zJnON848BY9BLRAjIokE/7l8GU9u34amD94OTgCqonDNlKl857TT8GVY3CymJbnzveU8vH0zCT21SByroyAQAi4bN5kfnnxWxtVck7rOzzcu596d64gf7otznIYQIOHC2on8aOG5FGZYQE2Xkt/uWMGfd79LREsc+RsfqyGl5KzqCfxo9oUZb+dIKbl3/wr+sm8FoWQMgTiuXohy+HeLysfz3WmXUe7L7PsrpeTpxrd5oP5VAskICuK4RoAD2xSzisbxr5OupcKXudPzRtvbPNLwVwLJoKnGxPyxfG7MzVT7reXGHM2a7rf5a9PDBJK9R7Y9BtOozRnLdTWfpdqf+bHSnf3v8Hrr3QSSnYNqCFQkGhW+0Zxf9VWq/NZyY46mMbSS99t/STDZcmS+wTSKPHWcNOxbDPObH+kfjO7Iu+zq+h6R5EFTDb9rFBNKv0+J3/wI+WCEY6tp6/4m8eQuUltEx57wSf3OrdYwrPhH5PmtJeM7fJATzhFZs2YN11xzDQUFBZxxxhmOI/IR0NYf5LY/P0Fjd5+l2hoCOGX8KH5zw8V4LOZ9dIXC3PrQk+zt7D7OARlUQ8D8mhHcdc1l5BhUYT2W/miUW555iq3t7ZY0FCGYXlHBvZdfSYHXWt5HKBHnUy8/zeqWRkuHJxUhmFBcxkMXXU2J31pp7qiW5HPLnmJF8wFLGqoQ1OYX88g511uOjiR0ja+9/zSvNe+0ZK8KQaW/gAdOvYURuUWWHqNJne9vfpoXmzZa1FAo8eRy14JPMSrPWk6GlJLf7H6K55pXWbJXhEK+y8/PZ36BMXnWIhdSSh5ueIbnml+zpoGCT/Xy/yZ9hbH5dZYeA/Bq69O81PKEJVuBgltx87kx32Rs3iTLGqu7nuGNtj9a1lCEylUjv8vovDmWNXb3Pcu77T8//F/pPsEKCgqnVf0nNXmnWdZoDT7H9s5vMpDXYU4qp2Vy2U+pzLvEskYg/DLNXZ8n5XykO/6buo
2qKP4ZRXnWk2gdUmSyfn/ocadgMMiNN97IH//4R4qLM09Ac8icYDTGZ+59isYea04IpL72b++p51tPvopuobhZJJHgjseeYZ9FJwRASljT0MQ/P/MimoVuvLFkkjue/yvbLDohkIoIbGlr4/PP/5W4lr6WQVLX+eLS51jT2mS5goMuJbt7Orn95aeJWmi2p0vJ1955nreb6y1raFJyMNDDLW88RtBCx18pJf+x/kWWWnRCBjRaIwFuf/tBeuPWmq79csfLlp2QlIZOdzzIF1bfQ1fMWtO1ew68bNkJgVQibCAR5t82/oG2qHHH4aN5rvk1y04IpEq0R7UoP97xG1oixl10j+btjtcsOyGQKm2e0OPcte+nNEcOWXrMlt43LDshAxqaTPLkoR/SEtlt6TEHg8t4t/1nWHMQIPVuaSxr+Q5tkY2WNLrCKw47IbpFDQnobO/8Jl2Rty1phKPv0dz1OSCJtRokqdfb1vNvBMIvWdJwyI4P3RH50pe+xIUXXsjZZ6cPb8ViMfr7+z/w45A5f1yxhgMdPRlXS5USXtm6m7d27U9re9/qDWxv7ci4kqkuJcv2HuCFbbvS2j62dQtrm5oy1tCk5L3GRh7fujWt7bN7trP8UL1lR+dojc0dbdy7dUNa29cO7eblhl3Hhf2taOzp6+Kube+ntV3VfoCnD27KuByWJnUaw738bseKtLabew7xSP27GSqkXkdnLMhvd6Vf+A8EW3jo4OsZa+hIAskwf9j717S27dFOHm54JiuNqBbjLwceTWvbn+jl6cb7MtaQSBJ6gsca0jsXUS3Iyy2/yVgDJLrUeKHpl2mrBCf1KCvbfpyVhkTydusPkdJ80ddl4qhISOY62zv+DV2a3xBIqdPS/RWsOzofpLX7n9FNOhs72ONDdUQeffRR1q9fz513Wis8c+edd1JYWHjkZ+RIpwRvpsSTSR5bsznjhXUAVQgeem+jqY2m6zy4dmPWGooQPLDGfAGXUnLvxvSLvBECuG/jhrQX2nu3rs+6d4xEct/WDWmjO/fuXJd1DxxdSh7avSFtdOeBfWssFTEbDE1KnjiwkXAybmr3+MH3bGjovNS8if6EeeTluaaVtjTe6dhCV8z8BmZp24qsm9jp6Gzu20FrtMPU7t2uN7PusyPRqQ/voSly0NRuS+/rRxJSs9HojDfQFNlhancg8DoJPZSVBuiEkq00h1ebWnWGXyehd5OtI5LQu+kMv2FqFY4uJ6k1kl01VokugwTCz2XxWAcrfGiOyKFDh/jqV7/KQw89hM/iaYlvf/vb9PX1Hfk5dMhaeNLhb7y2bS/9kfShfCM0KXlvXwP1ncYh7uV7D9AezPbilFpcN7e0sb213dDm/cZG6nt7s26ZJoF9Pd2saW4ytNnc0crWTuvbPoPREgqworHecHxvXxfvtTXY6oHTHYvw2iHjMHpLuI+3WnYfOe2RDWEtzouHthmO98RDvNqyxZZGUtd4vtHYuQwlo7zausaWBsBLLe8ZjsX1BK+3vW2riZ2CwuttxhEkTWq83bHUVsM/BYWVncaRISkla7ufI7vFO4VAYX33C6YaO3ofx06DOYHCzr6nTG0O9T+IvaVI4VD/A6YWPcG/kH1vmpRGT/DPNh7vYMaH5oisW7eO9vZ2Zs+ejcvlwuVysXz5cn7zm9/gcrnQBrnD83q9FBQUfODHITNe2brbVndYSEUslh4+aTMYL+/cY6vLLYCqCF7ZYazx0p7dpvU3rOBSFF7eY7yAv7R/d9Z33wOoQvDiPuNtplcadtl+rxQheOGg8Z3r0uZd2O1GKoAXG40dkRVtO207CBJ4tXmz4fja7l3E9PQ5N2boSN5sW284vr1/N2HNWj6MsYbOys41huMNoX0Ekr22Ndb1GOfJdMTq6U202tKQ6Ozof8dw6ySYbKEnvhc7zo5EpzG0iqQ++M1RXOumL7YWex17dfpia4lr3YOP6mFC0TfJvv9NSiOW2EYi6dwcfxhk334zDWeddR
Zbtmz5wO9uv/12Jk6cyDe/+U3UDI9wOlijIxCydYcPqYWvO2R8se4Mhmzd4UOqDHd32HjPtSsStpTQaoYuJV1h49fRFQlj5yILqQhSV9TkvYqGUvUObLxfupR0RIwjUF2xEKoQJG1oSKAjapxM2hMPoQrFtjPSGQsYjvUmgoMeN86Unrjx6+hPGOtnQiBhrBFI9g2JRlQLo0t90M7AIZuOzgA6SWJ6BJ96/BHuqGYt8Tc9kpjWh0s5vt5LwsB5yIaE1oNHPf4It6b3YP9TlSKpdeJ2uvYOOR+aI5Kfn8/UqR/szpmbm0tpaelxv3cYOpKavYVigISJE5C06SBAKr/CbB6jeiEZacj0GkNBwiR/Q9PlkFwDP+y/R7p5kjYdkAHMHJlUkS/7roi5xof/OvQh0kjNpQ3qiOi27u6P1Rg8z0TKIdTAQMPg99kgGTyadmwtkg9Dw8EeTtm4TxjFufZLFEug0Gdcg6PY78fmbgMCYVpptcCbWSfgwVCFYvo6Crw+W9UZIVW4q8jkdWRakMyIEq9xxdgCjy/jEzmDUWTSCyff5RuSBbbAbayR5/Lbyqv42zzG73muaq/y7gB+Ew2/SXn3THAJFy5l8Ho7PiV9x2Cr+NTB5/Io9qr7Ho3XYC6XYq+6r5W51CHUUJWiIZvL4W98pI7IsmXLLBczc8iO+XUjbeeIaLrO3LoRhuPzakbYvstP6jrzaoYbji8YMcL29k9S6swfbvw6FlaNsB1N0JEsqDLWWFAx0nY0QUGwsNK4E+2Cstoh2Y47adgow/G5pXW2XQRVKMwvG2M4Pq1wtE2FlMbsYuMy5uPzR2d9YmYABYWpBRMMx0fmjMaVpoeMFY0xJkXNhvlG4VHsOVUChWr/RBSDnjoFnpF41SJbGiAo9IzCbeA4edUKvKq98vmpearxqoNXvVVEAR7XeOzmUalKKR6X9WJ2DtZxIiKfMK6cM9VWtEIAI0sKWTjaeB/0smmT8Ljs5fiU5+VyxjjjhefC8ePJ89jrf1Pg9XLBOONF6exRYym1WBnVCL/LxeXjphiOL64axci8QluXQEURXDNmuuH4jJLhjC8YZu8yK+G6OuNKm2PyK5hRXINiQ0WTOlfXzDccr/KXMq9koi1HQZM6l4442XC8yFPIgtJZtjR0dM6rPMNwPMeVy9ziU2xrnFp+nuG4W/Exo+jcw515s0OiM7fEuCqpIlxMLLzClgZIJhddYxh5FEJhZMFN2HMSBCMLbjbsCyOEoDj/09i7e1IoyrsVkaa5n0N2OI7IJ4zSvBzOnToeVcn+i33TSbNMtyzyfV4unzY5aw1FCG6aO9P0VIzP5eb6adOy3p5RheCGadPxmpSrdykKt0yZmXUESRWCqydMNXWYFCG4dYL1UtqDaVwyajIlPmOHSQjBLWPnZ32ZVYXgzKpxVOaYn1K7rvakrLeAFARzS+vSlnm/fMTJWR+tFQjG549gfL5xhArg3MrTbR3frfINY1KBea+Wk8vPsaVR6C5mcsFMU5tZxRcc11MmE3xqPhPyF5vajCu4xNby7RI+6vKXmNpU5V2BsHG0VuCiKu9yU5uCnCsQwl4EqTDXKfP+YeE4Ip9AvnD6AtyqmnFkRFUEI0uKuGK28R3+AHcsmkeO25PxIq4KwbC8XK6fbXyHP8CnZs2h0OfLSqPI5+e2WbPS2t48eSbDcnIzdngUIch1e7hjxry0tteOncHIvKLMNRB4VRdfmpq+sdclNVMZm1+WsYYgtZ3x5cnpe4KcWTmZyYXVGR95FqScpS+NPyet7bySiUwvHJ115OWO0ReltZmYP5ZZRVOz1rip9sq0uUUjc+qYVXQSIkuNS6tvHDRJ9WhKvSOYWXQe2UYTzhh2u2EOygC57mFMLro2q/kBZpbegVsxjzq61WJGFX0ha41RRZ/HrZq3D1GUXMoK/y1rjeL8O3C7qrJ+vIM5jiPyCWTMsFJ+e+MluBTF8iKuKoKiHD9/uu0Kcr3pt0RGFhVy97WX4l
YVy4ufKgT5Pi/33HAlRf70SZwVeXnce/kV+FyujDT8bjf3XX4Fw3LTJ/SV+HN44MKryHV7MtLwKCr3nH8lI/PTJ8Lle7w8ePZ1FHn9GWm4FIU/nX4lYwpL09r7VDd/OflGyn35ljUUBKpQ+N+FVzG5qDKtvVtR+c3cW6jyF1l2RsThf34882pmFBvnuRx5TkLhP6d9mprcCsuOwoDVNyZey+yS9G3uhRD88/jPMCp3ZMbOyG2jrmVuyQxLtjfWfp663AkZOyMXVV3LnBLzSMUAS6q+yOjc2WTqjJxUeg0zi423fo5mTtkXqck7PWONiYVXMbnoOku2owq/RGXuZRnND1CZezmjCr9kybY477MU5d6eoYIgz38h5YXfyfi5OVjnI+m+my1O9117bGxo5osP/pXecBRFiEETGlVFoOmScRWl3HXL5VQWZpYpv62ljc8+9iwdobCxhhBoUjKquIg/XX85NcVFGWns7urk9meeoSUYSKsxPL+Aey6/nLEl6Rfvo6nv6+G2l56ivr/3yFzHMqA9LCeXe86/killx9dFMKMp1MftbzzB7r5OY43DrehLvH7+fMbVzCrPLJGvIxrksysfYVtvq6HGwAHZfLeX3590LfPLazPS6I2H+dq6B9nU02BYW2RAI0f18NNZ17F4WHoH4WiCyQg/2Hof63t2m2oAuBU3/z75Rk4pTx9lO5qoFuM3e/7Mup7NKCiGWykCgSpUPj/mZk4pX5CRRkKP83DDXazvWZVWQ6Bw9cjbWVR2VkYamkzySvNv2dz3GgLFcLtmINfjzIpPM7/UfCvjWHSpsabjN+zsewKBangkNqUvmVn6GaYX35bRqTQpdfb3/g8H+/5I6h7Z6NitCujUFt7B6KKvGeaGDK4h6e7/NZ39Pyf1CTLa2lIBjaK8TzOs6PsIg4ReB2MyWb8dR+QTTjSR5JUtu3jwvY1sb/5gSXUBnDq+jhsWzmTx2FqULHM+4skkr+7cywNrN7Cx6fhqj4vqarh57kxOH1uHmmW11ISmsXTfPu7buGHQsu0Lho/glpkzOXv0GNxZFstL6jpvNuzn3i3rWdXccNz47Ioqbp06m/PqxuFVszsVoek6K1oOcN/OdSxv3n/c/vu0kkpumziHC2sn4nNllxinS8m77Qd4cN8a3mzZfZzG+IJh3Dp2PheOnEKOK7uEYCkl67vreezge7zZuv243JFRueVcP2ohFwyfSa7L+Ah1Oo3t/fX8tXElyzo2HueMVPtLuXzEKSypmEeeybHgdOwN1vNa63JWdq4meUztjDJvCedVnsHp5SeR787+yGxjuJ6Vna+zunsFyWMatBW6izml7FwWlp5Ovjv7o6Yd0YOs73mRLb1LScgPVjLNUYuYU3IRM4rOJd+dmZN+NH3xBnb3PcPuvudJyg8WJPQqhUwoupzxBZeS6x78BIsVIolDNAUepTnwGEn5wQJ0LpFPdf61DM+/Dr87+8JiiWQjvaGH6A3eh673fmBMiFyKcm+kKO9mPG7jU14O5jiOyD8A3cEwW+pb6Q9HURWFknw/s8cMx2OSnLm3vYvm3n4i8QR5Xi+jy0uoKjKOgPSFo2xsaKbvcESlODeHOXXD8bmNNfZ3ddPY208oHiff62VUSREjiowvroFojPWHmumJRBAIinP8zK0ZTo7HeBGu7+2hobePYDxOnsdDbVERtUVFhvaheJy1TU30RFIVUIt8fuYMrybfa7xAHurvZX9fD8F4nFy3h5EFhYwpOr5q4wCRRILVLSkNHUmR18ecymoKTWqMNIX62NfXTTARw+9yMyK3kHFFZYb20WSSNe2H6I5GSOo6RV4fs8qrTRNZWyP97OvvJJCI4lPdVOUUpE7YGNypxjWNtZ2H6IyGSOoahR4fM0qrKfMZL8KdsQD7Am0EElG8qpthvgLG51caaiR1jQ3dDXTGgsT1JAVuP1OKqhnmM/6O98aD7A82E0hG8CguSr0FjMsbYaihSZ2tvQfpjPUR15Pkuf
yMK6im0mecSxBMhDgQPkQ4GcYlXBR6ChidW2OYq6FLnV2BerpivcT0OLkuP3W5w6nwGf8NI1qYQ+H9hLUQLuEiz1VATc4YQw0pJQdC++mOdxLX4/hVP9X+EVT4jLfSYlqY1uheoloARaj41QKq/ONRDY4USylpjuyjJ9FGXI/hVfyUe0cwzGe80Cf1KJ3RHcT0fgQCr1pImW8yqsGpEiklXbG99CeaSOgR3EoORZ6RlHiNT85peoz++GaSWqpSrUstpMAzHVUx/t72x/cQjNejyTCq8JPrrqHAM8HwcyJlnEh8Q6r6qpSoShE+zwwUk7yWSGIP0cRedD2IovjxqCPJ8Uy3XZPok4bjiHxCkVKyub6Fx97exCsbdh9XGbQgx8tVi6Zz1aJpDC/N/s5qa2Mrj7y7iRc37iRxTKXWXK+Hq+dP45oF06ktK8paY2drBw+v28Szm7cTS37wLtTvdnPVrClcP2cGY8qMF/907Ons4qFNm3hyy1YiyQ9WcPS5XFwxZTI3zZzBhHLzkxxm7O/t5qGtm3ls+xaCiQ92r/UoKpdNmMQt02YytTz7O8RDgV4e2rWRh3dvpD/+wTtdl6JwSd0kbpowi1nl1VlfDJvD/Ty6bz0P711PT/yDJetVIThvxERuHjeXuWUjs9boiAZ4umEdj9a/T1fsgyXrFQRnVE7kuroFzC+ty1qjJx7kheY1PH1oJR3HdOEVCE4qm8AVIxYzv3Rc2mRQIwKJEG+0vccLLcvpiB1fonxm0UQurDqNOSVTsu5lFE6Gea9rJW+0v0ZH7PjmkOPzJnJmxTnMKJqFmuW2QUyLsKl3Oe91vUhHrPG48ZE5E1lYegGTCxamTWo1IqFH2df/Blt6n6I7tu+48XLvBKYWX8no/NNxmTgYZmh6jObQqxzoe4S++PH9kgo8E6gruJ7qvPNxZVl7RZdxesOv0BG4h1B83XHjPtdYyvNvpyT3ctQhLDb3ccZxRD6BROIJvn3/y7y1Zd+RvI7BUIRAIvnKRSdz+1lzM7qgx5NJvvPka7y4cZepxsDY589cwJfPOSkjjYSm8cOX3+TxDVvNNQ7nN3xq4Rz+9exTMjo5o+k6dy5fwT3r1hvmSRytccOM6XzvrDMzarInpeS/31/Jb9e9b0nj8vGT+OmZ5+LJYNtISsn/bXmPX6xfgWJB47za8fzqlIsy3tK5d/cafrxhKQgMC6MN5GmcWjma/110BXnuzBaNpxvW8Z+bn0dKaXgEeEBjTkktv553AwUmlV4HY2nrBv5r2+MkpW5YoXUgT2NSwUh+NvN2ij2ZLRrvd23mF7vuIaEn02rU5lTzvSlfpNRblJHGtr4t/GHf/xLTBxzbwXKJUhoV3kq+Ov4blHkzc6brQ9t4qP5OonoIo7L6A/kmRe5ybqn7HuVe4wKEg9Ee2cHLTd8kqvWl1chRS7lgxM8p9WW2FdIf2817rV8gpnWQyisZLOcj9XuPUsyCyt9R5MusxUg0sY+97TcT1xpNNFKvTxUFjCn/M3m+zHKJPok4jsgnjEg8wWd/9yRbD7ZlVEHztrPm8rVLTrFkG09qfP6eZ1i9/1BG/dmuXTCd/7jsTEvOSFLX+fLjz7Fsz4GMahNcMm0iP730PEvOiC4lX3/xZZ7fudPy/AI4e+wYfnfJxZZyWKSU/PuypTyyfUta26M1Fo+o4Z6LrrCcw/LjNW/yx23GXV6PRRGC2eXVPLjkWsvOyG+2ruDX297OSGNi4TAePfMWct3W8kvu27eS/97+qmUNVQhqckt5YPEdlp2RZxvf4xc7n7bcqUZBocJXxF3zvkSJ11qC9rL21fzP7vsz0ijy5PPzGd+gzGt+vHSADT1r+cO+3wJYKnevoJDjyuVbE/+DYT5rUbe9gY08UP8jJNJSHRKBgkfxcceYO6nwpT/5BNAS3sSLjd9Al0nLGqrwcEnNbyj3GVetPZre2DZWNX8KTcax1llXQREuFlbdTalvtiWNSHwXu9quQJdhyxqgML
b8Xgr8p1rS+KSSyfrtHN/9GPCdB1/J2AkBuPeNtTyx0rjt+tH88JnXM3ZCAB57fzP3vW3cdv1ofrp0ecZOCMBzW3byuxXvWbL91cpVGTkhkFpUXt+7j58sX2HJ/q4NazJyQgY0VjY28B8r3rBk/8DODRk5IZBywtZ3NPMv77xkyf6Z+i0ZOSEDGjv72vmnVU9j5R7mjZbtGTkhkOpo3BDq4itrHrbU32Z1127+e+czgPXamTo67bFe/nXjX0jq6ReY7X37+PXuBzLW6I0H+N7W3xHT4mntD4bquXv/75FYc0IGNMLJEL/a/XMimnEX6AHao4d4+OCdpOJS1oqhSXQSepT7DnyfcLI/rX1/vIWXm75l2QkZ0NBknJcO/SuhZGda+2iynfdavoAuY1hzEAB0dJlkdcuXCCeO34o6lqTWzd72GzNwQlIaoLOv8zNEEnssPsbBcUROcHY0tvP6pr1Z9xL57YurSCTNv0QHO3t4Zt32rDvV/+71dwnHzbtStvUHeXDNpqyrNN69cg19kaipTU8kwl2rM1u8B5DAves30B40bu8OqcTXX695N2uNx7Zv4WBfr6ldTEvyi/XWnKJj0aXkxfqd7Og+Pq/gaDRd52eb3sxaY3nrPtZ3HX966WiklPzPjteyKrelScn67oO823F8XsGx/GHvy1kopBJadwWaeLvj+LyCY3mo4YWsNHR0GiOtvNOZ3ll/oflZpNTJtBS5jk5nvJN3O99Ja7ui4yk0abytZKYRTPaypvu1tLabeh4lqccyrvoq0YnpAbb2PJ3W9kDfwyT1/iwqy+poMsq+vvvTWnYEHyKhd2LdCfmbhpQJ2vp+l+Hj/nFxHJETnMfe3mSrXHtvKMKbW/aa2jz63mZbnW7D8QQvb9plavPYemuRGSMSmsYzm7eb2jy5ddtxCbyZ8tiWrabjz+7ecVziayYoQvDwtk2mNq8c3E1f3NzpMkMVggd3bTC1Wdayl/aoudNlrqHw4J61pjZru+ppCHXbKD2v8Gj9+6Y2O/sPsTvQlHXXXgXBU4dWmto0hlvZ2rcn6/L2AsHzzctMbbrjXWzu22irLPwb7a+ZRqlCyX629L6TtYZE8n7XS+jSeGGO62F29b1sWGckvYbO9t6/ounGESRNj1EfeCLr8vYSjUOBZ0nqIWMbmaQjcB/GdUbSodETfo6kdnwys8PxOI7ICUx/OMoLa3cYJnRaQRGCR1ZsNByPJpI8tWaLrU63QsCDK40XvoSm8fC6zbY7xD6weoPhhVaXkvvXb7DVF0OXkgc3bDTtyHvv5g222nNpUvLwts1ETZyZe3ess9VBWZOSJ/duJXDMCZujuX/PWlvOpyZ1Xjq0g66o8cX80fr3sz41MqCxom03LeFeQ5tnDr1rS0NHsrH3APWhNkObV1rfsdXATiI5EGpkT+Cgoc3bHcuyLgc/oNIRa2dXYIehxfqeN2z1pgEIJHvYFTj+1MgAe/peO5yzkT1xPcj+4HLD8ZbQUpJ6wHDcCpqM0hh80XC8L/ImSd08qpgOiU5X6HFbc/yj4DgiJzC7mjrSbqukQ5eSTfUthgv4vrYuQjHzbZV0SAm7WzuJJQZfXBt7++kJp9+/NtVIM09nKERzwN7FCaAzHKa5f/B98FA8zp6eLlvODkAgHmd/7+B3SrqUbOxotu20xbQkO3s6DMfXdTbacj4BklJnS0+L4fjaroODVkTNBAls7jXez9/Qu9+2BsDWXmMnIRUNsachEOzo3284vjuwy7aGgsLeoHFeQkNoR9aRo79pqDSEjJ2d1shWmw4VCFRaw8YR1O7oBgTZFRT8Gwrd0Y2Go8HYmiHQkARj5lFDhxSOI3ICE4gY39Fmgq5LIgY5HIHo0GgA9BvkcAylRp/BXP2xj0DDJMKQsYbB8w0m4rYdnSMaBts7mq4T0ew5nwP0m2whhZJD8371x42d2GAi+y2sARQE/UkTjWTYcMyyhlAImcwTSma/TTaAEIKIZqwR1uw76gBRzT
gKFtMDtqMuIInpxu9HYkg0dBKHC6UNhqb3DcH3UDpbMxZxHJETmGxLlWcyl1sduo/AR6Mx+FyZ1ABJh+ej0DCYyz2EGkZzKUJk3Xn2eA3jz6idLRPLGkPwfknAbVIULNuCYcequBTjO2zVZCwTzJ6rUWXVzDVMXseQaAjDCq0AinDZjrqAQBHGx8+FyVhGKkM0zycdxxE5gSktMG+fbZUcrxu3a/ALVGle7pBouBSFfP/gRa5KcofmdQigOGfwuhKlOTlDtLSm5hqMIq/PVl7F0ZTlDP6++1QXviz72Byn4RtcQwhBkTf73ixWNABKvEPz2Sr1GhcdK/Vk1qRxMCSSEpPCZiWeQtufLU3qFLqNn2uRu9j24qpLnXyXcb2GfFfJkcZ32SKR5LqMqzb71RIE9h03n2pcd8WrZt8rZwCBglc1rtrsVsvI9PTS8ai41ewrKv8j4TgiJzAThw+jusReITdVEVwwZ6LheG1ZEWMrSm1dAlVFsGTaOMO704r8PGaOqLKVgKkKwWlj68j1DH6Hke/1cvKoWluOgiIE84YPpyx38AXUraqcO3qcPQ0Ek8rKqSkY/GIuhOCS0ZNsaQigJr+IySXG3YEvrZ1q26kq9+Uyq3SE4fhFw6fbjrzku3zML6szHF9SOcv2Au5V3CwsM/6OnFo+1/6SJBQWlE4zHJ9fssB2/gbA7OJ5hmNTixbb3tKQ6EwtOtlwfEzBGVmfmPmbhsbYgjMNx6vzzhsSjeq88wzHi3MuIvNju8eiHZ7HIR2OI3ICoyiC60+diZ31QtMl15w8w3BcCMFNi2fZugRquuT6k4w1AG6ZN8tWAqYmJTfOm2muMWuWrQRMXUpumT3LXGPaTHsaSG6fNsu0Eu0tE2fbTiS9bdIcU40bxtjTUBDcNHau6XbVlbVzs54fUo7hVbVz8arGYfoLqufZ2gJShcL51XPJdRk3JzytfB5eJfsQu4LCKWVzTCMis4vnkaNmHzlUUJheOJNSr3G0YEL+HPJd1iq8DoZAoS53qmmp92r/LArcwyFL51AgKPOON62uWuSdQoFnUtYaIMh11VDqM/58+txjyfMuBBvRHbdaQaHf2KFy+BuOI3KCc+n8KVnniqiKYFptJROGm/ehuHDmRHI87qy+1ooQjBlWwqzaalO7cyaNpdjvy8qpUoSgujCfk8fUmtqdVjeKyry8rCIvQkCJ3885Y817XSyoHsHoouLsNIA8j4eLxxnffQNMLa1kemllVhELAXhUlSvHmPfTGF1QyknDRmUdFRFCcO3omaY2lf5CTq+cmLWjICVcncaZKfLkcnbljKw1NKlz+YiFpjY5Lh9nVyzM+givjs4FVeblvt2Km9PKz8x660RH54xhZ5vaKEJlQemFWUeQJDoLSi8wtRFCMK34qqzmT2lIphZfmdZudOEN2Nk6qSu8MW1bivL828k+KqJQnncrYkjyiz75OI7ICU5hro/vXXdOxo9ThMDrdvHDG5aktc3xuLnzGuMwpRFCpJJHf3LteWm/1B5V5ReXX5DxRVCQei2/uPz8tIu/qij8+qILUURmKoLUndivLrowrdMnhODX51yIW8lu0+F/zj4fvzt9H5hfnHwBXtWV8daGBH62+AIKvcZ3+AP8eN4F5Lm9WTlVP5xzHuX+9A3jvjX1Aoo8/qwcnn+ZvIQRuem7L39p3EWUeQuychQ+NfocxuRVpbW7ofYiKn2lWWlcNvxMJhQYby8NcH7VRVT7h2elcWrZ6UwqmJLWblHZRQz3j83Y4REIphWezOQCc6cNYFLRxVT7Z2ahoVCbu4hxBemvdyPyLqIi5zQyXcIECqW+edQWpHeWivznUpxzacYaoJLjmcmw/M9k+Lh/XBxH5GPARfMm8c0rTwesBSNVReD3uvn9F65gdKW1xK6zp47lh1eek1rELYgoQuB1ufi/2y5j8nBrCVknj6nlvy8/H1URWCkWqwiBW1X57dUXM2ektc6fc0cM53eXXIxLUSwtsIoQRx
yYxbXWGnpNG1bBny+8HK/LZUlDIFCE4Odnncc5dWMtaYwvLue+c67B73ZbWsTF4Z//XHgOl46ebEmjNq+Y+067gXy3NyNH4d+mn8l1Y8y3sAao9Bdy98LbKHTnZKTxuXGncfPoRZZsiz15/Gr2HZR68zOKjFxbcwq315lHEQbIc+Xwg6n/xDBfSUaOwjkVi7h11GWWbH2qn6+O+wYVvsqMNOaVLOT62lssNZ50K15uHvUdKn2jMnIUJuTP5YoRX7GkoQo35w7/McN8kzO48RBU58zirOrvoViIIgihMnvYzyjzzcP6Fo1CkXc68yp/hWJyKudvGgq1pb+g0H+WxflTGn73JMaW34OipL8ZcEjhdN/9GPHWln3897MrONTZi6qI4yquDvxu3riR/L+rz6SuIv3d5LG8s7uen76wnP3t3aYas2qr+c6lZzKxOrP24wCrDzbyo1feYld7p6nG9OoK/uP8M5leXZmxxsaWFr7/+ptsaWtDFeK4fIiB300qL+d7Z53BvBHGSZdGbOto57sr3mBda7OpxrjiUr57yumcMnJUxhp7ezv5znuv8V7rIVONUQXFfGfemZw90pqjczQHgz18d+3LvNN2wEBDQZM6I3IL+eaMs7hg5KSMNVojffx4y/OsaNuNQBxXLn1Ao8JXwFcmns3FI2dmrNEdC/DfO5850jvmWA3lsG6JJ59Pjz6HS9NsyQxGIBHi7v1P8E7H+sPdawfXKHDlcs3I87io+nRLi/fRhJNhHj/0EO91vXu4Od0HNQQCiSRHzeHcygs5t/IClAy3puJ6lFda7mV9zxtoUuPYbY4BDa/iZ1HZJZw+7GpLDsLRJPUY73fezY7e5wyqrab6GLuEj6nFVzC37NMZH//VZYJd3b/jQP8jaDJyZM4PaoAiPNTmX8Wk0q+hZnikVkqNlr5f0x74E7oMkrp/PzrpVxz+XzeludcwvPg7qMrQnBT8OJPJ+u04Ih8zpJSs3nOIx97exPu7GwjHEiiKoCDHx/mzJ3DN4umMysIBOVZj/cFmHl61kZW7DxKKxVGEoMDvZcm08Vy3cDrjKstsa2xubuWhNZtYtucAwVgMBBR4vZwzcSzXz5nB5CrjUx9W2drWxoMbNrF0714Ch4uI5Xm9nDVmNDfPnMn0qsydnGPZ2dXBg1s38dLe3fTHY+hSku/xcGrNKG6dNos5ldUZL0bHsre3iwd3beCFAzvojUXRkeS6PCyuruXWiXNYWDnStkZ9oJtH9m3grwe30huPkNR18twe5pfXcPO4uSyuqLN18gmgKdzDkwfX8nzjJnpiIZJSJ8flYWbxSK6vW8DiYeNs1x/piPbxXNP7vNS8lu54gKTU8KteJhWM4MqRi1lUNgmXSW0SK/TE+1natoqlre/SE+8jIZP4FC+j80ZwYdVpLCydYVsjkOhnZefbvNO5nJ54NwmZwKt4Ge4fwRnDzmZ28TzcSvo7ezMiWpANPW+ypvs1euMdJGUct/BS7h3BgtLzmVZ0Mm5l8GP5VolrIXb3v8b23mcJJFpIyjgu4aXQM5zJRZcxruBs3DYX7qQepin4EvX9jxJKNKDJGKrwkuMaTm3BtYzIvwi3kn4r0Qxdj9ITfo6OwH1Ek3vRZRQhvHjU4ZTl3Uhp3lW4lCJbGp8kHEfkY8bug+28sHI77d0BovEkeTkexo0s5+JTplKSppaIlNLSArS3uZPn3ttOS3c/kViCPL+XMVWlXLZoCuWF5l/QgY9IOp0D7d08u2YbjV19ROIJcn0e6spLuHz+FKqKzf9+VjUOdffy1IZtHOzqJRSPk+vxUFtaxFWzpzKi2Li+QSYazX39PLF5Kwe6ewjG4uR43NQUFXHl9CnUlZifOrCq0RoM8Pj2rezt7iIQj+N3uxmRX8BVk6YwvjS9k2fl794RDvHEnq3s6O4gEIvid7upys3nqnFTmVya3smzotEdDfPk/i1s626lPxHDp7qoyMnj8rqpzCg1T2C2qtEXj/DXhk1s6W
kmkIjiUV2U+/K4aMQ0ZpaMSPt4KxrBZJRXmjeyuaeBQDKCW1Ep8eSxpGo6s4rrhkQjqsV4q30dW/v2EUyGUYVKkTufU8pnMqNoXNqohhWNuB7n/a41bO/fSTAZQhEKBa585pTMYnrh1CHRSOoJNve9z+7AZsLJIEIIcl35TC6Yw6SC2WkLwFnR0GSSfYFVHAiuJqqlWi741ALq8uYzJn9R2siJFQ1dJmkJraQptIKY1gtIPGoBlTkLGZF3ZtrIiRUNKTV6oyvoCr9GUutGyiQutZhC30mU5lyI+gnevnEckY8BUkqWvr+Lh19dz/YDraiKQJcSKVN5C5LU8d2z543n5vPnMr428+iAlJK3Nu3jgTfWsXF/83EaA5w5Yyy3njOXqaOyiw6s2HGA+5avY/XeQ4NqSCSnTx7NbafPZXadtVyPY1m17yB/WbmOlfsOHnl/dCmPJKbqUnLy2Fo+vXguC0dby/U4ltUNjfxp9VqW7T3wAQ0hUiF3TUpOqh3Jp+fP4bQx6ZMPB2NDazN3r1/La/v/1hFZl/JwHknqmPK86uF8ZuYclowZl5XGls427tq8mpcO7EJKQAxopEqNa1JnZnkVn5k6l4tGT8gqkrKrt4O7tr/H8/Xb0aR+ZLvlaI0pxRXcPnEel9dNzSqSciDQyV/2rOK5Q5tJ6NqgGuMKhnHzmAVcUTszq0hKU7ibBw+8zQtN64nriUE1anPLuLZ2EZeNmJdVlKMj2sNTTW/xasu7RPU4CsqRvjIDW1KVvlIuHX4qF1SdjCeLKqu98V5ebl3KW+0riGiRD2gM/HuJp4QlFWdydsUZeNXMoxzBZD8rOl7k3a6lRLTQMRoqOhr5riIWl53LyWXn41MzL5oX1QJs6H6GTT3PE9H6EKhHaoYM/LtfLWRG8cXMKrkcn5p5QbuEHmJP72Ps6XuCqNY5qIZHKWBM4ZVMKLoBr1qUsYamR2gNPEBL4D7iWgvgInUCR5I6Eqyhinwq8q+jOv/TeFz2o78nGo4jcoKTTGrced8bPP/2VhQhTOtrqIoAIfjBHeexZKH5sc+j0XSd/35qOY8s22hJQ0r4jxvO5rJF5sc+j0ZKyW9eXsmf3lxjSUPXJd+87HRuPNlaouOAxl0rVvOrN1cNmk/yAY3D+Q1fP3sxd5w8L6MF9p4167nzjeUog+RIDKbxxUUL+OdTTspI45Gtm/nOstcRYKox8F5+auZs/t/Jp2e0iD+7dztfX/4SIEybwQ1oXD9hOj9afE5G5etfO7SbL7/zLLqUphoDeQaX1E7mZyddiDeDirHvtO3ln95/jISup9FIXdrPrJzAL+Zdid9lfatiffcBvr7ufmJ6Iq0GwLzSsfx01o3kuqwv4rv6D/KdrX8gnIymbWongMkFo/nelDvId1vfqjgYOsTPd/0PgUTQgoagNqeGb0z8KoVu69fU9mgTd+3/Ef2J3rRF0QSCYd7hfHbM/6PQbX2buC/ewlMN36Y/0WpBQ6HAXcmVNXdS6El/6mmAcLKd5c3/RH+8Hixo+F3lnFb9Wwo8oyxrxLVOdrR/ilB8G+mPGKu4lWImV9xHrifz3KsTmUzWb+fUzEeMlJIf3fMaL7yzFSBtkS9Nl+iaznf+8BJvrTXurHmsxs+eWMYjyzZa15CSHzy0lOff325JA+DXh50QqxoS+Mmzy3h05SbLGne9vYZfvbnqyBymGoefwy9fX8mf3rHe9fL+tRv4rzeWIzF3EI7W+L9V7/M/b6+yrPHE9q38+1tLDy/e5hoD7+U9G9fzn2+/ZVnjxf27+OqyF9HSOAhHazy6azPffudVw+7Mx/JW016+sOJpkrqWVmMgyfL5gzv4+qrnLRe0W91Rz+fffYSYlrSgkWJZ626+uvpxkrq1yqFbew/xT2v/QlSLW9KQwNqufXxt3X3E9cG7TB/LgWAT39z8W8LJiKXOuhLY0V/P/9vyf0S1wZI7j6cl0sqPd/zMkhOS0pA0hA9x545fEDFp9Hc03fEOfr
f3ewQsOCEDGh2xZn6393uEktYa7QUTXTx+8F8sOSEpDZ3+RCuPH/wXQklrjeViWi9vNn6WQPwg6ZyQAY1IspM3G+8glDDuMn00ST3AtrYbCcV3YK3OiUZC72Fr63VEEsbdmT/pOI7IR8zTb23mpZU7yCQOJUnV7PjOH16kqcO4Y+QAL6/ZyeMrrC/2R/P9B19jX3NnWru3tu3jz4edkEz5r2ffZNuh1rR27+1v4FdvrMxK479ff4fV9cbt4wfY2NzCj15flpXG71et5q296S8eOzs7+Nabr2U8vwTu3bSB53fvTGt7sL+Hry57IePaJhJ4fPdWHt1l3HZ9gLZwgC+8/cwgZ0XSaUheatjJX3am/7z0xiN88b1HkDIzDR3JO217uWvXirS2kWScr627D03XjztZk05jU89Bfr87/d8yrif5j613kdCTGWro7As2cte+p9PbSp1f7Po1MS1myQk5WqMl0sqfD9yf1lZKyT0HfkZEC2Ws0Rvv5JGG31qyf7HpR4SSPRmVoJfohJLdvND4n5bs32v7HuFka0bl4SUacT3AOy3/YslZ39f1/4gk9pFZITQNTYbZ0f5ppLRbVv7jieOIfITouuSBl9ZkVQhLylRE4Om3zB0MKSX3LF2bdVl4ATxmwYm55621WZ+gUITggbc3pLX7y6p1WVf+VBXBPSvXpbW7d816FCtFTQZBEYI/vZ9e4/7NG7MuRq0Iwd3r0y/gD+zYmMrNyUJDAHdtXpP2Qvvo3o0kdC3repZ/2vE+WpqIxTMHNxBOJjJavAeQwP373ieumUcsXmnZSF8inKWG5KmG9wknY6Z2qzo30RXvy2jxHkBH8nrbavoTIVO7Tb1baI91ZKmhs7p7Ld0x82jC/tAOWqINWWvsDGykI9ZsatcW2U1zZFtW/WMkOs2RbbRFzKPF/fF6WsOrstTQ6I3voSNqfs2KJVvoCr+IlWjL8WhEkwfpiSzP4rEffxxH5CNk7Y4Gmjv7s76Q67rkmbe2EIsbX2i31reyt7kzo4jL0Wi65Ln3thOMGF9o97Z2sqG+OeveMZoueXXjbrqDYUObxp4+3t5Tn3U/FE2XLNu9n+befkObzlCIV3buSbvlY4QuJasPNbK3s8vQpj8W4+md27J+HbqUbO1oZ3ObcQQpmkzwyM7NWWtI4EB/D++1HDK0SegaD+xeb6tfUFskyFvN+wzHdSl5cN9qW83f+hNRXmveYTgupeTRg6tstcmL6glead5oavPXphW2Gv5pUue11vdNbZa2vZl12fkB3up423R8ZeertjQUFN7tfN3UZlPPC7Y69gpUNvc8b2qzr+9p2xp7+54wtWkLPkr2/W8AVFoD6aNUn0QcR+Qj5NnlW1LJpzYIRmIsX7/XcPyZVVtta8QTSV5bv9tw/OnV22xraFLy/DrjBePpDdts16wQQvD0hm2G489u3TEEXVUFT2421nhhz07imr1wqyoEj23fYjj+Sv0egglrOQVmGo+YbM8sb95PV8zYcbSq8fAe47vKNZ31NEfSbz2aoSB49IBxftCO/iYOBNtt/d0F8NQhYyehMdzGzkB9VhGXASSSF1veMRzvjnWzpW9bVpGKozXeaFtmOB5OBtnSt9qWho7O+90DRdOOJ6FH2dn/hq1uuhKNHf1vkNCjgz8HmWR//19tazQG3ySmDf75lFLSGniI7KIhA2j0Rt8mlrSWj/JJwnFEPkLqW7qzvvseQFUEje3GF+v6th77GqrCoY5ew/GGzl77GkLQ2GX8Ohq6+2w7CULAoR5jjYM9vbadHV1KGnpN/h69vRmdSBkMTUrqe3sMxw/29+KyWQRMk5J9vcZh+oOBHtvvlSYlBwLGGg0h49doFR3JwaCxRlPYWmKjGTLNPM3R9DlWVmiPdhtul7XHhkYjkAwQ1wd3YnviHRnlbBgR06OEk8FBx0LJLjSZsK2hyYRh0mpc6yMp7TnRkNoGCicHj0zqMkpSt//ZAkksaRyZ/KTiOCIfIeGo/S
+cEIJw1PjuN2QylgnhmPFzDUbN98etIJGEY8bPNRyP29oGgNRWVjhu/DrCiYRtDQlHKrYaaQzF+fh+E41QIp51TtDRBBMmGsm4ra2GI/OYRG7CQ6QRNjlxYjaWCVHN+HNl9cRLOnQkcX1wnajB3X82RLTB54oNoUZMH/yETnwINeIGGklp7XSQFZL64A6NJs3zeTJhKOf6uOA4Ih8hub7MehwMhpSQ6zeeJ89kLBPMnmu+z17JZ0g5VDleY41cr8f2HbiiCHI8xnUlcj32NQSQ7zV+P3I97iFYWlOl7w013J6sc4KOJt9jouHy2HbaAPLcxn/zHJfH1nbGALmqiYbJWCb4TOYxG8sEBQWPQQl3/xBW5PQbFB7zZlGQzAifOnhdFI8ydBoegzLxLjF0fV9cSu6gv1eFvfLxH9ZcHxccR+QjZPTwUvu5FbpOTaVxmfHRlfY1kppO7TBjjbphxUPyOkaVm2iUmpdSt4KU5vPUlRTbXlwVIagrKTIcH11UQsJibQsjVCEYU2zcRXl0YQnJNLUwrGiMKzLRKCix7SSoQjC20Lh8fV2etU7RZigIRucba9Tm2uuRBKmCXTUm84zwD02VzCp/qWHBvArfsAw62xpT5C40dHaK3WUoNhI8B/ApOeSogy+uea7SjJvQDYZLeMlzDV48zaMW2u4zA6mE1VzX4MXTVMWHW828AehgKj5X7RDM8/HCcUQ+Qi4/fZrt3IqCXB+nzhpjOH7F4qm2NfweN0tmjzccv3y+fQ2XonDRHONKglfMmmJr/gEuN5nn0imThiTv4ZoZ0wzHLxw3Ab8r85Ldx2pcN8VYY8mosRSYRDOsatwwcYbh+KlVoxnmt3cx16TkxnHGVXXnlNYwMrfY1vKqI7lu9FzD8fEF1YzPr7K1BSSRXFWzwHC82l/O1MIxtjQEcFH1yYbjRZ4iZhRNs3WiRSA4q+IMw/EcVx4zihba1FBYWHq2YY8bl+JlSuES2ydaJheeg8ugOZ8iVMYUXI6w9TpUavLOwWNSUr4y7ybsLakqxf4zP5Hl3tPxoToid955J/PmzSM/P59hw4Zx2WWXsWvXrg9T8oRm1oQRptGMdCiK4IozpuN2GX9pJ9VUMGnksKwXWFURXHrSFPxe4y2NumElzBszwpbGhbMnUphjHF6uLMzn9PF1WUdeVEVw9sQxVBQYL54lOX4unDQh+1olQrB4VA21xUWGNnkeD1dOmpK1hiIEMysqmVxufHHyqi5unDgjaw0BjC0qYW6FcR8gVVG4efxsW4trdU4Bp1SNNn4eQnDzGOMF3grFnhzOqjJvhXBt7SJb0Z0c1cOSqummNpdUn2JLwyVcnF0x39TmnIozbZ1oEQhOLz/F1GZR2RKbJ3N0Tio929RmevFFtk+0zCi+2NRmTOEVthJvJRpjC68ytanIuzbr+VNoVObfbHOOjycfqiOyfPlyvvSlL/Hee++xdOlSEokES5YsIRT6x0vGgdSF9pYL5mX5WHCrKleeYXzXOsDtS+ZlteUgSD3Ha0+zoHH63Ky3NaTEUr+ZTy2eg55tjQ9dctuiOWntbp83K+vlQpOSTy8wvvse4LbpsxAiu0C6LiWfm22+IAHcNGkmLiU7N0ECX5i+IG3fnGvHzMTncmXtjHx28oK0zuulI2eQ7/ZlrXHb2IW40zSmO6dqOiWevKw0BHBN7Ulp80BOKp3OMG9xVtEEgeD8qpPIc5nnNkwtnEy1ryprjUVlCynymHesHpUzgZH+MVlqKEwtmEept8LUrtw3mpE5M7OKWAgURubMpMxn3oQyzz2C4bmnZamhUuKdTKnP3Pn0uIZRnnsp2S2rKn73WIp8xlGwTzIfqiPyyiuvcNtttzFlyhRmzJjBvffeS0NDA+vWpa9G+Unl4lOmcOUZ5h/oYxGkLhw/+fJFVJSm7zZ5zuzx3Hp2+kX4WA0J/Ndt5zOqIn2jqlMm1fGlc0/KSGOA7199DhOHpw8/zq0dwbfOOy0rjX8//3Rm16
RvQz+lsoIfnWd+x2bE109dzCl16fdzx5SU8stzzs/K4fnCnPmcN/b/s/fe8XFUV///585s16r3LlnFliWruPdece8YTEsgCRDyJDyEkAIESKWmkAQSQgIYU9x7772pWcVVlmT1Lq2278z9/bFaR7Y1s7MzgvB8f/7k5SdPNGfnvTO7e++Zc889x3sX3jj/QPx16gIAvpVUIgAezsjF0jTvS2Hhej/8Y9IyEOI7Y0lyFh5O9/6dDNDo8N6YB8AyjE+OAgOC6dGD8Hi694Fcx6rxp+GPQs345lQxIBgZmorvpHr/vqgYFr8a8iR0rManSZwBQUZAEr49YJF3W8LguUH/A4PK4CODQYIhHo8kPejVlhCCR5Ofg1EV4DMjXBuFlQlPSbKfG/tz+KvDfVqiIWDhrw7H3NifS7IfGfFLGNUJPjO0bDDGR78pqcHlgJDX4KfOAHxaamKhYgKQEfEhiMJt+P9X9bVedWenu95CSEjfE53dbkdXV9dt//5fEyEEzz00FStnuCMC3sqLswyBSsXi9R8swLgc4bD2nfrBwgn49qyRt87hjcEwDH772H2YIZIbcqe+O30UfjBnnHQGIXh1xUwsHik9/+ORMUPxs9mTQCQyCICfz5mMh0ZL7/C7PCcLv5o9HQwhXpc3PMefmzwe3xsjPbo1P30Q/jDzPrA+MJ4ZMRo/HiP9CWl6Yiren74IKoaRzPhW1jD8csw0yV2Ex0Yl4Z+TV0DLqsB6GTQ9jJUpOfj96LmSGXmh8fjnuIegV6m9XofHkZgTl4m3RiyVvFyYHhCD90Y+DqNa55XhiWVNiBiEN4auhspLxMWjeEMk3sz5AQLUfl4dHg8jNzgdr2V9DxpGWl5RuDYMLw7+CYI0QZKTV1ONA/DTjP+FjpWWVxSoDsHTqa8hWBMukUEQo0/Ekym/hF5gt8yd0qsCsTLxHQRr4iDNzSUI0cRhZeI70KvEozoeaVgjpsa+jyBtGjyPd+Ji4KeOwbS4D6BXSUtEZRkDMiPXwF+bK5mhYSOQFfUldKo4SYz/F0Wo1LabCsXzPBYsWICOjg4cP953xcBf/vKXeOWVV+76u5Q2wv8XdazgOj7bm4/z5TdvTbI8pWAYBjzPQ61iMXdcJlbNHIqkGOnttHvrVHkVPj2Yj5NllbecHp5SMIQBT3moGAZzRgzCg1OHIj1WXtb3uWs38cmxfBwuqwABASH/YVBKwRCCWbnpeGjiUGTGiYdphZRfXYePTuVjX7m7qux/GOTW1tWZg9PwyJg85MV7j4T0pYv1DfjXuXzsunT11rk5yoMBcXdgpRTT0lLw6IihGJUgb9Aoa27Ch4X52HqlHC6eB8sw4HgKj4/FU4pJicn4Vu5QTEhIksW40t6CD0suYOO1Ujg4DixhwFPa4wi4u/+Oj0nEt7KGYVqCcOKzmG50teFfl89h/fVi2DhXL4b7OEcpRkbE47GBIzArPl2yE9JbteYOfHz9NNZXFsDCOaC6jeH+bHJD4rA6ZRTui82UxWiydeKLqlPYdPMsul02qAgDjlL3E1rP5z8oIBYrE8dgdkyuV+erL7U7urC19hh21B+HyWX5z73q+a1wlEeSXzQWxkzC9MiRkh2d3jI5TdjXeAgHGg+hy2UCS1jwlHczAHDgEa2LwsyoaZgUPh5qgZ0yYrJyZpxs2YsTLXvQ5WoHA/ZWOX4CAh4cQjWRGB82G6NDp0PN+L4bxsFZUNyxA4VtW2ByNfXJ8FdFIDdkIbKD58na/uvibajo2oQrHV/C7KoBgQoUfI/LwIDCBR0bhrTAFUgNXCaaoCokntrR2P0l6rs+gs1VcYvhvg43Q82EIsr/IUT5PwQ1q3yX4DdNXV1dCAwMlDR/f22OyJNPPoldu3bh+PHjiIvrexC32+2w9yrc1NXVhfj4+P+TjgilFA6HCyzLQCWSXAoA1Q3t2HWyDI1t3bA5nDDqtUiLD8ecsRkwGsSfWmwOJ1SMd0ZtSye2nylDfZsJVocTfjoNUm
NCMXfkYAT6idcksDtdYAgRTZIFgIYOE7acL0NtWyfMNgf89VokhQdjwfDBCDGKPxk5XC4QeGc0mbqxubAM1W2d6LbbYdRqkRgShEW5gxHu3/ce/94MANB42cXSarZgU0kZKlrb0e2ww0+jQXxQIBZnDUZ0gPig5OA4UEqh9cJot1qx6XIZrrS2otthh0GtRqx/AJZmZCIuQPwJz8lx4CUwOu02bLpWhkttzTA57NCr1Ig2+mNJaiaSA8UHPifHgaM8tKxKdILvdtqxtbIMJW0N6HLYoVOpEKn3x+LkTNGtugDg4nk4eQ46LwyLy4GdNSUoaa9Dp9MGLaNCuM6IefFDMDBQ3LGVyrBzThxoKEFRRxW6nVaoGBVCtUbMiMpGRqBwEi/gdiQcnAs6Vi3KcPIunGgpxsXOa+h2WaAmKgSq/TA+PA+D/BNFX8tRHg7eCR2jEbVz8S4UdBShtPMSzJwZLBgY1UaMCB6KdP800dfyPQytFwZPeZR3FeCyqQhWrhsEBAaVPzIDhiPVKO4Q8pSHk3dAw2hF7SjlUWW+gBvdZ2Hj3JFxHRuAZONIJPoNE13CoJTCSe1QE41XuybrOdSaj8LBdYKCQsMGIMowGtGGcWCI8FhEKYWL2qAiWq8Mk/0cWi174OLbQakLKiYIgboxCDZMB0OEHUJKKXhqBUO0ICLv5Zuqb5wj8v3vfx9btmzB0aNHkZwsnlTUW75cyDdBLheH4+euY8OOfJRcqoOLc3vAfgYNpk/IwOI5uUhJVLbXnON5nCi8gXX7ClBwqQZOl5th0KkxbdRALJueg0FJ8qIOHvE8xamySnxxqAhnLlXD6XJntOs1akzNS8WKKTnISoqS9QTqEaUUZ67exOfHC3G8vBKOHoZOrcLkrAG4f3wu8pJjFDMuVNZi7ekiHL5cAZvT7YhoVSwmpCfjgdE5GDUgXjGjqLYBn14owt5LV2HtYWhYFuMGJGD18FyMG5CoeJvwxcZGrCkqxI4rV2BxuqttqhkGo+Pj8XBuHiYnJYFVWEq+vLUZn5QWYtu1cpgcjluMYVGxeDQrD9OTUhWXq7/W0YI1VwqwsaIEXQ73Q4eKMMgNi8Yjg4ZjVkI6NKyyQbfS1IrPKvKxqaoIHQ53VU2WMMgMisLq1BGYEzcYWlbZlupaSzs2VJ/D5pv5aHO4k+8ZQpDuH4X7k0ZhVswQ6BUWNmu2dWBb3WnsqDuNdocJFO6lqES/SCyKG48ZkUOhVynbtt3u6MSBxhPY13gMbY4OAO7IQ7Q+ArOjJmFi+Gj4qZQVHTM5u3Cq9TBOtBxAu6MVFO5oUKg2AhPCpmNU6ET4qZRtDbe6TCju2I8L7TvR4Wi4xQhQh2No8BzkBM+En8QlHCE5uG5c69qN8o6N6HLW9EQ4CAyqMAwKXIj0wHkwqJTVqXHxZtR3b0NN11p0O68DPQwNG4ZY/2WI818BnSpKEePr0jfGEaGU4plnnsGmTZtw+PBhpKV5T7rrrf9LjsiO/Rfx/ppjaO+0gGHIXbs9WIaA4ymyBsXghadnITHO9+JNe05dwp/WHkFLh1mUMSg5Ej//9kyky3B6DhVewxtfHEZDm+nW+fpipMWF4cXVM5CV7PuP4nh5JX674SButnaKMgZEhuDF5dMwLMX3ZZCzFTfxytYDuNHcLspICAnCSwunYmyq70WECmvq8Yud+3GlqQUsIXd1v/UwYgL98eKsKZiW7vsySFlTE17YtxclTU19M3r+Fmk04ucTJ2HewIE+M661t+L5w7uR31jfJ4MhBDylCNMb8MLoiVg2MMtnRpWpHc+f3IkzjTf7ZoCAB0WwVo9ncyfgoYFDfWbUWzrxswvbcaKxQpThr9bi6YyJeCzN+06hO9ViM+HVi1twrOlyz1JB3wwDq8FjKRPw7dSJgjU0hNTpNOPtS+txrPkiCHAXw5NYrmXUWB4/CY8OmOXzkpHZZcUHFZ/hZMt597KjQCq1mlFjdtQkPJCwyOclIztnw/qaj3G29T
go+D4ZBAQMYTE2dAoWxz3g83KOi3dgf8M/Udixp6exXt8MAgbZQdMwI/o70PhYlZanLpxveQ/lHRt79cW58zNx3/9k/2kYG/EcNKx4dPZOUcrhevufUdX1EXhqw38+5d5yMyIM05ER9ktovuHLOd8YR+Spp57C2rVrsWXLFgzsNUAGBgZCr/fuZf9fcEQopfhg7Ql8vP60JHuGIdBp1XjrpWXIGiQ9l+GjbWfx1y+Fu3HeydCoWLz17CIMz0yQzPj8UCFe//xQnz+BuxiEgGUZvPm9eZgwRHoS7eYzpfjlF/tAqfdm74S4dwb8/qE5mJkrPYl2V/FlPL9uV09o0zuDgOBXS2Zg0VDpSbQHr1zHMxu2g+Op123MnqnupdlT8eBw71ujPTpRXY0ntmyGo2cpRopemDAB3xkuPYn2fH0tHtm5ATaX866JW0g/GDYGz44YJ5lxsbUBq/d9jm6nXTLj2xkj8IvhUyU7Clc7m/Dw0TXocFjBSawyuyI5D68OnSs5WlVtbsUTpz9Ei71bMmNOTDZey1kieRJvtLXjR/l/Q6OtXXL9jrFhmfhl1sNQS0xwbXd04pXSP6De2iip1gkBQVbgQPxk0JPQSozymF3dePfqb1FrrRJ0cu5kJPul4cnU56GTWFrezlnwedXLqLVekshgEKFLxoOJv4JeJS3vw8XbsK/2BdRbL8D7qOhmBGoSMDvujzCopD1s8tSB4qYfotlySJI9wEKvisHw6I+gE6j0+k2QL/P3V7pr5m9/+xs6OzsxefJkREdH3/r3xRdffJXYr1VfbL0g2QkB3MseNpsT//vqelTXSuvWuPlQsWQnxMOwO1149u3NuFLVLOk1u89ewuufu38IUqYKnlK4XBye+9s2FFdIa1t9qOQ6Xv5iL3gJTgjgrjfC8Tx+8slOnLlaLYlx8loVfvzlrh4HQRqDpxQ/37AXhy9VSGLk36zD99dvh4vjJTkItOffK7sPYkeptIJ+pU1NeHzzJthdLp/qtfzu2DF8WVIiyfZaeyse2bkBVqd0JwQA/nThFD4slrYF/6apA6v3fQ6TD04IAPyz/Bz+cvGUJNsGaxceObYGHQ6LZAcBAL68UYC3Sg5Ksm2zm/Gd0/9Ci93kE2N3XTFeL90p2EW3t7qdVjxX+D6a7NKdEAA41VKG18u/kMSwcXb8uuxd1FubJBdco6Ao6byMP175UNK1O3kH3r/+Juqs1ZIcBA/jhvkaPqj4Azjq8mrPURfW3/w1aq2XfWDwaLLdwBfVr8Al0HG4t3jK4XD9K2iw5kPaqOhmdDpuYm/t/8Ip0ITvNntKUdr8CzRbDks6v1scbK46XGj4Npzc/xs7S79SR4RS2ue/Rx999KvEfm1qbOnC3z464vPreEphszvx5nv7vNq2d1nwxkfSBsveohRwujj8+oM9Xm27rXa8+sk+n8s7UQAcT/HSv3d7HQTtThdeXLtH6u/5dg4Ffv7pHnBeera4OB4vrNsteWC6Uz9dv+dWQqvwe6F4futuyc5UbxEAP9u+D2aH+CBIKcXze/fAycurzfnigf1ot3ofBH96dC9sLqesCqC/OnUYDd0mr3Yvnd2HbqddVvG7NwuP4kaXd2f99eL9aLdbfHJ0PPrH5ZMoa/fuSP/1ygE0200+MyiAL6vPoqC9yqvtmqr9qLO0+OTouBkU+xvzcab1klfbbXX7UW2p9blaKgXFufYinG7N92p7rPkAKs3XZDB4XDaV4EzrMa+2xR0HUGku8rlaKgWPWutlXGjb6dW2svswqs3HZDA4tNsrUNL+uVfbFutRNJi3wdeBkYKDxVmNG53v+/S6b6r+/1k9pZ+0bW+xb5WdeonnKQpKbnqNimw7WuJ1AhZjXKpsQvmNRlG7nWcuwe5wyZr0eEpR3diB/Ku1onZ7C6+gy2qXzWjq7Mbx8kpRu8OXK9DSbZHViZYC6LTasK/0mqjdqcqbqG7vlDWxUgBWpxPbSsQnjOLGBpQ3N8uuXOvieWwoKx
W1udrWgnP1tbImb48+Ky8WPX6zuwOHa6/LZrCE4NMrBaI2rTYzdtWUK2AwWFshHt0xOW3YVlPgs4PQm/FF5RlRGzvnxPba07LLwjNgsKlGPGrq4jnsaTgi21FnQLC7/rCoDU95HG3eI5tBQHCkaY/ogw2lFOdat0L24AuK823bQL18nmXt62X3p6HgUd6xEbyX6M7NrjUK+uxwqOn6Ehxvk/n6b47uOSIy5XRy2LS7UHYJcsCdzLh5T5HgcY7nsW5foaIW7yxDsOGAMINSis8Oig/2UhhfHCoUtVl7rFDRzhGWIfjsmDjj01OFsvutAO68lzWnxBlrzhcq6jxMAHx8tkB0oP2ksEjRdVAAHxUUijoya8qUMXhK8UlpIZyccI+Qz64UKfrMOUrx+dUiWF1OQZt1leL30juDx+aqYnQ5hAfzHbWFcPDelwvEGPsbytBiE44gHWoqhJmTP6Hw4HG27RLqra2CNhfai9Hp9B7FEmZQXDJdx01LnaDNFVMpWh3SloP7EgVFne0mqizXBW3qrJfRbK+CrPBqjzqcjag0CzvS7fYKNNkuKupPY+PaUd19QvC4xXkTrdYTivrscLQbjebdsl//TdE9R0Smistr0WVS5olyPMX+o+WCxy9XNqGprVs547RwXkJlYzuqGtsVNXjneIqDhddubVe+U40d3Si92Sj7Cd/DOHm5CmZb38saXVYbzlTcVPSEz1OKopv1aO7q+547OA4Hr1Qo6jxMAVxraUNVe0ffxynFjiuXFV0HANSaulDW1CR4fMtV+VEEj9psVlxoFJ6UttwoVczodjpwskF4WWN7dami5nIA4OA5HG0QjoTtrruo6PyAO1JwuEk4Ena4sVByZVQhMSA41iz8Xk+15itqWuhmMDjVIrw8U9B+RlG3XjeDRX67cN5deddxMAq69XoY5V3CEaQbpkMKIhVuETCo7BZOQG2y7If8qI5HDBrNuxSe47+ve46ITLV3WvrlPF0mq+ATXVtX/zCsdicczr6f6PqLwfMUJqu9z2Nt3f3DAIB2c9+5D20Cf5ejNkvf5+q02hQ5U73Vau77nlhdLthFogw+Max9M3hK0Wnvn3CuEAMA2uz985m0WoWbZLbalTnqgHsCb7ULM1rsJoWujnv3V7sIo9XRJXs5ozejwyF8P9odnYqdNkIIulzCURWTq0tRt163KLpFGGZXp+J7xYOD2dUheNzGtSt2DCl4WFzCS+9OrlX20s9/xMPOyY9AfVN0zxGRKU7g6d9X8TwVXHrpLwYAwWiF3PyTvhl9T6D9y/g6ruO/x3B9DQwpW6clM0TeL99P1+IUWctXGnEBABDx65CbG3IHAi7R6+ifeyXO6A8Hl37lDAoqeh6+X64DovkbPDjFzo6bIbys+HVcx/8V3XNEZMrop6yioUd6vUaw8Z2/l/LuUsUyBHpt36WE+4sBAAGGvgsF+ev7k9H3uQL0vhUpEmUIvN8AXX8y+j6XUaNRHKy9xRB4vyzDQK/yvddInwyt8Gdr1PTP5x6oEb7vAWrlnwlPKQJEGdIat4mJoxQBauH6GAFq3wpg9SUKCn+RKqj+CquXukVgFGlk56fyU7w0Q8DAIFIQTMf6KY5WEDDQifSQ0TK+95fpSzpWuJqrmvGHkjwXj77phc2k6J4jIlOZA6OhYpXdPpYhGJoVL3h8YFIktBplpagZhiB3YKxgYagB0aEwKnQUGEKQkRABrbrv9xobEogwf2WDOQGQGB6EYL++B9owowHxIYGKJ/GIACNigvouvmPQqDEoMhwKclUBAIF6HVJC+x48GEIwPDZWcUl4vVqNweHClXVHx8QrSlYF3OXfcyOECyqNi0qU1SSutxgQDI8Qrqw7NjJZ8XUAwIgw4cJ/o8IGKP48KCiGhQhX780LTlWcv8FRHtlBwsUFMwPTFU/gHOUwOFC4QnaqMUPx0gwPDqnGQYLHE/2GgFeQ4Am4l00S/IQrBEfp8xQlkbpFEGUQ7gIerBvRDwwGwb
qRCs/x39c9R0SmggIMmDp+kKIdFBxPsXSucClrP70G8yZkKmLwPMWKmcI/Bq1ahaUThghGZSQxKMWqqcIMFcvg/vG5igfzByfmCTpUhBCsHpOr6PwMIXhwdI5oz5aHR+RKKpQmxlg1NFu08d4juXmKclFYQrAiMxN+GuEqmI9k5Sla1mAJwcLUDATrhJ/AHx40TNGSA0sYzEhIQ5RB+Ol01YBhiq9jbEQykvyFq2AuTxip6PNgQJAVGItBgcKVlOfGjJJ9fsDtqCcYIkQdkckRo8EqbJ4WqglGbtBgwePDgsdAyyh7sPFj/ZETJFwdeKD/GOhldMTtLTWjQ1bgJMHjsX4j4aeKUMRgiAppAXMEjwfphsOgSobShNU4/+WKXv9N0D1HRIGWzMlVtIMiJjIQQ4eIl2BfOi1HESMkwIDxeeI9TpZNygZVwDDqNZgxXLwE+5LRWVDih2hULOYNzxC1WZg3GGoFDdMYQrBkmHgflbmZA+Gnkb+sQSnFyrwhojYzUlIQIqEFgpA4SvFgtngp+YnxSYgxyh/MOUrxUFauqM2w8FikBYbKHmY5yuMRLz1nMoKikBMSKzuawFGK1aniJfHj/UIwOixFNoMHxarkMaI2YdpAjA8f4nNfGo8ogKXxE0RL4htVfpgQNkL20gkBwZzoyaLvUcvqMCZ0igIGgwnh06ESKVfPMmoMDb5PdqInAYO8oFlQi/ScYQiLjKClkOskELBI8Z8JLStc2pwQgoTAh2Sd38OIMMyAVqHD9E3QPUdEgQanR2P8yFTZ0YSnHpnk9bUp8WGYMy5DdofYZ1ZN9LqEFBsWiOWTcmRPGN9fNF5wWcajsAA/PDpluEwC8L1Zo2HUiT9pBeh1eHLqaNmMb08cjlCj+BKSXq3Gj6ZI77PSWwTA6hG5iBVY+vFIzbL4yYQJshlLBw9Gaqh4nwuGEPx8zGRZDIYQzE5OQ47IsgzgHmh/NnyqbMaE6CSMifLejPDHQ6bJmi9YQpAXGofJUd6bcT49cDoY4vvCBksYDAqIxowo732MHkmeCRXx3d1hCYN4QzhmRA3zars4bjY0jNrnJRoGDMK0wZgeOd6r7dTI+6BjDT47IwwY+Kv8MTF8hlfbEaHzYVAF+uyMuHND/DAqbJFX24GBC2BURfq8jZeAgYpokRPi3cmIMS6EQZ0kY6swASEqDAh+ysfXfTN1zxFRIEIIXn52LgalRPm87PD0o5MxaYy0Rm4/+/YMDB0U57Mz8sSSMbhvvHAYtbeeWzkZ44ck+zwIPjRjGFZMltbI7Zn7xmF2nvTmdR4tGzME35omrZHbdyaNwLLhvneHnZczCM9MGyvJ9qHhuXh0lG/dYQmAqekD8NMZwuHg3lqemYVnRvnmVBEAY+IT8Ovp3gdyAJibMhA/Gy3t/XjEEILciCj8Ydp9kuynxKbg1VEzb70/qYxBQeH42+TFkr7zI8MT8bvhC0B8YLCEINEYgvfH3Q+VyFKcR0OC4vC7vBU9fVylUVhCEKkLwLsjHoKG9Z7rNcAYjdeGPAaGMD4wGASpjXgj97vQs96XRKL1EXgh4ymoCCuZwYCBn8qAFwf/D/xU3nO9gjWheDL1eagYtWRnhAEDDaPF02kvwF8tnODpkZ8qCA8kvgYNo5PsjLgdBA3uT3wFAWrvncm1rD9mxb0DDWOU7CgQMGCICjNiX0eAxnvXcJYxYGjUB1CzIT44IwwIWORE/Bn+Gt/H02+ivtLuu0r1f6H7LgDY7E689oedOHr6ap8t5z0ihIBlCX785EzcN9W3ydLhdOHXH+zD7pPlogyGEBAC/Gj1FCyfkesTw8Xx+P3nB7Hh6EWvDAD4/uJxeGTmcJ8cJI7n8c62Y/j4cL5XBgXFd2eOxpOzRvvEoJTi3QOn8N6hMyA9Lez7kqdV/LcmDMOzMyf4FNmilOLvJ8/hncMnAcAr44Fh2fjFrCmSJr3e+qigAL86chhUAmNJxmD8ZsYMaHxcnvry0kX8/Og+uHgquG
XRw7hvQDrenjoHOh933Wy9UYbnTuyAk3cn5/VFYQkDjvKYHDsAf5m4CH5q31rCH6i7jGfPbIKNc3pljApPxF/GrBDdLdOXTjVfw3P5n8Pssgt2qfYwhgTF4Y/DVyNE69uOmOKOCvyi+F8wuSwgIH1+JiwYcOCRYozB73IeR5jW++TdW9dMlfjdpb+i02kSZDBgwINHrD4KP8v4PiJ00rrJelRnrcZfr72OTme7IIOAAQWPUE04nkr9CSJ0vnWTbbXX4vOql9DhbLx1LiGGvyoU9ye+gghdkk8Mk7Mee2ueQ6ezyitDxwZjZuwbCNMJJ9v2JZurCQWN30W34xIIWIEkVvc3TsUEIjfyLwjWeY+A/Tfly/x9zxHpR12+3oCNuwqx90gZXK7bv6wRYf5Ycl8e5k7LQlCA/B0kV6ubsfFAEbYfK4XDefuXNTTID8un52LBpCyEBsnfDlhR34r1R4qx5UQprI7b98EHG/VYPjkHS8YPQUSw/O2AVc3tWHfyIjacugiz/fZqqQF6LVaMy8GyMUMQEyL/c69t78QXZy/iy7PF6LLdXmzNqNVg+YghWDkyGwmhQbIZDV0mfJF/EWvzi9F+RyE0g1qN5XlZuH9oNlLCQmQzms1mfFFyEZ8UFqLZcnsBMZ1KhWWZmXgwOwcDw8JkM1qtFnx56SI+KilAg/n2olgahsXi9MF4KDMXWeGRshkddivWX7+If5dfQI2587ZjKobBwqTBeGjQUOSERsteijQ5bdhcdREfXzuLqu7bi0mxhMHsuAw8mDIcw0LjZTMsLjt21hZjbeUpVHTfXkyKAcGUqAzcnzgKw0OTZTNsnAMHGwuwseY4rnffXr2WgGB0aAYWx43HsJA02XklDt6J06352FV/CNe6765emxM0GHOiJiM3OFP27icX70JRxzkcbd6LCvOVu46nGQdjUsRMZAUOlZ1Iy1MOV0xncL51G6osd1eWjdNnYETofAz0HwOWkZffxVMONebTKO/YgFrL2buOh2kzMDh4GZKMk6GSmaxLKY9W60nc7PoULdYjuNPNNWoGISHgIUT5zQHLyM8h+7p0zxHpR3Ecj7KyWrQ0m+BwuOBn1CEtNRKRUcJPIKZuG65UNMFktkGtYhEUaMCglEiwArkaPE9RdqUOTc0m2O0u+PlpkJIUjtho4f3h3VY7Lt1ohMlsB8syCPbXI2NAlGA+CKUUZdcb0NDSBZvdCT+9FgPiQ5EQLTxBWmwOlFU1otNsA8swCDLqkJkcJZgQSinF5eom1DR3wmpzwk+vQVJ0CAbECD9J2RwulFQ3oNNiAyFAoEGHIYlRortKrtQ2o7q5A2a7A35aDRLCg5AeKxxqdbhcKK5pQIfFBlAgyKBDVlwUdCJ5LdcaWlDR3A6z3QGDRo2E0CAMigkXnFicHIfiuga0W9zVV4P0OmRFR8Igktha0dKGa82t6LY7YFCrERsUgKyYSEGGi+dR3NCANqsVLp5HoE6LrIhI+IvU8qhsb8eV5haYHA7oVCrEBPgjN1p4oud4HsXNjWi1WuDkOQRqdcgMi0CgVjhyUNPVibKWJpjsDmhVLCL9jBgWLbwFmacUJa0NaLaZ4eBcCNDoMDg4UnQHTr3ZhIstDehy2KFhWUQajBgeESu4w4lSirKOBjTZumHnnPBX6zAoMBKhOmEHvcnajeK2OnQ6bFAzLMJ1fhgeHg81I/x9v2JqQJPNBBvngL9ajxRjBMJ1wknArfZuXGyvRZfTChVhEaL1w7DQBKhFkjNvdNej0dYBG++AUaVDoiES4bogQftOhxklndXoclrcSzcaP+QEJUPLCn8Xayz1aLa3wsbZYVDpEaOLRLhIBKTbZcGlrhvocprBEIIAtRGDA1KgY4WjWE22erTYm2DnbdCxeoRroxCmFU60tHFWXO2+im6X2zE2qoxIM6ZBxwp/T9odDWhz1MLBWaFh9AjWRCFEGyto7+DtqDKXwexyV7Y1sP5I8BsEvUgdk25nAzod1XDw3V
AxevirohGkTRK0d/EO1FuLYeU6wFMOOjYAUfpM6EQSWW2uBpid1+HiTWCIHnpVNIwiyzA8daLVlg871waeuqBhAhCsGwItK/8BSKnuOSL9oM5OC3btLMbmTefR3Hx3ueGRIwdg0eJhGD5igKCD4U2mbht27S/Bxu35qG/svOv4sJwELJk3FKNHpMiuWWK22LHreBm+3F2Am/Xtdx3PHRSHZbNyMWl4KlQqeU8kFpsDu89cwuf7C1BRd3fTrawB0bh/Wi6mDkuDxktSq5BsDhd251/GZ0cLcanm7h4qGfEReGBiLmbmDYROZu0Vu9OFvRev4tOThbh4s+Gu42mRoXhwXB7m5g0SdTDE5OA4HLh0DZ+cKUT+zbv7tCSHBuOhUXlYkD0IRhEHQ0wunsfBa9fxcUEhTlffvOt4QlAgHh6ahyWZg2UXaeN4HkeqK/FxcQGOVlfeFXSPMfrj4ew8LM/IRIheXgSQpxTH6yrxUXk+Dt68fhcjUm/Ewxl5WJGejXC9vAggpRSnm6rwydUL2Fd75a7lr1CtAavThmFlSi4i9fJ2GVFKUdh2E59VnsXeutK7tjQHqvVYkTQCyxOHIdoQJJtR3lWDjTdPYl9DEVx3VO00qnRYEDsKi+JGIdbg2xJLb13vvomddcdxqOksnHdU9NSzWsyIHIM50eMRZ5AfOau11uBw00EcbzkOJ709WqphNBgXOh6TI6YiVu89B0NIzbZanG3bjfNtB+C4o3utiqiRGzwZo0JnI0afLJvR6ahFacc2lHVuh4O/vbw/AxXSAqYhK2gRIvW+LeP0lsVVj8qu9ajsWg8nf/scQsAixm8mkgNXIESbKzs6J1f3HBGFOnK4HL/9zTa4XLxgHxiGIeB5irT0SPzmtysREuLbQHjq3HW8/PutcDhcAO17rdnDSEoIxRu/XIaIcN/uwfmSavzkrS2w2hyC9fs8jNjIIPzhp0sRFxnkE6PoWh1+9KfN6DK7Ixp93S6mJ08jMsQf7/5oCZJFIiR9qay6EU+/vxltJgsYgj7reHgYof4G/PXJxRgU59uWtmsNLfjOPzehsav71rnulOf6Ag06/O3RRchJ9G09u6qtA4+v2Yib7Z3CjJ7/9tNq8O7KBRidLFzwri/VdXXhsXUbcb2t7VZehxBDp1bh3QXzMWmAb4Ntk7kbj23biLKWZkGGh6NmWfxhxn2Yk+pbUl2bzYLH929EfnOdKIMBAcsQ/H7cHCxJ9b4zpbdMDhu+d3wDTjdViTN6dsv8ctgsPJDqW5KyxeXA8xfW4UjjlVu5I0IMSoH/zZyJhweM8WnSsHNO/KrkCxxsuijO6Mlj+HbKDDyaPM0nhpN34S9XP8eBpjNeGTx4LI2bjoeT5vu0bMRTHp9Vf4pDzQdunUeMMSV8GlYlPOgzY1/DpzjavMkLgwUPDkODp2JR3PfAEukPN5RSXGj9BGdb/yWYUwLgVi5Iqv8UTI16ASpGek4UpRTXO9egtO3tnvwbcUakYSKGR/weqq9xSeeeI6JAu3YW4c03dkq2Z1mC0FB/vPvXRxAaKi1n4uCxS3j1jW0A+p6472IwBAEBerz31mpERUhLSjuefx0vvLUFPIWkNuksQ+Cn1+Ifr60SXa7prXPl1XjmnY3geSqp4BPLEGg1avzzhZVIi/eetQ4ARTfq8MS76+F08ZIZapbFP55ZhuwkaY5CeV0THv7bl7C7XJJqtjDEPfn9/dtLMDJFmqNQ0dKG+//5OcwOh2QGIcBf71+ISWnSHIWbHZ1Y9uln6LBaJRX5InAnUP9h/lzcN1Cao9DY3Y3F6z9Fk9ksmQEAr0+bhWUZ0hK0W20WLNm+BjXdnT4VK3t19HQ8nCHNUehy2LBi/8eoMLX6xHg+Zwq+myFeE8Qji8uBb538F8o76n1qNvdE2kT8IGOaJFsH78KPLnyAoo5Kn3qjLIsfix8OXCDJGXHxHH5V9nfkt5f7xJgeOQo/SHtQEoOnPN6//ldc6Dgv+fwAMCxoOL
6b8pQkZ4RSio017yK/Xbgj7t0iSPfPw+qkn0rKYaGU4njTu7jYsdEHAkGUfggWxL0BVqIzUt72Lq50fCCZATAI0mRgXMwHX5sz4sv8fW/7bi8V5FfirTd9a6nMcRQtrSa88PwXcDq9l+stvVSHX721wx0Fkfib5niKri4rnntpHaw2h1f7a1XN+PkftvU01JMG4XgKs9WOH/xmPUxm751Zqxra8eyft4CT6IR4GDaHE0+/vUFS19+6ti48/d5myU6Ih+FwcXj6vU1oaBfu4OlRi8mM73ywETanNCcEcC8ZcDzF0//egqqWDq/2nVYbvvXJBpjt0pwQD4PnKX7w5TZcbmzxam92OPDIuvWSnRDAHYWjlOLZ7TtRVF/v1d7OufDI1g2SnZBbDAA/ObgXp2vuXia6Uy6ex7f2bfDZCQGAl0/vx6Gb172/J0rx5PENPjshAPB60SHsqC6TxPjJhfU+OyEA8I+rR7GpukCS7W9L1/vshADA+psnsf7mSUm2f69Yj/z2Mp8Z+xvP4MubeyXZbqxd77MTAgAXOs5jY+16SbaHmzb46IQAAMUVUwG210qb9C92bPTJCXETKOqtF3Go4Q1J9lWmzT46IQDAo8NRjgtNP/XxdV+P7jkivfTPfx6RVf2T5ygqKppw7Oglr7YffnpcVvdTjqe4WduOfYfLvTM2noKL873hN8dTNLWasO1QiVfbj3adhcPpkuzoeMTzFB0mK9YfKvJqu+ZQPix2h8/ltXlKYbY58OkR74P52pNF6OhJMPWVYXe58K8j3gfPdfkX0WiSPnl7ROHeUv3+sTNebTeXlqO6w/fJ27Mt+E8nTnm13XntCi63tcguqf7WmRNebQ7evI6ilnrZjN9fOOr1O3mqqQqnm6pkM14vOuT1+1LSUYvDjZd9dkI8+mP5Prh48QebG92N2NtQILtL7D+u7YWdE+4OCwCNtlbsqj8uuzXblzf3wOKyitp0Obuwt2G3TAKwt2EPupxdojY2zozDTdIclrtFca5tL9odd+em9ZaLt+Nsy79kM66Y9qPNfvfupd7iqRNlrX+SyeDRYDmMdpv38f3r1j1HpEfXrzWivKwOvMxS5wxDsGnjBVGb2vp2nC+sks0gBNiw7YLoQNvc1o0j56/JZlAKrNtTIPr6LrMNu06Vyy49z1OKdYcK4XIJD7QWuxObTpXIZnA8xYaTF+/aftxbDheHL04Xye4jwvEUW/LL0GUVjiBxPI81Zwt9dthuvZ5S7Cm/ipZus6ANpRQf5RfIrozLU4qjNypR03l3wnRvfVRUILtfEE8pztfX4kqreHTn3+UXZDexowAutTejsEU8uvPxlfOKmvHVmDtxqrFS1ObzG2cVMVrtZhxpvHu7a29tqjmtiGHmbDjYWCxqs6fhhKJmfA7eiUNN50Rtjrccle1MAe4GdsdbjoraFLQfgYt6jyYLi+Bc2z5Ri2umQ3clpfpGYFDasUXUpsFyFA6+TdRGnMHiRteXsl//VemeI9KjrVsLwLLKmsuVldXi+nVhr3nrriJFzeUoBSqrW1F66e7dFrcYhy4q7izd0NKFsxeFPfNtJ0rh4pV12Gw3WXG4UDiMvvvCJVhEnAgpMtsc2JMvPJgfKL3m3tKrQE4Xh635wlGq49er0NDVLXhciigF1heUCh4/X1uLirY2RR87Qwg+KxKelMqam1DU1KC4Gd+aEuFI2I3ONpysr1bcxO6TcuFIWIPFhAO1VxU24yP45KrwQ0eHw4KdtRcVMRgQrK0QjoRZXHbsqD2niEFAsK5aOErl5F3YVX9CdlTHo611RwQdcZ7yONi0X6EjQnGw6QB4gXtBKcWplh2yz+9m8DjbugcuXnhMKm7fpKi7MQWP8s5dcPLCEaSKzs+gZNqm4FDTvQsOTvyh4+vWPUekR0WFVeA45Xm7ZaU1gscKS27KjlR4xDAEF8trBY8XXa5RNFkAAMsyKL4iwrgm7AhJlYplRM9TcKNOUddhwJ24Wlghwqis87
nS6Z0ixH0eIeVXK2fwlOJCtfD36nxNnewogkccpTh7U5hxrr5WYY9QN+N0rXCeyIUm5d8rjlKcbqgWPF7UWqt4YuUoxdlmYUZZRz1cChwEwN0or7BdmFHR3QCbyKQoRRQUl021gktAddZmdLu853J5U521CWau78m13dGODmeHYkaHsx0dzrvLEwDuZZlWh/ccKG+yct1oc9y9rR8AOOpCi/2KIocKAFzUhjb7DcHj7bYiQGCHjFRRONFh977E/3XqniPSo+5uu3cjL2JZApNJ+Anb1K3s6RtwP7mKnadThC9VBOLvtbPbKjnRVkiUUnRbhO+5yWpX1HUYcEepTCLLJiabXfaSyS0GBTpFoiomm/LvFQC0izHstn6pEdAhcq+67HbB4mG+qNMmwnDYFC0D/Oc8wve8y9k/n0e3UzjMb3Ip/w0CgIPn4OBcfR7r7ieG2LnMXnI7fGP07dBYOOWOjkdmAYZVwXLJXefi+o5uOrj+Y9j5vhkcdYCHMufTIxfvPZH/69Q9R6RHcouS9RalgEotvMVLblGy2xgA1CKFx8SO+aKvmkEIgUolfD/UDCMrcfhOhth7VTGM3C7ft0kjxuiHzxyAaP8YoUq3/ctgFDtt7vOIX4fSaAUAwUqo3o75IrEol1pmqfK+JOT8qfqRIXRPlEbybmMI1OGQW9a9LwndE9bnzrbCEqonwvTjdTAQYPTjdRAiryDjV6V7jkiPQkLl92bxiOcpgoOFzxMaYlQ8ufI8j+Ag4UqVYcF+shMKbzEoRUigCCPQT/GyCaUUIf7CjJAAP9k9NDwihCDEKMwIFeFLFcsQhIox/AyKl8oYQhDhL/K9MhjAKczZYQhBuFGYEa73U5S7Abh9vgiDCEOk/LovChOp5BrWT4wQrchn7mOTOyEFqHWCyaghWvl9nnpLTVQwCHTtDVL3T+0mAgJ/dd/3JKCfGADgL3Aug8pfUe5GbxlVQX3+XcP4gemnyd2g6ru1ByEs1Iy86r536r9Z+r0v3XNEejR9eqZiJ0GtZjFmTKowY1KG4iUNQggmjEkTZowZpHji43mKKaOEC1zNGDlQ8bIJx1PMGDFQ8PisvHTFkyvH85g1VPg6ZmWn98t1zM4Rvo7ZmenKPw9KcV+WyL1KT1O8NMNTigUZwqWmpyUPgLofnpAXDswQPDYhNgkGH7v63ikGBItShCusjgiPR5CPHXfvYhCCRYnCxdmygmMRIdJvRopYwmBuXLbg8WS/SMQbwhRNryxhMC0qW/C7E6ELQaoxXtEkzoDBqNAh0Ag0mzOqjBjknwFGwVTEgMEg/wwYVX07Z2pGi0EBIxQxCAhi9akI0vRdiJEQglT/ySCKohYEQep4BGuSBC3ijPcpZAA6NhLBWt+qEH/VuueI9GjmrCGye60A7qWd6TOyYDQKD3LTJmZAr5M/0LIswaSx6QgV6Xo7aUQqAv3lV85jGILROUmIFSn1PiYzCZEh8gdahhDkpMYgNU64W2zegBgMiAyRPQQSAqRFh4lWV82IicCQ+CjZESQCIDY4AGNSEwRtEkOCMHZAgqJk0lA/PaYOTBE8Hmk0YkZaqiKGUaMRra4apNNjYXqGIoaGZbF44GDB435qDVakZStiEAKsTBsieFzLqvBA6lBFUUNKKVal5gkeZwmDVcmjFOW7cJTHyqQRgscJIVieME72+T2MpfFjRW3mx0xSlIDJg8e86ImiNlMjpguWWpfKmBYxXdRmdOgcRQwKijFh94naDAlaBArvRS3FKNnBS0QfKpIClitkEAwIvB+kH5eS+kP3HJEe+fvrMX1GluzttRzHY+Ei8fLSOp0a82ZlK2BQLJ4rPAAC7vyNpTNy5Nd74CmWzRRnMAzB/dPyZEeQeEqxcpo4gxCCBybnyR4CKQUemOS90dPqcXmKIhYPjsvz+nmuHpkre1mDIQQPjMj1ul7/UJ4yxv05Q6BTizvJDw2Rz2AJwdJBmQjw0shv9SBljLlJgxDmpQHeqpQ82VvcWU
IwOToFsX7irRaWJMh3dljCYGhIAlL8xfslzY4eCg2jluXuMCBINUYjI0C8cdz48KEwqvSyoiIMCKJ0YcgOEm8fkBOUi0BVoCwGAUGgOgjZQbmidgOMQxCiiQSRNeUR6Fg/ZAWKO20RugyEalNkM1REh/SAGaJWAZpUhOjyZEdFCFgk+C+U9dqvUvcckV564juTERERIKueyIOrxyItLcqr3aOrxiEuJlhWjsXS+UORk+W9t8nq+SORmhjuM4MQYN6kTIzN897bZOW0XOSkxvjsVDGEYNrwNEwf7r23yeLRWRgzKMHnAZ0hBOMzkrBglPfw4305AzE9M8VnBksIhiXHYtUY4fC5R1PSB2BRTobPwyxLCAZHReBbY4d5tR0VH4fVuTmyGANCQvD0mNFebbMjo/Dk0JE+EtyMWP8APDd6vFfb1KBQ/HjoBFmMcL0ffjFyilfbGL9A/GKo+BO0ECNQo8Mrw2d7tQ3R+uGlnAU+MxgQGFgNXsn1Pln4qXT4RdYKn30qAgINq8aLWSu9OuoaRo3/HfiIjwQ3gyUsfjzoEa8MlrB4YsD3ZDsiTyR/12vSK0MYrEj4UU/emY/jIoAV8T+E2ksfGEIIpkX9FCzRyLgWimnRL0DDes8vygv7JViih5zpOzf8xW9cfghwzxG5TYGBBrzx1iqEhvr75IwsXDQUj31LPPzokdFPi7deW4HoqCCfJvHZ07Lw9Le9D7IAoNep8c4LS5EUG+oTY8rIdPzk8RmS8g00ahXefmYRBiVESJ7ECYAxWYl49fE5kt6XimXw1rfmI3dAjOToCyHA0NRYvPGtuZJ2rDAMwe9X3YexaQmShw6GEGTGReLPjyyARuW9KychBK/On4Hpg4SXV/pipEaE4u8PLoLeS6TCw3hx2hQsHCycg3GnWEKQGByEfy9fAn8vkQqPnhszHg9m5fjEiDb6Y82i5QjWS1syfCp7NL6bJd3hYQlBmN4Pn85eiQiDtCTOR9JH4IdZ0n6zHkagRoePJz/gNRri0eKEPPw4cxYAaVMfSwj8VFq8P+YhJBmFly17a2pkNp7PWAIicepjQKBj1Xgz9zGk+ktrCjk8JBM/GvgQGIkUBgRqRoWfD34C6f5JkhiDAjLwvZSnwRJWEsPj6Hwv5WkMCpD2nY83pGN10gtQEZWkqIX7ahksi/8BBgZ4fxgAgDBdCubG/RYs0UqMjBAABJMin0WK/yRJDKMmEWOi/wYV4ycxMuK+n1kh//uNjIYA97rv9qmODgv+9Mc9OHb0MgDcVYSMYQh4niIoyICHHxmPBQuH+pwsaOq24Y/vH8DBo+V9dsj1MAL8dVi9YjRWLBzuM8NsdeCPnxzCrqNl4Hj+rkRZTyt6o0GLB+cNx8MLR/kc4bA5nPjjuqPYcrQEzp6S7b0xDHHX2jDo1Fg1fSieWDDG5y2tDqcLf9p+Al8eL4bD6bqLQXr+t06jwsrxOXhm3jiftxi7OB5/2XcKa04UwOJwgpDbmxJ67opaxWL5yCF49r4J0KmltwYH3EtS7x09gw9PXUC33XHr/vcWIe5tk4tyBuOFWZPgp5HeGhxwf48+OHce7505i06bvU8GQwgYQjBv0EC8NG0KAnS+JW9SSvHxxUL86dwptFmtYAm5aznF45zOSUnHK5OmIlRkJ4uQvrhSjLfyj6HJahZkUEoxIyENr42ZjkiD73lLWytL8UbxIdRZuvpksD33b1J0Cl4dPluyE9Jb++vK8FbZXtRY2sES5q5qqJ6/jQ4bgF9kz0OiMdRnxsnmS/jzle2otjSLMnKCkvBcxmIMMHqP3t6poo7L+Mf1Daiy1IMBc1fOBQsGHHgM9E/C91JXINUorTN1b13rvoq11WtQbanqk+H5W6IhEasSViPVKJy4L6Ray3Vsq/sHblquiDIidQmYG/NtpBiFc46E1Gq/gaONf0C9tRgE7F15HZ6/BanjMS7iaSQaR/nM6HZWobj5N2i2nRFlGFRxyAz9EWL8pHV07i/5Mn
/fc0RE1NJiws4dhdi1qxjt7Wa4nBz0eg3S0qOwePFwjBmbqijBFQDaO8zYse8iduwtRktrN5xODjqdGinJEVgyLw8Tx6RDLVKbRIo6TVZsP1KCLQcuorG1y83QqpEcF4pls/IwdVQ6tBrfJtU7ZbLYsONkGdYfLkZ9SxccThe0GjUSo4KxfGoOZo8cBJ1W2Y4Is82BHefK8cXxYtS0dMDudEGrViE+PAgrxmdj7vAM+Ol8m7jvlMXhxM7CS/j8VBEqW9phc7gZscEBWDk6GwuGDoa/Xlr0QEg2pwu7Si9j7bkiXG9ug9XphFalQlSAESuGZWNJXiaC9Mp2dthdLuy9eg0f5xfgSnMLLE4nNCoWkUYjVmQPwfKsLIT6Kdu+7OQ47LtxHR8XF6CkudHNYFmEGfywIiML92cOQYSfsm2mLp7HoZrr+Kg8H4XN9TA7HVAzLEJ1BixLy8Kq9BzEGJWNDTylOFpfgU+unsf5lpswO51QMQxCtHosShqCVSl5iDcGKWJQSnG6pQKf3TiLcy03YHG5HdFAjQFzY4dgRdIIWQ7InYyijhtYf/MkzrVehcVlByEEASo9pkblYHHcaCQbIxUzLpsqsaPuKM63l8HisoEQwI/VY2xYLu6LHo9ko3jeiRRVmm/gUNMBFHYUwNpTlVXP6pEblIcpEdOQ5Od9+dib6q03cKZ1D0o7T8HGmUEBaFk9BvoPw+jQOYg3pCvejdZmr0Rpx1ZcMx2CnTOBgkLNGBBvGIYhwYsRrRfetSRV3c4qVHatu1W6nYKHijEgTDccAwJXIUw3sl8KHvqqe46IgMwmG/ZvL8SlizXo7rJCo1UjONSIyXOGIDM3weuHRSn1amOx2HFgbwlKi2tgMtmgVrMICjZg0tTByB2a2C8Mm82JA0fKUXTxJrpMNqhYBkGBekwcPxDD85K8RjWkMOwOFw6duoxzxVUwddvAMAwCA/SYOCIVo/OSvRaAk8JwujgcOn8Vpy5WoqvbBhAgyKjH+NwBmJCX4jVyIonBcThSXIHjJTfQ0e0e0AL9dBg7OAlTc1O9Rk6kMDiex7HyGzhUUoF2swU8BQINWoxOT8TM7DRovUROpDB4nuLE9SrsLbuKdosVLo5HoF6HUcnxmJOVDr1G3MmTwqCU4nTVTey8dAWtZitcHIcAnRbD4mOxIHOQ1+iMVMa5ulpsu3IJzRYznByHAK0OeVHRWDRosNdkVqmMgqZ6bLlWhkZLN+wchwCNFtnhUViWnoVArbiTJ4UBACWtDdh44yLqLSbYXC4EaLQYHByJZSnZCNWJO3lSGZc7G7Gpqgj11k5YXU4Y1VqkB0RgaVIewnXiTp5naPfGudHdhG01F1BvbYeFc8BPpUWyXwQWxA1HpF48AiSVUWtpxp6GM6i3tcDK2WFgdYjVh2NW9ChE6cQdMKmMZnsLjjSfQL21EVbOCh2rQ6QuHJPCxyNKJ574K5XR4WjFmbbDaLTVwMZZoWF1CNGEY1TIZETqYvuFYXa14WLHHrTYK2HnzFAzOvirw5EVOBPhOu8OmKQ5hGtHRdd2tNuvwMl3gyVaGFQRSPa/DyE64W38vuieI3KHaqtasf7jE9i/rRAupwuEuJc9CAEYhgHH8UgYEI5FD4zBrEV5YGVEORrqO7Dus9PYs7MYdpvz1tJKb0ZMXDAWLx+BuQuGyopyNDWbsG7TOWzfXQyr1XGLAbi3D3Mcj6jIACxZMAwL5+XJinK0dZjx+bbz2LKvGN0WO1iG3Kq14WGEhxqxbHYels7Jg15GBKKj24rPdudjw8EidJlttzMYBhzPIyTAgOXTc7FyZh6MMiIQJqsdaw8V4MsjhWgzWe9guP//IKMeyydk48GpQxHo53sEwmJ3YO3xQqw9XojmLnOfDH+9FivGZOOhiUNlFVCzOV347GwR1pwuQF2nCSzDgOfd9Uc9Swl+Wg2WD8vCY2OHISLA9wiEg+PweUExPjpXgOqOTqh6PoPeDL1ajWXZmfj2qGGIDf
T9t+jieXxRehH/LszH9fY2qHqWCjwMnlJoWBZLMjLxxNDhSArqu6iTmHhKsf5KCT4sOY9LbS23MTzLOGqGxaLUDHwneyRSg32PQFBKsaWyFB+Wn8PFtobbGT0LeAwhmJeUge8MHo2MYPEJUIixp7Yc/752GoVtNWAJA/4OBggwKyYD30ofiyHBMT4zAOBoYznW3DiGgvbKHgYFBb1t2/HEiAysHjABOcGJshjn28qx/uYhFHS4l0Foz39Iz3948BgRkoHl8VORE+T7MgsAlHVdxva63SjqLBFkZAVkYG70TGQHCdd/EdMN82UcbNqG0s4L8CwG38lI8cvA5Ih5yAqUlk9ypxqsV3CudR2umI7DvdhMQMHD3ZeXAQ8OMfrBGBayBOn+42VFOdrtV1HevgZV3ftunRs9/03AgIJDiHYQBgatQpJxlqJIyj1HpJcKzlzHKz9cC6eDA8cJ7yP35ASMnJCOn72+Ajq99Am2pPgmfvHjL2C1OcCLNM7zfKY5eYn45W+WwU+k5siduny1AT/+xTp0d9u8Ns4jBMgYGI3fvrIMgQHSa4pUVDfjh6+tR3unRQKDIDUxDG/9fBlCRarJ3qnqhnY88/oGNLabvDIYQpAQFYw//3gpIkOlr//XtXbhqT9vxM3mDq9bcxlCEB3ij78+sxQJEUGSGc1d3fju3zfhekOrVwZLCEL8DfjHd5ciJUr65NdutuK7azahpK7RayE8lhAEGnT4x8NLMDha+uTXZbPhe+u34vxNd5NDMQxLCAwaDf65YhHy4qRPfmaHA0/v3IZj1ZWSGFqVCn+ftwhj44Xrs9wpm8uFHx7ajt2VV2/lDIkxVAyL96YvxJSEAZIZDo7DC6d3YtONEjAgouXoWUJACMGfxi/EnATpT5gc5fFa4S58fuNCn/k9tzPck+5vhi7AokTpCcQ85fHny3uw5sYxCdfhdlB+krkASxOk5zFQSrGmajfWVO3pMw+jtzzHHx+wAMvipkie/Cil2NmwF2ur10tmLImdjyWx832aYE+07MWGmn/dcjiE5J7IeUyJmI950at8qgxd2rEPu+vfhtv5EK4T4mHkBM3DtKinfCotX919ACcaXoLbiRKrRcIA4JHsfx9GRvwMrMyKsfcckR6VFFThhSf+BY6jkvtkMAxB9ohk/Ordh0T7xnh05VI9fvTUx3C5OMmddRmGYNDgGLzxx9XQaL1HLW5UteCpH34Cu8PlE2NAUjj+9OYDMEhwqmoaOvD4T9bAbLVLZrAMQWxUEP7+2wfhLyGi0NhmwiMvf4rObqvkiqYsQxAe7I+PfvkAggO8RxTaTBY8+Lu1aO7s9okRZNRj7QsPIiLIe0Sh02LDg3/8DDVtnT4xjDotPv/hA4gL9Z7waLY78MAHX+B6U6vkuhosIdCpVfjiu6uQEu7d4bE5XXho7TpcrG+UzGAIgYZl8flDK5EZ5d3hcXIcHt2yAWdqpXeFZggBSwjWLl2BYdHi4W7AvTT2nX2bcai6QnKvGgK3M/3JnOUYF+v9aZ+nFD86vhXbqsokb5f1THXvT1qGGfHen/YppXi5YAfWVeb7vCX3zRFLMC9e2tP+Hy7txKc3jvtIAH6WtRiL44WLrPXWmsrd+KRqt8+M7wxYiKXx0nYH7qzfi0+r1/nMWBI7H0vjpG2tPtmyH+tqPvCZMTl8LhbGPiTJtrzzIHbU/d5HAkF20GzMiPofSU5VjfkojtY/3/O/pH+DE40zMDbyFRAZ7TZ8mb//n92+azbZ8PIza8Dz0p0QwL0WX3T2Bta8f8irrd3uxM+f+9wnJ8TDKC+twz/+dtCrrcvF4YUX1/vkhHgYFZXN+NNf90uyff63G2HxwQkB3OXNaxo68Nu/7vFqSynF83/a6pMT4mE0t5vw4ns7Jdn/5J87fHJCPIyObiv+9+/bJH1XXvx8j09OiIfRbbPj+//cLInx6vaDuOaDEwK429PbXC5895PNcIlE/zz63cGjKPbBCQHcE7KT4/D4l5
tgd/XdGba33jl9EqdrbvpUNI6nFByl+PbWTTDZvXfLfa/oLA5WX/epYR6FOwL6xN5NaLV67wD78eUL2OqDE+JhAMD3j21CnbnLq/2m6iJ8KcMJIQBeOL8ZN0ytXm0PNpTIckIA4Hclm3G5q86r3fm2cllOCAD8vWILSjsrvNpdNl2T5YQAwMbabSjuKPFqV2upxPqaf8piHG7egaKOM17t2uw12FX3pgwCRXHHLpR27vNqaXY24njDz269zhdGVfdeXOlcL+P9+aavxRH5y1/+gqSkJOh0OowaNQpnz579ypn7txfCYvZtYvWIUoqtn5+B3SbecvnwgTJ0dHhfxhBi7NxaAHO3eDvv46euobG5SxaD5yn2HSxFe4d4i+pzxZWoqm2T1XeF5ymOnLmK+qZOUbuL1+pRfqNRFoPjKc6WVqOiVnygvVLTjPNXamQzSiobUFrVKGp3s6UDh0srZDOuN7bhzNWbonZNpm5sL74kq+Irx1PUdnTh8BXxwbzTasO6ohJ5DErRYrZgV/kVUTuL04mPiwtkFTHlKYXJbsfmy+Widg6Owz9LzstiUFBYXS58efmi1/fy97LTMgjuYd9Feay9WiBuRyn+eeWkrCqpFO5rWVtxzqvtxxVHZZeeJ4Tgi8pTXu3W3zwku68LCwYba454tdtVv082gwGDnQ3eJ/Cjzbtk99khIDjUtM2rXUH7VgUl9AnOta7z+mBzrWsjKOUgt5xweccaUOr9wUaJvnJH5IsvvsCzzz6Ll19+Gfn5+cjJycGsWbPQ1NT0lTEppdiyVt7A4ZGl246je8W95s3rzylK5nE6Xdi3R3wQ3LT1guyS8ID7qW+nF8aGXQWKuukyDMGWfcWiNuv2FypisAzBhoNF4oxjxYoZXxwpFGecKlb0ebAMwWcnxBnrL3h/WhNlEIJPT4szNl4sg5OT37OCIQQfXxBnbLtyCRanuDPvTR8V5osOtHsrr6LNZpV9fgqKj8ryRRssHq2rQL3FJJvBU4pPr+TDIXK/81tv4rqpRfaUxFGK9ZUFsLgcgjaXu+pQ2lnjU+TodgaP3XWF6HIK3+9aazMKOq7I7uvCgcfJlmK02oUfbNodHTjfXiCbwYPHxc4yNNqE5yCzqxsXOk7IZlBQVFmuodZaJWjj4K0o6djTkzQqj9LqqEadtUzQgqMOXO3cqIABWFyNqLd4j+4o0VfuiLz99tt44okn8Nhjj2Hw4MF47733YDAY8OGHH35lzLLCatTdbFPU6ZYwBDvWCT9hVFxrxLUrjT4t+/SlbZvyBY/V1rWjqKRGVjTEI55SbN4u/DTW2m7GyXx5T/i3GDzF5r1Fgvei22LH/nNXFDE4nmJbr6Jpd8rudGHb6VLFjN3nL8Ns63sw53mK9adLFDMOl1ag1SS8HPDFuWJF/W84SnH6xk3UtgsP5p8ViDuO3sRTiov1jbjS3CJos/ZikaLusBRARUc7ChrqhRmXihQ1sAOABnM3TtQJTxifXStU1IgPADocNhyovSp4fF1lPlgZ6/C9ZeWc2F0rPCltrTmvmOGiHHbXFQoe31N/RlGXW4/2NQpHzY82y4sc9RYDBoebhZeo8tuPg6dKmsu5GadbhZffr3Qdg5OKR8S9iYBFcbvwsnWt+TgcvPdlQW+Ma12bFJ3Dm75SR8ThcODChQuYPv0/fR0YhsH06dNx6tTdIT673Y6urq7b/slR3c022e/ZI8pT0fPU1rYrZ1Cgoa5D8Hh9g/hyh1Q1t5gEdwzVN3cqctg8MpltsFj7nsAb24T5vsjmcKG9q+8JvLXLArtT2cABuCusNnV093nMZLPDZPOes+BNPKWob+/7u+3kODSZxJfSpOqmiCNys6NTQU/V/6hahFHV0dE/jM4OwWMVHW2KnDbAnWNR1SXMuN7pW65OX2IJQbVJmFFhar2rGqqvUhEGNWbhcanarJzBEgY1FuFxsc7WoqhbL+BeAqq3Ci/DNtqbZC+ZeERB0WhrFjzeYm/0addLX+
LBo8XeIHi8w1EPRmbzOo8oOLQ5awWPdztrZTfI683oclQrOoc3faWOSEtLCziOQ2Tk7dX8IiMj0dBw9wf029/+FoGBgbf+xcf7XiIYAKwWh+zOsL1lE5hYAcBmET7mixwiSahWEb6vsgnku1i95MH4IiFHxGrvR4bQddj7715ZBCIiln5kmAXOZXH0370yC9x3F8/DJbIU4RPDIXxPrK7+uZZuEYalHxgMITA7xRjKP3cCArPIecwu5Q6u+zxiDGVP34A7SmURea9Wl02xI8JTCgsn/F5tnF328pJHFBRWEYadt/XLA5qVE458OqkVvjbg60sOTvjBxcVb+oXhov3zcCSkb9SumZ/+9Kfo7Oy89e/mTfGkPiHpDZp++RKJ1RLRGZSVEvdIq1UJ5hzofahlIiYici69TlnZ9d4yGPouPKZXWNq9t/wErsOgsLS7JIa2/xhGgfdr8FIh1SeGwPtVMQzUPvb78ZUBQFKzPkkMjXBBOz+18s+EpxRG9VfLoKDwUwmfx6hWVtLfI1GGSjmDgMCgEr5XBpVOcbSCIQQGVvi96litYgYBgYEVrrGkZXT9MH0DBpFuumqih9wE0t7SssIlB1SMAVCQH+KRmpFeK0qOvlJHJCwsDCzLorHx9p0IjY2NiIq6u+mSVqtFQEDAbf/kKC5JWudKMTEMQbzIeeLjlfWFANyFx2LihFsyx8b6Xl2yL0VGBgo6O7GRgYrX2AEgKEAPg4BTExnqD7VK+VfNoFMj2L/vwSPU3+C1zLkUqVUsIgL7/mH767QINCgfzBlCEBPcdy0RNcsiOtD35m13igBICA0SPJ4UHNwvA21SiPB3NCU4pF++W8nBwoy0oFDFDApgQKAwIz0wXHGOCEcpkgOEf+up/mH9kL/BI0mkX02SMUIxg6McEv2Ex8U4fYRiJ4GnFHEG4Ro10boo5cs/IIjWC/fdidBFg1M4gTNgEK4T7nAcoo0DL1pYzLsIWIRohFcOAtQJihJVPYwAtfLePmL6Sh0RjUaDYcOG4cCBA7f+xvM8Dhw4gDFjxnxl3EFD4hCfHK5oeYbnKeatEG5FnjQgHIMGxyje0bJgsXA54OjIQAzLTVTEIARYNC9P8HhwoB8mjExVvGtm8axcwR1ERr0WM0cPUryjZeGkIYJNBjVqFRaOzVTMmDcyQzC6wjAEK8ZkK5r4WIZgRnYago3CT2OrRihkEIJxqYmiDs0DQ7Nlnx9wO1N5sdFICRWeXB8ckqMof4MASA8NQ3aE8ITxQIZyRpwxAKNjhKu4rkrLVZwjEqozYEpsiuDxFclDFedv+Kk0mBmbIXh8YdxwxQwNo8KsaOEqrrOiR8neaeIRIQQzIoULp00MH6vc2QGPSeHjBY8PDR4PFVHWCJQHjzGhwh1v0/zHQcMoazxJwSEn+D7B4zF+46BllT3QUnBIC1yi6Bze9JUvzTz77LP4xz/+gY8++gjl5eV48sknYTab8dhjj31lTEIIFq4apWh5xhigx7jpg0VtFi4drmhHi1anxrSZ4tUQFy8YqojBsgzmzBRvY710dp6inSCgwILp4hPb8mm5inebLJkqXsZ62YRsxYzlE8WvY9noIYp2SnE8xcpx4texZGiWIieaoxQPjMoVtVmYlQGNgs7RPKV4aJg4Y27aQPh7aZQnJgrgkZw80S3y0xNTEaZXNpg/kjlU1PEbG5WEeL9A2VMfQwgeSh8KNSN8v7ODY5EeECGbwRKCFclDoWOFo4Ip/pHICUqUXUeEJQzui80TXUaK0oVieEiG/DoihMGEsBwEaYSd6EB1AEaGDFNURyQnMAvhWuHokZ41YHjIBNkMAgbJfgMRpRPuRKxmdBgSNBtENoMgXJuMKN1AQRuGqJAWsFQ2AyAwqmIRqR8u8/XS9JU7IitXrsSbb76Jl156Cbm5uSgsLMTu3bvvSmDtb02dm4OAIIPsaMLiB0dD46Vp3MTJGQgNM8piEOKOhui95JqMHpmCmOggWU/6hBDMmTHEa7
+ZoVnxSEkMl8VgGIKpYwciwksvmMEDopCdFiObMTYnGYlR4p59SnQoxg5OlMVgGYKhqbHISBD/XsaEBGBGTpqsiAXLEAyKCcfwAeJly0ONBizKzZTNSAwNwsS0JFE7f60WD+TlyJqSWEIQ5W/EzIGponZalQqP5g6VxWAIQYhejwXp4n1aVAyD72RLKzt+FwMERo0Wy9LFHwYYQvC9zDGyFgMYEGgYFven5oraEULwxMBxshgEAEMYPDDA+314eMBERYmeKxK9R7KXx02RX+ODUiyJm+zV7r7oGbKXZ3jwmBs9y6vdxLA5ss4PABQ8pkZ4LyOfFzy/p3y6778SCoqRoSu81rJKDVwEhqhlMQCKjODViuplSdHXkqz6/e9/H1VVVbDb7Thz5gxGjZLePEmuDH5avPbuQ2BVrE+OAmEIRk0aiPsfn+TVVqNV4bdvrYJGI5xw2pcYhiAnLxGPfWeyV1sVy+D115ZBr9f4zBiUHoXvf3eqV1tCCF5/YTH8jTqfJnGGIUiOC8Xz35spyf5335+P0EA/nxgsQxAbHohXvyttUPjNY/chJjTQZ0ZYoBFvPDFPkv2rK2YiJTLEZ0aQnx5//vZCST/qX8ydgsyYCJ9yE1hCYNRq8PeHFoNlvP+0n5syHiMS4nxyeDxN6T5cuRhalffQ9TMjx2BSYrJPDIYQqBkW/1q4FH4SIiqPDxmBuckDfRpmGRCwDMGHs5YgWOe9MeSqtFysSMn2iUF6/s/7k5Yi0uA972d+/BA8kurb2Oh5P2+PWIp4P+8h+ImRGfh2irReLnfqpSFLkep/d27fncoNTsfjA6T1crlT309bhkEB3nv/pBiT8WjSA7IYq+KXITPQeyPCaH08ViU8KYsxM3KJpC68QZoYzI/5GTzddn3RsJDFyAj0Pr4bVOGYGP16z3KWb9/gAf7zkRqw2Kf3JUffqF0z/a2BWbH43fuPQGfQgPGyS4D0TCoTpg3Gz19fAVbiroLklAi89e5DMPrrvDoKnglo+KgUvPr7FVBLaKoHAHGxIfjTmw8gWEKE51aH3yHxeOPXK6CVuGMlKjwA7/36AYSH+kuaNAiAjNQo/PmVlYK7TO5UWJAf/vGL+xEbEeSV4W5KBgyIC8P7P1uJAAlN9QAg0E+Hfz67HAOiQyUtbxBCkBARjH//70qE+EsL8fvpNPjnk8uRERch6afNEIKoIH98/P2ViAqSloiqU6vwwcNLMDTRHT2Rwggz+mHNt1ciISRIEkPDsvj78oUYn5zYcw5xe5YQBOp1WLt6BdLCpSWEqxgGf507HzMGpN56n94YRo0Gny5ZjiEiuSG9xRCCd6bMxZK0TMkMnUqFj+Ysx4go4dB5bxFC8OtRc/BAWp5EBgMNq8IHk5dhYoz0Dr8/GTIT304bc+t9emOwhME7o5ZhRqz0Dr/fTZuO76ROu3UObwwGBL/MXob7YoVzze7Usrgpt5wRKQwCgmfSlmNezDjJjOmRk/FY0oM9DezFGZ7jDyQsw9xoaQ9OADA8ZAIeSHiqhyCNMTtqOWZHLZfMSAsYh/mxvwAD1mvND88Sy4jQ5Zgc8R3JjGjDaEyMfhMs0UhmpAUswciIF77yaAjw/3j3XY+a6juwee1p7N54ARazHayKAc9TEOJeZ+M4HgOz4rBw1ShMnjMEjISnyTvV0mLClvXnsX1zPrq7bWBZ5lYuASFuRkpaJBYtG4EZs4aAlbGLpL3DjE1b87F5RyG6uqxuBk8B8h9GUkIoli4chtkzhkh2dHqry2TFht2F2Li7AG2dlj6vIz46GMvuy8P8adnQelm+6kvdFjs2HCrCl/sK0dze/R8GdUdZXByPmPAArJiehyVTsqGTsf3Xandi3bFifHG4EHVtXVAxzK2kRkIIOJ5HZLAR90/KxbIJ2TDqhbckCsnudGH96YtYe7wQ1S0dtzEYQuDieYQF+GHVuBysHJsja8eNw8VhU0EpPj5dgIrmNqgY972ivRghBj3uH5mNB0flIs
TP93wJF89j08UyfHy+AJeaWvpkBOq0WJWXjYeG5yLC6L1D8Z3iKcXWy+X4d1EBihsbeian2xlGjQarsrLxSE4eYvx9/71TSrGj4jL+XZqP8421/2HQHgblYVCpsWLgEHwraxgSAoJkMfbVXMW/Lp3D6cbqW86Ch8FRHlpWhWUpQ/DYoBEYECBvd93Rhqv4+NpZnGi63uP0EPCUgu1hqBkWC+Kz8UjaKKQFeO+C3JfOtlzDZ5UncaL58q1uxDylYMh/WDOjs/FA0ngMDIyRxbjYcR2bao7gZOvFPhmEEEwIy8GSuMmSIiF96Vp3BXbX78eZtgugoCAgoKBgQG714RkenIvZUdMxKCBdFqPGcgNHmneioOMkeMqDgAEF38MCAIqMgDxMCp+DdH/xnDwhNdsqcaFtI8q7DoKjLjC3GAwoKCh4JPkNw9CQxRhglLcc2eWowuWOL1Bh2g6OOnpdB9NzFRwi9cMwMHAlYv0mKnJCfJm//3/hiHhkszpwdE8JLpXUwGyyQa1WITjMiMmzhyBlkPA2KwDgOB5OhwtanVr0w3E4XDh2+BJKim+i22SDSsUiKNiAydMGY2CG+I+Z43g4HC7ovDCcTg7HT11FYXE1TD1OT1CgAZPGD0RmRozoa6UyXByPE+ev43xxFbq6bWAYgsAAPSaNTEPu4DjR1/I8hd3hhE4rzuB4HqeKK3Gq+AY6zTYQQhBo1GFiXgqGZySIRn/cDBe0XpbFeJ7izKVqHC2pQKfZBkopAv30GJeZ1JNPIuwQUkphc7igVYszKKU4f70GB0uuo91sBU8pAg06jE5LwKTBA6ASia5RSmF1uKCTwCiorsPesmtos1jA8RQBOi1GJcdjWkYK1Kyw00kphdXpglbFer3e4vpG7Cq/glaLBU6OQ4BOh+HxsZg1MFV0KYb2dP7VsOIMAChtasS2q5fRYjbDwXHw12oxNDoGc9PSoVMJO50ehpplofLCuNzWjM3XytFk6YaNcyFAo0VOeDQWpAyCQaQuiC+M652t2HSjBI0WE6ycCwFqLQaHRGJhUib8RWqfUEph51xgGUY0gRUAqrvbsLm6GPWWTlg5J/zVOqQHRmBB/BAEasSXlOycC4S4c1TEVG9tx47aAtRZ2mHl7PBT6ZBsjMDc2DwEacTrR9g5Z89SmvgDSYu9A/sazqHe1gILZ4cfq0OsPhwzokYiWCQxFQAcvLvLs8YLo9PZhaPNJ9Fga4SVs0LH6hCpDceE8LEI0YgvWzl5FygoNIz4Q0+3qwvn246h0VYLK2+BltEhVBOO4SETEaIJF32ti3eBgoeaEY8g2zgTyjoPoMVeCRvXDTWjQ4A6ApmB0xGk8TKHUBd4ykFFNOJzCN+NStMetNuvwMF3Q0W00KsikOw/BwEaeQ7hnbrniPST6qpbsePLs9i3OR9d7e4KeQxDkJIRgwUPjMbE2UOgVVgQrLG+Azs352P3tgK0t5lvMZJSIrBw2QhMnpmluLBZc4sJO3YWYsfOIrS1dbuf3hiC+LgQLFo0DDOmZQoWI5Oqtg4ztu8rxtY9RWhqMd160o2NDsLiOXmYPSUT/kZlNTg6TFZsO1KCTQeKUd/iLk1PCBAdFoil03Mwb2ImAgXqjEhVl9mG7afK8OXhItQ0d7gZACJD/LF0YjYWjc9CSICyXRrdNju2nyvH58eLUNXU7n4yBBAe6IelY4ZgyZghgrVMpMricGJH0SV8eqoQ1xpbbzFCjQYsHZ6F5SOHICZI2W/K5nRhZ9llrDlbiEuNzbe2uAYb9FiSMxirhmUjPjhIEcPucmH31av4uLAQFxsabjGCdDosHjwYD+bkiNYZkSInx2Fv5TX8+2I+Chrrb1Wd9ddosTg9A6szc5Eeoqw2kYvncaDmGj66lI9zTTfh7GEY1RrMT8rA6oF5yAxRlsDPUR5HG67jk6vncbq5Ek7eXaPCoFJjdmwGHkwdjuwQeZENj3jK40xLBT6vPIOTzdduOQk6Vo2pkRlYmTQKOc
Hxip6kKaUo7LiOTTdP4HRr+W2OyOjQDCyOH4fcoBTFjMumCuxuOIqzbUWw8+6KtGqiQm7QYMyJnoQhgQMVl3mvNF/H0eb9KOg4CzvvrkjLEhUG+mdicvgMDA7IVsxosFbgfNtOlHQeg4N3NyRkoEKC32CMDJ2HdP8RYIiyMu9ydM8RUaiWxk784aVNOH/8KhiWgOduv0WEIaA8hcGoxarvTsGyx8b7/KNobzPjD7/bjtPHLrvDlXdsOyXEHe7V6TVYsXoMHnhsos+7c0wmG975424cOXoZhECQodWqsGTxcHzr0YmSc2M8slgdePv9/dh/tAw8xV1bWz0Je2oVi4Wzc/Hkw5N8XjKy2Z14Z81h7DhaCo7n+9yWTQjAMgzmT8rC/6yeBJ2Pxc0cThf+uOEYNhy9CFdPY707MYQQMASYMyoDP1k1xedqri6Ox593nMBnRwvgEGB4cg9m5KbhF8unIcDH5Ryep/jrwdP41/ELsDqcIAIMCoqpGSl4ZdF0hBh9c6wopfjHyXN4//g5dDsct75HvcX2hN4npCbhV/NmINLfN8eKUop/FxTgz6dOodNuvxXKv5PBUYox8fH4/axZiJUxRnxWVow3zhxDm80qyhgRHYvfT56FAUHCdVOEtKmiFL+5cAjNVvOt8/XFyA2Lxu/HzMHAYPEn6760u6Ycvy7ciwarSYDBgKM8MoIi8Zvh85AVLB4B7ktHGy/jdyU7UGttv3W+vhgpxgi8lL0QuSHCtVmEdKHtKt65tAE11hZRRpwhHM8OXIKhIWk+My51Xcd719fiprUeDJi7dvh4/hahDcXjySswLMT3pZZqyw2sqfoANdYqUUawOgTL4lYjL1i4ZpWQmmxV2Fb7Z9Rar9xaYuktz9+MqmBMj3oU2UHykpTl6p4jokA3K5rx/GMfoLPdAl5io7YZi4biR68tlpxbUl/Xjh8/+RFaWkx3OTlCmjA1Az99dYlgQa871dxiwrPPrUV9fYekOiQEwMiRA/DKy0u8blv2qL3DjB++9CUqa1qlMQiQmxmP3/9iCfQSJ3GT2YYf/H4DLt9oklS4iiEEGQMi8cefLIVRYpTHYnPgf/68GQXXaiXVnmEIQUpsKP72o2WClV7vlN3pwo/+uRUnL1VJ2nTIMgTxYUH4x9PLJEdHnByH5z7fiX2l1yTZswxBVIA//v3EMsQKVHq9UxzP42fb9mJzcbk0BiEI9TPgo4eWYUCYtEmcUoqXDx7Ep0VFkhkBOh3WLFuGQeHSJnFKKX53+ijeLxTusH0nw0+twcfzliE3Uvok/qfiE3i7ULjL650MLavCv6Ytx6hI6X22/nXlDH5TtE+SLQMCFcPgvXErMCFKuMDanVpfdQ6/urgVgPei5AwIGELwxrCVmBolXoupt/Y15OO3pZ/dyusQkztrhuCnmaswI2qoZMaZ1kK8deWf4CkviQEQfDdlFWZEChc/u1NlXcV47/o74KhL8hbjpbEPYlqk9O3CVeYSrK18FS7qkFw5dXLEg5gYsVIyQ6l8mb//n94146vamk144dsforPdLNkJAYB9m/PxwZu7Jdl2dVrwwjNr0NLSLdkJAYDjh8rx59d3SiqmZTbb8fwLX6ChQZoTArgHl7PnbuB3r2+X9Bqb3Ykfv7YBVRKdEMD91FxUVoOX39gmqRuvw+nCj9/egsuV0pwQwJ0UWX6jEc+/swVOl/fyyS6Ox0/e347C63WSC+DxlKKirhX/8+dNsDlc3u15ip+t2Y1Tl6slVz7geIqbLR148r2NMAs04estSile3rQf+yU6IR5GQ5cJ3/5wIzos0hqi/XbfEclOCOAurtZqtuCxTzegpVta46y3T56U7IR4GF02Gx5avx51Ejt2v1d4VrIT4mF0Ox14ePt63OiQ1nn7k0v5kp0QD8PmcuGxA+twuV24M2xvbaosluyEAAAPCifP4XsnvsTFtjpJr9lfX4rXLm7tcRCkMTjK47kLXyC/tVIS40zrJfym9DPwPWmZ3k
R7OL8p/QxnWy9LYpR2XsVbVz4ARznJDAqK966vxenWAkmMSvN1vHf9bZ+cEADYUPspTrcek2TbZKvqcULsPpVvP9z0Kc617pRs/3XqniPSSx++swftrb45CB5t/OgELhd7b9L3yT+OoLG+wydHB3BP4ru2FqDwQqVX288+P42bN1vB+XgdlFIcPnIJp05f9Wq7btsFXKlo8rmSKc9TnLpQgf3HvE9mWw5dRNHlWp8ry/I8RX55DbYeLvFqu+tMOU6WVvnM4HiKsqpGfHHQ+wB18OI17C+66nMpco6nqGhow78Onvdqe+paNTbnl/lc4onjKWrbOvG3g6e92hbW1OOTs4U+EtwTbLPJjLcPnvBqe7mlBX89c0YWo9Nmw6+PHPFqe7OrA6+fljbo9xZPKcxOB146tt+rbZO1G788593uLgbciaw/PeX9wabLYcOLF3yfWCjcfWmeP7fV64ONlXPgpaJNPpfConCPJz8r3ADeS1l5F8/ht6Wf+Uj4j35TuhYuXvyhg6c8/nztI9ntAN699gnsnPgDAaUUn1T9HZyEaEtfWlv9ISwu7876ttp3eyIhvjP21P8d3U5pjvTXqXuOSI+6Oiw4vKPIZwfBI5ZlsP0L8QHUanFg9/ZC2SXbWZbB1vXiT3EOhwvbthfIZjAMwabNF0RtOI7Hxp0FskudM4Rgw458URtKKb7cWyC7gzUB8OVe7+/x84OFspPeKAW+OFQIjhf/znx2rFB20zSeUqw7Uew1uvPp6ULZfXY4SrHhfAksDqc443yRIsa2kkvotIpHXj4tKpJ9rzhKsffaNTR1d4varSktkt3Lh6MUx2uqUNXZIWr3xdVi2S0mOEqR31KH8vYmUbuNVcW3Ejl9FU8prnW1oKC1RtRud+1FmF12WTVMeVDUWztwqvm6qN3x5hJ0OM2yJlYKig6nGcebxR86ijrK0Wxvk12N1crZcKJFfFysMF9Fva1WdpM5F3XhTJt4BK3BegO11suyGRQUBe3SI2hfl+45Ij3au+mCzxGE3uI4Hod2FKOrwyJoc2DPRdht4oO9N8bJo5fR0iQcfj52/DJM3dLC7H2J5ynyC6pQU9MmaHP6QgVa2sQHe1EGpSi/2oArFY2CNvnlNbjZ0CF7MKcAquraUHS5VtCmrLIBl6qbFPWOaWzvxqnSKsHjNxrbcP5ajaKmaR1mKw5eFF5yqe8w4fClCkV9diwOJ3YVC4e42yxW7Cy9rIjh5DhsKi4TPG6y27GhtFRxg7kvSoQnJZvLhc/KihUxGEKwtkx46cjF8/j4cr6iUuosIVhzWTjaRinFx1fPyj7/LcZ14cmVUoq1N04rajDHEgafV4o/oG28eVx2/xvAnZOysUY82rar4YjsvjGAOx9lZ/0hUZsjzfsUMQCKQ017RMejC227FDEoKM617QBPlXX97W/dc0R6dHhnsaIJCQBcTg5njwgP5kf2lSpqZgYAoBQnjwozDh+5pLgSHsMQHD0uzDh08rKijsCAO1Hy8MkrgscPnr3i8w6euxgsgwNnhRn7869KKoUuymAI9l8QYRRdVdQRGHBPfHsLRBhl0vNChEQIsLNI5DO/UnFrW6tcUQA7SoQZx6uqYHPJe8L3iKcU2y5dEjx+pu4muhx2RQyOUmy5Kry0WNRSh2artHwYMcbWG8KMy51NuGnuUODquBm7b5YJLlfUWjtwxdQgO4rgZvA41nQFNq7vB7AOhxnFnTcUOW08KIo7KtDh6Pue2zkH8ttLFXUFpqC4YalBk6217/dAeRR0nFXcebjF0YQ6m3CUqqTzqGJGt6sdtVbh8eS/oXuOSI/aW+Q/4XvEMAQdIpGCtlaToo7AAMCwDDrahQe51tZuxQ4VwxB0iER22jrMijoCAwAIQUenMKO9ywJe6cTHU3R0WUUYVkhLvxMWx1O0m4QZbd0WxY4hTylaTCKfR7fFaxltb6IUosmkrWaLYocKAFrMIgyLRWFz9/+cR/CYVfiYL2q3CX/mLbb+YZ
icdsFlv1a7MkfHIyflYXb17Zi12ZWPiYB7Eu909H1POhymfmEAQKez7/drcnUrcqZuZ/T9fm2cFVw/RRlMzs4+/85TDna+f75bFlffjP+W7jkiPZKyi8ObCCFwuYTP0y8M4Fadi6+KAS8MsWuULOresSIkjuu7XohvCCrOUOjoeCSWv8HxVKmv42ZwYox+ug4v90pJiN4jsc/DxfP90tdCLHLj8pI42R+MO2tfKOIInEvp8lVvOQWupT+vwyl4Hf14rwQSVvuTIeRsKI1SSGJ8Ddfx39I9R6RH/gHKqn4C7slT7Dz+AcqqfgLuHA6jSO2KwEDlDEohWgU1MECvfMIggL9RuM6Hv59vnYD7EkMY+PuJMAxa5ctYhCBQpCFfgIweNn0p2E/4c/XXa2XvBuitIJHiaQE6bb84PIF6MYauX64jQCt8zwM0yn/nAGAUKeEeoO4fhoZhoWX7runTXwyxcwWolY8l3hj+amVViqWcy6jqP4afwLn0bP8xDKq+y+qrGDVYoqySt0c6Vlnl5v7WPUekR3ljUr126JWiIcOThRkjkhXnVvA8RXaecC+A3JxExZMrx/HIyRauipibGa94+YfjeOQMFi7alDcoTlFiJOB+is8bJNxddVh6nOgTuhTxlCIvXYSRGqc4t4IQguGpwoyRyfGKJ3CGEIxOEf7MRybGKQ7ssIRgTLIwY0RsrOKYC0sIxiYIM4ZGxcjeMXMbI1aYMSQsymt/FykMsaJmAwMj4KdS1vqBAUFuSKxgT50Ev1AEe+k1400EwABjOPxVfTsiYdoAROqUlegHgEhdMMK0fRfNMrB6xOmjFEf0AlRGROv6bjDIEhZJhhTFDB2jR6xe+HNPNGTealAnVyxRI0YvvZjd16F7jkiP5q4cJXvrLuDOqxgyPAkJKcKdMOcuGqYot4IwBCnpURiUGStoM2d2NpTkXxICxMQEIU/E2Zk1OVNy9VUhhQb7YewI4R/DtNHp8FPYY8ffoMXUkcLdNifmpCBEYW8anUaFuaMzBI+PSktAbEiAouGJZQgWjcoSPJ4dH4X0yDDFidArRgqXsk6LCMPQOGWTOEcpVg3LFjweHxiICUlJsrfvehgP5eYKHo8w+GHOgHTFjEeG5AkeD9TosHhApqK8HY5SPJoxTPC4XqXGiuQ8RdfBg+LhNOEurmqGxYrEEYp2tFAADyaPEXw4YgiDxXHjFE3gBARL4sYJ9mwhhGBu9BRFeSIMCGZHTYRKxMGcEjFLIYPBuLDJ0DDC0bYRofNkb911M1jkBE25FxH5pioxNQJZwxJlRyx4nmLBA2NEbSKjgzBybJpsBuUpFq0Q70kQHOyHSRMHgWXl/7CXLBouGlUx+mkxZ0qm7KUThhAsuW+oaFdanUaNRVOGyL5XDEOweFo2tCIOk4plsHxKruzJlWUIFozNhJ9IuXqGIVg1MU92PRSWIZidNxDBRmGHiRCC1WNzZefUsAzB5EEDEBUo3gX1oZG5siMvDCEYlRTvtcz7w7m5svMfGABZkZHIihRvHvdIVp5sBgGQHBiMUdHCESoAeGhgnqLchCiDEZNjBojarEoZqihXJFCjx8zYQaI2SxOGyz4/AOhZNe6LFXY+AWBOzAhFThtLGMyOEXaoAGBC+AhovXS9FRMFMN1LmffcoBEwsPIjSDx4TAibJmqT5j8M/irf+x39h8FheMh9sl//VemeI9JLj/1wFuQ8VjIswcAhcRgzVfjJ2KNHvjMZLMv4jGFZBkkpEZg8PdOr7UMPjoNKpfJ5iYZhCGKigzF7lvcmTw8sGQm9TuPzJM4yBKEhRiyanePV9v45wxDgp/PZGWEYgkCjHitmCj+1erR8Ug7CAv18dqoYQmDQafDQTOGnVo8Wj85EbEigLIZGpcITM7w3xJqXm4GUiBCfGQTuZoFPTxvt1XbGoFRkRUf4/BRO4L6WH00Z69V2UnIyRsbFyXvSJwTPT5jg1WxEdCymJCTLdkB/NmaS199WVmgU5idlyI
4m/HTYFK9by5P9Q7EyOU92LOHHQ6YI5qB4FKkPxOoB3j83IT01cBoMKvE8qUC1H1YniU/AYlqdNA2BanEHQM/qcH/CfNmM+TFTEaoNErVRM2osir1fJoFgXOgUROiiRK0YwmJG1GMyCQSZgRMQpRd3cP8buueI9FLm0ET8+LfLQAiR7CgwLEFUbDBe/dvDUEnoKps2KBq/+PUyEIaR7CgwLIOQMCN++4cHodV5T1ZKTAzDr15dCpZlJE/iLEsQGGjA679bCYOEZnExkUH4/YtLoFKz0hkMgZ9Bi3deWY4ACUsi4cFG/OH5JdBqVJInWJYh0GvV+ONPliAs2Hv4Mdhfj7/8cAkMOo1PDLWKxZ+eWYSYMO/N4ow6Ld5/cgkCDdITcBlCwLIM/vj4AiRFen8C0qlV+MdjSxDuL92pYggByzD4wwPzkBEjvKTokZpl8fdVixEbFCDZUSBwR2zeWDQbeXHeW9AzhOD9BQswICTENwaA386YgXEi+SG37AnBuzPnIzMswmdn5OXxUzEjOVWS7Rvj7sOwiFifnZEf503EwmRpzeJeHjob4yNTfHZGvjdoLFYOkNYs7ocZMzE9arDPjFVJo/BQsjQn5pHkGZgV5Xv0ZXb0cDySPEOS7fzoqZgTNclnxuiQPKxOXCzJdnzYFMyK9M3hISAYHDAE9yc8Isk+K2gSpkY+5DMj3jAYC2P/x6fXfV261323D509ehm/efZz2GwOdxv1Pu4QyzLgOB5ZwxLx8p9Wwz/It6zpgvM38OoLX8Lcbe+zjXpvRnpGDF57834Eh/q2rldaVoufv7geXV1WEEL6TDBlWQKOo0hOCsPvfrMC4eG+3efL1xvx/K82oK3dDIYhfebAeP4eFx2Mt15ehpioIJ8YFTUt+NEbm9DYavLKiAoLwDs/Xozk2FCfGDebOvDMnzbhZlOHMKOnTXxYoB/+9INFGBjvffLurfr2Ljz9/mZcb2gFy5A+k3E9jGA/Pf70xEJkJ/nWrr3ZZMaTH21GWV2TIMPzffPXafHu6gUYMUB8meFOtVuseOqLrcivqRNmwB3ONmjU+MPSuZiUKpzE3ZdMdjue3rYNJ6qr+2xr72EAgFalwttz5mBWmm8t4S1OB36wfwf2V14XZHg4KobF61NmYXG69G6yAGDjXHj+xE5srSwXZXg61r46agYeSM/1ieHkObx0YSfWVxZ5ZYAAL2RPx2Ppo3xicJTHG6W78FnlabCEEVx2YkBAATw1cCqeSPUeOeotnvL44PpurK06CAZEsMiZ59gDiVPxeMpswdyQvkQpxYaa3fj85nYQQITBgAeP+6Im49HkZT4tHVFKcbBpNzbVftbTwk+cMTZ0ElYlPAaW+JZ3d6FtN3bVvQ8K4d42HkZm4EQsjP0BVAqWp3yVL/P3PUdEQOZuGw5uK8SWNadQU9ly2zGGIRg/MxPzV41G1rAk2btUrFYHDu0pweYvz6Cy4vZum4QhGDMhHQuXjUDu8GTZDLvdicNHLmHj5vO4evXukuqjRqZg8aJhGD5M/o4eh9OFIyevYP2OfJRdqb/r+PCcRCydOxRjhg2QXS3V5eJw5MJ1rNtbgMI+yrbnDYrD8pm5mDg0BSqVvB0LLo7H8Ys38PnBApy7dHcDwyEDonH/1FxMzUuFRi0vWZfjeZy8VIXPjxXiRHnlXcPH4PgIPDAhDzNy06GTmRDM8xSnr1fj09OFOHyp4i4nNz0yDA+Ny8Oc7IEwaORtB6SU4lx1LT49V4i9l67dlTsyIDQYD43Mw4IhGTBq5Q1+lFLk19djTWEhdl65ctfuo4TAQDySl4clgwcjQCd/O2tRUz0+KSnElqvld9XUiDUG4NEheVg2KAvBOvmJzaVtjVhzuQAbr5fAfke9iyiDEQ8PHIYVqUMQppefY3C1sxlrr1/AhsoiWO+oZBqqNeDB1OFYkZyHSL14LpCYKrtbsK7qHDZWn4fljiZwQWo9lieNxLKEEYjSe48UCqnO2opttaewtf
Y0zK7b21X4qXRYEDsa82PHIEbv28NGbzXb27Cv8Tj2NBxD9x2N5nSMFtMjx2FW1ATE6MXzjcTU4WjHydbDONK8DybX7W05NIwGY0InYULYNMTofXsQ6C2zqwMF7ftwrnUHTK7b23KwRI2coKkYHjLnv7Icc88R6UdRSnH9Uj1am7rgsLngF6BDcloUgsOEoxNtzSZcKb6J7i4rVGoWQaFGZA1PFly6oZTixrUmtDR3wWZzws9Pi6QBEQgNFx4wOtrNuFRSA1OnFayKQVCwH7LyEkV3s1RWtaCxsRM2mxMGgxaJCaGIiBC+ryaTFaUltTCZ3BGVoCADhmTHQ6sVnryqa9tQ39gJi9UBP4MG8TEhiI4UHpS6zXaUlNWg02QDgbtGSXZWHPQiCaA3G9pR29QJs9UBP70GcZFBiIsMErS32BNaLHgAALh5SURBVBwovlSLjp4KqIFGPbIHxsBPZAmqtqUTVY3tboZOg5iwACRFCS+R2OxOFFx1MyilCPDTITs1BgEiNUbq27two7EdZpsdeo0a0SEBSIkSHlztThfyr9eivdsKjuMR6KfDkKRo0UTWxs5uXG9qhclmh06jRnSgP9IiQwUdW6eLQ35lLVpNFjh5HoF6LbLioxDmLzxBNnebcbWpBV02O3RqFSL9jRgUGS7IcHE8LlTXoqXbAgfnQqBOh8yYSEQGCP+mWi0WXGppQZfNBq1KhXA/P2RFRAgyOJ5Hfm0dmrrNsHMu+Gt1yIwMR4zIONJhs6K0pQlddjvULItwgwFDwqMEl294SlHQUIf67m7YXE4EaLTICItAfKDw973LYUdJawM6HTaoGRYhOj1yQqMF80EopShsrkdtdxesnBP+ai0GBochOVD4u9jttKO0vR4dDhtUDINgjR5DQmKgFtjxQSnFxbYG1Jg7YHE5YVRrkBoQhtTAMEGGlXOgtKMWnQ6ru5aOxoAhQbFQM32PP5RSXOpqQLW5DRaXA34qLZKNoUgLEJ7o7ZwTl7qq0eVy/24DVHoMCkiAlhUef66bGlBtaYbFZYee1SLOEIo0/2jh7zvvwrXuSphcZlBKYVQZkGpMgpYVHn+qLQ24aWmExWWDjtUgSheKVGO88HeRulBproDZZQJHefip/JBgSIaOFf7dNtgaUGuthZWzQsNoEKoJxQC/AYIMnnKos16F2dUFnrqgY/0QrU+FTkHyrFLdc0T+C6KU4uK5G9j+6Smc2HPxrtB+QLABcx8YgzkrRyLcx6WJ3ozyizXYtu4sjuwtvauKqjFAh7lLhuO+JcMQFSN/b/7lS/XYsvkCDuwvvavCqsGgwdx5eZi/IA+xcfKzt69VNGHLzgLsPlAKh+P2/iI6nRpzZ2Zj4X25SIyX/9Rzo6YVm/YWYvvhEtjstzO0GhXum5SJJTNzkJIQLptR1dCOjYeLsOVoCcy2258Q1SoWc8ZkYPnUHAxKlP9kVdvSifUnirH+xEWYrLeX41axDGYPHYgVE3IwJClKduSsvsOE9Wcu4otTReiw3P4UyjIEM4akYdXYHAxNipXNaDJ1Y31+CT49W4RW8+2lqhlCMG3gADw4MhejkoUHdW9qNVuw7mIJPskvQuMdXXgJgMkDkrF6aC4mJCfKTlbtsFmxrqwUHxUXoNZ0dwPKCfGJeDgnD1MSk2X3Mupy2LHhWgn+XXoBlaaOu46PjorHo4OHYUZCqmAdEG/qdtqxpbIUH105h2tdd/dQGRoWi0fSh2NW3CBoWHlRRqvLgZ21Jfi04gwud90dkc0KisGDA0ZhVsxgUQdDTHbOiQONxVhffQrlXXf3aUkzRmNZwljMjM6BTsTBEJOTd+FkSxG21R1FedeNu47HGyKxIGYSJkcMh0GgZoo3uXgXCjsKsb9pPy6b7u7JFKmNxPTI6RgbOhaGfizS9lXpniPyNcvcbcNvnlmD/BNXb+V19CWGca+ffu/nC7DgId8y0W02B3738w04deSydwYFHnt6GlY8Ms6nAd3hcOGN3+/AwQ
OlXhk8T7H64XF49LGJPjFcLg7v/HUftu8pvpWf0pc8uQcrF4/A97412adlI47n8e4nR/DFznzBHIbejEXTs/Hst6aJbie+U5RSvLfpJD7cfkYS474xGfjFYzOh9mHZiFKKD/edw7vbToAI5K30ZkzLScWvH57j85LOmuMFeH3bEQAQ3J7rYYxLT8Tbq+eJblnuS+vzS/Dy9v2g1DtjeGIs/nL/AtEqrH1pW9klPL9rDzieCjN6ciiyoyPxj6WLEGrwbUDfW3EN/7N7B+ycS7BihIcxMDQMHy1Yikijb7ldR2tv4HsHNsPici+v9MXxMJIDgvHJrBWI9/dtKeRsUzW+c3Qdupz2W/k8d8qTrxRrCMRHU+7HgADfHgouttfiydOfot1hEWb05HuEa414f8xqDAwU3zVyp66Z6vGj/A/RYjcJ5pUQEFBQBKn98NbQRzE4ULhgWF+qsTTixYt/Q5O9TYThvj4/Vo+Xsr6DrEDfCoY125vx9pW30WBruJXXIXQdWkaL76d+H1mBwrWFvgm654h8jTJ32/DjVe+h6mqDT8XKHv7hTKx6StqWNZvNgZ9872NcKav1ibH84XF4/AfSMsqdTg4//cnnKCyo9qlq6vwFefifH82W5Iy4OB4v/moTTp277lPNixlTBuNnz86V5IzwPMUr7+7EvhPCHVjvFCHAhOGp+PWz8yU9wVJK8ZuP9mPz0Ys+MUZkJOCPP1wsOYfl7U1H8fFB4Tbtd4ohBNnJ0Xjv6aWSnZG/7juFv+477RMjPToMHz+5AgaJuR8fnryA1/celcxgCUFiaDA++/ZKyc7IZ4XFeHHvAcEJry9GdIA/1q++H2F+0sLXmy6V4X/37QJ8YIQZDNi04kFEG6XlZeyuvIInD20BqHAi5Z2MQI0Om+evRmKAtCjosfoKfPvIl+Ah7LDdyTCoNFg342GkB0qLHl5orcLjJz+Bi+ckX4eGUeGj8Y8hM8j77ioAKO+swdPn/w4755TEYECgYlj8adjjyAlOksSoMtfjucJ3YOMckvrJMCAghOCXmd/F0BDv5RwAoNHWiF+V/woWl0USg/T85+nUpzE0WNrOp/+GfJm/723fVSBKKX7zzBqfnRAA+PgPe3FoW4Ek2zde3uSzEwIA6z4+gR0bzkuyfeetXT47IQCwbWsB1n15VpLtXz84hFNnfXNCAGDfoTJ89NkJSbYfrDvpkxMCuHeQHDt3DX9ZI22y/GT3eZ+cEA/jXHk1frfmgCT7L48V+eSEAO5IQ/GNery0Zo8k+60XynxyQjyMK/UteHbNDknflX3l13xyQgB3VdGq1nY8/dlWSd/5Yzeq8NJe932V+tXiKEV9lwmPr98s2lDQo3N1NXhu/25QHxktFgse3rweNpfTq/3FlgZ8//BWUEolTaweRqfDhgd3fwmTo+8uur11tbMZ3z22HhzlJRen4yiFxeXAwwc/Q7vde/fXGnM7njy9VrIT4mHYeRe+c+oTNFnvXu66U822Lvwo/0PJTgjgduxcPIf/zf8X6ixtXu07nd34xcW/SHZCPAyeUrxW9gGqLQ1e7a2cFW9eflOyEwLg1k6cv13/GyrNlZJe803XPUdEgUrPVyL/xFXZZdv/9eYur63ur12qx/ED5bIZ//7bQTidLlGbmpo27NldLLt/zMf/Pgqr1SFq09xiwsZt+bILIH+67gxMJpuoTafJijVbpDlFd4oC+HJXPlraxVufW2wO/GPLKXkMCmw5WoKapg5RO4fThXe3S3O87hRPKfYWXMGV2mZRO47n8c6u47IZxy9XorDq7h1SvUUpxZv7jskquMVRivPVtThRUeXV9s2j8q6DoxQljU3Yf+26V9u3T5+UzbjW3obtV+9e879Tfyw4CZ76XiScoxQ13Z1Yf63Eq+3fyk7ByXOyGC02M9Ze8/7w9K9rJ2HzwUHwiKcUXU4bPr3h/Te8rvoETE6r7wxQ2Hgn1lYd82q7q/4E2h0mnzvr0h6HZ131Pq+2x1uOo8
XRIovBUx5b67b69Lpvqu45Igq07dOTsrejAkBzfScuHLsizlh3VhGjq8OCk4fFIwRbt+QrasZntTpx6GCZqM223UWKeqG4XBx2HxAfaHccLlHWIZYC2w6KRzp2n74Em0PcsRMTwxBsPFwsarO/6Bq6LN6fboXEMgTrjoszjl26geYus6iNN8bnJ4tEbc5W1qCqrUO288kyBJ+eLRS1uVjfgNLGJtkMhhB8fEGccb2tFadrb8oub09A8K/CfFGbuu4u7L95TVHJ9n+XXhB9mGizW7C9qkw2gwfFJ1cuiP7GzE47NlcXyC5vz1OKLyvPw8EJ/8bsnBOba8747IR4xFEeO2rPw+wS/o1xlMP2umOye8fw4HGkOR+dTuEHG0op9jful3V+D6OwoxBtDu/RnW+67jkiMtXeYsLxPSWCCZ1SxLAMtq0RftLqNllxYGexMgZDsOUL4ScMu92JndsLlTXjI8AmkSUgl4vD5h0FihigwIatwgMtz1Os210gu98K4B4EN+wpFO3I+/n+AkUOFc9TbDxSDLtIlOqzIwXKmsvxFFvPlKLbKjzQrj1RKLtXkIexp/gK2rqFQ/VrzxYpZhy5cgN1HcKh+k8Li5U1fqMU52pqca317l0jtxglyhgUFGUtzShqFA7Vr71cpKhrNgVQaerAyfpqQZt114sUOToA0GTrxqG6a4LHt9UUw87Ld9QBoMtpw9464QebQ40lMLnEo6PeZOOd2FMvHN0521qKdof3JSIx8ZTH/oYzgsfLTeVosjcpYgDAkeYjis/x39Y9R0SmLhffVNStFwB4jkfJ+UrB49cvN8Dp9L5+LcrgKcqLbwpO4FWVLV6XVbyJUqCiogkOe98DUH1jJzq7rMoYXs7T1mlGY4tJEcN9HgsaW/oegCw2B27UtSpydgDAbHWgqr69z2M8T1FS1SD76dsju5PD1boWweP5lXWCO32kysXzKK25e0umR+eqahQzKICiGuEloDPVNYonVwDIrxVmnK69qZjBEIIL9XcX4vPobGON4s+cJQzONwkzLrTUKOoOCwAqwuB8891bZD0qaKtW1K3Xw8hvE3aoijoqoVLQJA9wJ5Ve7BBe9ivrqgBL5G1Z9oiCoqyrQvD4VdNVMAqnYAra51bf/2u654jIlLlLmUfukdViF8wTMXf3D4PnKWwCzkZ3PzEAwCRwru5u+csMkhnmfmQInMukYLnk7nP1fR1mu0Oxo+NRlwCD43nYvOQNSWaIRF26HcocXI86bcIMk135Z8IQgi6b8O+g0678N8IQgi6R99puU+aouxlAl8h7lZJoKkVdTmFGl9MG3zNQbpcnV0RI3S6rYqeNB0WXU/h+mF1WSE9LFpbY0oyFsyiKgnnU7RLPa/u/oHuOiExJaXAnRe5OvH1/GeWWKu/zXALvt7+uAwBUqr6/TkJ/l8cQuI5+ZKiFrkNBrs5d5xK6jn5kCNUsYQhRtPRzG0Pk/cottHU3Q/g72h8MSqkoQ6giqW8Q8fOI3UdfJFZ4rF+uw8t51IRVGA9xL/WKMVT9wAAgWAXWw0A/UDSiDHktHL6q8/w3dc8RkSmxEu++KCDYT9AR8bXJnZD0Bg3UAr1RgoP7pwQwyzIwGvuu+RDsY0NAIXlKwPeloABDvwxOABAc0Pf7DfTTKUrq7a1QAYZOrYJOZh+bOxXi3zeDEIJAg/zeLL0VKsAAgBCD/N4svRXmJ8wQOyZVFECYSGGzCD8/xd8tF+URKnI/ogz+ipc0OJ4iVCdyHXp/RbkugHspQIwRqvPzqQld3yII1QqPSyEafyhK1IJ7GStEIzy+Bmn8Ze8i9IgBg2CNcKG5AHUAeJlJvR4REARr5FfR/qboniMiU4OHJiEoVNkkzrAMpi7IEzyeMjAKkdFBihgsy2DK7CGCx+PiQpCUFKbod82yDCZOHiS4uycs1B+Zg2IUTeIsQzBqxAAY9H0X0TIatBiZnagoOZJhCHIGxSIkqO/PVaViMWVoqjIGAdLiwxEb3v
cARQjB7GEDFTEIgLiwQAyMFS4+NS9vkCIGAIT5G5CTIFx8akF2huLIi79Wi9HJwpUwFw7OUOwk6FQqTByQJMxIz1AcpFcxDGYMSBU8Pn/AINm7QDyioJiTNFDw+LyEDMW5LhylmJcg3IF4dkyW7B0z/2HwmBMrXDV0elR2vzCmR2ULHp8QPtTnLbV3igePieHC4/vw4OGKzg+4P/ORISMVn+e/rXuOiEyp1CzmPTBG0eTKczzuWzVa8DjDMFi4cqSidUSO4zFv2QjB44QQLFk2QlFeAsfxWLRomKjN0gXDFO2a4XiKJfPEqwgum52nKDmS5ymWzxYeOABg+bRcZQwK3D89T/QzXTkxR3GS56pJuaKMFaOzFTEYQrBqbK7oUtKKYcIOsBSxhGDl8CHQikSIlg7JlN3PxcNYkjUY/lrhBogLB2ZAr5IfpWIJwby0gQjVC0cS7ksaiACN8HuQwpgen4pYo3AFyykxqYjQy4+yMoRgdESCaKn3kWFJSPALke0cMiAYHBgtWl01IzAO6f4xIApc0HhDKPKChTvSxhsiMSQwVVGUKkQTiOEhmYLHQ7WhyAnMUZSw6sf69YtD89/WPUdEgWavGCV7GZFhGQwdl4aYBPH+DTPm58rOf2BYgkFZcUhJF+/fMHVaJvQCkQavDIYgMSkMmVnirawnjk1HYIBellPFMASREQEYMTRZ1G50XjIiQoyynsIJIQgK0GPiCOGnVgAYmh6HxKhgWQ4oAeCn02DmSOGnVgDIiI9EZkKkbIZaxWL+SOGnVgBIjgjBqJR42VERQoClI8V7XUQF+mPqwAGylwN4UKz04syEGPSYnzFQNoOjFA/m5YjaGDUaLB+cpYjxcHauqI2WVeGhQXmyI0gcpXhksLijzjIMHk4bLnty5SnFw+nikx4hBKsHjJJ1fsD9mT8o4fUrEsbJ3gFEACxP8N6Ha37sJNlRKgKC+TETwHpZppoeOV125IUBg8kRk6Fm5DUL/CbpniOiQKGRAXjqpUU+v45hCPz8dfj+K4u92gYEGvCjFxf4zCAMgVarxv++vNCrrV6vwQs/m+87gxCoVCxe+NkCrz9qtZrFiz+eJ4Phvl8v/nie14mZZRi88j/SetLcxujhvPqDuV4ThAkheO0790HFMr4vZxHglSfmQKf1PnC8snomdGqVzwwK4JUHZyJAQg7Iy8umw0+rkTX5vbh4GsL8vS9N/mLOFAQZ9LIm8ednTER8SJBXuxcmT0Skv1EW4wfjRmNguHC7e4+eHTUOCYFBshiP5w1DXpT3/ilP5YxCelCYzwwC4IGBORgfk+jV9lsDRyA7NFoWY37CYMyKE3eiAWB50jCMDEv2+XvFgGByZDrmxwsvmXg0OyYP48MzfHaqGBAMDUnB4jjvzs6Y0CGYFD7M58gLAwbp/olYGDvZq+3ggMGYFDZJFiNWH4t50b6Pqd9E3XNEFGruqtF49NnZku0ZloFfgB6/+dfjiPYSDfFo2n05eOq5OQCk5WgxDIFer8Gv/7waCcnSmlSNnzAQzz0/F4QQyQyNVoXf/G4F0r1EXDwaMTQZLz0/DyxDJEVGGMbt6Lz280UYMlg84uJRzqA4/PrZ+VCrGEkDIUMIGJbBqz+Yi+FDvA/kAJCRFIm3f7AIGrVKktNDiJvz0rdmYVKetK6cqdFh+MuTi6HXqCVFLTwWP10+FXOGD5LESAgNwj+eWAqjTuNTZOTZ+8Zj2Shpyy5Rgf7418NLEWTQ+TT5PTVxFB4dI62hV6ifAR+vWIpwo59PjMeGD8UzY4WXRnsrUKfDxwuXIS4g0CfG/YOH4KfjJkmyNaq1+HjWcgwIDPGJMX9ABl4bM0PSb0qnUuOfk1ZgUFCET47CtNg0vD56niSGhlHhTyNXIjs4TrKjQACMDE/Gm8OXeY0iAO5k09eyV2FoSIrkSZyAIDMw4f9j77zDo7iuNv7e2dUW9d4QAgkJVJGE6L333rEB44LjxLEdx45jx73HNU7c4m4Dtu
m9914EKkioIpBQ713bZ+73xyJC0czOzsj+XPZ9HqUwZ+9vZ3Z37plzzz0HbyUug1LEDiKGMHi8z10YKLC8csdrQBDmGoyX4v4AtcJ2lJkQgmU9l9mV58GAQaAmEE/0eQIaRdcknf9/6yfrvltcXIxXX30Vhw8fRlVVFYKDg7F06VI8++yzUKnELQP8GrrvdujYrov4+p3dqKloAqNg7ih2xigYcByH5GG98fBLs0U7ITfr9NE8fPHv/agobYBCwdxRcZVREHAsRUL/nvjz09MQ2lOcE3KzzqdcxScfH0DJtfpOGR3/FhsXgsf+Mgm9IgLsZmRkleA//z2EK8W1UCgIWPbWr2DHv0X1DsJf/jge0b2D7GZkX67Ee18fQt7V6hvt5W9hXP+3yB5+ePzesUiMFufo3Kz8khq8vfowMq9UCDLCgn3wxJLRGBQrztG5WVer6vHG+sO4cLlMkBHq54kn5ozEqHj72o8DQEl9E17bfAinL5cIMoK93PHEtBGY1Le33YzK5la8vOsQjhUUgVxvL38L43pb+0B3Vzw+bhhmJQgvLXWmuvZ2vHjgMA5ctvaO4WP4uTjjsWFDsDjR9pP37Woy6PHiscPYdTkftBMGc/3cvDVa/HnAYKxIEM4H6kwtJiNeOXcIWwpzwHXSAK+D4a5S44/xg/BQ30F2Rx90FhPeSD+EjVczYeasRRNvpnS0nHdVqnBf1EA8Ejvc7lwcI2vG+zkHsaE4Fabr1VZvZVj/v1bhhLvCBuKR6LF2bzG2cCw+L9yPDaVnYGBNd3Rf7rgqTowSs0MG4eHeUwS31HYmlnJYe20vtpQfhZ413Lg2N4uAQEkUGB8wCA/0mg2Nwr58H45y2FO1B7srd1vri/AwGMJgsPdg3N3jbmgVXbMr7aeSPfP3T+aI7N27F+vWrcOSJUsQERGBS5cuYeXKlVi2bBneffddUWP8mhwRAOA4DumnLmP7mtO4dL4I+nYTFEoGbp7OGDszCVOXDLaZE2JLlFJcvFCE7etTkHG+CHqdCQxD4OquxehJcZg+bwC697QdarbFuJRVhq1bLuDC+avQ6UwghMDNTYORo6Mwc2Y/hIX7y2bk5ldiy650nEm5gnadEYQAri4ajBgSiVnTktC7l/1Ozu3Kv1qNTfszcPx8Idp1RlBYd9gM6xeOeZOSEBMhLpojpMKyWmw8kolDFwrQpjOC4yhctCoMieuJheMS0TciWHbhoqKqBqw/eRH70grQ0m4ARymc1U4Y1CcUi0clon9EiGxGSV0T1p/NxM70XDTrDLBwHFzUKvQPC8GSYYkYEhEqe/tyWWMz1qdmYdvFXDTodLBwHJydVOjXPQh3D0rEiIiespJPAaCqtQ1rL2ZiU1YO6nU6mFkWzioV+gYGYHlyIsb0Cpddf6S2vR3rcrKwLicLte3tMLEsnJ2cEOPnj+V9kzApPEKwNokY1et12HA5Cz8WZKKyvRUm1gKt0glRXn5YHpOEqT37QK2Qt9W72aTHxqtZ+LEwHRW6ZhhZCzQKJ/Ry98Gy3smYERoDjVJeDkKb2YDtpZlYV3weZbomGFkzNAonhLp4Y3HYAEwLiYeLUnqiLgDoLEbsr8rAppKzKNPVwciZoWacEKT1wtzuQzAlOAkuSnnRAyNrwvHaNOysOIFSXTWMnAkqxgn+ai9MDhqG8QGD4OYkbzu5mTPjQuMFHKo+hDJ9GYycEU7ECV4qL4zyG4URviPg5uQmi/Fz6RfhiHSmd955B59++imuXuUve3uzfm2OyO2ilIqaHK7klOPgpvOorWiC0WCCq7sWYdHBmDh/IDx9hb90YhnFl6txYHs6qiubYNSb4OyqQY9e/pg0Jxk+frYZAGxySq/VY9/ui6isaIRBZ4Kzixrde/hg8rRE+Afy76e3h1FR2YTd+zJRXtEInd4EZ60K3YK9MG1yXwQFenYJo6q2BbsOZaGkogHtOhO0Gid0C/TE1LFxCA327hJGTUMrdhzPRlFFPdp1JmjUSg
T5umP6iFiEh9h2JMV87vUt7dh+JhuXy+rQqjdCq3ZCgJcbZgyOQe8Q29EyMYzGNj22nc9GTlkN2vRGaFRK+Lm7Ykb/aMSF2nbyxDCa9QZsy8hBVlk1Wg1GqJQK+Lm5YHrfKCR2D7L5ejGMNqMR2y7lIb2sAs0GI1QKBXxdnDEtpg8GhHbrEobObMb2vFyklJej2WiAE8PAR+uMKb17Y2j3UJtRDTEMg8WMnVfzcbq8BE1GPRTEypjUMxKjutvO2xDDMLIW7C3Jx/GKIjQZDWAIgZdai/HdIzC2W4RNJ08Mw8yxOFiRj+NVl9Fkslad9VRpMTIwEuOD+9iMnIhhWDgWJ2vzcbwmD00mHThK4aHSYrBvJMYFxEJlw8kTw2AphwsNeThVl4VmUxtYysHdyRmJXpEY5Zdoc/lG7P39l6hfrCPy3HPPYe/evbhwofMGaUajEcabyiC3tLSge/fuv1pHREiUUhzbmYEtXx1DwcUSKJQMOJaCUgqGIaCw5kiMmJaI+Q+OQa+YbpIYpw/nYtOqU8jJKIFCwYDj/sfo0NCx0ViwYgT6xNu/PAEA504XYuOPZ5CRdg2MgoByuIVBKcXgYb2x8K7BiEsIlcS4kFaMdZtScD61yHp96P9+pIRYt94O7B+GxfMHoV+i/UsgAJCRXYoftp3HmbSr1h8/tYbeCSFgiHULcXJ8KBbP7I8h/fi3/gkpq7AC3+9OxbHU/zUOszKsIXeWo0js3Q13TUnGqGThHTx8yi2pxqoDF3Aw7TI4ag1P386I6xmIu8f1w8Tk3pJudJcr6/DN4QvYk5EPluNAYF0uILBuO2c5DtHd/LF0ZBKmJ0dLiqQU1Tbg61Op2J6RCzPL3ljSsTKuL635+2DZkCTM7SdtG29JYxO+PpeKzZk5MFostzGs5xHm7YV7BiRhQWKcpChHRWsrvky9gPXZl6Azm28sERFYcx0slEN3Dw+sSEzCXfF9oZawVbhG14YvMy/gx9yLaDWbOmUEubhhRVw/LI9NglZClKPeoMNXuefxfUE6mk0GKAhzo55Hx//217pgeZ9krIhKhquT/VGOZpMeqwrP4YerF9Bg0nXK8FY5467w/lgeMQgeKvuXJ9otRqwtPo31JedQZ2ztlOHhpMW80IG4q+cweKnsrxdlYE3YXn4SW8tPoNbYdAuDAQMOHFwUGkwNHoJ5IaPgoxZ+UPs16hfpiBQWFiI5ORnvvvsuVq5c2anNSy+9hJdffvmOf/+tOSIWM4sPn9uA/etTQBgCKlDPQaFgAAI8+f7dGD1DuMbFzWJZDp+/uwfbfjgLhiGCNTwUCgYcpfjLC7MwaY5wPZCbRSnF158dwdrVp20yOvJXHv7LJMxewF/XpDPGmrVn8NV3J2wzrh9fee8o3LVwkF0T7PqdqfjPN0c6zZHojHHP/MF4YLHtLYA3a+uRTLz17SGQ604NL+P6ZLh4Uj88tmSUXZP4npQ8vPDdXgDiGHOGxeGZJePsKi1/KKsQf1u1CxylggxCrA0RpyT1wWtLJkJlxwR78nIxHvlxB8wsK8yANSdgbFQ43l0wFVqV+An2fEkZHly/FQazRbDQV8fVHxoWig/nzoCrWvxW94tVlVixZTPaTCZRjOTgYHwxczY8NOKXEXLqa7B89wY0GvQ2C5YREMT6+uPbKfPgqxU/wRY212PZwbWo0bfZZDAg6OXhg9XjFyHQWfwyQml7I+47uQZluiabvWQYQhDi7Imvhy9Fdxcv0YwaQzMePv8tittqbW7NZUDgp3HHxwPuRZir+Hy7RlMrns38HIVt5Ta3GDNg4OHkgjcT/oBervY/bP6S9ZM6Ik8//TTeeustQZvc3FxERf0vc7+8vByjRo3C6NGj8eWXX/K+7vcQEaGU4r0nf8ThLRfEFxG7fpd67pMVGDbZdoIdpRSfvLkTO9al2P3+nnxtLsaLdHi++vQw1q45bTfjkScmY+ZccUV41qw9gy+/PW4348H7RuGuheJ2RGzcnYYPvj
psN2P5vMF48K7homx3HL+E177cbzdj4cQkPLF0jCjbA6kF+PuXu+wanxBgxuBYvLhM3K6L4zlFeOSrbaBUfBUHQoAJfSPxzjJxW6tTikpx37ebwFEq+jfCEIJhET3wyd2zRDlVF8srcfeaDbBwnOgGagwh6BcSjG+XzBXlVOXW1mL+uh9hZFnRDAUhiPHzx9oFC6F1su1UXW1qwKytq6Ezm0VXTVUQgnBPb2yedTfcRBRRK2trxszd36LZZLCL0c3FA9um3gMvte2oRY2+FfOPfIk6o21H52aGr9oVm8auhJ/GdqG2JpMOy09/gipDs+jKrArCwE2pwZphf0KQ1rbD024x4LG0D1CqqxVdH4QBA61ChY+SH0eIs7zcu1+S7HFE7I5lPvHEE8jNzRX8Cw//X9i6oqICY8aMwdChQ/H5558Ljq1Wq+Hu7n7L329Nu384g0Ob7XBCAOB6eP2fj65GVWm9TfMjuzMlOSEA8P4LW1BcyN/avUOnTxRIckIA4KP39yI/t8KmXVrGNUlOCAB8/vUxZGTytxLvUHZBpSQnBABWbTqL06lXbNoVltbija8PSGKs35+O/WfzbNqV1jbh2W/22F2qilJg+5lsbDl1yaZtTXMb/vrdTruckA7G/ouXsfp4mk3bJp0Bf1qz7frSm3gGRylOXi7GZ8fO2bTVmcxYuX6rXU5IByOtrALvH7P9vTdaLLhv62aY7HBCAGthsuzaGrx67KhtW47DvXs32eWEdDCuNDXgmRP7bNpSSvHAkY12OSEdjPL2Zjx+coco+7+kbESdsd1uRp2xDY+d2yDK/oWLG+xyQqwMDq0WA/6aukZU75kP8tejVFdjV5EyDhz0rAnPZn0hu3T9r1V2OyJ+fn6IiooS/OvYnlteXo7Ro0cjOTkZ33zzDZgu6sb5axXHcdjw2WFJ1Vgptb5+1/fCN0FKKdZ/c0JyghMhBDtFODEbfjgjeQcFwzDYst42Y92mFMkMhYLB+k3nbdqt33lBcnVRhiH4YZttxoaDGZILRRNCsGZ35zlVN2vj8Uy7HYQbDACrDlyweaPddDYLZov0Ju/fHU0FywnfaLekZ0NnNktq804BrDqbDpPFImi3PTsXTXqDJAZHKX5Iu4h2k0nQbl9hIarb7ZtYb2ZszMlGo14vaHe0tAjXWpokM3ZfLUBlW6ug3bmaUuQ11UpisJTiaMVVXG1pELS71FiB1PpSSZMwSylS60uR3VgpaFfcVotTdQUSGRwKWquQ3lgsaFdraMKx2gxJ1Vg5cKjQ1+FCQ67dr/0t6CfzDDqckNDQULz77ruora1FVVUVqqqqfirkL14XTxeiurQBUu/kHEux54czMBnNvDb5WWUovlwtuXMky3LYvy0N7W0GXpviq7W4lFkquXcMy3I4eigHTY3tvDaVVc04d/6qLMaZlEJU17Tw2jQ0tePomQLJPVc4jiIjuwzFZfxRqjadEbtP5khmUEqRX1yD3Kv8vxuDyYLNJ7OkMwCU1DQh9XIZr42ZZbH21EVJk3eHalvacSK3iPc4x1GsOZsuq+9Ri96I/dmFvMcppVh1Pl1WozyD2YIdl4SjVN9lpMtq+MdyHDbmZAszstNkd9P9Me+i4PFVeamiCozxSUEIvi9IF34PVy/IZDD48aqws76pNEU2Y/21s4I2uyrPyPpeMWCwteykjBF+vfrJHJEDBw6gsLAQhw4dQkhICIKCgm78/V61Z+1ZMHYkBXam9lYDTu/nD6Pv3ZLK2wVXrEwmC47vE2DszJDN4DiKg/uy+BkHMmXXrCCEYM/+TH7GsRxZkx5gLfa18xD/eew/mw+zhZXN2HaM//M4klGIdoPwE7oYxuYT/OdxKq8YDW3CT+i2xBCCDaf5GeeLy1DRJPyELoax9jz/Z36pqhqFdQ2y+twSAD+k8U/gVxsakF5VKctpowDWXORnVLa14kRZsaxuuhyl+D4ng/d4k1GPvaXSoggdYinF2ssXYeGJhOktZmwvzZLJ4L
CtNBN6S+cPaBaOxdbSC7IZh6qy0WzSdXqcUoqdFadldVDmwOFCYx5qDU2Sx/i16idzRFasWGENFXfy93tV2ZXqOyqu2iuFgkHltTp+RnHdHdVQ7ZVSwaCylD+cWl7WIJuhUBBUljfxMyr4j4kVgTWywsuoagKR6eywHEVFNT+jrLpRdoEulqMorWrkPV5a22TXrhc+xrUafkZJXbOsJ3zAOvEV1wowGppkjd/BuFYvwGjk/6zEigIoaeIf51pzk2wGAJS3tvDeL0tam2Q5Ux2qN+hh4JnAy9qbZTlTHWq3mNBk7NyJrTG0wsTJc9QBwMSxqDV07sQ2m/XQsfIcdcDakK+Kx0kwcmY0m9tkMwCgymA7D/C3pt930sbPLH270baRDRGGCI6j6wIGAOh0/OO0dwGDUgq9AEOvN0lelukQy1HodPw3IL3B1CWOcZvA9dALLKPZxdALfOZG+TdZK4N/HJ3RJNsR6RiH95jJ3DUME/81Fzpmjwxm/jwUnblrGBylMLKdc7qKAQBt5s4/k65ktFt4GDz/3rWMrrknWsfqnKFnu5DRhWP9WuRwRH5GObvKb1BEKRUcx6ULGADg7MK/rc/VVV45ZsC6bKJ15h9Hq1XJXppRKAicnflrPjhrpHWevVnWsvT85+GsEV9zQkiuAtfKWa2SnHd0C0MrcK3Uqi55OnYRuB7OKqeuYQj0snK2o86IkIS21jqL2HYrRgpCeEu4uzh1zfcKAG/hsZ+Fofw5GPLvV7bGcrazt4wg4zfSyM4eORyRn1GhkYGyc0RYC4duAh11Q8P9ZOdvWCwcQnrwlxnvHuoLhULmkgbLISSUv1x6aIhwKXUxohQI6ca/9z+0m7fkBM8OMYSgexA/o0eQFyxyl7EYgp4CpeV7BHjxrsHbwwgP4u+D1NPPS7aToGAIwgP4GWG+4gtT8YkhBOF+/NcqzEc+gwDo6e3Jz/DqGkaohwfv7reeHp6yEiM75O/sAg1PTZQQFw8oZSR4dsjdSQ1PVeeTq7/WDWo7m9B1Jo1CyVtLxEPlDFeZfWYAa8JqkNaz02NqhQreKvnlJggIgrTy+pH9GuVwRH5GTVkyWHaOiJunMwaP529LPXlef9n5GxqtE0ZOjONnTE+8o2OuvVIoFBg3ib+V/OSJ8bITSSkFpkzkZ0wcGS15626HWI5ixnh+xriBfaBRybvRshzFrNH8hezGJPSCm1beExnLUcwbwX8ew6J6wM/d/lLXtzMWDuE/j+Qe3dDd20PWBMtRisUD+RkxAf6IDvCDnI+dArg7OYH3eE9PLwzsFiI72rY0IZH3mL+zK8aG9pK1a4YhBMti+IsXeqg1mNYjShZDQQiW9E7kzZPSKJwwp0eC7B0tc0IToVF0HolSEAZzuw+QeR4MJgbFw82JvzjbjOBhYGR8exkwGOQT85ss925LDkfkZ1T8oF7WaIbE7yrDEEy9ayicBCa2yOhgREQHS07CZBQMJs5OhkZgSaN7Dx8k9Oshq8bHuImxcHfn/1H7+7lj6KBeMhgEI4ZGwteHv8S0p7szxg2LklVHZEDfHggRiIi4aFWYNiJWOoMQxIYHoncofxRM5aTEvBHxkq8VIUBYoDcSwoN5bRQMg8XDEmRNrkGebhjah78PECEEywaLb2PQmbyctRgX3UvQZnn/JMgJhLmonDAtpo8wIyFRVgTJSaHAvOgYQZtlsUmyds0AwKIofucTAJb16Sd7Z87dkcKf6ZLw/rJ3tCwJF25NMbf7AFnnwVIOC0IHCdpMCRI+bkscOMzsJq5S829NDkfkZxQhBAseGitpPZ8QQOmkxLS7h9i0XXjfCMH+NUIMhgAzFg+0zbhriORkUkopZi+wzVg0f6CMOiIUC+fZ7mmzcEay5MgLx1EsmSWCMSEJIESS/8lRimXTbDPmj0yAkpH2PEYpsGJif5tF8OYOjoPaSSnZGVkxpr9NZ2lWYjTcNGrpjKH9bDammxbTB74uzpKekAmApf0TbZZfn9CrF7
q5uUtmLI6Lh7uNfjMjQ3oiwtNbEoMhBHMiYuDvLFwaPdmvGxJ8giQzJnaPRKibp6BdlEcABvv1lMRQEIIhfmHo4xEgaNfdxQej/aMlRSwUhEGsRwj6ego37PRRe2BsQLIkBgMGoc4BSPbqbfdrfwtyOCI/syYuGIhpdw+16zWEWP/j2U/vgV+w7fXnkRPjMH+FnZ719QZlT725AN172m7wNHBIBO55YJR9jOt6/OlpiOhtu0V837juePgPYyUxHv3jeMTF2O4m3Cc8AE89NFES48G7hmNgYk+bdj2DvfHSHyZLyiddPn0AxgyItGkX7OOOt1ZOA8j174sdWjgqAdMHCz99A4Cvmws+vH8miJ0MAmBm/2gsGc6/nNEhd60G/102GwqG2OWMMIRgfHQvPDDCttOmcVLiy8Vz4KRQ2M0YGhaKR0fYfhhwUijw7Zy50Do52TXBMoSgX3AwnhkxUpTtN1Pmw12lsYuhIATR3n54dfh4m7aEEHw+ei58NC52M8LcvPHO0Gmi7P81cD4CtfY5bgpCEKj1wPsD54myf6nvfIS6+Nq1DKQgDLxULniv392iqlU/2ns+wl2D7XJGFISBq1KL1+MfBNMFOTm/Rv0+z/r/UYQQ/PHluZi1YgQAa1daISkUDJROSrzw33sxcIztyaJD9z02AYsfGCmaoVAwePqfCwRzQ27X3SuG494HR18fwzaDYQie+Md0TJ6WKJqxYM4A/PmhcSIZ1uOP/nE85s4S30V4+vh4PPXQRDCE2FxC6Tj+0NIRWDZXfCh24uAovPLQFCgY8Yz7Zg3CnxaIdyhH9e2Fdx+cASXDiGbcPa4f/rZwtOiWAIMiQ/HxA7OhViptMjom+bmD4/DyoomiGUmhwfhqxTxoncQzpsT1xnsLp4penooJ8MfqpQvgplbZnPw6GGMjw/HJ/Jk2Iy4d6uXtjXULFsFLq7XJ6Dg6rHsovpk9F2qRnYq7u3lgw6wl8Hd2telUket//QKC8cP0RXAWuSsmwNkNmyYtRYiLh2hGtJc/1k28C+48Saq3y1vtjB9H3YswV1+IiR0SEIS7+eLHUSvgrXYWxXBz0uCLQQ+gt1vgjfcpJAYEwVpPfD34QfhpxCWiahVqvJ3wJ0S79xTN8Fa5419JjyBQKz9B/9cqu7vv/pyyp3vfr1FnD2Zj6zfHcfH05Ru7aShHwSgIOJaDk0qJ8fMGYPZ9I9G9l3DokU+ppwuxZc1pXDh9GQxDQGBtY29tZ89BoVRgzNS+mLN0KMJFRCk608W0Ymxal4KzpwpACAEhNzOs/z16XAzmLhqE3lHSKuteyinD+s0XcPJ0AQDcwuj4Co8c3gcLZvdHbIy0dtp5hVVYt+MCDp8puO3943oTNorh/SOwcEYykmK7S2IUlNRi7d5U7D+bBwvLgWEYcBwHhhBQWJd7hib0xOJJyRgUx59PIaSrlfX4/nAadp3NhdnCQsEw4Ci9EcVgOYpBUaG4a2wSRsSHCw/Go2u1jVhzPB1bU7JhNFs6ZSSHd8PSkUkYFx8hqfdReWMzVp1Jx8bUS9CZzFDewiBgOQ6J3YOwdHASpsb3lsSoam3D6vPpWJuehVaj8X8MACBWRlygP5YNSMLM2ChJxelq29ux6mIGvs+8iCaD4RYGIQQWjkMfX1+sSEzC3OgY0Y7OzWow6LAqOx2rszNQb9DdygCBhXLo5emNFbH9sDAqnndbsJCaTQasyU/HqvxUVOvboCTMjUqizHVGqKsnVkQl467IRGiU9m9jbjMbsa4oFauvpKBS32JlXP99M8TKCNK6Y1mvgVgc3l/S9l8Da8bm0vNYV3wGZfqGThl+ajcs7DEEC0IHCiao8snEWbC38iy2lJ1Amb4GCsLcuE8RQsBSDp5OrpjZbThmdRsOdyd5ieC/RNkzfzsckV+AyotqcWjLBdRVNsGoN8PZTYPw6GCMnZ0MF4GETgAw6k1QKBVQOgnfvKrKGnFwZzpqKpph0Jvg7KpGz4gAjJueAD
cP4ScKk9EMQohgkiwA1FQ348CeTFRVNkGnM8HFRY3uoT6YMKUvPL2Ef2gmk8XKsHEe9fVt2HvwEsorGqHTGeHsrEZINy9MGh8HH2/h9W6TyVocSmXjPBqb27HnaA5KyhvQrjPCWatCcIAnpoyJhb9A8isAmM3WbqtqG4zmVj12n8pFUXkd2vUmaNROCPJ1x9ThMQj2E86at1hYsJxtRqvOgF0pebhcXos2vQkalRKBXm6YNigaof7CS3zmDoaTQnCCbzeYsCstD7llNWg1GKF2UiLAwxXTk6MEt+oCgIXlYGZZaJyUggydyYzdWfm4VF6FZr0RaqUCfm4umN43Cn0ChZcRxTKMFgv25BYgrawCLQYjnBQK+Lk6Y2p0H8QFCT8EsBwHo4WF1gbDxLLYV3gZ58rK0GI0wknBwFvrjKmRkUgMDBJ8LctxMLIstEphhpljcbC4EKcrStBkNEBJGHhptJgc1hsDArsJvpajFAaLGVqlk833cqT8Co5XFqHJaABDCDzVGkwMicSQwB5dwuAoxcnqKzhWdRlNZmtVVk8nLUYFRmJ4QC/ByAylFHrWDI3Cyabd+fqrOFaTi2azDhyl8HDSYohvJIb59xFcwvkfQym4nEIpxaXmqzhZl4lmcztYysFN6Ywkr0gM8YmDkrHf6fy1yOGI/IbFshzOH8rG9m+PI+vsFVjM1vLIWhc1RsxIwozlwxERL+1pvUMcxyH15GXs+PEsMs4UwmyyMjRaFYZNiMH0JUPQp2+I5A6/gPUHmnG+CNs2nsf501dgvu4kqNVKDBnZBzMXDEBsQnfZjMysUmzbmoYzZwthNP7PERk0MByzZvdDUqLwjVMMI6egElt2p+P4mQIYrjOcnBQYkNATc6cloX9iT9nF2fKKqrHxQDoOnS24Ua3VScmgX3R3LJiYhCGJYbJLyV8uq8WGIxex93w+2q9XWVUqGCREBGPx2ESMTOglu5T81ap6rD+ViZ3nc9F6vVqskmEQ1yMQS0YmYlzfCDgp5d2cr9U1Yt25TGxLy0GTztq8UcEQRAf74+4hiZgU3xtqJ3lbqsuamrEuNQsbMy6hQWedKBlCEBXgi7sHJGJabB+bCa22VNnairWZmViXdQl17e2gsOZF9PLxxrLEJMyKjhIs3iZGNe1tWJubhR+yL6K6vQ30+nmEeXhhWVwi5vaJhbta3tbweoMO6woy8UN+OsrbW0BxvU6KmyeWRSVhfkQ8PNX2Rx1uVpNRj03FF/H9lVSUtjWBgoKAINjZHUt69cOCsET4aORFHVrNBuwoy8Da4hSUtNWDgzXi5K9xx/we/TE3NBl+GuEHld+THI7Ib1RHt6bii9e2oqG6BYyCuaMmiULBgGU5RPbtjr+8swThEpYoTh/MwX/f3IHaymZBRlifQDz28hz06Wu/03P+TCE+emcPKssab4zXGSO0py8ee2Ya4pPsX6LIyLiGDz7Yh5LSBigU5I66Jx3/1i3YC489NhH9+4fZzcjOr8A7H+/D1Wt1UDDkjuJoHf8W4OeOx1aOw/BBEXYzCopr8PoX+5BfXNMpo2P5yM/LFY/ePQoThkTZzSiqrMfL3x5A1tVKQYa3uzMenTccM4by17HhU2ldE1788QBSC8s6ZxACjlJ4umjw8NShWCgisfV2VTa14oVN+3G6sAQKQu7YrtnBcNOo8dDYQbhneD+7ndDatnY8v/MAjl4usi4P3sEAOGrd4rty6AD8YfhAu3cANer1eO7AQey7fLlTBoF1451WqcS9ycn4y9AhdjuhLUYjXjh+ENsLrV2EO2MAgEqhxPK4RDw1eITdS0Y6swkvpRzE5sJssJSConOGkmGwODIBzw4Yy1tcjU9G1oI3Mw5gXVEGLBzbaUI4AYGCEMzp2RfPJ02Es53LOWaOxb9zD2BtcQrMnPVB43YOAwIQYEpwPJ6Nnw5Xp99fddTb5XBEfoNa//EBfPPPnaJsGYbASa3ES988iMRh4reDbf/+DD59Y4f1/9j4Vj
AMgULJ4Ll/342Bo8RPfvt2ZOD913cAlNrcNksIAcMQPPPqXIwcLz5R98iRXLzx5nZwHGz2kunIN3jqb1MxSaDA2u06lVKI59/aBo6loutFPP6H8ZgzVXydjPOXruHJ97bCbGFFb2P+85KRWDrd9s6RDmUUluORf2+B0WQRXWV25fRBeGiW+J1fOaXV+MMnm9BuMIlmLB3dD0/OHinaUSisrsO9X2xEs94gmjF/QBxenD1edLTqWkMTlq/agNq2dtE1KabH9cFbsyZDKdJRqGhpwV3rN6CipUU0Y1yvXvhoxnSoRDoKNbp23LVtHa42NYr67hIAw0J64Mups0XnfTQZ9bh73zrkNtaIYjAg6OcfjG8nLOAt1X67Ws1G3H/8R2TUl4vqesuAINozAN+Nult0BEZvMeHR8z8gpe6qqF1vDAjCXH3xxZAV8P2dR0fsmb8du2Z+Bdrzw2nRTghgTXg0GSx4acXnuJpTLuo1R3ddxKev77A6ICJ+cRxHYTGzePXR75F7sUQU48zxfLz/2nZQzrYTAlidCJbl8Mbzm5F+vkgUIzW1GK+/sR0sK67Tc0cS6tvv7MKZM4WiGFm55Xj+n9tgsXB2Fa3612cHcehEnijb/OJqPPnuVpjMFrtqqXz043HsOJolyraosh6P/HsLDEbxTggAfLHzHH44mCbKtqy+GX/4ZBPa9OKdEABYczQNXx5IEWVb3dyG+7/cZJcTAgAbz1/CB/tPirJtaNdhxeqNdjkhALDrUj5e33tE1HexxWDA8o2b7HJCAODwlSt4et9+UQyd2YQVOzaiSKQTAlhvB6fLS/DI/l1gRbQSMFgsuO/gRuSJdEIAa2fbtNoKPHR4K8wiuvGaORYPn9qAjAZxTkgHI6+5GitPruVtJnizWMrh6bSNOF9XJHrrPQeK4vZ6/PHc6i5t6Pdbl8MR+YWrqb4Nnzy30e7XUUphNlnwryd/sGnb3mbABy9strviK6XWnJX3ntlo8yZoMlrwzsvb7AN0cDiKd17aarN0PctyePOfOyQXKHvrrZ03Elp53wuleP2D3ZIKrREAb320FzqBLrcdjNc+2wczy0o6l7e/OYTm1s7brt+s11cfgtFkkVQB9F/rj6Om0Xbb839uPIJ2g0kS46Ndp3GtptGm3bt7jqNRp5fUN+irYxeQW1Fj0+6Do6dR3dpmd3VOCuCH1Eykltp+IPj43DmUNDVJYmzLzcXRItvO+hcZF5DXUGc3g6MUB4oLsftKgU3bNfnpSK+tkMQ4WVmMTYWXbNpuLs7E6Zpiu79XLKXIqC/H94WpNm0PVGTjSHWeaEfnfwwOl1uqserqKbte93uWwxH5hWv/urOSe8dwHEVhVhkuZ5YK2h3ZkQGjwSyp4ivlKMqL65B1QfgmePxQDtpaDZImVkop6mpbcf60cMTizJlCNDS0i3oyvJMBtLQacOKk8I02LbMEFVVNkiZWCsBgMOPg8VxBu5yrVbhcUiu5qqyFZbHzeLagzdWKeqRfLpfV9G/LCeHIS3l9M07mFElmKBiCDacyBW3q23TYl1Ugi7H27EVBm1aDEVszcySXCFcwBN+fF2YYzGaszcySziAEq9MzBG3MLItVlzIkl55nCMF3WemCNhyl+C7X9iTPJwLg29xUwd8wpRTfFaTI6km0qvC8zevwQ9E5yb1jOFCsK0qBRUR0xyGHI/KLFsty2PHtCUnl2jukUDDYuZo//EwpxbY1p2X9qBUKBjt/OCtos3VdiuT+N4A1J2XbhvOCNlu2psrancIwBFu2XBC02bwrTVajPEKAjTuEb7Sb9mfIYlAKbNifLujIbDyaKYvBUYoNRy/CbOG/0W46nSVrRxLLUWw+cwl6k5nXZvOFS7KaI7Icxfb0XLToDbw227JyYRI4TzGMfbmXUdvWzmuzq6AAbSbpoXyWUhwvLkZpczOvzaHiK6jX6yQzOEpxoaocBQ11vDanKotR2tYsqYowYHXW8xprkVFXyWuT0VCOgpZaWY
yy9iacqeF/eLrcUo2MxhK7oyE3q97UjmPV+ZJf/3uSwxH5BaswqxR1lU2yxmBZDse386/nlxXVoqyoTt7NnOVw6mA2WJ6bdV1NCwpyK2Q5VBxHkXr2CnTtxk6Pt7UZkJ5+TXIUoYORk1OB+vrOlxzMZhanUq7IiiJQChSX1qOM53OllOLguXxZDACoqmtBwTX+JYe9KXmyGY2temRe4Z8wdqflyWr8BgDtRhNSCvgjersy5DNMFhYn8ov5GZfkTyYcpThccIWfkZ8vu1svIQT7Ll/mZ1wpkM1QEIJdhfzXY3dxPpQyy5QrCYNdxfy5VHtKc7uEsaeUPzJ5oDJbVkdgwBpB2l8pHJl0yCqHI/ILVjPPhGivDDoTTMbOcx+aGvif0uwRx1K0tXb+VNnU2DUMAGhu6vyJronn36WIb6yWNr3sSe8Go7nza2IwWmAyd004t7Gl8/PgOIoWHX8EwB41tPJf96Y223kqohht/Ix6gWNiRQhBQzv/OLXXa3jIkYJh0NDOfz1q29plf7cUhNyoadIpQ9cmm0EIQYOBn1Gnb4dFRiddAKCgaDAIfObG9ju2AtsrlnKoN/IzGoztsqLEgNX5rDN0zT38ty6HI/ILFmuR94O+dazOJ7efhSExx6VzRudjdSmDZ6zba5HIkYXvPETsShDN4DkPCnG7luQwAMiOuPxcDGKT0TWfiUVgHKFjYkVtjGP+Oc5DphMCWM9D6L2ynJwFk5sZ/A4/2wXnAeBG3RGHhOVwRH7BslXeXawUCgYa586L+Li6d13hHVee9+vq2pWMzsdyc+tCBs/7dXOVV2Hy1rE6ZzhrVHZ3z+VluHT+fhUMA62N8vBi5e7Mf01cNfKqfophuGvlfyYcpXDX8n9/PDTyv1scpXDX8L9XL6383zq1wfDWaGU/5QMQrLTqqbLd4M+WGBB4CDTLc1dpJCeRdkhBhBluXVCQjADwUv32esj8FHI4Ir9gRcSHQKWRVyaaUTCIHRjOmzQY2ssfLjInccIQRMQEQ6Xu/L0GBnvBy0e4D4wYdQv1hodn531xvLxcEBzsKXsS9/V1RUBA58V3tBoVIsL8ZCVgAoC7qwahIZ132mQYgoTe3WSXhNeondC7hz/v8eQ+3WUlqwKAk4JBbBh/o8RBvUNlMxhCkBjGXyF4SIR8BgD0F2KEh8qeXDlKMSA0hJ8R2l12/gZLKQaG8DMGdZPX+gGwRkMGB/OPMyiwu+SdPzcYlMOgQAGGXw/ZkReWUgz046/YPMAnrEuiO/19esoe4/cghyPyC5azqwYTFgyEQkZ/D47lMPPekbzHVWonTFk4EIxCxi4NjmLWMv5Kmwolg5nz+8vaNQMCzFk0iNcJIIRgzuxk6eNfH2P27GTB6z1verKk7cEdYhiCWZMToRLodbJgUpKspFsFQzBjVBycBSISi8YmylrWUDAEkwdFwdOV/0l+0YgE2Ywx8b0Q4MnvxC4aLJNBCAb3CkUPX/4mgEv69ZU1uTKEoG9wAGKC+B3DhfHiq/p2JgKgl7c3BnTjd6gW9ImDk8wma0GubhgVyt8OYWZYNJwldN29Wd5qLSaF8leEntAtCp4qeREkZ6UTZobG8R4f6h+BQI28at5KRoFZ3cVXUv49y+GI/MI1fflwWfkPnr5uGDyB/wcHANMWDZQ18Tm7qjFysvCNdMos+/t63CyVSolxU/oK2kyaFA+ljIZpDEMwZbIwY9yIKDhrpS85UEoxY5IwY1RyBDzdpN9oWY5i7njhXi2DY3og0Ft6CWqWo1gwWpiRGBaM8EBvyUF0lqNYNEKYERXkh77dAyVHE1hKcffQREGbUG9PDAsPlczgKMWygcITUoCrKyZGREiOvFAAK/olCf7GPDQazO4dLZnBgGBFfJLgdXB2UmFRZF/pDEKwNCpJsFy9SqHA3b2SJX8eCkKwMCwJWgGHSUEYLA4bBCLx26sgDKZ3S4C7TIfp9yKHI/ILV8+oYIydKz2acP+zM6GwMTkHhnhj+u
JBkpc17n18Eu+yTIe8fV2xYOkQaQAASx8YCRcbORqurhosXzZMMmPx4sHw8hJe09WonbBy6QhJ4xMAc6f1Q6C/h6CdUqnAn5fwR7EEGQSYNjIWYd18BO0YhuAvC6QxGEIwtl+E4LKM9b0QPDFrpN0VezsYQ/qEYmCk7eWEv06W9nkoGILE0CCM7GO74eFfRg8DQ+yflhSEIDrAD5OjI23aPjpkMJQK+7MfFIQg3MsLs2Ns92P6U/IgaJROdk/iCkIQ5OaGxTHCTjQArIwbCDeVWhLDR+OM5VH9bNoujxwAH7Wz3Q4PQwjcnDS4r88gm7bze/RHkNbD7m28DAg0CiXuj5T2vfw9yuGI/Ar02NtL0HdwhN3OyNInpmD8/IGibB96ZjoGjIqy2xmZd+8ITF8yWJTtvX8ci9ET7O/cOnVOPyxaLs7BuOuuIZg61f7OrePGxeDeFeJuHHOnJWHhTPuWgQiAYQMj8PB9Y0TZTx8Vh/vniLuuNxgE6B8TiqfvnyDKfkL/3nhsvn03S4YQxIYF4tX7p4iyHx4Thn/MH2t9f3Ywegf74r37ZoiKog0ID8Hr8yeC2MFQEIJQH098fM9sKEUsffbtFoj35061NmIUCVEQgkB3N3xx1xyoRHSV7ePnh09nzoSCYURP4gpC4OPsjO/mz4Ozk+0lkZ4eXvhq6hw42cnwUGuwZsYCeKht55MFu7jj2/ELoFEoRTsKCkLgrFRh9cRF8NXaTvD00bjg25F3w1mpsouhZpT4euQSBDsLPwwAgLuTFp8NvgduSo1oZ4QBgZJR4MOBSxHqIvww4ND/5Oi++yuRyWjBB0/9iCObL0ChYHiXawhDwBCCP7w0FzNETqwdYi0sPnl9B3avSwGjYMDxMBiGgMIaCZl/3wi7llxYlsOXHx7Eph/OglEQcDxbYhmGgFKKu+8fiWUrR9nFoJTi229PYPWa0zda2PMxOI5i0cJBWLlytF0JopRSfL8pBV+uOQEQ8DI6Wt7PnpKIR1eOEzXp3az1+9LwwZqjAL2zVfvtjKkjYvDMAxPhZOfy1LaTl/DGmkPWbZE8d4MOxrjkSLxy32Ro7Nx1syctHy98b+2fA9p5N4EOxvDonnjn3mlwVtu3BHY45wqeWrsbBnPnrdpvZgwM745/L50uuFumM526eg2PbtyJNqMJxAYjoVsg/rtoFrxdOk+w5tP5sjL8Ydt2NBsMYAjQ2VdLQQhYShHt54ev5s5BgKt9yeAXqytx/+4tqNPrwBDS6Xerg9HL0xvfTp+H7u62J++blddYixUH1qNK12aT0d3VA99NWIhwj86TuPlU1FqPe4//iLL2Jl4GAwIOFAFaN3w9Ygn6ePLn6nSmcl0j/nRuNYra6m68Xz6Gt8oFHw9ailhP/lyd34vsmb8djsivTEW55di56hQObkiByXhr6Wtvf3dMv2cEJi0eDG9/6der5EoNdq09h/2bU2G4rUGbh7cLpi8ehMkLBsA3wL4b080qL6nHri2p2L01Dbr2Wxmu7hrMmNsfU+f0Q0CQp2RGVVUTduzMwI4d6Whru7Uiq4uLGtOmJWDGjCR0C+ZPVLSlmrpW7Nh3EVv3ZqC55dZCT1qNE6ZN6ItZkxPQI0T601F9Uzu2HcnCpgPpqG++tQiTWqXE9JGxmDs+Eb26+0pmNLbqsO1kNtYdybijmZ2TUoFpg6Mxf3RfRPcIkMxobjdge0oOfjiejoqGlluOKRUMpvTrg0UjEhEXGiA5n6jVYMT2tFx8fzod1+qbbjmmYAgmxkViyZBE9OsRLJnRbjJhR1YeVqeko7Cu4ZZjDCEY36cX7h6QiEE9QiQz9GYzdubl47v0dOTW1t5yjAAYGx6OZUmJGNajh+RcCYPFgj1XCvBtVhou1lTdwRjRvSfuiU/C6NAwKBhpwXMTy2JfSQG+y03FhZo7G/8NCQzFPdHJGN89AkqJDAvH4VBFAVZdPo9ztdfuOJ7sE4LlkQMwoVuUYO6JkFjK4WTNZfxYdA
5nagvvcEDjPLvhrrDBmBAUA7VCXrLub0UOR+R3oPZWAwqzStHWrINCwcDDxxW9E0J580EopShIv4aasgYYdEY4u2nQIyoYIb34JxZ9uxGXs8vR2qwDwzBw93JBn/gQKJ34GVdyylFZ0gCDzgRnVzVCwv3RI5KfYTSYkZ9TjpZmAwgB3D206BPbDSqBJ+6rl6tQXtIAfbsRWhc1uoV6IzySP1/BZLIgL68SLS16UFC4u2sR1ScIaoG8luKiWpSU1EOvM0GrdUJQsBciIvknSIuFRe7lSjS16EE5Cnc3LfpEBEArsHOlpKwexdfq0a4zQqNxQmCAB6IiA/kZLIfcq1VobNGB5SjcndWICguAi0CdjbKqRlwpqUObzgiNygkBvm6IjQziZbAch5ziajS26mC2cHBzViO6hz/cnPkjBxV1zSgoqUWbzgiVkxL+Xq7oGxHMG2HiOIrcsmrUtehgtrBw06rRJ8QPni78iX3Vja3IKa1Bq84AlVIJP08XJIYH806QlFLkVtSgtrUdBrMF7lo1egf6wceVPzpR29KGS6XVaNEb4KRQwNfNBUlhwXDimbwopcivrkN1axv0ZjPcNWpE+PnA340/OlHfpsPF8kq06I1QKhj4uDgjObQbVAJRrPy6OlS0tEBvtsBdrUYvH28EufEnGjfq9Ugvr0SzwQAFw8Bbq8WA7t2gFlgeKmyoR2lrM3RmM9xUaoR7eSHEjf9Bo8VoQGplBRoNBjCEwEujxcDgbtAKLA9dbW5ASWsT2i0muDqpEObujVA3T177NpMR56vL0Wi0Ovheai0GBHSDq4r/+17S1ojitga0mY1wUarRw9ULPd34oyx6ixnna0rRYNSBgsJTpUWyXwjcBWqMVOiaUNxWhzaLAVqFCt2cvRDu5sdr/3uVwxFx6IbaW/U4vCEF2748gvKrd/YeiRscgZn3jcaQKQm8DoYt6duNOLIjHdtXncK1y9V3HI9KDMXMZcMwbFI8VGpphbSMBjOO7r+EbWvP4Ur+nf1NIqKCMHvxYIycEAu1xNorJqMFx4/lYevmC8jLrbjjeM8wP8yZ1x9jx8dCK3HnjNnM4sSZy9i8PRVZOXc+IXYP8ca8mcmYODZG0MEQkoXlcCr1CjbuSUdq9p19WroFeGD+lH6YOioGbi7SasiwHIczWcVYfzADZy4V33E80NsNC8cnYsbwOMk7gDiO4mx+CdYdy8Dx7Kt3LBv5ebhg0chEzBkSCx93aYWjKKU4f6UMP5zOwOFLV+4I7Xu7arF4SALmD4qHv4e0WjiUUqSXVuD7lIvYm31np2APrQZLBvTFwuR4BHtKu89RSpFZWY016RnYkZN/R/VTN7UaixPisCSpL0I9PSUxACC7thprMi9ic34OTOytlUldnJywKDYed8UloJeXfUssNyu/oRar8zKwoSALBvbWyqRapRLzI+OxLCoRfbylT/5XW+qx5nIa1l/JgM5ya2RZxSgwNyweS3snI8ZLegTw9y6HI+IQACDjZD5eWfEZ9O3X+4p08kl35GkE9fTFa2sfQXBP+37cOWnFeOnBb9DarAch6DTHoCMXwy/IE699fT9CI+z7cV/OrcBzj65BU0M7CEM6bZ5HiDWnxMvHBa9/uAy9+gTZxSguqsXTT65FXV0rb15Jx/m5u2vx2j8XIibWvnXgsopG/O259aioarbJcHZW4fXn56BfAn/Rpc5UVdeCx1/bhGsVDfyM6/+hVinx+l9nYkiS7V0jN6uuqQ2P/WsLCkpqb+RDdCZCACeFAq/+YQrG9uevC9GZGtv0eOyzbcgsqrTBIFAwBC/dPQHTB9reNXKzWvVGPPbdDqRcKRVkdCx9PDdnLBYOsb1r5GbpTGb8dcMuHC0ossmgoHhq4kisGGLfVnejxYK/7dqH3XkFggzF9RyKx4YPwcND+WvydCYTy+LZIwewMTebN0+ig8FSiof6DcDfho6wa9mI5Ti8fPYQvstNF8W4JzoJLw4eZ9eyEUcp3rl4FJ
/lnLHBYMBSDvPD++L1gVNk11/5PcrhiDiEc/uz8Mq9n4FSKqrrLaNg4Oymwb92/U1wueZmZZwpxPP3fQWO40TVIWEUDNQaJ7y39k8IixLnKORkluLvD30Li5kVySBwUirx9mcrEBXPX2XyZhVersbjj6yC0WgRx2AIGIbBP99djMQkcY7CtdJ6/OmJNdDrTKIKcDGEgBDgjRfnYvCAXqIYFTXNWPnsD2hp1YtiEAIQELzyl2kYO6SPKEZtYxtWvPoD6pvbRTMoBV64fxJmDBe3Y6qhVYfl761FZUOLXcXKnl4wBotHJYqybdEbsPzj9bha02BXI7jHpw7H/WMGiLLVmcxY/s0G5FTW2MV4aORA/GWcuF1iRosFK9ZvRmpZhV2M5cmJeH7caFHOiJllsXLXVhy/VmxXj5cF0bF4a9wkUQyW4/DnI9uxp7hANIMAmNKzNz4eO0uUw0Mpxd/P7sLGokyRBCtjVHAvfD5ygeQclt+r7Jm/HVf2N6iinDK8vvILUI4T5YQA1gqsulYD/rHwP2hrtt3RtKyoFi//4VuwIp2QDobRYMKz936BJhGdhasrm/DcI2tEOyFWBoXZbMFzj65BTVWzTfvGhjb8/ckfRTshgHXJgOM4PP/MepSXNdi0b2014Iln14t2QgDrkxtHKZ5/fRuuFNXatNcZTHjs1Y2inRDA6iBQSvHSf3Yjp/DO5a7bZTJb8Mh7m0U7IR0MAHjtm/1Izbtzmeh2WVgOj/53q91OCAC8teEITmQXiXhPFH/5bqfdTggA/Gv3Sey9mC+K8eTG3XY7IQDw3+Mp2JQurn38P/YcwIWycrsZq1IzsCo1Q5TtKyeO2O2EAMCG3Gx8fOGcKNt3Uk9gtx1OCGAN8O4uLsDbF46Lsv8k+7RdTkgH41jFFbycut+u1zlknxyOyG9QP7y/B6yFfysmnziWQ11FE/b9cNqm7YbPj8Bksoh2dP7HoGhuaMeuH87YtN38/Rno9Ua7q75yHEV7uwFbfzxr03brllS0tuglMUwmC9avtc3Ysfci6urb7J5YKbVuqV6zzva12ns8B+XVTfYzYHV6vtpgm3HwfAGulNdJK6lOgU83n7JpdiK7CJeuVUtjEOA/207aLMGfcqUUKVdK7Z68O/Sv3Sdtfl+yyqtxOP+qZMb7B04KdgQGgMt19diWkye5k/K/Tpy+sdWZT2Utzfg+66LkbrcfnT+HVpNR0KZO347Ps1IkEoAvss6jTt8uaNNiMuCjbNvfv85EAfxwOQ3l7bYfbBySJocj8htTfVUTTu/J4K0BYkuUUmz/6ig4gTbcrc06HN6WLpnBcRQ71pyGxczfhtugN2Hv1jTeOiM2GSzFni13bj++WWYzix1b0ySXt2dZiv37stDWahCw4bBlR5rk/jQsR3HsZD4aGvlvtJRSbNidLrmUOsdRnEkvQmWN8I123cF0WWXOL16uwJXyOkG7H49KZ1AKXK6ow6VrVYJ235/MkNUor7yhBecKSwRtfkiRx6hv1+FIwVVhRnqmrGZ8bSYT9uQXCNr8eClTVmsGE2vBlrwcQZt1BVmSnSnAWqZ/fUGWoM3WokswscJOl5AIIfixMF3y6x0SlsMR+Y1p7/enO6+yZIdqyhqQfiyP9/jBzRfAWuR1pmxuaMeZg/zh5yN7swSdCDHStRtx7AA/4+SJfLTcVvvDXlnMLA7s478Jnk8rQk1dqywGpcCu/fwh5Yt55bhW0SDrY2cYgq0H+Rn5JTXIKaqW/IQPWOt4bDp8kff4tZpGpBRIj1R0MNYd52dUN7fhaM5V2c34fjzNz2jU6bEzK18WgyEEa85l8B5vN5mwMfOS7GZ836XyT64mlsX3ly7K+jwA4NuL6byOOMtx+C4nDZyMby8FxXc5aWB5Hp4opfi24Lzk8QGrI/395bQ7dgo51DVyOCK/MWWfK5TVwA6wdsvNTrnCz0i9s2iQFEZOWjE/42KJrK7DAKBQMMjJ4H9yzc4qg0Ip9ydAcOlSGe
/RrJxy2efBUYpMAUZmXrmsp2/AGhVJz+HP4bhYUC65F1GHWI4iNY//PC5evXPLtBTGhcsC16qkUvbEynIUF67yM3Iqa+7YPmuvOEqRXsJ/PQpq66G3SH/C72BcquJ/r8VNjWg2Ci+r2BIFUNTUyLs8U6VrQ7XOdr6YLQmN02I2oLi1Ue7zGZpNBlxrs50T5pD9+lkcEaPRiMTERBBCkJGR8XMgf7dqEQjhixUhRDBhtbWpXfJSww1RoL2Ff0mjvdUgq+swAHAch7Y2fkZbm8HuHJfbRSlFq0BUpa3NKHnJ5GYJRW5a242ywuc3GAJLTK06I5gu2DXQohNg6I2Sl2VuH0fKMXvUbuSP1rUauoZhYlmYeJyNFpkOwi1j8bzfrmQ08zFM/N8Huxk8Y7XYyFHpCoZD8vSzOCJPPfUUgoODfw7U715Si5LdLicn/sJjTnb2GelURPi9KpQK2ZMrIQRKgYqVyi5gAMLXQ6lkJHWfvYMhcK3s7S0jmdEFO/35qpR2MORGK6wM/tuaEN8eCW3l7CoGAN4aGU5duJWU73p15XZVfkbXXSu+Wh9deh6OeiI/iX5yR2TPnj3Yv38/3n333Z8a5RAAn0APu7v03i6O5eDpx19C2svXDYzM5QZKAU8f/kqVXj4udjWh60yEEHh681fc9PKSVo3zZikUjOA4Xp4uspfKGIbAx5v/Wnl7OvOuj4sVIQQ+nvzn4e3uLCvnoUO+AgwfN/uaw/HJW2CcrmJ4CZSK97GzyR2f3DVqXkfEt4sYKoUCrqrOqwT7Osv/fQC4UQK+U4ama84DAHx4xvJSacF0SVwS8NV0zTVx6Fb9pI5IdXU1Vq5cidWrV8PZ2fYXzmg0oqWl5ZY/h+zTqFn9ZS83cJRi+Ix+vMdHTkuQvGPmBoPlMHJaAu/xURPiZC/NsCyHURPj+BljoruEMXpsNO/x0SP6yHZEOI5i3Ch+xqiBkSAyb7SUUkwczs8YkRguGGkQI0KAyYOjeI8Pie4BrUpewzBCCKYN4D+P/uHd4CHQN0eMGEIwvR//ecR3C0CAQL8ZMVIwBDP68p9HpK8Pwrw8ZX3qCoZgWnRv3qhgiLs74v0DZC2XKQjB+LBevH1uvDRaDA0KlbX7R0EIhgaF8jo7GqUTxoVEymIwIOjrHYRuLtIbfTrEr5/MEaGUYsWKFXjooYfQv39/Ua9588034eHhceOve/fuP9Xb+81qyJQEuAs8PdsSo2CQPDoGQT34O7kmj+gNvyDpP0iGIYjp1wM9e/M3qotNDEVomK/kBElCCHpG+CNaoLpqRGQAoqL5m7PZhgCBQR7ol8xfIj0k2Av9k3rIiu54eTpj6OAI3uN+3q4YOTBCVsKqi1aFcUP5y7B7uGoxeUi0LIZKqcDUofxl2J3VKsweGiuLwRBgzlB+51OlVGLRkL6yJldKKRYO5i/1rmAY3D0oQRaD5SiWDOBnEEKwvH+S5PE7GEuTEgVt7umbJGu5jKUUy/vaYMT0k7X7h6UUK2KSBW2W9U6WxeBAcU8fcfOYQ/bLbkfk6aefBiFE8C8vLw8ffvghWltb8cwzz4ge+5lnnkFzc/ONv9JS25UYHbpVTiolpq8YKXni41gOM+8bJWjDMAxmLh8uOb+C4yhmLhcuYU0IwewlgyWnJVBKMXvxYJvvcc68AZIjFgTA7LkDbF7ruTOTJTMYQjB7ehKUNqIR8ycnSV46YRiCWeP7Qm0jGjF/bIJkhoIhmDYsFq42GvktHC6PMSGpt+DSDAAsGBQvafwOxojoMAR7CZesnt8vTrIjomAIkkO7IcLfR9Budmw01EqlpKiIghBE+/uhb5BwO4fpkX3grlZLYjCEINTDA0NDQgXtxodGwE/rIul6MYTAX+uCcaHCbRCGBvREqKunJAYB4K7SYGoof4TKIXmy2xF54oknkJubK/gXHh6Ow4cP48yZM1Cr1VAqlYiIsD7R9e/fH/fcc0+nY6vVari7u9/y55D9WvDwBITFhtidx0
EIMGHxEAwYz/9E2aGZy4Yhpl8PMAr7ftiEIRgxOR4jpthuHjZ5Vj/0G9zLbqeKYQj6D43ExBmJNm3HjIvB8BG97c6rYRiC+L7dMXM2/xJWh4YO7IVJ42Ltju4wDEFkRAAWzx1o0zYpJgRzJyXYPWEoGIIewd64d95gm7YxYYG4Z6q4Piu3MwJ93PHHebb7p4QFeuPPM8T1Wbmd4ePmgifmjrRpG+Tljr/PFHa2OxNDCNy1Gjw3Z6xNW28XZ7w8c7wkhrNKhddmTbBp66ZW4+1pk+zelsoQApVSiXem2e4Do1Yq8cHEqXY/dBBYk0T/PXGazdcqGQYfjpkBAvvyugmsSyb/GTPDZkIqQwg+GDoLSmJ/tggBwb+GzIRa0QVJ+g51qp+s6V1JScktOR4VFRWYNGkSNm7ciEGDBiEkxHZDMkfTO+lqqm3FMwv/jZL8StFP4yNmJOGpT+4TvfOmtVmH5+79EoXZ5eIYBBgwMgrPfbwMKrW4XABduxHPP/Y9sjNKRG0ZJoQgvl8PvPLBXdDaePrukNFoxkvPbcKF83e2mu+UwRD06ROEf76zGK5u4vINzGYWL7+1HSdOXxZlzzAEYT188f4bi+DpIS6hj+U4vP7JXuw9niuaERLoiQ9fWAA/b/7k5JvFcRRvrzmETUfE9exQMAT+3m749KkF6OYnbjmPUop/bzuJbw9eEM3wdnPGF4/OR88A8e3n/3vgLD7ab7u0fQfDXavBFw/OQ1Sw+A7V355Owz/3HQOB7TqDCkLgrFbhq2Vz0TeEf9nydq3NyMTz+w4BIhlqpRJfzJ+NQaHimkICwJa8HDx5cK+1iaYNW4YQODEKfDZtJkb1EN/ZeW9xAR4+sv1GnyVbDIYQfDxmJib3FN/Z+VjFFTx0fCPMlLPJILDeT94dPAOzw2w/nDl0q36R3XeLi4sRFhaG9PR0JCYminqNwxGRJ12bAZ+/sBGHNpy73nvm1o+aMASUo3Bx12Len8Zj0aOT7K4VYTSY8dVbu7B3fQosZov1JnUTpoOhdVFj9j3Dcfcj46Gwc7upyWTBNx8dxK6N52E0Waw39ZsZ1zu8qjVOmLFgAFY8PE5w+3FnYi0cvvvmOLZsOg+93nxjzJsZgHXL8bTpSVj50BioRTpTHeI4itVrz2Dd5hS060wghNz5mRDrTpzJ4+Lw8INj4aztfEcDnyil+GHHBazakoLWdgMYhtzhJBJCwDAEE4ZF4fF7x8DNxb7kTUopNhzKwJfbz6KxVd8pg7G29sW4/pH4291j4eVu/+6ILacv4eOdp1HX0s7LoKAYHd8LzywcC39P+3OjdqXn4YPdJ1HZ1AoFQ+5YFmKuf0bDo3ri+bnjbC7JdKb9OZfxzv4TKG1s7pTR8W9DwkPx4vSx6OnjZTfj6JUivHH4GK42NHbOuN7yvn9IN7wycSx6+/HngPHpdGkJXj5xBAX1dTfG64yRFBiEV0aNQ5y/uC7eN+tCdTlePHMQl+qreRgMWMohzicALw8Zj/4B3exmZDVU4sXz+5BRXyHI6O3hhxeSJ2BoYE+7GQ45HBGHblNLQxv2rz2DvWtOobaiESajGRqtGj36BGHGfaMwYkY/qDTydiu0tehxaEsqdv14FtXl1xkaFbqF+WLG0qEYNT0RGjsn1dulazfi0O6L2LnxPCrLrAyV2gnB3b0xff4AjJ3SF84u4qIgfNLrTThyKAfbt6airLQBRqMZKpUSgYGemD4rCRMmxcPVVd6uC6PRjCMn8rFlZxquldTDYLAy/PzcMHNKAqZMiIe7W+c7AMTKZLbg6LnL2LQ3A4UltTAYzHByUsDP2xUzxsVj+ph4eIuMtPDJYmFxLP0K1h/KQN61auivM3zcXTBjRCzmjIqHrwTn4BYGy+FkdhHWHstAVnEVdCYTlAoFvF21mDk4FvOGxSPQS1w0h08cR3GqoBg/nMpAelEF2k0mKBkGXi5azEiOxoLBfRHiLW+3BKUUZ66W4PuUi0gpKkW7yQyGEH
g6azA9PgqLB/SV5IDczjhfVo7VqRk4VVyCNpMJDCHw0KgxNaoP7krqi0hf4bwTMYz0qkqszsrAkeKraDNZC7u5qzWYEhGJu+MSEOPnL4sBAJm1lViVm44DJYU3qrK6qdSYEBqB5dFJ6OsXJJuR21iNNZfTsLc073rRMwpXJzVGB/fCsshkJPl265I6Q79X/SIdESlyOCI/jSilNn9gJoMZx7ddQMaxXLQ2tYNhGLh7u2LotET0Hx9vs2y5GIbZZMGpvZlIPZaH1iYdCAHcvFwwaFwsBo+PtRk5EcOwmFmcPZqHlBP5aGmyVot189Ci/7DeGDou2mbkRAyDZTmknCnE6RMFaG7SgXIUbu4a9BsQjpFjoqFSy2dwHMWF1CIcP5mP5hY9LBYObm4aJCWEYsyoaGhsOJJiGJRSpF0qxeFT+Whs1sFiYeHmqkF8VDdMHBltMzojlpFRUI4DZ/JR39wOk5mFu4sacRFBmDIsxmYyq1hGVlEV9qTkoba5DSYzCzetGjE9AzBjcAzcbUSAxDAAIKesGjvO56K6uRUGkwVuWjWiuvlj1sAYeAvUGbGHUVBVhy3p2ahsaoXeZIabRo3IAF/MTY6Fn5twTYuOW7stTmFdPTZfzEZ5cwt0JjNc1CpE+PpgfkIsAt2FnTyxjKLGRmzMzkZJcxPaTWa4qlQI8/LEgtg4hHgIO3liGaWtzdiQl4WrzY1oM5ng4qRCD3dPLIyKQ08PYSdPLMMh8XI4Ig5JVmNNCzZ/sh+7vz2O9hY9GAVzo2aIQsmAtXDwDfbCzJVjMPOBsdBIiEC0NLZjy1fHsGvNKbQ26W5lKBiwLAdPXzfMWD4cs+4dCReReRg3q73VgK3fn8aOtefQ1NB+Y9ybGe6ezpi+aCDmLB0KNwnRAb3OhK0bz2PbxvOor2vrlOHqpsH02f0wd9EgeAkUV+OT0WjGth3p2LwtFdU1LbcwOpYrnJ1VmDY5AQvnD4Cvj/3RAbOZxbb9F7FhVxrKq5o6ZWjUTpg+Lg6LZ/ZHoL/90QELy2H70Sys3ZeOa5UNUFwfl15nUI7CyUmBaSNicffUZHQPsD86wHEUO87m4PtDaSgsr4OCYcBRDpT+bxlHqVBg6sAoLJuQjPAg+6MDlFLsTsvD6uNpyCmtufU8CAGFdYliclIfrBiTjN525JPczNiXfRnfnUrDxdJKK4PSG+fRoYmxkbhvRDLiuonPJ7lZhwuu4KtzqThfUs7LGBsZjgeG9Ee/EGmVsY8XF+PL1As4WVICxfXrw1EKhlgr33CUYlTPnniw/wAMlliu4UxFCT7POI+jpUU3PoObGSylGN6tB1Ym9Meo7uJzVhySJ4cj4pAkFeeU4x/z/oWmulabBcsIQxAWE4LXNjwG7wDxE1N5US3+sfRT1FU1gWNtJKQxBN3C/PD66ofgFyx+YqquaMQ//vAtKksbbCbRMgyBf5An3vhsBYJDxU9M9XWteObxH1FcVGuzgBzDEHh5u+Ctf9+NHmHiJ6bmZh2efn4j8gsqbSbRMgyBu5sWb7+xEJER4tfmW9sNeObNrbiYa23iJsRRMARajQrvPj8PcX3ET0w6gwnPfLgTZzOLbSZtKhgClZMS7z4+C/1jhbd93iyj2YJnv96Dw+mFd+T3dMZQKhi88+AMDI8XPzGZLSxeXHcAO1NzwRAimOyoYKylDN5aOhUTEiJFM1iOw2s7j2BdSqYoBqXA63MnYlYSf32W28VRincOn8BXZ1NtM64ff2nyWCxJ5i9AeLsopfj32TP4z9mzneZh3M5gKcUzI0bigeRk0VEJSim+yLyAN84eE834S/JQPJY8xBH5+Blkz/zt6L7rEACgoqgGT05/W5QTAgCUoyjOLcdTM98VbJB3s2orm/Dkgg9RV9Vs0wkBrE+45cV1eHLBh2iqF9ehs6m+DU/e+yUqyxpF7eThOIraqmY8seIL1FWLq+Tb2qLHEw+vRkmxbSekg9HY2I7H/7gKlRWNohh6vQl//ftaFF
yuErWTh+MoWlr1+MuTP+BaSZ0ohtFoxpOvbkJmXjkotd1KhuUodAYTHntxPQquVotiWCwsnnx/G1KyrB2bbZ0Ky1EYTBY89s5mZBaI68bLchz+/sUuHMmwdowWcx4mC4u/fLoN5/L4uzPfLI6jeO7HfdiVZt2RZGvHBctRsCyHJ1ftxJFL/J2sbxalFK9sP4T1KZmiGRyleGbTPuy6mCeKAQBvH7I6IaIY13fJvLj3MNalZ4lm/OfsWfzn7NkbY9hiAMCbJ47jq7Q00YyvslLxxtljdjE+SD2Nf6eK2ynl0M8nhyPiEDiOw4uLP4S+1WBX6XaO5VBxtQbvP/KtTVtKKV79w9doaWy3m1FX1Yy3H1styv7Np9ahvkacM9UhluXQ3KjDq3/9QdQW4Xde34HK8kawIpypDnEsha7dgOefXCeK8a8P96P4Wp1dhdA4jsJgNOPp5zaKKl3/8XfHkHu5ym6G2cLiydc2wWiy3Yb+s02nkZZXald1TkopOI7ir+9tQZuIbrnf7ruAE5lX7eoIbXW8KP766XY0ttp2pNeeysCe9Hy7Cux17CD726pdqGpstWm/NT0HGy5csrsuCAHwzKZ9KK6z7eTuy7uMr8+l2kmw6sU9h5BTVWPT7nhxMf59Vtpk/+bxY7hQXm7T7kJVOV47c1QS44PU0zhWWiTptQ79NHI4Ig4h7UgOSi9XSeq7wrEcTu9KR5WNp/DctGJcziyV1KOGYzmknyzAtctVgnZXC6pw8XyRZEZ+VhkKLgnfBCvKGnDmRIGkSqksS3GtuA7pqcWCdvX1bTh0JEcSg+MoqqqbceZcoaBdS5sBOw5mSSrfzXEUDU06HDmdL2inN5ix4UCGpOq4HKVo0xmx95RwTRSzhcX3h9LsnrwBqzNiMJmx7XS28HvhKL49Iq6myR0MWPNjNpwRrrtCKcVXJy5IqmBKAVBQ/HAuw6btl2cuSK74Sgiw+oIIRuoFyX1dGELwTbrtqMiXmdIZCkLwZaa0z9Ohn0YOR8Qh7PjyiKxuugzDYM93x4UZq07a3GkjyFAw2LXmlKDNrnUpshgKBYMda88KM7aly+obo1Aw2L5R+Ca4a+9FyeMD1nyRzduEb+Z7j2TDwrLSGYRg425hxoGzedAbzZIZALB+f7pgpONIRiGa2vSSx6cUWHskQ7B78an8YlQ1iVsa7EwcpVh/OhNmC//1TrtWgau1DZIcKsC6TLPpQjZ0Jv7rnVNVg4sVVZJ7x7AcxfZLuWjWG3htipsacbKkRHJfF5ZS7CssRHUb//Wubm/D/uJCWYwTZddwrblJ0usd6no5HJHfuRqqm5GyP0tWN12O5bDrm2O8E0Z7ix4ndmXI6nTLsRz2rz8HM89ygMloxoHtabIYLMvh6J5M6No7Xw7gOIpd29JkddNlWQ6nTxagsaGd12b7rgxZDI6jSM+4hqqqZl6bLfsybCdsCDEoRV5hNa5eq+W12Xw4U3LTQsD69q5VNuJSYSWvzaYTWbKaywFATVMbUvL4+1ptOpMlqxEfADTrDDiafZX3+MZU+Qy92Yx9lwr4GRezZTMsLIftl/jzUTZcypbV5bZDm3JyeI9tLBCOYImRghCsyxOf8+LQTyuHI/I7V3VJnV1r63xqa9JB19r5k1JdVRNYi3QHoUNGvRnNDZ0/KTXWt8FktJ2zYEsWC4f6ms6TVtvaDGhvs52zYEuUo6jhcRIsFhb1IhNzbamiqon3WGV1sxw/5IbKq/mdndLqJslNC29WWU0T77Fr1Y2yusPeYNTyM4pqGiU34uuQgiEoq+e/VkW18hlKhkFZAz+juKErzoNBaVMT7/FrTU2yPw+GEJQIRCuuNTfKnrgogGst/AyHfl45HJHfufQ8T/+Sxmrr3BHRt5u6kNH5+9Xruo7BFxExdCVDx3MeenlLGbcyOn+/FpaDRUbk6BaGnv+aGAWWCexj8I+j7wKGgiFoN/Cfh84k/3MnhEBn5B+nXeCYPWoXeK9txq75rbcJLL
e1m0yyHVyOUsHzaDebIffby1GKNnPX3fsckieHI/I7l1ZmSfSb5cxTllzrIq+0+62MzoubyS3tLmYsrXMXngcfQyuv1P7NcuF5v0oFAydl1/z0XQSqrWpUXXMuQgxnO/v9dCaWo3AVYLio5X/ulFI4C4zjquma75aLin8cN7X83wgB4CpwzV3VKtlLZQwhgufh4uQE+3vo3slwU3XdPcMheXI4Ir9zBfX0A5G5bgwAHr5u0Lp2/sP2DfIS3dFXSFoXNTy8O+9d4unjCk0XTOJOTgr4BnRefMfFVQM3d3k9YABrMmlgkGenx5RKBfz9uqZ4XzeBInAhQfL6mnSouwCjR7B3lxSOChV4r+FBPrKSh28w/PkZvQJ9ZOdWsBxFTwFGhL98hoXj0MNX4Dx8vbuEEeYt8Hl4ecl0EazRinAvAYanNziZcRcCINxG2XeHfj45HJHfuTz93DF0aiIUMp6QGYZg+r2jeCcdFzcNRs/qJ3vXzOTFg3kdGpVKiYlzkmXvmhk3IxFann4nDEMwfXY/2btmRoyOgocnf0n5WTOSZE3gDEMwsH8Y/P35HZrZkxMlj9/BiOsTjB4h/NVo541LkJV/RAjQK8QH0WH8lWLnjegrK7GXAAj2cUf/3vzlxRcMiZedW+HtqsXw6J68x+f3l89wUaswMZa/iuuCxDjZDJVSgemxUfyM2DjZOSIEwNyYWN7j83rHdomzszAqXuYoDnWVHI6IQ5jxwBhZyaQUwOTlIwRtpi8bLnvXzNS7hwraTFswUPaumWkLBwkzZiXJ3jUzY15/QZspk+LByPhlchzF7Bn9BG0mj4qBSiXcjM8WY97UJEGbcQN7Cy6r2BKlwIIJwk7ZyL7h8HaT0UWYAItGJwo6lwMjQtHN213y5McQgkXDEuCk4I8K9g0JRO8AX8m7jBSEYEH/eGgEmjhG+vmiX0iw5KUTBSGYHR8DNw3/kkaIhwdG9uwpq8bHlMje8HXm/0z9nF0wJby3LMbo7mEIcZPXUdmhrpPDEXEICSOiEBYbIimawDAEI2f3h183b0G7PgmhiEkOk1SvhFEwGDAmGiHhwu3Fe/TyR/KwSDAK+29QjIJBXHJPRMYI91AJCPLEyLHRkqIiCgVBr8gA9E0U7qHi5emCSRPiJUVFFAxBSDcvDBwQLmjn4qzGnEkJkiY+BUPg7+OKUYOFe6ioVUosntRP0gTOMASeblpMGsr/9A1Y812WT0iWQLiei6BRYcYQ4T4tDENw39gBkhYDGGKNIswbLPz0TQjBAyMHSNplRK6/xyWD+tq0fXBIf1kRi2X9E23arEzuL7nGB0cp7k8WdqIB4IG+0s+DpRQrEwZIeq1DP40cjohDIITg5R//DFcvF7scBUbBoEdUMB7713JR9s9+ugLe/u52OTyMgkFgqDf+9sFSUfZP/3MBArt52X0ePn5ueO7dxaLsn3hmOkJ7+trljCgUBO4eznj1nUWiHIxH/zQefXoH2sdgCJyd1fjnawtEXeOHlo5EQkyIXQyGIVCplHj3+flQCTx9d+i+2YMxJKGnXU/hDCFwUijwwd/mwllEEufS8cmYkNzbLqeKIQQMQ/Dvh2fD09V23s/8IfGYM9C+JQFy/T8/uG8m/D06z226WdMTorB8qHCUqXMG8N6iaeju7WnTfmzvXnh4uHDUj0//nDEJvf19bdoNDQ3F0yOEI6R8emXsOCQEBtm0S/QPwqsjxktiPDNoJIZ2E99Q0aGfXg5HxCEAgH+ID97f83f4BnvZnpiIdf2+d1JPvLX9Sd6dLLfL298d7254BIGhPrYTZInVQerZJxDvrHsEbh7iwu9uHs5455uV6NHLX9TERAhBt1AfvL9qJTx9bE8WgHXHy7sfLUNkVBDI9WshyGAI/AI88K//3gM/gbyNm6VWO+GdNxYiPi7k+vsUtmcYAi8vF/zn/bsFk1RvlpOTAm//Yy4GJva8zhCGMAyBu6sGH7+2GOGhtickwBqxePPRGRiV3Ms6hg
2GgiFw0arw8TPzBXNDbn9fr907GdMGRYtmaFRKfPzoXCRFdBPFIITg+QXjsWBoX9EMlVKBD++fhaF9eohiAMBTk0fhvuHWCI+tpQcFQ6BgGLy/eBrGx0SIZjw6cggeGTFYNIMhBG/NmIRZ8dGiGSuT++OZESPFMQgBAfDq2HG4O0F8h9+lMYl4dfh4EJEMAPjH4FF40BEN+cWJ0K6oZvUTyZ42wg51jVob27H9yyPY8eURNNW2QKFUgHIcQAgIAVgLh269AjDrwbGYvGwEVBr7d6q0t+ix6/vT2P7dCdRXNUOhZG50sSUMAWvhENDdG7PuGYEpdw+FRkKegUFnwq4NKdj+41lUVzTdyiAELMvBL9ADMxcPxtSFA+DiKs6ZulkmowW7t6dj64bzKC9rgELB3EjO7GB4+7hi5rz+mDk3WdKOG7OZxZ79Wdi09QJKSuo7ZXh6OGPm9ETMmZkMT4EkWD5ZWA57j2Zjw840XLlW+z8Gvf55sBzcXDWYPTEB86YmwZdn55KQOI5i35lcrN+fjpyr1VAwDAAKjlodCZbl4KJVYfaYeCya2A8BPm52MyilOJBagB+PZODilYobO0QovX6tOA5alRNmD4vFkrFJCPHzlMQ4mn0Vq4+l4cKVslsYzHWGykmJWQNisHRkP8GdMkI6XlCE1afTcbrwmtXpIdZlC4Yw4DgOTgoFZiRGY/nQJEQGiHMKb9fpohJ8l5KGo4VFdzIohYIQTIvtg3sH9kNMoPCyKJ9SysrwTVoaDly1diBmYF0aUTBWBgEwJbI37k/uJyoS0pnSqyvxddYF7L5aAAqAAbEyCG7UG5nQMwL3xydjYFCIJIZD9sue+dvhiDjUqVgLi7N7LyL9WC5aG9vBKBh4eLti6PQkxA/tLfj0zHEcTHoz1M4qQTuW5ZB6LBcXjuahtUkHEMDdywWDx8ciYWgkGIGMTY7jYDKYodI42bRLP3sF547no7VJB0oBd09n9B8eieShkYJLGJRSGPVmqDRKQQalFJnp13DqeAFamnTgKIWbuxb9+odh8LBIwR1JlFIYDWao1E6CkShKKS7llOP4yXw0N+vBshxcXTVISgjF8KGRUCr5EyEppTAYzFCplDbPN7ewCodP5aOxWQeLhYWbqwZ9o0Mwekik4FIMpRRGkwVOSoXNZaH84hocOJuH+mYdTGYWbi5qxEcEYdyg3oK1RzoYSqUCShuMwvI67EnJQ11zOwxmC9yc1YjtEYDJA6KgFaiDYQ+jqKYBOy/koqa5zcrQqNGnmx+mJUfBVSChk1IKo5mFQkEEE1gBoKS+CdsyclDZ1Aq92Qx3jRqRAb6YkRgND62w82w0W0CINTIjpPLmFmzNzEFZUwt0ZhNc1Wr08vXG7PgYeDsLO89Gy3WGjfOoamvFppwclDQ1od1khqtahTBPL8yNiYGfi4tNBgColcJLgbW6dmwqyEZRcyPaTEa4OKnQw90T8/rEItDFfsfWIXlyOCIO/exqrG7Gnu+OYffXR1BX3mB9QmQIgsL9MWPleEy4exhcPYVvOLbU3NCG/WvPYveaU6gubQClFIQQBHT3xrTlwzFx0WC4e8ljtDbrcXBrKnb+eBaVJVYGCOAX6IGpiwZh8vwBopdw+NTeZsSh3RexfcN5lJfUg+MoCAG8fd0wdU4/TJmdDB8/eTdOvd6Ew4dysHVrKoqLa28wPD1dMHVqAqZNT0RAgLxdA0ajGYdP5mPTzjQUFtXc2E3k4a7F1HFxmDUlEcGBnrIYJrMFR85dxob96ci7WnVj+6m7iwZTRsZg7vgEhAYJJ0rbksXC4mjGFaw7lI6sq5U3qs66atWYMjgK80cnoFc3aVGHGwyWw/Hsq/jxeAbSrpTfYLhoVJjcrzcWDk9AVIi0qEOHWI7DyYJi/HA6A+eulMJ8neGscsLE+EgsHpyA+O6BshgcpThztQRrzmfg5NVrMF1v5KdxUmJCnwjcNSABSSFBsrafU0pxrrQMqzMycORq0S2OyJ
jwMCxLTMSg7iFdUqPGoZ9ODkfEoZ9NulY9Pn5iNQ6vOwNK6Y3ljxuyRnyhVDlh+gNjcP+ri+Bk57ZRg96Ez17chIMbUsBauE5rUxBCoFAymLhoMB58cQ7Udi7nmEwWfPXObuxZnwKLmbXukLj9VBgCQgjGzkjEn56baXdVWtbC4ZtPD2HbuhSYTJZOm84xDAEFMHJcDB59ZjpcRebfdIjjKFavOon168/BYDCDENyxE4NhCCilGDq0N/76xBS7l3Mopfh+UwrWbDgLnd4EQsgdnwnDEHAcxeDkMDz158nwtdN5o5Ri/d50fLX5NFrbjWAIuWOXhIIhYDmK/rHd8Y8HJyNIQiG4Lccz8fHmU2hq0wsykiK74bl7JqJHoP1LLTvP5+JfW4+jrlUnyIjvEYgXl0xAZLD9Ts/+rMv4586jqG5ug4KQO3atdDCigv3wytwJiA0Rl39zs44WXMWre4+grKlFkBHp54NXpo9Hv+7CO9A606lrJXjh4EEUNzZ1zrj+b2FeXnhlwjgMDXUknf5S5XBEHPpZ1FTTgr9Pfwsl+RWiuvcShiB+WB+8uvGv0IicxNuadXj2rk9QmFUqqn4HYQh6J4Ti9e//BBeRORn6diNeeOg7ZKcWiyrAxTAEPSID8OY3D8BDZATGZLTg5b+tRerZK6K2aDIMQXB3b7z96T2ioyMWC4vXXt2GEyfyRdkzDIG/vzvee/8uBIqMXLAsh3/+Zy/2HRHXAVXBEHh5uuCD1xYhNERc5IJSine/OYTNBy+KZri5aPDhswsQEeonmvHhphNYtfeCaIZWrcJHj89FXLj4XIbP9p7FJ7vPiLJlCIHaSYmPH5qN5AjxuQzfnUjD27uOiWYoFQw+XD4Tw3v3FM1Yl5qJF3cdAmC7aTNDrAmu/54/DeOjxCfRbsvJxZN79oKC2vyNEFgfPt6dMhmzYsQn0Tr088me+duxa8YhSTLojHhu3nuinRDA2nX20ql8vH7Px6IKj5mMZrx83xcovFQmuogY5SguZ5bilQe+hNlkuxsva2Hx+uM/ICdNnBMCWKMO1wpr8MIfvoXRYLvpGsdRvPXCZqSevSq6TgTHUVSUNeAfj6zhbcJ3syileP+9PTh5UpwT0sGoqWnBU39bi5YWvajXfPT1EdFOCGAtbd7Q1I7Hn1+PhsZ2Ua/5fMMp0U5IB6Ol3YBH39iAqrrOOyffru/2nhfthHQwdAYT/vyvzSipbhT1mnUnLop2QgDrsofRbMHD/92KyxV1ol6zLTVHtBPSwTBbWDyyajsulVWJes3+3Mt4cdchUNh2QjoYLMfhsY27cOFamSjGsaIiPLlnLzhq2wnB9ffBUYon9+zF8aJiUQyHfrlyOCIOSdLWT/aj8OI10U5IhziOImXvRRzdYPsGvfeHM8hOuWI/g+WQeeYy9q21zTi8MwOpJwrsrpbKsRwuXyrH9jWnbdqePpqHk4dz7S53zrEUJUW12LDqlE3b1NRi7NuXZXdBLI6jqKxswupVJ23aZudVYNOONPsA1xkNjW34fPVxm7ZXSuvw7dZzkhgtbQb8Z81Rm7bltc34eJPt872DQSn0RhPe/v6wTdu6lna8temIJIbRbMEraw/atG3RG/DSFtt2t4vCmrPyj/X7bH4n9WYzntm+XxKDoxR/27rXZuExM8viyd17JbUDoJTiid17YGZZu1/r0C9HDkfEIbvFshy2f3bwznwQkSIMwbZPhW+glFJs+1r8k94dDADbvz5u8+a2ffVpyU3/KKXY8f0Zm9GdbetTJPen4TiKnZtTYTYL32i3bU2V3GeH4yj27LkIvV64Ff2W3enXt93aL5ajOHAsF61tBkG7zQcyJDdmYzmKYxcKUdfYJmi36dhFyZ85y1GczbmGspomQbvNZy5JqpIKWCfwzOJKFJTXCtptS82B2SJtAuYoxZWaBmSUVAra7bqUjzajSVJVWY5SVDS34tSVa4J2BwqvoEGvl8SgABr0ehwsvCLh1Q79UuRwRB
yyW+f3XUR9ZZPk11OOIj/1Kgov8t+gss4UoqKoVvLNnFKgtLAa2SlXeW0KLpWhMKdCskMFALVVzUg9WcB7vKS4DpmpxbL607Q06XD6aB7v8ZqaFpw5c1lWnx293owjR3J5jze16HDoRB5YTjrDYmGx59Al3uPtOiN2Hc+W15iNAtuPZPEeNpot2HwsS9bnwTAEm45l8h63sBzWHs+QVUpdwRCsP8nPoJRizekMyeN3MH48w78ERinFqpR0WQ3mFIRgzfkMQZtVaemS+98A1pyU79LTJb/eof9/ORwRh+zW8S0pknrG3CyFksGJLed5j5/YlS6rI/ANxk7+G9TJfZdkdesFrN10T+7jn1xPHs6R3aaeYQiOH+TPy7DmhchjEAIcOZzDe/x0yhVZjg5gdQ4PneB3qM5lXYNRRF6PkDhKsf80PyMtvwxtets5N4IMjmLfOX7GpWtVqG/VyWKwHMWeNH5GQVUdyhqaJUURbmbsy+JflixrakF+dZ08BqU4VlgEg7nzz7VBp8f58nJZThtHKc6XlaNBJy7PyaFfnhyOiEN2q7Gmxe68jTtF0CyQWNhU1waOlbehi3IUzQ38YfomgWNixbKc4DjNDe2yHRGOo2ioEziPRh0UEhr93SxKgYYG/mTSxmad5CWTmyWUsNrYopPd3r1jHF6+TAehQ01t/JNeQ1vXMNr0Jt4IVFcxLCyHdlPnS3IN7V3DoBRo0nd+vep1XcMAgAZ9143l0M8rhyPikN1ibeQriBOFRWB9m69eiF0ESmEReK+chQOV9bxnlcUkcB4s1wUECF8r2U7hdYZAwh/LcrYb3vxCGELXo6uuldASVVcxhMayyFm+un0sXkbXnYeZh8HSrmN05ft16OeVwxFxyG65e7tKTva7IULgJlBp1dVTK3vZhFEwgs3yXD20sqszEobATaAgmKu7VtyeRxvyEGK4amTlPHTIXaDuiquLGlwX3OjdBXr6uDqrZTufHePwyU3gmD1y0f70DCelgresvocA31658ZSjt1VC3h7xvV93ddcxunIsh35eORwRh+xW/PA+sicM1swiflgffsbgCNlPlqyFQ9ygXvyM/mFgLTLzHjiK+P49+Rn9esg+D0II+ibzMxISQ2U7IgxD0C+Jv0tsUnyo5MThDikYguSEnrzHE6PkNyRTMAQD4virbcb3Cpa9VKZgCAZEdec9HtM9AE42+ruIYfQX6AwcGegLF4GeOWLEEIK+3QN5e+r08Pa02WvGlgiACD9vuKk7d0QC3VzRzV1+H5hu7u4IdJPXesGh/z85HBGH7Nb4JcOgknkT9A70wKApibzHR05PgrOd5c1vl6uHFiOmJ/EeHzw2Gp4+8nrTqDVOGDurH+/xpAHhCAz2lJVLqlAQTJrJfx7R0cEIC/OTtapBKTB9Bj8jLNQX8dHdZE3iLEcxW+AzD/b3wOCEnrIZ8yfyn4evhwvG9ouUle/CchQLxybyHnd31mD6gGjZjCUj+c9Dq3LCvAHxshgcpVg6jJ/hpFBgSf++sna0UADLBybxRh4ZQrAsKUlWbhABsDwpUdb7dOj/Vw5HxCG75eLhjPF3DZe8dEIYgpl/mACFwFOjWqvClLuGSt6dwygIpi4dBpWG32FSKBWYvmSI5ImPUTCYODcZzgLl6hmGYNaigZJvtIyCYNTEOMGlGUII5sztLzliwTAEQ4dGwM9Gr5a50/tJjrwwDEFSfKjNMu8LJiZJZhACRIUFoE+YcB+VRWMTJW8RJgBCA7zQr7dw9GbR8L6ytiH7e7hieExPYcYgeQwPrQYT4oRLsC/sFy95fADQOjlhenyUoM38uFjJ9WkAQMkwmBcXK/n1Dv3/y+GIOCRJi/46DRpXjd2TuELBwCfIE9PuH2PTds7KMXD10IKxc0cIo2Dg7uWCWfeNsmk7/a7B8PJzs9vhYRgCZxcV5t030qbtpJn9EBjsZTeDEAKVSokl946waTt+fCx69PC1e/cMIdZtzsvvsc0YNSQSfS
IC7H4K7+gLsnLZcJu2gxPCkBgVIsk5JCD40xLb55EY2Q3D4ntKeoKmAB5bMNJmblF09wBM6tdbcg7SX2ePsDk59/TzwvwBcZIjYY9PGQ6VUrgBZaC7G1YM4o/42dJjo4fARSXcgNJLq8XDgwdJZvxp8CB4aeUtITn0/yuHI+KQJAX29MOrGx+HUqUUPcEyCgbO7lq8uf0puHvbXs/1CfTA69//CSqNyi6GRqvCa9//Cd4i2tx7eLng9S/vg7OLPQwCJ5USr/x3BQK62e7G6uKqxpsfLYO7u/gEXIYhUCoZvPzeEnTvabsbq1rthLfeXgRvb1fRzgjDEDAMgxdfnIOICNvdWJVKBd5+YR4C/T1EOwrE6oXg+SemIS6KP+fh5vf09hOz0DPY2z4GgGdWTsSAOP48l//ZE7z5h+noE+pvtzPy5JIxGJXIn3d0s169exKSwoPtZjw6fRimJAtHETr03OyxGBbZw+6I28rRA7BgoLhox5Pjh2NidITdjKUDErFisDgn5pEhgzE3NsZOAjAvNgaPDBls9+sc+mXJ4Yg4JFmxQ3rj/YPPwcPXmmzGN3F0TPBBPf3w4fGXENpHfHvwiPju+GD7X+ET4H59LGGGb5An/rXjr+gVKz7xsUdEAP69/mEEXncqeBnXz8/T2xXv/fAQogWSO29XUIgXPly1EiE9fAQZHU/Qbu5avPv5CiQOCBPN8PNzxyefrkB4uP8t7/dOhvW/nZ1VeOfdJRgyJFI0w8vTBf99dylir3+GfNGRDoZG7YS3XpiLscPFTawA4OaiwWcvLUFyTHebDEIAlZMSb/5lJqaPjhPNcNao8PlTCzG8b5ggo4PjpFTglfsnY/E4/pyK26V2UuK/f5qLiUm9bTIYQqBkGDy/aBzunzhQNMNJocBH98zCnP6xohgMIXhq2ij8ZbLt6FSHFAyDD+ZNw9KBidb/L+BYMYSAAHhs9FA8N3m06IgQIQRvTZ6EhwYOALHBUFxnPDRwAP45eZLsnW8O/f+L0K7YL/cTyZ42wg79/8lkNOPktgvY9ukB5J2/s+dD0phYzHpoPAZOTpScV2IxszizLxPbvz6OSyl3MuIHR2DmvSMxeGI8lE7SdiywFhYpx/Kxbc1pXDx7JyMqMRSzlg7B0AlxUKmEQ9q8DJZD6tkr2LY+BRfOFN6xtTcyOgizFg3CyHExUAvktwiJ4yjS0oqxbWsqzpy5fEfuSFi4H+bOHYAxY6Kh1QqHzflEKcXF7DJs3pWG42cu35HXERrijfnT+2Hi6Fg4O0tnZF2uwKb9GTh0ruCO3UfdAjywcFI/TBkRAzcX6YnN2UVV2HAkA/vO5d1R7yLIxx2LxiZi+rBYeLpKD//nldVg/clM7EjJgem2mjD+Hq5YPDIBswfHwceNPxfIlgqr67D2bCa2XMi+o5Kpt4sWS4YkYv7AOPi7S99dUlTfiLUXMrEhPQvtpls7T3tqNVjSPwGL+sUjyEP6TpiSpib8eDETP2ZmodV4axVcN7UaS/rGY0lCX4R6ekpmOPTTy5752+GIONSlKi2oRNW1WuhbDXDx0KJbRCACe/jx2rc365B9Oh8t9W0AIfDwdUPc8D7QCkwsFUW1qLxWB12bAc6uGgT19EVwT36Gvt2A7HNX0HK9AqqblwtiBvaCixv/xFJV1oDy4jro2ozQuqgQGOKNkDB+hkFvQvb5IjQ3toNyFG6ezoju10OwjklNVTNKi+ugazdCo3GCf5AneoTzM0xGM7LSS9Dc2A7WwsHNQ4vo+BB4ePHv/KmtbcW1a3VobzdCo1bCz9/9+g6bzp8izWYWlzJL0djYDouFhZubBlHRwfASWEqrb2xD0bU6tLUboVYp4evjiogwf16GheWQlV2GhoZ2mM0s3NzU6BMZCF8f/smrsUWHwpJatLYboXJSwNfTFX0EGCzLIaugAnUNbTCZWbi5qBHZ0x+BAgm5zW165JfWolVngJNCAR8PF0T3COCNLHEcxaUrlahubIXRaI
GrsxqRoX7o5se/JNiqNyKntBqtOiOUCgbers6I7RHAmw9CKcWl4ipUNrTAYLLAVatGr2Af9PDnXxJsN5qQXVaNZr0BCoaBl4sWcSEBcFJ07qBTSpFTVo2yhhboTWa4alQI9/dGeIAPL0NvNiOrvBrNBgMIIfDUatC3WyBUAoy8qlqUNDRBZzLDRaVCT18v9A7gX3Y0Wiy4WFmFJoO1UaKnRoOEoECobeS1OPTLkMMRcegXrysXr2Hn5wdxYPVxmAy3PllpXNSYfO9oTH9wPEJF5BXw6Vp+JXZ9exz7fzwN422dZVUaJ4xfNBjTV4xCWIx0RllRLXb/cAb7NqRA13br05uTSoExs/phxt1DEREnvUZGVXkjdm28gN2bLqCt9dbutUolg1GT4jFj4QBExYdIDlPXVLdg14407NiahpaWW8txKxQEI0ZFYdac/ojr210yo76hDbv2ZmLLjjQ0Nt1ajpsQguFDIjBnRj8kJYRKZjQ267DzcBY27c1A7W2l9wkBhiSFY97kRAyUsU24uU2PnSeyse5gBqrq72xTMCi2BxaOT8TQhDDJu0Fa9UbsPJeDH49moLS26Y7j/SNDsHh0IkbF9+KtA2JL7QYTdqXl4vuTGbha03DH8cQeQbhreBLGx0dIrouiN5mxKysfa86mI7+67o7jccEBWDo4EZNje0PNU7zNoV+nfjGOyK5du/DKK68gMzMTGo0Go0aNwtatW0W/3uGI/PZkMVvw4aPfYM9XR6BQMrwFxRgFA47lMP/xaXjgzSVg7LihsyyHL1/ahK2fH74xTmdSKBiwLIepy0fgT28uEtxOfLsopVj1r31Y+8khUYyxs/vhL28sgJMdSzqUUqz7+gS++fgQGEJ4t7V2MIaNjcbfX59n95LOlo3n8elHBwDAJmPAwHA8/8pcONtZPXTXvky8/+E+UCrEIGBZioS4ELz2wly42VlH5sDJXLz+8T5YWP72AAxjvY7REYF45+k58BKIWHWmY2mFeO7T3TBZLLzbpTsYESG++PcTc+HnZd9SyJnca3jyix3QG60OemeYDkYPfy988uc5CPaxnZh9sy5cKcOj32xHq8EIwscgBBylCPZyx39XzkGYv/DW69uVVV6FP6zeikadHoSg0+vVwfBzc8EXy+agTyB/RNChX5d+EY7Ipk2bsHLlSrzxxhsYO3YsLBYLLl26hIULF4oew+GI/LbEWli8vOBfOLc73a7KrOPuGo6/ff2QKGeE4zi88/C3OLrlvOjS6oQAgycn4NmvHhSVw0IpxX+e24i961LEAWB94k8cGoFXvrxfdA7L5+/vw6bVp0UzGIYgqm8I/vnpPaKdkVXfHMeqb07YxQjr5Y8PPlwOrcjcj3WbUvDpl0ftYoR088LH7y0V7YxsPXAR73x+kHdS7YwR4OuOz19fAm+BVgM3a8/pHLz4+V7RDAVD4O3ujK9fuAsB3uJyJg5nFOJvX+4EQCGmRIiCIXBz1mDVk4vR3c9TFON0/jU8/NVWcJSK6nqrYAi0Kies/vMiRATa3sEFABeKy3D/qs2wsJw4BiFQKZVYff8CxAbb3sHl0C9f9szfP8muGYvFgsceewzvvPMOHnroIfTu3RsxMTF2OSEO/fb02VPf4+zuNLvLwx/64STWvLZZlO2at3fi6GbxTghgfVI7s/civnpZHGPjF0ftckKsDIqM04X46EVxjB3rU+xyQgBrpCE3swzvvrhFlP2BfVl2OSEdjKIrNXjlxc2iPscTpwrsckI6GGXljXj2lc2iipudu1iMd784CED8x85xFDV1LXjyzc2CDQU7lFFQjpe/3GcXg+UoGlp0eOTdTTDcltjZmXJLqvH017tAqTgnpIPRqjPgjx9uQpveaNP+SlU9Hvt2O1gqzkHoYOhNZjz4+WY0tfN3He5QWWMz/vj9NtFOCACwlMJoseCBVZtR0yK/K7ZDvy79JI5IWloaysvLwTAMkpKSEBQUhClTpuDSpUuCrzMajWhpabnlz6
HfhurKG7Dtk32SG8Cte2c7WhuFb1AtDW3Y8NF+aQAKbPvyMBqqmwXN9O1GfP/hAWkISrFvQwoqrt25Vn6zTCYLvv34kDQGR3F8fzauFlQJ2rEshy8/OyyJwXEU589dQc6lcuH3Qik++/qYZEbmpTJcSC+2afvZ9/Y5Ux1iOYr8qzU40clOrzsYm09J+u6yHEVxRQMOniuwzdh9Fhy1vx80y1FU1Ldgx7kcm7ZfHj4PM8vaXYWX5SjqW3XYcDbLpu3Xp1KhN5tFOyEd4ihFi96INecy7HtzDv3q9ZM4IlevXgUAvPTSS3juueewc+dOeHl5YfTo0WhouDMpqkNvvvkmPDw8bvx1787fWMqhX5d2f3lYVj8Ji4nFgdXCE87+tWfkNbGjwN7vTwmaHNmeBqPe9tMtnxiGwZ61ZwVtTh7KQVuLQdBGkKFgsHPDeUGblLOFqK+T/uSpUDDYtuWCoE1GVinKKhqlMxiCLdvTBG1yr1Qhv6hGVnn7jXvSBW2KKxqQmldm98TaIUKAtQeEz6OqsRXHL12VVbL9x6MZglGqxnY99mbkS2ZwlOLHkxlgBbowtxtN2JKWLYux7nwmTBaLbWOHfjOyyxF5+umnQQgR/MvLy7vRLvzZZ5/FvHnzkJycjG+++QaEEGzYsIF3/GeeeQbNzc03/kpLS+WdnUO/CFnMFuz47ICsDrEUFFs/2st7o+U4Dtu/PCKrKzDHUez4+ihYgVD9tu9OyWoux7Ecdq89B5OR35nZ9uM5WY3fOJbDgR0ZaG/jd2a2br4gr7kcy+HYkVw0NbXz2mzZkWZ3yflbGBzF2fNXUF3DHxndsi9DXuM3jiIjtwzFZfW8NpuPXJTFoBQoKKlFzlX+KNWmk1kgMlx1CqC0tgnnC/jvmVtSLoGVmRJY29qO47lFvMe3X8yFUaYT0WIwYl/2ZVljOPTrkl2OyBNPPIHc3FzBv/DwcAQFBQEAYmL+V7JXrVYjPDwcJSUlvOOr1Wq4u7vf8ufQr19VRbVormuVNwgFqor5x2mqbUVtufSn75vHqSnrPGqnbzeipLBa8tN3h3StBpRdre30GMdxyL9ULstpAwCT0YKiy9W8xy9llslmsCyHgrxK3uOZWaVgWXkMSoGcvAre4+nZZbKiCB3KyudnpObJZzCE4OJlfkZaofSIS4cUDEHGVYFrVVQhy1EHrA3m0ov4l+TSSypkd8FVMgxSS/jPw6HfnuzauO3n5wc/P9vbq5KTk6FWq5Gfn4/hw62lhM1mM4qLi9Gjh/iy2A79NtTWzP/UbPdYje3w7KQoVVuzrhNriQyesdpabCfqiWd0Ppa+3SR7srjB4FneYVkORoGIjD1qbeWPuuh0Jt5jdjEEIjttOtsJmrbEMASt7fyMVp30ZbJbGALjNAvwxYoQglaB69HUBecBAC0CSbHNeoPsqAtHKVoN8j9Xh349+kkqyLi7u+Ohhx7Ciy++iO7du6NHjx545513AAALFiz4KZAO/YLl1IWFipQ8dTiklnXvfKyfgaHqfCyFsuvStvjer7XZHX9dEnvkJHBNFAoGMNvekWKTIVDfRWrLgJtFKRVkKHmqhdrHED4PqQXD7BnHqQuulXUc4fMQu72ZT4R03Xt16Nehn6yU3TvvvAOlUolly5ZBr9dj0KBBOHz4MLy8bHcrdei3JU9/+4ot8YrgRoO92+Xh4wbZd8Dr8uRhuHk4g1EQcDKXGwDA06fzIldqjRPUGicYDfIjFl4+ndfHIITAzV2L5ib5USQvgfLynp7O0FcJ70ISxfDkLzrm4+mCRpnRMEohWNjM19MF5TVNsr5aLMfBy52f4efhgoIyImt5huMovAV61fi5u0JBiKyIBQWFt6vAtXJxBsMwggmttkXg4yK9545Dvz79ZG6nk5MT3n33XVRXV6OlpQUHDhxAbGzsT4Vz6Bcsn2AvRA+OvNEhV4oYJYOBkxOhde28wJWLuxbJo2LkMRiCuMER8PLvPDdJ6aTAsInxshiEIQ
iPDkJQaOd9PAghGD05Tt6TPrF2+w3vHchrMm5CHG8HYLHy9nZBjECX4wljYmQlxAKAi4saSYn8y7kTR0TLSh4GALVKiSFJ/F2OJw2Oku3fKhQMRiX14j0+ObmP7BwRCopxiRH8jMTespdNWI5icmJv3uNT4vrIdEKsTtvU+D6yxnDo1yVH/Muhn0WzH57EWwZdjDgLh1l/miRoM+P+0fIYHMXM+0cL2kxfOlQWg3IUM5cPF+ylMmPhwDs6zdojAmDWkkHCjFn9ZEV2CEMwc05/waWk6VMSZSX2MgzBzCkJUAuUxZ82JlZyPxfAmuA5dXQsXARK1k8eEg2NxG7LHYyJA/sIRkTGJ0XCTWtf2fzbGSPjwhHkzZ/gPzI6DH7u4qrIdiaGEAzoFSJY6n1gWAhCvT0l7/9hCEFMkL+juurvTA5HxKGfRcPnDIS7jyuIhCdkhiHwD/VF8oR4Qbv+42LhG+wpiUEYAg8fVwyZkihoFz8wHCHhfpKe9AkBnF3VGDVdmBEZHYzescHSognEmpMzYYYwo3uoD5L6SW/8xhCCqTbOw9/PDcMGR0je+kopxYypwgxPd2eMHx4lmcFyFHMmJgjauGhVmDEiTvK1YjmK+eOEGSonJRaM6Ct5xwnLUSwelShoo2AY3DUsUTKDoxR3DRdmEEKwfEiSpPE7GMsGCzMc+u3J4Yg49LPISaXE06v+bPfrCLEW6Hpm1cM2e80oFAz+/t/7rcsa9txriZXz9//a7gNDCMFT798FhZNCQodYgr+9uwQare0eLU+8PAdqjZP9DAr89aXZcHXT2jR9/G9T4eKiljTBPvrXyfDmyXO5xe6P4+HurpXE+OMDYxAc5GnT7s/LRsHX21US4/6FQ9Crh+2dgH+YOxTd/DwkOTx3T05GfESwTbv7Jg1EryAfuxkEwLxh8RgUFWrTdunIfojtHmA/gwBTEvtgXBz/0k+HFiTHY2BYd7sdHoYQjOkTjhkJ0Xa9zqFfvxyOiEM/m/pP6ItnVv0ZjJIRFbVgFAyUKiVeWP84YoeKWzOOGxSBZ79cCaVSIWpiIgyBQsHg6f/ej6SRUaIYkXEhePnze6FSK8UxCAFhCB5/ayEGjxeXJ9Wzlz9e+2gpNFonUbkcHff8Pz8zDWOmCEeOOhTczQtvvX+X1RmxI19k5UNjMW2GuKdefz83vPfmIni4a+2a/JYvGYIFc/qLsvXycMa/X1gAXy8XuxiLpiXj3vlDRNm6u2jw0d/mI8jX3S7GrFFxeGThSFG2LhoVPv7zHPTw97LLqZrUvw+eXjRWlNOqcVLik/tno3eQn12OwujocLy2eKIohkqpwEdLZiAhJFA0gwAYFNYd7y2YKmupzaFfp36y7rtdIUf33d+mMo/n4uPHv0NRVgkUSuaOsuwd/9ZnQC/8+d8r0Kc/f5Ifn/JSi/DJM2tx+WLJjRb2tzCu/1t4bAj++MZCxA2OtJtxJaccH7+0Bblp1wQZoREB+MNzM9FvOH+SH59Krtbiwzd3IvNCMRgFc0d+SgejW6g3HvzrZAweZX+SX0V5I/79/h6kni/qdFtvByMg0AMP/nEcRo2x/4m1prYF//roAM6kXOmU0fFvfr5uWLliJCaOsz+xvaGpHe9+eQgnUgoBAl6Gt6czHlg4DLMm9LWb0dymx7trjuBgSj4oxR0Jph1t7T1dtbhv5iAsmpBkd1SrVW/EuxuPYldKnrX3DA/DTavGion9sWL8ALujQTqjGe/uOIat53Ng4ViA3rrhjBDrbiIXtQrLR/bDHyYMsttBMJoteO/ASay/kAnT9WrFtzCu/3+tkxPuHpSAR8cNFdwa7NCvS/bM3w5HxKH/F1FKkXf+CnZ8uh/ndqejvUUPQgAXDxcMm90fMx6cgIiknrI5hZkl2PHNMZzZcxG6Fj0oABc3DQZN7Ivp941Cny5gFOVXYtf3Z3BybybaWvSgHIWzqwbJI/tgxrKhiOnXU8Iyzq0qKarFrg3ncXTfJbReZ2idVU
gaFI6Ziwehb7J8RnlZA3ZuT8eh/ZfQ0qIHy3Jwdlahb0IoZs3tj379w2TvgqmsbsbOPRex7+AlNDXrwbIstBoV4mK6Yc7MfhiYHCa7NkhtfSu2HczE7qPZaGjSwWJhodWqEN0rAPOnJGFoci8oZTLqmtqx/fglbD+ehbrmdpjNLLQaJ/QO9ceC8YkY0y8CSpm1QRpaddh+NhubT11CTVMrTGYWGrUTIoN9sWhkAsYnRUIls0ZPs86AbRdysPFMJiqaWmEyW6BRKRHm743FQxMxJakPNDIZbQYjtl3MxdrzmShrbIbRbIHGSYkePl5YMrAvpsVHwUVte7nSoV+XHI6IQ786dXwNbU2mlVersffrwygvrIS+1QCtmwbdIoIw+f6xCAoTzrQXy6gprce+NSdRdrkKujY9NC4aBPX0w8S7hyEkgn9LrD2Muqom7F93DiWXq9DeaoBGq0JAd29MmD8QPfoECb62g2OL0VjXhv1bU1FUUIX2NgPUGhX8Aj0wYVYSwruI0dykw/5dF1GYX4m2NiPUGiV8fN0wfnJf9ImxnRchhtHaasC+g5eQl1eJtnYDVColfLxdMW5MNGJjutl8vRhGu86I/cdykJVXgbZ2I5yUCnh5OmPc8CgkxoZ0CUNvMGPfmVxk5JWjtd0ApYKBl7szxg7qjf4xoTadPDEMg8mCA6n5OJ9XiuZ2AxQMAy83LcYkRmBorO3kZDEMk8WCAxcLcSb/Gpp1BjAE8HTRYnRsL4yICbPp5IlhOPTrl8MRceg3p7SDmdjw7nZcOHARDMOAUgrKURDG2myR4zj0n5SIhU/ORNJYcTkStyvrVAE2frgXKfuzQBhyJ4PlkDgyCvP+PAkDbOzg4VNuWjE2fXYYp/dl3cin5TjrjZlhCFiWQ9zAcMxZORpDJ9m/dAAAl3PKsfGbEzhxINt60++E0Sc+BHOXD8PISfGSJoWiKzXYsOY0jh7MBstSENLBsHYYZlkOEb0DMWfRQIyb3FdSJKWktB7rNqTgwKFsWCzs9c/5VkZYT1/Mm9MfkyfGS4qkVFQ14cdt57HncDZMJgsIcycjtJsX5k9Pxozx8ZKiHNX1rfhh9wVsP3oJeqMZCobc6F1zY2nN3wMLJyZhzri+kqIctc1tWHMgDVtOZKHNYLrBILAuSbEcRYCXKxaPScLCMQnQqpzsZjS06bD6WBo2nMpEi95463lc/9++7i5YPDwBd49IgovGEeX4PcvhiDj0mxGlFD+8sRnfPr+20xyJm9Vx/P437sKiv8+2a4Ld8ukBfPbsOjCMOMaSJ6dj+T9m2cXY88MZfPiP9WAIEawT0pHLMPv+UVj5/Cybu4Vu1pFdF/HOsxtBAFGMyfP645HnZkJhxwR76lgeXn9+MyhHBRmEWJ250RNi8eRzM6GyoxZHyoWreOHlLbBYWMHGeR25DMOGROC5Z2ZCoxE/wWZkl+Lvr2+B0WgWbGrX8REn9+2B1/8+C84idj11KPtKJR5/ezPa9SZhxvX/6BsZjHeemA13l84L93WmgtJaPPyfzWhq09tszkcIENXdHx8+MgfeAnVNbldRdQMe/O8m1Da32yy8xhCCMH9v/PehuQjwtL2zyqHfpuyZvx3pyQ79ovXjm1vw7fNrAcBmIbGO41/94wese3ubaMa2zw/hs3+sA6h4xo/v7sR3r28Vzdi//hz+8/Q6m5M38L8ky61fH8PnL4tnHN+XhbeeXg+O5UQz9m2+gH+/vFV0o72U05fxyj82wmJmbTI6xjx2MAdvv7xVdG+bjIsl+MfzG2EyWWx27+1422fOXcGLr24VXQguu6ASf315IwwGYSekg0EpkJZVgqde2wSTWVyb+8sltXj49Q1os+GEANakTUqBrMJKPPrPTTCIbEp4rboRD7y3XpQTguuMgrJaPPj+BrQJNK+7WRUNLVjx4XrUtdh2QgBrAm9xbQPu/Wg9mtq7rlGkQ79dORwRh36xSj+chW+e+1HSa7965ntcPJZt0y7vwl
V8+rQ0xtr3duHcvkybdkV5Ffj3U2vtB1Bg2zfHcXR7mk3TitJ6vP30evvqp8A6Me3fmoa9my7YtK2vbcUr/9gIe8ulUkpx/PD/tXfnUVFc+R7Av1UNNCCIbK2yikgkxgVExQhGFEQNahBFkxjjdsxJBg2OyVOik3HmRLP4NOaFmadOMqMmmtEo7mvcRY0DihBRgRglyiaohG5p1qr7/iAwcZLqvV9B+/ucw/HY93b/ftfGrl/fqrr3BnZt/5fevmp1HZb9MQOiaFwYUWTIvnQLW766oLdvXX0jFq/IgCCIRi2rLooM390ow2dbz+nt29jUjN//9y40NgtGbS4oigxFxZVYu+W03r6CKGJB+m7U6ZnR+fXzGIrvVWPFluN6+zLG8Obf90JdV290jPJqNZZuPWLwc8iTiwoR0m7t/Hi/yfu6KOx47Px4v95+u9cdN3ndAl7BY2e6/g/a/ZsyTb44j+M5ZKw/qbffwe1ZLQdVU060csCOjZl6Z0UO7ctBc5Ng8rLtGV99q3fG4sg3V1FX32jwDM0vMQZk7L6MxkbdMxbHzt6AWlNv0u7DjDHsOZILbV2jzn6ns2/ifnWtSTFExnDw7DXUaHTPJpzPL0ZJVY1RBUJbDJHh+OXvca9ao7Pf5R9KUVR236QYgshw7kYxiiurjX4uebJQIULapYriSmQdvmLyvi5Cs4h/HchB5Z0qyT7VlTXI3HvJ5H1dREHE1fNFuFNYJtmnVl2H4xnZJsdgIsPN/BIU5d2R7NNQ34TDO7NN3zuGAWV3HuC7S7cluzQ3C9ifccmkA2urB/cfIevCTcl2UWTYteeyWfvTaB7V4+y5Isl2xhh2HMgxa6O8+oZmHDt7Q2efr7+5YtatzoIo4sBZ3TN620/lmry0fatdmVd1tv/znHkxFDyHHRf0zxqSJxsVIqRdOrrxlFEXaf4WjudwdONpyfYT2y8afZrhP/EKHke3SE/Vn9mXg2Y939D1USh4HN12UbL9wonr0NYadr5fV4zDO7Ml27O//QE/VWvNisHzHA7uuSzZnvfdHdyrVJsdY9+BK5LthT/cQ/HdB2a97RwH7DmSK9n+Y9lD5N8sN6toYwzIOJ4n2X6vWoOL1380aaailcgYdp6RLhJqautx4rubZsUQRIaMi1fRbMYmjsT2USFC2qWyHyrMfg0OQPnte9IxblWCM7PYEUUR5cXSsy5lP9436o6U3yIIIkpv64hx54HOXXANjVFSfF86RslDsxczE0WGkjsPpWOU/2TW67fGKC2VPhVQWmF+DMZ0v05ppfkxAKDivlryFFVJVY1JZ+H+U/WjOtRLFMpl1WqjrqGRom1oQo223uzXIbaLChHSLtU9qjf5tEwrQRSh1XGeva623qRrER7DWk6/SMdoMD8GgFpd46hrBGfyxuv/pn0kPatSV9dokUWo6rS6Y5hb7AAtC4eZ0maMhgbpWS6thWKIjKFB4g6dOgPvqjGEtv63r3fRNui+DsaoGBZ8LWJ7qBAh7ZKzq5PJF6q2Uih4OOvYhdbZxdGgzfd04TigU2cdMTo5WuQA3knHOJycHMAs8P3Y2UWpO4YFCirnTrpjmHM649+vI72WiJMR64zoomu9EmcLxeB5DkqJxc0sFQMAOkmsi2LJBclocTOiCxUipF3ye0r/8uD6MAb4hkgvZe4X0s3sWReO5+EXLL20vF+wCs1NglkxFAoeASE6YgR5/WrjQFNiBAarJNv9Az3NLhJ4BYeAHl7SMfw8zHp9oOXgHeDvKdke4Gt+DI4D/H3cJdv9u0m3GRwDgK/KTbKI9Vd1MeuC21Zebp0kix0f986ws8BOuK6OSrg5G75AG3nyUCFC2qUxs0ea/Q2cMYYxs0dKto+aOtTsDdZEQcTYV4dLtg8fHwaljm/ohhAEEWNfkt6u/tlRfeDiat4HvSCIeD55iGR7RGQwPL3MWyVTFBjGT4qQbO/X1w8+3c07wIoiwwsTwiXbQ4JUCAlSmTVLxRgwaWyYZL
t/N3eEh/qZfZppSpx0DG83F0T3DTLrjhae4zB1xADJ9s7OjogPe8rsGFOG9TP5FnnyZKDfDtIueft5Yuj4CPAmXoSpsOMRlTgEXj7S34DdPF3x3KTBJl/oySt4hMf0gU9P6dkKZxdHjE6ONPk0E89z6B0WiJ59fCX7ODjY4fnkISYf+DgO8O/pjT5hAZJ9FAoeEyYPMutUlqprZ0REBuvIg0NSonShYgi3zk6IHhais8/khHCzilxnR3vEDg/V2WfK6DCzZpDs7BR4fngfnX2mxoSZdUcLOCAxuq/OLtOi+psVgzGG5GdN25eJPDmoECHtVvJbE01fR0QQMWXRBL39Jv1utMkHDFEQMWXBGL39Js4a3vIt34RjuCgyJL8xSm+/hGlDoLBTmDSbwBgwdc5zemcJxk0Mh1JpZ/JsQvL0YXqLpTGj+6JTJ0eTi6rkyYP1bkwXFx0Kjy7OJsXgOCApYSAclbpnuUZEBKObV2eTZhM4Dkgc2Q+uevabebZPIIK6eZgUg+c4PB8ZCi+3Tjr7hQX5oG9AV5NjjOwXDD+vLkY/lzxZqBAh7Va/4U/jjTWzTHpuyidz8Myw3nr7hQwIROonr5oUY9YfJiFi1DN6+/n36oq3P3nFpFVPp/4uFlHjpKfPW3X1cceyNS8C4IwuFCa8GIm4idKnM1q5e7jgz6umgec5owoejgPixvXHxCmD9PZ1cXHEhyumgOd5owoFjuMwPCoEL00bqrevUmmP1X+cAns7hVExeI7DoAGBmPviML197ewU+GRxEhyV9sbF4Dn0C/HBgpefM6hv+oJEuDgpjSoUeJ7DU37eeOelWL19OY7DJ3MmwsPF2agYCp5DoLc73ntJf6FOCBUipF1LWpiAN9bOAjjoPYWisOMBDkj5nzlIXDDO4BhjZwxH6ievguM5vdeMtJ4qmrN8MqYtet7gGDETB2LxpzPAK3i9p2la219KjcesJeMNjjE05mm8u/ZlKOx4/eNQtBxUJs2Iwutp4w0uXsIHBeG91S/CwcFOf4yfD1xjJ4TjraUTDI7xTB9frP5wGhyV9noPfq0xRo4IxR/emWjwQT8kSIVP35uGTs5Kvc9pzTtqcDDeT0vUO+PSqoePB9a/Ow1dXJ0MiNHy56A+AVj7X0lwkLiA9D/5eLnhH4unwcvNBbyef9/WSbn+Pbtj/e8nw0nPrE4rlZsLNr85DT7unQ2O8ZSPN/4xPxmuTtJ3SRHSimOWuCfPSozZRpjYtmsXCpGx9gDO78kC0HJwEEURPM+DiSLAcRieFImkhQno86z+mZDfUnSlGLv+9xgy92RDFBl4nmv7k7GW5daHjhuASb8bjf5RpsW4db0Uu/9+Bqf3XIbQLIJXcBCFn2OgZYG0wTFPI3FuDAY+Z1qMH3+oxJ4vz+P4/lw0NzWDV/At4+B+jiGICB8ajMQZwxD5nO5rHaSU3n2A3duzcPRgHhoamqDgW2JwfMuKJoIgol9YACZNG4KoEaEmnc6pqKhBxu5LOHg4D3X1TVC0joMHAA6CIKLP0z6YnBiBkTFPmxSj6oEGOw/mYN/R7/BI29AWg+NafscEQUTv4K6YkjAQo5972qSLmx/U1GLHN7nYdTwP6tp6KBQ8mMiAX8QI9vfC1PhwJAzvY3Ch80vVj+rw9alc7DiTh4eaOtj9PA7g5xiiiB7d3PHiyHAkRj1jcKHzS+q6enx9/jv8MzMXVepa2PF824JnPMehWRTh5+mGl4eHYcqz/eHoYHwMYjuMOX5TIUI6lAfl1Ti2+TRKb1ZAq6mDs6sTfEO6I37mCHhY4LZJAPipSo3j277F3e/LodXUw8nFEd0DvRD30jB4W+D2TwBQV9fiREY27hRVQPuoHkpnB3T19UDclMHoquP2U2M8Utfh5IFc3C6qQO2jBigd7eHdzQ2xE8LgGyh9G60xtLUNOPlNPm4WVqD2UT0clHbw8nZF7Nh+COjhbZEYdXWNOHn6BgqLKqB5VA8Hezt4enZC7Mg+CO
4pfcuxMRoam3HqQiHyb5RBo62HvZ0CHl06ITa6N3oHd7NIjKZmAaezv0dOQQk0tQ2wt+PRxdUZsZEheCa4u0XWm2kSBJzNu4WsgrtQa+uh4Dl0cXHCqPBeCO/la5EYgigi8/ptXCj8ETXaevAcBzdnR4zsF4whvfwtEoN0fFSIEEIIIUQ2xhy/6RoRQgghhMiGChFCCCGEyIYKEUIIIYTIhgoRQgghhMiGChFCCCGEyKZd3+jdekOPWq2WORNCCCGEGKr1uG3IjbntuhDRaDQAAH9/f5kzIYQQQoixNBoN3NzcdPZp1+uIiKKIsrIyuLq6WnyRHLVaDX9/f9y9e9cm1yih8XV8tj5GWx8fYPtjpPF1fNYaI2MMGo0GPj4+4HndV4G06xkRnufh5+dn1RidO3e22V8wgMZnC2x9jLY+PsD2x0jj6/isMUZ9MyGt6GJVQgghhMiGChFCCCGEyOaJLUSUSiWWL18OpdI2t6mm8XV8tj5GWx8fYPtjpPF1fO1hjO36YlVCCCGE2LYndkaEEEIIIfKjQoQQQgghsqFChBBCCCGyoUKEEEIIIbKhQgRAUVERXnjhBXh5eaFz586Ijo7GqVOn5E7Log4ePIjIyEg4OTnB3d0diYmJcqdkFQ0NDQgLCwPHccjNzZU7HYsoLi7G3LlzERQUBCcnJwQHB2P58uVobGyUOzWz/PWvf0WPHj3g6OiIyMhIZGVlyZ2SRXzwwQcYPHgwXF1doVKpkJiYiMLCQrnTspoPP/wQHMdh4cKFcqdiUaWlpXjllVfg6ekJJycn9OvXD5cuXZI7LYsQBAHvvvvuY58p7733nkH7wlgDFSIAxo8fj+bmZpw8eRKXL1/GgAEDMH78eFRUVMidmkVkZGRgxowZmD17NvLy8nD+/Hm8/PLLcqdlFYsXL4aPj4/caVhUQUEBRFHEhg0bcO3aNaxduxbr16/H0qVL5U7NZNu3b8eiRYuwfPly5OTkYMCAARgzZgwqKyvlTs1sZ86cQUpKCi5evIhjx46hqakJ8fHxqK2tlTs1i8vOzsaGDRvQv39/uVOxqOrqakRFRcHe3h6HDx/G9evXsWbNGri7u8udmkV89NFHWLduHf7yl7/gxo0b+Oijj7Bq1Sqkp6fLkxB7wlVVVTEA7OzZs22PqdVqBoAdO3ZMxswso6mpifn6+rLPP/9c7lSs7tChQyw0NJRdu3aNAWBXrlyROyWrWbVqFQsKCpI7DZMNGTKEpaSktP1dEATm4+PDPvjgAxmzso7KykoGgJ05c0buVCxKo9GwkJAQduzYMTZixAiWmpoqd0oWs2TJEhYdHS13GlaTkJDA5syZ89hjSUlJbPr06bLk88TPiHh6eqJ379744osvUFtbi+bmZmzYsAEqlQoRERFyp2e2nJwclJaWgud5hIeHo3v37hg3bhzy8/PlTs2i7t27h3nz5uHLL7+Es7Oz3OlYXU1NDTw8POROwySNjY24fPky4uLi2h7jeR5xcXH49ttvZczMOmpqagCgw75fUlJSUpCQkPDY+2gr9u3bh0GDBiE5ORkqlQrh4eH47LPP5E7LYoYNG4YTJ06gqKgIAJCXl4dz585h3LhxsuTTrje9+//AcRyOHz+OxMREuLq6gud5qFQqHDlyxCam4W7dugUA+NOf/oSPP/4YPXr0wJo1axATE4OioiKb+HBkjGHWrFl4/fXXMWjQIBQXF8udklXdvHkT6enpWL16tdypmOT+/fsQBAFdu3Z97PGuXbuioKBApqysQxRFLFy4EFFRUejbt6/c6VjMtm3bkJOTg+zsbLlTsYpbt25h3bp1WLRoEZYuXYrs7Gy8+eabcHBwwMyZM+VOz2xpaWlQq9UIDQ2FQqGAIAhYuXIlpk+fLks+NjsjkpaWBo7jdP4UFBSAMYaUlBSoVCpkZmYiKysLiYmJmDBhAsrLy+UehiRDxyeKIgBg2bJlmDx5MiIiIrBx40ZwHIcdO3bIPArdDB1jeno6NBoN3n
nnHblTNoqh4/ul0tJSjB07FsnJyZg3b55MmRNDpaSkID8/H9u2bZM7FYu5e/cuUlNTsXXrVjg6OsqdjlWIooiBAwfi/fffR3h4OF577TXMmzcP69evlzs1i/j666+xdetWfPXVV8jJycHmzZuxevVqbN68WZZ8bHaJ96qqKjx48EBnn549eyIzMxPx8fGorq5+bAvkkJAQzJ07F2lpadZO1SSGju/8+fMYNWoUMjMzER0d3dYWGRmJuLg4rFy50tqpmszQMU6dOhX79+8Hx3FtjwuCAIVCgenTp8v2n0sfQ8fn4OAAACgrK0NMTAyGDh2KTZs2gec75veIxsZGODs7Y+fOnY/dvTVz5kz89NNP2Lt3r3zJWdD8+fOxd+9enD17FkFBQXKnYzF79uzBpEmToFAo2h4TBAEcx4HneTQ0NDzW1hEFBgZi9OjR+Pzzz9seW7duHVasWIHS0lIZM7MMf39/pKWlISUlpe2xFStWYMuWLbLMStrsqRlvb294e3vr7afVagHgVx/qPM+3zSa0R4aOLyIiAkqlEoWFhW2FSFNTE4qLixEYGGjtNM1i6Bg//fRTrFixou3vZWVlGDNmDLZv347IyEhrpmgWQ8cHtMyEjBw5sm1Gq6MWIQDg4OCAiIgInDhxoq0QEUURJ06cwPz58+VNzgIYY1iwYAF2796N06dP21QRAgCxsbG4evXqY4/Nnj0boaGhWLJkSYcvQgAgKirqV7dcFxUVtfvPTENptdpffYYoFAr5jnmyXCLbjlRVVTFPT0+WlJTEcnNzWWFhIXv77beZvb09y83NlTs9i0hNTWW+vr7s6NGjrKCggM2dO5epVCr28OFDuVOzitu3b9vUXTMlJSWsV69eLDY2lpWUlLDy8vK2n45q27ZtTKlUsk2bNrHr16+z1157jXXp0oVVVFTInZrZ3njjDebm5sZOnz792Hul1WrlTs1qbO2umaysLGZnZ8dWrlzJvv/+e7Z161bm7OzMtmzZIndqFjFz5kzm6+vLDhw4wG7fvs127drFvLy82OLFi2XJ54kvRBhjLDs7m8XHxzMPDw/m6urKhg4dyg4dOiR3WhbT2NjI3nrrLaZSqZirqyuLi4tj+fn5cqdlNbZWiGzcuJEB+M2fjiw9PZ0FBAQwBwcHNmTIEHbx4kW5U7IIqfdq48aNcqdmNbZWiDDG2P79+1nfvn2ZUqlkoaGh7G9/+5vcKVmMWq1mqampLCAggDk6OrKePXuyZcuWsYaGBlnysdlrRAghhBDS/nXcE82EEEII6fCoECGEEEKIbKgQIYQQQohsqBAhhBBCiGyoECGEEEKIbKgQIYQQQohsqBAhhBBCiGyoECGEEEKIbKgQIYQQQohsqBAhhBBCiGyoECGEEEKIbKgQIYQQQohs/g+vRRzGC8YqngAAAABJRU5ErkJggg==", "text/plain": [ "<Figure size 640x480 with 1 Axes>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plotPinPow(fuelBlock)" ] }, { "cell_type": "markdown", "id": "fb90d25f", "metadata": {}, "source": [ "Unsurprisingly, we have a pin power profile that matches our `p(x, y) = x + y`. Pretty!" ] }, { "cell_type": "markdown", "id": "f2c6be2f", "metadata": {}, "source": [ "## Rotation\n", "\n", "`HexBlock` objects have an implemented `.rotate` method that supports CCW rotation in 60 degree increments. 
Before we rotate this block, make copies of the locations and pin power data arrays to compare before and after rotation." ] }, { "cell_type": "code", "execution_count": 11, "id": "9373e430", "metadata": {}, "outputs": [], "source": [ "def getPinRingPos(b: HexBlock) -> np.ndarray[tuple[int, int], int]:\n", " locs = b.getPinLocations()\n", " allRingPos = [l.getRingPos() for l in locs]\n", " return np.array(allRingPos)" ] }, { "cell_type": "code", "execution_count": 12, "id": "3f7589a9", "metadata": {}, "outputs": [], "source": [ "ringPosBefore = getPinRingPos(fuelBlock)\n", "pinPowerBefore = fuelBlock.p.linPowByPin.copy()" ] }, { "cell_type": "code", "execution_count": 13, "id": "d4fb2a75", "metadata": {}, "outputs": [], "source": [ "import math\n", "\n", "fuelBlock.rotate(math.pi)" ] }, { "cell_type": "code", "execution_count": 14, "id": "819f10f3", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "<matplotlib.collections.PathCollection at 0x1baf95134d0>" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnWV4HEe2hr/qGY2YyUKLbEu2ZElmZmZ2Yggzb7Ib3DDtZpPdwIY2aCdOzMzMLLDAtmRZFlnMIw131/0xlq9B3dOgJE7S7/PoZq/rTH+DXadOnTqHUEopVFRUVFRUVFR+A5jf+gmoqKioqKio/HlRHREVFRUVFRWV3wzVEVFRUVFRUVH5zVAdERUVFRUVFZXfDNURUVFRUVFRUfnNUB0RFRUVFRUVld8M1RFRUVFRUVFR+c1QHREVFRUVFRWV3wztb/0EhOA4DuXl5fD09AQh5Ld+OioqKioqKioioJRCr9cjNDQUDCMc87ilHZHy8nJERET81k9DRUVFRUVFRQalpaUIDw8XtLmlHRFPT08A9hfi5eX1Gz8bFRUVFRUVFTE0NzcjIiLi6jwuxC3tiLRtx3h5eamOiIqKioqKyu8MMWkVarKqioqKioqKym+G6oioqKioqKio/GaojoiKioqKiorKb4bqiKioqKioqKj8ZtzSyaoqKjdSVtuIdSdzUVLTiFazBe7OOkQG+mBm/0SE+3t3iEZFfTPWH85BcVUDWk0WuDk7ITzQB9MG9UDnYN8O0aiu12PzgVxculyHFqMFrs5ahAR4YfKwHogJD+gQjfrGVmzdk4OLxTVoaTXDxdkJQQGemDiyB+KigzpEo6nJgO07s5FfUIXWFjOcnbXw9/fEuDE9EN8tpEM09M1G7N6WhfNny9HaYoJOp4WfvwdGjktE96TwDqkx1Npiwt7NmTibWYKWZiOcdFr4+ntg+ISeSOoT1SEaJoMZ+9anI+fERbQ0G6DRauDj74Ghk1OQPLiLw1oLYjAbLTi4/hTOHDyP5oZWaDQMvAM8MWhyKvqMSewQDYvZisNrjuP0rjPQ17WAMATe/p4YOK0v+k/uBY1Wo1hD5c8FoZTS3/pJ8NHc3Axvb280NTWpp2b+5BzPK8b3+9JwLK8YDENAKcBRCoYQEAJwHMWg+M64a1Qf9O8SKUsjLb8MS3eexuGcS/aJ54oGIQBDCFiOom98BO4Y2xuDE6NlaeQUlOPHLWk4eLrg6r/dqJHSLQy3T+yN4X3iZGnkXazCz+tPYv/RfHAUINdqMAxYlkP3LiGYN603Rg3uJmuSvXSpBj+vOoF9+8+BZTkQQsBx12t0iQvG7Jl9MHZ0DzCMdI3S4lqsWnYMe7Znw2Zj29WIignEzPn9MW5yMjQa6ZNsRWk91iw5hJ3rM2CxWMG0oxEeFYAZiwZhwqw+0DpJn2Rryhuw5n/7sGP5cZgMFjAaBhzLAQTQaBiwNg6dIv0x/e5hmLRoMHTO0teHdZWNWPvpLmxbehCGZmO7GoFhfpj2wChMvW8kXNycJWs01jRhzX+2YMuXO6FvaIVGa78uAGi0GrA2Fn4hvpj2yHjMfGIS3DxdJWuo/HGQMn+rjojKLQ2lFF/tPon/bj0KDWOfqPloG39y8mDcM7qvpAl22e50fLDqgEMNhrFPVPdN6o+Hpw2UpLF+Xxbe+3YPCIGwBiHgKMVtE3rhiQXDJU3iuw6ew9sfbQUIwLKOX8fUsUl4+sGx0EqYxA8fycfrb28EpRQsy/HaEUJAKcWoEQl47q+ToNOJn2BPH7+I159fCZuNFXwdhACUAgOHdsULb8yCi4uTaI3s05fwymNLYTHZHLwO+39TBsTi5f8shJu7+Ek8L7MYf1/8JQwtJrtjwCtidxi794nBq9/cB08fN9EahdmleGnOf9BU1yKsAYAwBLFJkXhr1ZPwCRR/Ty05fxnPjXsT9RUNojQi48Pwjx1/R0CYv2gNlT8WUuZvNUdE5Zbm692n8N+tRwEIT97Xjn+05Qi
+23tatMbyvRn4YNUBURrclfGvt57AZxuOitbYfCAH//hmNzhKHWtcWRss356OD3/cL1pj35E8vPGfLWA5Kjh5A///Ojbvzsa/PtsJseuR4ycv4pU31l9xEIQnpLZr7jtwDu+8t/mqpiPOpBfh5b8uh8Vic/g62p72iSMX8OaLq66u0B1xPqsULz74HUxGq4jXYf87c6IQrzyyFBaLTZTGpXPleG7+pzDojQ4nb1zROJdehJcWfQ6T0SJKo6ygEn+d8p4oJwQAKEdRmFOKZ6e9j9ZmoyiNquIaPD3sZTRUOnZC2jTK8svx9PBX0VynF6Wh8udGdURUbllOXCjBJ1uPyHrsh5sP43RBmUO77EsV+NeK/bI0vtl2EoeyCx3aFZTW4J1vdsnSWLkzA7uOnXdod7miEW/8ZwukboBQCmzdm4NNu7Id2tbW6fHqG+sBSAuiUgocOJiH1WtPObRtbjLilb+tAMdRSInVchzF6eMX8dOSQw5tTQYLXn5kCVgbByrSOWrTyM0oxpKPHX+WFrMNL9/xJawWm2gHDAA4lsPFnDJ8+dpah7Ysy+HleR/B1GoW5SBcq1F2oRIf/2WpQ1tKKV6Z/k+0NLaKdvIAgLVxqCquwT/u+ET0Y1T+vKiOiMoty9J9adDIyC0A7Ns0S/anObT7aXe6rPwFwL69sXSnY43VOzMlOwhtEELw4xbH0Z31OzLBUSrRRfh/fl5/ymFUZMvWM7DZWEkOwrWsXHPKYfRh55ZMmIxW0RGaa6EUWLfipMOIxd4tmdA3GSU5CP+vQbF5xQkYDWZBu6Pbz6CuqkmSg9AGx1HsXnUKzQ2tgnand2Wj4lKNPA2Ww8H1p1FzuV7QLuvgWRRmFUtyQq7VOLUtA2X55ZIfq/LnQnVEVG5JLtc34fC5IofbGHywHMXBs4WoaGjmtalrbsXu9AuyNTiOIi2/DIUVdbw2LQYzth4+K1uDUoq8omqcK6zktTGbrdi4M0vWxNpGWUUDMnNLecdtNhbrN2Uo0qira8GJk/wRJI6jWL/KsUMkRIvehMP7zvGOU0qxftkxKDkEYzZZsXfzGUGbDd8dku3gAvZox86VJwRtNn61F4yMBN02CIDtS4UjSBs/3Q6NVr4Go2Gw+Yudsh+v8udAdURUbknWn8hVdCMHAAKC9Sdyece3HD8ne3XfhoYh2HCEX2PXsTxYbaxijY37c3jHD54ogEFkToGgxs4s3vGTpy6hsdGgSINhCDZtyeAdz8ooRnVlk2KNzev4o1QXzl5GycVqRZ87IcAWASeh7GIVzqcXKXLaKKXY8gP/tmTN5Xqk7cuVFQ1pg+MoNn+7n3e8uV6Pw+tOyoqGXNVgOWz9Zg9Yhb8BlT82qiOicktSWtuk2EkgBCir45/YSmsaFTs7HEdRVsOvUVbVAI3C2g0sR1FS2cCvUdEg6+jqTRqX+TUulzd0yHtVWsavUSGgL0Xjcin/dkOFwJhYKBW+Tnkxf4RMCtVl9bzRocqiWqmpOu3SVKuHmceJrS6uVeTotGHUm9Bc36L4Oip/XFRHROWWxGCxXD09IheOozCYrfwaJquiVStgnwtajPz5AkL6UmgRyEkwmqyyc1CuxSDwOoxGS4cU9TIKRG6MBotiZwcATCb+99xoUBY5asMsoGFqFc4fEQvHUVh4vj/GVlOHaACAsaX9a/H9uywNvbgTOip/TlRHROWWxN1ZB0bhxMcwBG7O/HUl3F10HbD9A3i48teVcHPRKbp+G54CBahcXZw6YnEMdyENV52i3I023Nz43w9XN51ixxCwP1chjY7AWUDDRUKdESEYDQMdz/fXzcOlQzQA8BYec/XsQA0vtbiZCj+qI6JySxIVpLyUOqVAZ4HrdA72lZ1E2gbDEHQO9uHXCPGFTWF4W8MQRIX58Y5Hhvk5PI0iSiOCv/hURISfYidBwxB0juQvXx8eqbz4FcMQRETxXyc8Snn5fEIIwjsLaMQEKtYAAUIi/XmjUCExQR0SofIL9oa
OpwhccOfADinX7u7tBk8/D8XXUfnjojoiKrck0/v16JDrzBC4zqQBCbKPB7fBchQzhiTxjo/u3w0uEiqK8mlMH9GTd3xo/zh4KFyFsxzFtHHJvON9e0fD31/ZZMJyFNOmpPCOJyZHICTMV9GJFo6jmDqzD+94bHwoYuNDQBR87pRSTLmtP+94aFQgEvvHgtHI1yAAptwxhHfcv5MP+o1LUnZqhiGYct9I3nFPXw8MnzdQ8amZyQ+MhUaj9p9R4Ud1RFRuSTr5eGJYj2hFdURGJcUiyJt/8vT1cMW4Pt0UafRPiERkkA+vjburDpOH9ZCtwRCCHrGd0KUz/ypb56TF9HHJsreZCAE6h/shKT6U10ajYTBjaqqi7bKgIC/06c3fo4cQghnz+sm+PgB4+bhh0PBugjbTFwyUVMjsRlzddBg+gd8xBIBpdw4F56AqrBBaJy3GzOkraDP1vlGKkkkJIZiweKiwxsPjlZ2a4ThMeXCs7Mer/DlQHRGVW5Y7R/SWvR3AcRR3jOjt0G7hmFTZp3NYjuKOcfyr7zbmjksFCJGVUMpRikVThCckAJgxIRlaDSMrmkApsHBmP4eh/kkTk6Fz1sreErhtbj+HztLYiT3h7uEi26mafVt/aB1sJwyf2BO+/h6yNAgBpi0YCBeBHBEAGDg+CUHhvrIiFoQhmLhgIDy8hfvN9BrVHZHdQmRpMAzBqHkD4Bcs3LG6x6Bu6NY3DoyMqAijYTB4ej+ExARLfqzKnwvVEVG5ZekdG46/zhgu67HPzRyBlGj+FX4bCZHB+PuiMbI0Hp0+GAO7d3ZoFxXqh9cemiArofSOqX0xsm8Xh3adgrzxxt+mAYBkZ2TmxBRMGOl4K8zP1x1vvz4bDCNNgxBg3JhEzJjWy6Gth6cL3vrgdjAaRpKjQAjB4OHdMH/xYIe2zi5OeOuLO+Gk00rSYBiC1IFxWPzIaIe2WicN3lr6EFxcdZIcBYYhSOgVhXtfmi7ClsEbK5+Eh7ebNA0Ng+jECDz2r4UObQkheG3d3+AT6C3JGWE0DMK6hOBv3z0i+jEqf15UR0Tllmbx8F54dsZwEMDh9oaGsUcdnp85AguGpYrWmDEkEX9fNAYMIaI0AOCJmUNwz0THkYo2xg6Mx+uPTISGEa9xz4z+eHgef57AjQzuG4u3npsOjYZxqNE2Ac+b2htP3jtKdJSjV2pnvPPmHOh0Wmgc5EC0aUya0BPPPjNRtEb3pHD846OFcHZxcphn0aYxfEx3vPDGLNGORWx8KN779l64ebg41GjLJ+k/PB6vfLgQWidx+Q4RccF4f80T8PJ1F62RMrgr3lz6IHTO4vKKOkUG4INtz8G/k4/D106I/S+hbwz+seEZ0ad7AkL98OHhN9Gpc6Dj95fYnZfY5Ch8sP91uHu7i9JQ+XNDaEecyfuFkNJGWOWPTealcvywPx17sgsA2G+oHKVgCLm6tTImuQsWD09FcpTjSEh7nC2qxI970rHr9AVQSkEIAUc5uwbsSYrDe8Zi4Zhe6N01XJbGheIaLN+ehh3HzoNlOTAMA477fw2OoxiUHIX5E3qjf5LjaEt7XCqtxapN6dixPxdWG3tFg17J77B3/+2T3Blzp/TCoD6xsjTKLtdjzbo0bNuRBYvZBkbTpgGAACxL0TMpArNn9sbQwV1lbedUVjRi3YqT2LYxAyajBZo2DQYACFiWQ0JiOGbM64sRY3rI0qitasKGZcewdfUptOpN0GjtGoTYK/OyLIcu3cMwfeFAjJycLKtwXEONHhu/P4gtPxyBvtFwvQYhYG0couJDMP3uYRgzp59oR+damur02PT1Pmz+Zh8aa/TQaDWgHGffEiT2JnQRXTph2gOjMX7xEN5jwUK0NLZi0+c7seHTbagrb7BrXPnx2V8Hi5DYYMx4bCImPzAGzgLH2lX++EiZv1VHROWWwGKzASDQOdjfr2lqwcZTZ1FS24RWkxnuLs7oHOiDaX27I8BLePVl1wB0WuHVZn2
zAZuPn0VRZQNaTWa4uegQFuCNqQO7I9jXU/CxVhsLjlI4OwlrNOmN2HbkHArLatFqtMDV2QmdArwwaWh3hAYK79vbbCxYjsLZwWkcfasJOw+cxcWiWrQazHB21iLI3xPjR/RARKjw8WibjQXLctDphHNCDAYzdu89hwsFlWhpNcNZp0VAgCfGju4ueFQXAGwsB5uVhbODvBOj0YL9u3KRf64cLXoTnHRa+Ad4YOS4RMTECecfsDYOVptjDYvZioM7c3A2oxitehO0Thr4Bnhi+PgkdOkRJqzBcrBabHB2cRLUsFpsOLI9C9nHC9DSZISTkwbe/h4YMjkF8amdBR/LshysZhucXYU1bFYbjm87g8yD56FvaIVGy8Db3wODp/RCj4FdBB/LcRwsRiuc3XQOnguLk1szcHpHJvQNLWAYBp5+Hhg0vS9SRiY61DAbLXBxc+6Q48cqty6qI6Jyy0MpxfHCUiw7mYlDF4pgudKLwkWrxaj4GCzon4JekaGKblaUUqQVXsbyo2dw4GwhTFa7I+Ks1WBIfDRuH5yMfnERijWyiyqx8sAZ7DlzAaYrnV+dtBoMjI/E/OEpGBDfWXHhtHNFVVi9JxO7T+bDeKXappOWQe/4CMwdk4pBPaMUl5IvKK7B2h2Z2H34PFqvVEDVahgkxYdhzsRUDOkTC63CUvLFJXXYsCUDO/fmoqXFXoFUo2GQ0C0Es6b1wtBBXeEkIyJwLWWl9di8IR07t2ehudl4VSOuSzBmzO6L4SMSRG998FFZ3oCtq09jx4YMNNbbu+QyDEF0106YPr8fho9PdJjQ6oiaikZsW3ECO1acQH2tHqAAoyGIiA3GtMWDMHJaL7gqPLZdX9WE7cuOYNvSQ6itbLRrMAShMUGYcvdwjJk3AO4Ki5E11jRj+3f7seWr3agurQPlKAhDEBIdhCkPjsG4O4bDS60z8odDdURUbmkOXSjCm1v2orS+CRqG3FRUrO3fYgP98NrU0egTJX0b5FRBKd5cuweXqhsENSL9ffDS7FEY1FX6NkjWpQq8+dNuFJTXCmqE+Hni2bkjMaKn9G2Q/OJqvPXtTpwvrm5Xg2EIOI4i0NcDT902HGP7Cx9dbY+isjq8+/kO5ORXCGr4ebvh4UXDMGmE9Bovl8sb8N6H23Emu/Tq9drT8PJyxb2Lh2D6FPE5Pm1UVzXjg/c2I+3UJUENdw9nLL5zKGbPc3xS6Ebqa/X48M2NOHEoHwy5WYMwBJSjcHXTYd7dQ3DbPUPBSHQQmxta8fHLa3B0R459C/JGDWI/6eTs6oSZdw/DoifHSd4yam024tPnf8aB9acBerNG2xEvnbMTptw9HHe/NEPylpGx1YTP/rIEu388DI7jbj4yfWX7S+OkwcR7RuLB9xZC10GViFV+e1RHROWWZW16Ll7esAuUUoenSBgCMITBv+ZMxITErqI1tmfm4fmftoFSCkenf9tyAd6YNxbT+4qfYA9kX8TfvtoMlqMOe+LYMzOAF+aPwrxh/EXDbuTk2RI88+F6+3aPyGPMj88bisWTxCfRZp2/jGfeXgOTxSZa4+45A3DffMenU9rIu1CJv764EgaDWXQl27kz++CR+0eKdhSKLtXgr0/9CH2zEazI+h2TpqbgqWcmiY5WXS6pw7MPfI/62hbR9TtGTkzC396YKbpCaXV5A55b+AWqyxtFawwY3R0vfrIYTiIL59VXN+H5WR/icmGVqFonhBCkDO2GV5c+LFja/lqa61vw/IR3UJhVLOp7RRiC7gO64O1Nz/GWnFf5fSFl/lZPzaj8auw9fxF/X78TnAgnBAA4Ctg4Dn9dvRXHC0tEaRzLL8Zzy7ZdcRAc21NqT3p9eeVOHDhbKEojs7Acf/1qM2wsJ6oxX5vFuyv2YkdaniiNvOJqPPOfdbBYxTsIAPDJykPYeDBHlG1RWR2eeXstTGZpGt+tPo6VW9JF2VZUNuKvL65EqwQnBABWrTuNH5cfF2VbW9OMZ/+yDM1N4p0
QANi6KRPf/G+fKNvG+lY8/+AS1NfqJRUR27c9G5+9t01Un56WZiNevPMr1EhwQgDgxN5z+M8Lq0RpmFrNePm2/+JyYbXogmuUUpw5nId/PvStqFYCFpMFL8/4FwqzS0R/ryhHce5EAd6Y9x/Yrmyhqvx5UB0RlV8Fs9WGF9bukPVYSoHn1+wAywnfBG0shxd/3g5xbs6NIsBLy3dcTWjlfy4ULy/ZDo6TrkIAvP7jLhhMwh1gKaV44+sdsLKcrGJr/1y6G40tjrudvve/XTBZrLK6HH+yZD9q6vQO7T76bDcMBrOswnTfLD2Essv1Du2+/GwPmpoMsjRW/HQMF/IrHdot+WwvamuapVdLpcDmVaeQk+HYkV7+2R5UlNRK7htEKcW+Dek4deC8Q9s1X+xG0bnLkiuychzFse1ncHiTYwd005e7cf5kgXQNlkP6nhzs+uGQpMep/P5RHRGVX4XtufloNpllFfXiKEWVvgUHLxQJ2h04V4havUHW5E0BNBlM2H3leDAfJ/NKUVbbJGvypgCMFiu2nRaeMM5eqsSF0hrZVWVtLIcth3IFbS6V1uHMucvyG9kRYOOebEGTiqomHD9VKLuxIMMQbNyaKWjT0NCKg/vPSYqEXItGw2DThjRBm1a9Cbs3Z8ou2a7RMNi04qSgjdlkxbblJ2RrMBoGm344Imhjs7LY/O0B2Z85wxBs+na/oA3Hcdjw6Q7ZnZoJQ7D+v9s7pNOzyu8H1RFR+VX48Ximoj4lGkKw7ESmoM3Ph5VpMITgp8PCGisOZipqlEcI8NO+DMEb7ao9ZxRpUAqs2J0pOOGs26nsdXAcxdodmbBdOe3UHpu3nVF0WojjKDZvz4LJZOW12bYlEw4CZYKwLIddO7LRojfx2uzecgYWi/ztApblcGjPWfvJFx4Obj0DQwv/c3AEx3I4fTAPlaX8EaQTO7PQKPAcHGpwFLknLqL4fDmvTcbeXFQW1UDWigP2LZpLOaU4f+qizGep8ntEdURUfnGqmluQU14lK4rQBkspjhQUo9Xc/rZGs9GEEwWlijQ4SnGmuAI1zS3tjlttLA5kyV/hA3YnobCyHiU1jTzjFLtP5inSAIDKumbkl1Tzju86fF6xRmOzEdl5/JPS7n1n5UdcrmAwWJB+pph3fN/uXMWrZ6uFxckT/BPf/h3CkR8xcByH4wf484MObjmjqCMwYHekjwg810Mb0xV1BAbskZdDAtszB9ccV9StFwA0Wg0OrhaXH6Tyx0B1RFR+cepaDR12rQZD+7kPDSJyIsRSz3OtplaTIkfnWhr07b8nJosNFit/lEGSRnP7GhxHoW+Vv/oWowEATc0d85k0NvJrNDS0Kr4+IQSNAtdpqG2RvcJvQ8Mwghr1Nc2KOgID9t4zTfUCGtVNijoCA/atkyaB3KDG6mZF3XoBuzPeVNOs6Boqvy9+cUfk8uXLWLRoEfz9/eHq6oqkpCScPn36l5ZVuYVwlGQqBStPAlxHath4rtWhGnyvQ0Fbd7EalFLZHYdv0hCYdJS0qL9OQ+A6cnNDroUQ4dfRIZ/JlTLrvBoKJ+//vw6/E8t2hINLKWxWodfRURod44yr/D74RR2RhoYGDB48GE5OTti2bRvOnj2LDz74AL6+wuWlVf5YeLp0XM8Jb57+FV6uLh2m4cWn4dZxGp4813Jz0Ununsur4d6+hkbDwEVGr5F2NTz43xM3hVU/xWh4eCjX4DgKT09+DU+FlUUBgGMpPLz4Nbx83ZRrUAoPb/7n6uXvcbVQmXwIPHz4n6unr4ekTsDtKjAMPHzVZnl/Jn5RR+Sf//wnIiIi8N1336Ffv36Ijo7GuHHjEBsrr9GWyu+TcB9vBHgou9ESAFH+PvB1a/9G6+/phgh/b8X32SAvD4T6tl98x9XZCV3DAqFwKx/ebi6IDm7fGWcYguQuYYpLwrs6O6FrZCDveK8e4YqSVQFAq2XQPa4T73jv5M6ymsRdCyEEid35e7307hPtsAuwGJK
SI3nHUvvHKP48KKVI6hXFO548IE6xBsdySOwbwzvec2BXEIW/ENbGImlgF36NYQmKI2GsjUXPoQmKrqHy++IXdUQ2btyIPn36YO7cuQgKCkJqaiq++uorXnuz2Yzm5ubr/lR+/2g1DBb0S1F0ogUAFg1I5a20SQjBgiEpiq7PEILbhyQL9my5fUSKqEJpQhpzhvaETqAp3rwxqYqSPDUMwdShPeAmUC579oRURcmqGoZg3JAEeAtUwZwxNVXRtoZGQzBkYBwCA/gbDU6d0VvR9gzDEPTqHYXwCD9em8lz+ir6PAhD0K1HGOLiQ3htJszrJ/v6gH17KSI2CIl9o3ltxtw2QFZn32sJDPVFn1HdecdHzB8IF4VRKq8ATwyeIb46sMrvn1/UESksLMTnn3+OLl26YMeOHXj44YfxxBNPYMmSJe3av/vuu/D29r76FxER8Us+PZVfkTm9ExVtOei0GkxPFl4lTevTHU4iS2m3B0MIZvZNFLQZ36cb3BRsa1BQzBqcJGgzolcsfDzkbwewHMXsUcKl5PslRyFYYIIXozFrfIqgTWL3MERF+sv+3FmWYubUXoI2sXHBSOguvzkix1FMn9VH0CY0wg+9BsTKjlhQjmLGggGCNv7B3hg0LlH2tgalwPQ7hwi+D54+7hg5u6/sKBVhCKbdN0Kwd46ruwsm3j1S9utgNAymPjBGdLl6lT8Gv6gjwnEcevXqhXfeeQepqal44IEHcP/99+OLL75o1/6FF15AU1PT1b/S0tJf8ump/IoEerrjnsHCN3whHh0xAB4Ock28XF3w0FjhG74Qd4/sA39P4S0kV50THpsmvs/KtRAAtw1PQai/cN8FrVaDx+cPladBgMlDuiM61F/QjmEIHrtjuCwNhhAM798FCQLbMvbnQvDwfSPlaTAEfXpFIVVgy6SN+x4aJcvZYRiCHonhGCCw1dDGnY+OAsMQyTqMhkFst04YMoY/itDGgsfHQqtlJGtoNAzCYwIxekZvh7bzn5wAnauT5KPCjIZBYKgvJiwa4tB29lOT4e7tJtkZYTQMvAO9MO2RcZIep/L75xd1REJCQtC9+/U/wISEBJSUtF/u2NnZGV5eXtf9qfxxeGr0YEyU0LyujXm9k3DfUHGh2vtG9cXsfsJRjfaYnBqPx8YPEmV72/AULBwlvFK/EQJgeM8YPD1L3OQ/dWgi7psuzakiBOiTEIkX7xoryn7UwG54dPEwSRoMIejepRNefXyiKPv+fWPw1CPins9VDYYgJjoQb7w0XVSkIzmlM/72wlR7A0OR8yvDEISF++HNf8wTVfciPjEcz787F4QQ0ZM4oyEIDPbCW/9dBJ2IFX50txC8/NmdYDSM6OiLRsPA298Db39/P1zcHDekC40Owms/PAKtk0Z0TRFGw8DD2w3vrHoSHt6Oc72CIvzx9qZnoXNxEu2MMFoGru7O+MeW5+Eb5C3qMSp/HH5RR2Tw4MHIy7u+iE9+fj46d5becl3l9w/DEPxrzkTcNcg+iWsEZg2G2Fefj44YgNemjRYdeieE4NW5Y/DgmP4gV67DR1uy5t0jeuOd2yeIvvkTQvDMrGF4fPpgMISI0pgztCf+dd9UaCWsEh+YOQjPLBwJhhGnMWlQd3z49ExJ21MLpvXFCw+Pg1bDCL7HbRrD+8fh41fmwlnC9tT0Kal4+bmpcHLSCDoKbVsGfXtH4+N/LYCbm/hcg7Hjk/D6246fV5tGz+RIfPL5XfCScCJm6JjueOvTxXC90oGW77W0aXTtHoaPf3gAfhK2wPoMj8c/fngQ7ldO8fA5PW0TfOeuwfh43RMIChV/ErHnoK7414Zn4OXrIUojLDoIH+94HmExQaI14vvG4aODr8Ovk8911+LTCAr3xydH30J0kuMImMofD0J/waL+p06dwqBBg/D6669j3rx5OHnyJO6//37873//w8KFCx0+XkobYZXfF0V1DVh5Khsr07Jvqpbq5eqM2/smY26fJIT5yP/cL9c
3YdXxbKw8lgW90XzdmIeLDnP6J2HewJ6ICPCRrVHVoMeaI9lYdTALja3XF/BydXbCzEGJmDOkJ6I78SdDOqK2sRUbDmZj9Z5M1DVdX9zLWafF1CE9MHtUMmLDA2RrNDQZsGVfDlZvz0BN3fWVZZ20GkwYloCZ41PQLSZYtkaz3ogdu3OwZkMaKquuT0TXahmMHp6AGVN7Ib5rJ9k5Hy0tJuzekY11a07hclnDdWMaDcHQ4QmYPqsPEpPCZWsYDWbs3ZqF9T+fQElhzXVjhCEYPDIeU+f3R3KfKNkaJqMFBzZnYuPSwyg8V3G9BiHoNzIBUxcPQurgLoI5G0JYTFYc3pyODV/vQ37GDdVrCdB7eHdMvXcE+ozuITuvxGqx4cj6U9jw2Q7kHs2/aTx5RHdMf2Q8Bk7pBY2C/C6VWw8p8/cv6ogAwObNm/HCCy/gwoULiI6OxtNPP437779f1GNVR+T3BaUUudXVKG1sQqvVCk+dDjF+fugSwJ+vYLLakFVWiSajCYQAPq4u6BneCTotfyj7fFUNiusb0Wq2wN1Zh85+PogP5j+qarHZkF1SiUaDCaD247OJkZ3gInBypaCqFoXVDWg1W+Dm7IRIPx/EhwbyTixWlkVuUSUaWkyglMLL3QU9IoPhKrBCL6qqR0FlHVpNFrjqnBDq54UekcG8GjaWw9lLlWjUG8GyHDzdnREfFQwPnronAFBa1YCCy7VoMVrgotOik58nEmNCeDVYlsP5wio0NBlgtbHwcndB15gg3pokAFBR04T8omq0GC1wdtIg0NcDSV35jyBzHEV+QSXqG1phtbLwcHdBXGwQvAWiE9W1euQVVKKl1Qydkwb+fh5ISgjjnSAppSjIr0RdfQssZhvcPVwQExsEX4H6FHV1Lcg7XwF9ixFOWg18/TyQlBQOLc8ESSlFYX4V6qqbYTZZ4e7pgs6xQfAP5I+ANNa34nxOGfR6E7RaBj6+7khM7QwngdMsRfmVqC5vgNlggbuXKyJigxAY4sNr39zQinOZJdA3GaHRMvD2dUdinyjoBL6LJfkVqCqtg6nVDDcvV4THBiM4gv9329JkwNnTl6BvaAVhCLz9PNCjfyxcXPm3h8ryK1BxqRpGvRFuXq4Ii+uEEAHH1qA3IvdYPprr7Y6xl58HegzsCjeBk1oqtw63lCOiBNUR+X3QarFg47nzWJKegQt1dTeNp4SE4M5eKRjfpQucBRwMIUxWG7bk5uHHk5k4W3lzD5UeIUFY3C8FE7t3E3QwhDBbbdiZcwHLjmYiu/Tm1vBdgv2xcHAqJqfEw00n7+SM1cZib1YBfj6YiYzCm/u0RAX5YsHwVEzuEy/oYAhhYzkcOnMRK/Zm4vT5mxO+wwO9MX90KqYM6s5bWM0RLMfh+JkirN6RgeNZRTeNB/t7Ys64VEwZ3gM+XvJqyHAcxekzRVi7OQPHTl+8qRqsv58HZk1KxeSxSfCTWQCLUorMzBJsWJ+GI0fybzqm6+PjhunTe2HS5BQEyDxlRCnF2axSbFx5Cgf3nL2pzoanlyumzOmDSTN7I6iTvPwISinys8uw+efj2L8586ZKse6eLpg4tx8m3dYfIQJHlR1RkFOKLUsPY8+aU7Car28E6OrujPG3D8TkxYMRHis/claUW4bNX+/Fzh8PwWy8Plrq7KbDuIVDMeW+UYjqES5bQ+WXR3VEVH410i6X4/5169FkMoGg/ZYcDCHgKEWIpye+nzMLcf7CJzpuJKe8Cg/8vB51rQYwBO3W8WjTCHB3w9cLZyKhk/j9bMAeAXngm3Woam65eq0bIcR+TNLbzQWf3zUDyZH8dSHao6SmEQ9/thZldU38Glf+6+aiw4f3TUO/rtKOsFfWNeOx/6xFUWU9GIa0W/+iTcNZp8U/H56KwUn8tSfao7ahBU+/txYXimugYQhvPRJCAK1Gg9cfm4SR/aQlKTc2G/DCm+uQm1fuQINAoyF47vEJGD+yhySNlhYTXn1lLTIzi6HREN5
6JG2RnSeeHI+pU1MlaZiMFrzz4mqcOHwBGg3DW1eFYQgoBe5/cixmLRggaUvHYrbi/edX4dD2bGENjf37sPixMbj94VGSNKwWGz55fjl2rTzpQIMBx3KY+8gY3PX8FEnbRizL4Ytnf8TGL/eI0pj24Gg89N4ixUXzVH4ZVEdE5VfhaEkJ7l69FhyloprBaQiBi5MTVt4+H/GB/Fsp15JRWo47f1gNK8uJ1nDSaLD0jjlIDhfnKJwrr8YdX6yE2WYTVeSLIQQahuB/98xCv1hxjsKlqnos/vdyGMwW0RqEAB/dPx1De4hzFC7XNOHud35GU6tRlAYhAAHBOw9Oxpg+4hyFmoYW3PfyMtQ1torWoBT4+4PjMXm4uNNMjU0GPPy3ZaiobpJUSOypB0dj1mRxp5laWkx48okfUFJSJ0njvvtH4PbbB4qyNRkt+NtDS1BwvkKSxu33DMVdD48SZWux2PDSvd8gN71YUtO8aYsG4qEXp4pyRmxWFq/f8z+k7T8vqdPx2Pn98Zf3F4jSYFkO7971GQ6tPyW+wSABhs7oixeXPCI7T0bll0PK/K1+eiqyuFTfgAfWbRDthAAASylMVivuXLUGtSI68l5ubMb9P60X7YS0aVhYFvf/tA4VTfxdQtuo1bfigW/WwmQV54QA9p4eLEfx6JINKK5tdGjfbDDhwU/XiHZC2jQ4juKZbzfhQnmtQ3uDyYJH/71atBMC2B0ESin+/tVW5BRWOLS3WG146h9rRDshbRoA8M5XO5F+1nFdIBvL4bk31kh2QgDgwy/34NjpQhHPieLVV9ZKdkIA4Ouv9mP//nOiNN79+xrJTggA/PztIezYmCHK9sO/r0FumjQnBAA2/ngMG388Ksr2i1fXIG3/OUlOCADsWnECyz/ZKcr2+9dXS3NCAIACh9adwnevrZb0vFRuPVRHREUWX5w8CYvNJtpBaIOlFPVGI346c8ah7fcn0mGwWCRrcJSixWzB0hOOb+Y/HTuDRoNJlobZZsN3Bx13kl5zNBvVTeIn7zYo7BPz1ztPOLTdeuwcymqaZGlwlOKrTccc2u49kY/C0lp5peEp8OWqIw7Njp26iHMXKmWVVCcE+HLJAYcTZmZGMTIzi2WXbf/qf/scPjbvbDmOH7w550Qs3366x2FH3uKCKuzblCnZQWhj6ce7YDZZBW2qSuuw5YfDsrs1L/9oJ1r1RkGbxupmrPlomzQn5BrWfLwdjdVqO5DfM6ojoiKZJpMJG86eAyvz7sRRih8yMmFl+Vt9GyxWrM7Ika3BUooV6dkwWvlvtBYbixXHz0h2Qq5qcBQb0s+i2WgSsOHw80H5kwXLUezKvIC65lZeG0oplu/JkN3OjOMojmYXoby2SdBu1Y4M2f2COEqRlXcZhWXC0Z01m9Pll1KnQGFxLc7lC0d31q1PU5RXUFnZhIyMIkGbTatOKdJorG/F8UN5gjZblp9Q1OnW0GLGoe3Zgjbblh1V1IzPYrFi75pTgjbblx6Q/RsE7BW8d/xwUPbjVX57VEdERTJrcnJh45R12Kw3GrG74CLv+Jac8zBYhFdrjmi1WLA19+baBW3syS2wH+lVgNXGYmM6f6j+6LliVDW28I6LgVJg3fFc3vHMC5dRVFkvd0EJwF7/Yu2BLN7x/KJqnL1YqWjC0DAEa3fxR8JKyxuQnlWirOGfhmDdVv5IWE2NHkePXFDUjI9hCDasT+Mdb240YP+ObOUaK0/yjhtbzdi55rSiTreEIYLbM1aLDVt+OAJOQVNBANjwLX+UimU5bPxyt+StpWuhHMWGL3crer9VfltUR0RFMmmXbz52KhUtwyCtnP866aXlitvUaxiC9FJ+jYzicmgVJrkRYr8OH5mXlGtwlCL9Yhm/RoHy94rjKNLy+DXO5F1W1LQQsEd30s/x54nknLusTAD2RnkZ2fwa58+Vy45OtcFxFFlZ/BoX8ipuOj4rR+PsGX6NogtVDrdVHEE5igu5l8Ha2o9Mll+qQUu
T41wuYRHgcmENWpvb356pK29AXUWjMo2r12lwbKhyS6I6IiqSaTQZFa2+AXsX2maTmXe82WRW1KYesN/M9Sb+iIfeaFY+KVGgSSCqojeYIXvP5BoaW4U0TLIreF5Ls4BGi8HcIScT9C0CGq3mDnkdrQb+71WLwGuUgsFg4R1r1XeMhtXKwmKxtTvmKO9CCi08z7eFx3mQpdHU/rVamvi3HCVrNHbctVR+XVRHREUyThrlpZgJCJwE9rftvU8Ualw5yiuk0RFOgk6gNLWU3jJyNaT0lhFC6DpOWgayMxavga9KaZtGR1QT0Ah95h30Xgnlf2gFqqR2lE5HlkPne74d9V4Ja8grPti+RsddS+XXRXVEVCQT6O4u2LBODJRS+LvxV9wMcHeHhijdNiHwc+fX8PeQV/HzWjQMEbyOv6ebopwHwF5TJNCLv3Kon5cbWIU5O4QQBHjza/h6uyuOUAFAgEAFVF8fedVRb8TPh//zECrxLgVvIQ0/jw7R8PB04XVEfP07RsNJp4Gbe/sVfH0EStVLgTAEXjzvu09gx9WH8glQa039XlEdERXJTO7WTfZpljZYSjG5Wzfe8Yk9uipOiGU5DpO68xfqGt+zq+LJleUoJvTkfx1jU7sqSvAE7Dki43vza4zq1QVEYWiHUooJ/eN5x4f0ioFWqzyfZtygBN7xvqlRcJHQ1bd9DYJxI7rzjvfsGQFPT3ll7dtgGIIxY/iruHbtHirYb0aUhoZg1MQk3vHOXYIRFhWgKGqo0TAYNrEn73ZYcLgfuvSM4O3OKwZGw2DAuCToXNr/XL38PZA8LEHR6R9GwyBleAK8Osg5U/n1UR0RFckMi45CiKf8Gy1DCHqHhqJbIH+32N4RoYgN8JM9vRIAXYMCkCJQXTUhNAhJEZ1kH0klAMJ8vTAwjr91eWSgDwZ0i1R0BNLPwxUjEmN5xwN9PDAiNU5Rwqq7qw5j+vI7bd4erhg/OEGRhpNWg4lD+Z0EN1cdJo9LUqTBEGDyWP4JXKfTYtq0Xoo+D46jmDqFv9S7RsNg+rx+ivJdOJZiyuy+vOOEEExfNEhRrhbLcpi6QLhK7LR7his60cKxHKbeNVTQZvpDYxSd/uFYDtMfGiv78Sq/PaojoiIZhhDc2StVft0KSnFHL+GeHYQQ3NE/VfaNlgK4o1+Kw8lg0eBURRGLhYNSHU5qtw9Lkb09wxCC+UNTHOaazBuVIju6wzAEs4YlwcVBI7/ZY+VraBiCScN6wMNNuJHfjInKNEYM6eZwi2fylBRZ1wfs71X//rEIdtCcbvz0VGg08n4hjIYgMSUSnWOE2yCMmp4KZ2cnWXlODEMQEx+CrknCjeOGTUmFh7errMgLwxCEdA5AymDh9gEDJqXCN9hbVuSFMAR+nXzQf2KK9CeocsugOiIqsrgjNQW9w0Il54owhGBi1y6Y1M1xb5M5qYkYHBMpOWLBEIJhcVGYmeK4Cdqknt0wpkesZA0NIegdHYbbB/Z0aDs8MQZT+yZIvplrGIL48CDcObq3Q9ve3cIxd2Sy5DlJwxBEdfLDvVMGOLRNiOmExdP6SVSwawQHeOGheUMc2nYO98f9i4RX0Hwavr7uePSekQ5tg4O98cgjYyRrMAyBp6cLnnxyvENbH193PPHCFFkarq46/OXv0xzaunu44Jl350quSEoYAp2zE/76j7kOHXWdixP+9vEdkPrlJcSeUPvcf+90qKHRavDCdw/b7aTIEIBhGDz/7UMdmryr8uujOiIqsnDWavG/mTPQIzhI9CROAAyL6owPJk0U9Rgtw+DjuVPRKyJU9H2QEKBv5zB8OGeyqPodDEPwz9smYVCXSNH3QIYQ9AgPxid3TINORNY/IQSv3j4WI5P4t1fa04jt5I9PH5oBVweRijaNv94+EhMH8udg3KTBEIQH+eC/f5kFD1fhSEUbD80bgpljkkVraBiCIH9PfPLiXHh7uop6zKK5/XH7LPEOj4Yh8PVxx4dvzkeAyETRmbP
64K67xTs8bU7Iv96/3WE0pI3x01Lx4F/GidfQELi6OeOd/y5GeGdxHaqHTkjC46/PtDcwFPEFZjQEzi5OeP2LOxHdTVxTyH6je+Cv/1kEhiGitpsYhsBJ54RXvrkP3VI7i9JIHpaAv//wKLRajajICGEItFoNXlr6CJKHif/Oq9yaqN13VRRhslrxjwMHsSI752rJ9mu/UAyx19pw1znhrl698MSggZILfFlsNvx77xH8fDoLZpvtJg1y5f93cdJiQZ9k/GXUYOgkHjG2sRw+3X0MPx7JgMFivdo19loNwJ7nMLdfEp6eOBQuEo8LchzFVztPYOneNLSYLGAIuWlbiBC7Aza1X3f8bdZwuDnrJGlQSvHDjtP4futJNBvM7WrYO/sSjO/fDX+7fSQ83aQlb1JKsXpnJr5ddwyNzUYwDLlp64m5srod2a8rnrlrFHy9pJ9Q2rwzC98sO4y6htb2NRgCSimG9I/DXx4ciwAZyYp79uTi66/2o7q6mVeD4yj69Y/FU0+OF+2EXMvhvefw1ce7UHm5od329oyGgGMpUvtG4/HnJyMsUpwTci0nD5zHV//cgrJLtTwaDDiWQ2LvKDz6ynREde0kWSPzSD6+eGUNivMq2tVo+7f4XlF49O25iEsS15n6WnKPX8Bnf/0BBZnF0GiZm/rttP1bXEoUHnl/EXoM6CJZQ+XXQcr8rToiKh1Cs8mEtWfP4qfMLFxubobZZoOLkxOifX2xODUZU+Pj4eqk7EREi9mCjdnn8PPpLJQ0NMJstcHZSYvOvj64vU9PTE1KgIfEiftGDBYrtmaex/LjZ1BU0wDTFY0wXy/M798T03p3h6eLuOgBHyaLDTsz8rDi0BlcrKyHyWKFzkmLTj4emD24J2b07wFvd2UnOyxWG/amF2Dl3gwUlNXCaLZC56RBoI8HZgxNwrQhifCT4Rxci83G4mDaRazemYG8S1UwmqxwctLAz9sdU0YkYvrIJAT4KjvJYGM5HD9diLWb03E2vwJGkwVarQY+3m6YOCoRU8f3RLDCI6AcR3HqVCHWr09DTnYpjEYLNBoNvL1dMXZsIqZMTUVIiI8iDUopMk5ewsZVJ5GVVgSjwWKPsni7YuT4JEyZ3UeWA3KjRs7pImxadgzpRy/A2GoGYQg8vFwxbGJPTL6tPzrHBSvWOJ9ehE3fH8KpvbkwtJhBALh7u2LIpBRMvmMIYrqHKdIAgPz0S9j0vz04tiX9alVWdy9XDJzcC1MfGI2uvaIVa6j8sqiOiIpiTDYbthbm4fDlYjSZTGAYBn4urhgXFYcREdHQOIhqUEodhnEtLIsdBRdwsKgIjSYTCAAfV1eMjonB6JhYh5ETMRpWlsXeC4XYf+ESGoz2G5qPqwuGxkZhbLc4h5ETMRosx+Fg/iXsPVeIhlYDOAp4uzpjYFxnjO/RBc4OIidiNDiO4mheMXZnXUBjqxE2loOXuwv6xUVgfEpXh9s3YjQopTiVV4pdafmo1xths7HwdHdGSmwYJvWLh5uLsJMnViMz/zJ2HctDXVMrLFYWXu7OSIwLwcTB3R0ms4rVyM2vwK6D51Db0AKLhYWHuzMS4jphwsge8PIQdvLEaABA/oVK7Nydg5paPcxmG9zdndElLhgTxibBR6DOiBSNSxersWPLGVRXNcNsssDN3RnRsUGYMCUFfg4iQG23dkc6JYXV2Lk+HVXljTAZzHDzcEFkTCDGzeiNQAcRILEaly/VYNeqk6goroWh1a4RFhOI8XP7IzjCr0M0KktqsfPHI7h8sQoGvRGuHi4IiQ7E+IVDEBoTJPhYlY5HdURUZFNjaMXXWafx87ksNFvM0BBytWaIljCwUQ6d3D1wZ2Iq7uyRCjcn6RGIBqMR36anY1nWGTSaTNdpaAgDlnIIcHPD4uQU3JWaCk9n6REIvcmMJacysOx0JuoMRmgYcvU0Rtv/9nV1xYLePXFnv17wcZUegWg1W7DseCZ+Op6Jan1ruxpeLs6Y168n7hz
US1YBNZPFhhVHz+CnQxmoaNBDwzDgOA70Gg13Zx1mD0jEHSN6I8hbegTCamOx5lAWftqbgbKapus02rYmXHVOmD64BxaN6Y1Qf+m/RRvLYeP+bCzfkYHiinporly3TYNyFE5OGkwe2gMLJ/VGRLCvZA2Oo9i2LwcrN6fhYrF9i4LjOFD6/9s4Wo0G44Yl4PYZfREVLj0CQSnF7n1nsWbtaeRdqGxHw/7fUSMSMH9OP8TKmAAppTi47xzWLD+BczmXb9aAfatw6Ih4zF04EN0SQiVrAMDx/eexZslhZKcVXdGgdgeJIVe3OweOiMecu4aiewr/EXUh0g6cx5qv9iHjcD4Yjb1yLuWuaBACjuPQd0QC5jwwCj0HxsnSOHM4D6s/2Y5Tu3PAMO1osBxSRyRg9qPj0Gd0oiwNFemojoiKLPLqa7B4y2rUGQ0OC5YxIIj3D8D3k+YgyE18tcpLDQ24Y+0aVOr1jjUIQbSvL5bMmo1QCXVLLjc14+6f1qKkodHh0VyGEIR6e+K722ejs5+PaI0afQvu/34dCqrrHGpoiL366td3z0ZckPjJr6HFiEe/Xofc0iqH1dU1DIGXmwu+eGAWEsLFT356gwl/+XwjMgouA1T4AIaGIXBz1uHjx2cgOUb85GcwWfDCJ5txPKvo6gQnpKFz0uL9v0xHnx7iJz+zxYY3PtyCA8cv3JTf056GRqvB23+bhoG9Y0RrWK0s/vWfbdi1JxeEEMFy9BqNfRL8+/PTMHwofzG6G2FZDv/993ZsXpfebs7KjRqUAn99aSrGTnR8eqsNjuPwzX92Ys2Sww41GA0DynF47KVpmDxPfAIxpRTLPtyBZR/tuJqfIqTBsRzufXEaZt8/QnT9FUop1vx3J75+dbVojUXPTcXCZ6d2SE8jFWGkzN/qqRkVAEBxUyPmbVwhygkBAA4UefW1uG3jcjSZxTX5qtDrMX/lClFOCGCvN1LU0ID5K1agziCuC2hdqwELl65EqQgnpE2jokmP25euQGVziyiNJqMJd3y9ChdrHDshgL2KbF2LAYu/Womy+iZRGgazBfd9vhrnyqpFtXhhOYqmVhPu+XQlCqvqRGmYLDY89sk6ZF4sB3XghLRptJoseOg/a3C+pFqUhs3G4q//3oCT2cUAxGmYLDY8+a+1yMoX1+WZZTm88v4mHDxRYNdwIMJyFFarDc+9uw6ns4pFaXAcxT/e34Lde3OvaAiLsCwFy3J4/e31OHLsgigNSik+fn8btqxPv6rpSIPjKN57cyP27swRpQEA3/x7B9YsOSxKg2PtkZhP3tqIbatPidb46SO7E9J2DUcaAPDNOxux7psDojXWfrYLX7+6WpLGj//chGXvbRKtofLroDoiKuAoxT3b16LFYpZUup2lFMXNjXj2wA6HtpRSPLRpIxqMRskalS16PLVtqyj7p9ZtQZW+RbJGg8GIx9ZsEtV07aU1O1BW3ySp8BZLKVrMZjz8w3pRGm+t3ouLlXWSNDhKYbLa8MhX62ETUanywzUHkVtUJanYGkcprCyLx/+7DmZr+51hr+XLNUeRfr5UUtE4Su0T7NMfrEOLkb+TbhvL1p3E0dMXJTXMo9T+98K769EgotX9+k3p2Lv/nKS+f222r7+9AdXVzQ7td27NwtYNGdJ7CxLgvTc3oqzEsQN6eFcu1iw9IlHAzsdvbcTF8xUO7dIOnMePHzq+J7THV29vQO7pQod2uccL8NXLq2Rp/PjPTTi9R7zjpvLLozoiKjhUVoSLjfWy+sewlGLnpQso1Quv9NMrKpBdVSVb40hJCS7UCd9oz1fV4ERxmWyNrPJKZJVXCdqV1DVi3/lCeRocxcWaehwvLBW0q2luwdb087IqvrIcRXl9Mw6eFb6ZN7easO5IjiwNjqOoazZgV1q+oJ3RZMWqXZmymvZylKLFYMb2I+cE7axWFis2pcmqwEupPfqyZU+28HPhKJavOiFDwe6MsCyHTVszHT6XlcuOyesdQwEKig1
rTjs0XfX9Idm9YxhCsOGnYw7t1ny1T3bvGA3DYL2IqMjaz3dBI7P3EaNhsPazXbIeq/LLoDoiKliak6Gomy5DCH4+myVo88OZTEUaGkKwLOuMoM3P6VmK+pRoGIJlaZmCNitPZcnuTdOm8dNxYY21x5Wt1hhC8NNhYY1Nx8/CdqXui1yN5fuENXYdPw+j2SpbAwBW7swQjHQcPHkBTXqj7OtTSrFmW8ZNNTGu5VRaIWpq9LI1OI5i4+YMWK3873duVilKimplOW2AvTfN9k2ZMBotvDYXz1cgL7tMdu8YluWwd8sZ6Jv53+/yohpkHM6X3TuGZTkc3ZGNuir+hU1dRSOObsm4qcaIWDiWQ/q+syi/JG57UeWXR3VE/uRUG1qxt0TeCr8NllIsO3eGd8JoNpuxNT9fscaqnBxYeCZPs82GtVm5irrpshzF5tw8tJjbv5lzHMWq0znKXgdHse98Iepa+LcDVh7NUtT/hqMUJy+U4rJAPsrqg1mSS4PfqHG2uAoFl2t5bdbuzVLUHZYCKK5oQE4B/3bAhp1nFDWwA4CauhakZZfwjm/eqlyjWW/C0eMFvONbN2ZCo6ADLQCYTFYc3MsfQdqxLk2xBmtjsW8L/4Jg58qTijrptrFbIB9l1/KjCntN26MiO348rPAqKh2F6oj8ySnTNynq4NlGk9mEFmv7E3ilXg8bJ7+7ZhtGmw31xvYn8NpWA8w2+Sv8Nmwchyp9+0mrzSYz9CbHOQuO4ChFeWP7OQNWlkVNc6tiDQAoq+N3RC7XdsznXlbDr1Fa1Sh7hX+dRnUjv0Z5g+yGgtdyuZJfo6S0XrEGwxBUCGiUFtcKRmXEoNEyqChv4B2/3BEaGgYVpfW84+XFNYq69QL28u0VJfwObnlhteztpTYopai4VKPoGiodh+qI/MkxWJWFzq+llccR6VANS/vXMlj4Q9LSNXheR0dq8ERdlG5lXIuB51o2lhOVzCoGvtcBAGaez0oqBiP/dUwm5RoMQ2AQ2NIQ2u4QrUEIDAb+6wiNiYUAMApcp7VFuRNNKWBo5b+OocUsKWm4PTiOwijwXE2tZsWOIeUoDHpxp/1UfnlUR+RPjpvCsuvX4uHUfuGxDtXQtV9AzZ3n32Vp8JSJd1dYPv46DZ4qpa7OHfde8T1frYaBUweEzwH+1wEALiKa9YnB3ZVfw9VBtVcxcByFuxv/ddwExkRrUCp4HXd3ZW0D2nAV0PDwVNY2ALBXN3UTeK7uHi6KoxUMQ+Dqwa/h4u6seKuMMATuXuKaMKr88qiOyJ+cSC9vRcmXbfi5uMKdx+EI8fSEk8RGd+3h7uQEP9f2bx4B7m6Ke9kAgE6jQbBn+9VJPZ2d4S2jAuuNMIQgzKf90tlOGg06+Ygv3sYHARAZ4MM7HhHkq3ifHQAiBSqgdg7165DCUZEh/BpREX6KJyUAiAjlLzMe1TlAsQbHUYSH8Wt0jg5QnL9hs3EIj+AvmBcRHag8R4RlER4VwDseFhukKC8IuPJeCVSkjejSSXFEhBCCsFhlfXdUOg7VEfmTE+DqjrFRcYpPzSzqnsI76Xg6O2NafLziUzPzk5LgxNMbRqfVYk5yD2UaDMH0pATe6ArDEMzv11PxqZlxPbrA151/NTZ/sHKNQfGd0cmX36GZO1x8Jc72YAhBz5gQRHfin1xnj05WFKYnBIgN90dCNP+EMX1ciqJJiQDoFOSF1B78nWKnTlKmAQA+Pm4Y0I+/iuuk6b0U52+4uekwdGQ87/j4Wb0VazjptBghUMV1/Nz+HeIkjJndl3d8zG2DFDu4HMdh/OIhiq6h0nGojogK7uyRqugkCADcniA8sS1KTlF8amZBz2Th59Crp+ITLQt6Cb+OuX2SFE2uLEdxe3/h1zGjX6KiVSXLUdw2OEXQZnL/BOichBv+CcFRittGCmuM7tdVcFvFEZQCc8emCk46g/vGws9BgzlBCDB7YqpgxCM1pTNCHDR/E4JhCGZMTYVWy/9+x3cPRbS
CaALDEEya3gvOAlt7UXHB6J4SKTu6o9EwGD01Be4CWzzBEX7oMzxBfh0RDYOhk5LhE8DvRPsGeWHItN6yozuMhkHfMYkIFogeqfy6qI6ICgaGRiDeL1BWNIEhBJNjuiHEQ3g7IblTJ/QODZWloSEEI6OjEeMr3AgtLtAfQ2M6y9boExGGHiHC4dowXy+MS+wiK2KhYQjiQwLRJ0q4Tbq/pxum9e0hW6NzgA+GJEQJ2nm4OmPOsGRZE5+GIQj29cCoFOEmZc46LW4b30vWFhDDEPh4umL8IP4VPmDPd7ltGv/qWVCDELi56jBplHAjNIYhuG3eAFkahBB7M7+Jws4nIQS3LR4k65QRIfbJddqs3g5t590zTHbEggKYfvtAh3azHxgpu44Ix1HMvHe4Y41Hxsp+HRzLYc5j42U9VuWXQXVEVEAIwTcTZsLH2UXSJK4hBF19/fHusHGi7D+bMhWB7u6SNSK8vfHvCRNF2X8wYxLCfbwlawR5euCT2VNE2b81cxxig/ykaTAEPm6u+GzRdFFh5RdmjkT3iCBJBdo0DIG7sw6fPTATGhE5OU/MHIJeceGSHB6GIXB20uK/j8+Ezknr0P6eGQMwMDlKmgYhcNJo8OHfZsFNRDLqbdP6YOSgbpKcKkIIGIbgvZdmwdvTcdLi1EnJmDg+SaKG/e/NV2chwN9x3s+ocYmYNV98Y7k2DUqBl96YiZAwxx2LB4yIx4IHR0jSaOOZN2chqovjvIqUQV1w7wtTZWk8+uZsdEvp7NCuW+9oPPb+Alka9742B8lDhR1clV8X1RFRAQCEeXph9YwF6OTu6XDSIFf+egZ2ws9T5/OeZLmRQHd3rJp/GyJ9fERrdA0IwIp58+HtIi5J1MfVBcsWz0VcoL+olThDCDr7+WL5nfPh7y4uxO/urMP398xF97Cgq8/TkUaItyd+vH8+OnmLS0R10WnxxYOzkBptj56I0fD3dMeSx+cjQiBJ9VqctBp89Oh0DOze+co1HGgwBN5uLvj6mXmIDeVPWLwWrYbBu09MxfDesVefpxAahsDdVYdPX5gjmBty4/N65clJGD+8h2gNF2ct/v3KHCQnhIvSIITgmScnYOrklKuaghoaAicnLd5+fTb69o4WpQEADz4+FvMWDhSpwUCjYfDyW7MxZLj4iXXxI6Ox6OFRdg0H2xsaDQOGIfjrW7MxekqKaI3ZD4zEvS9Ou3oNRxqEEDz21hxMXjRYtMbku0fgsfcXghDisNx72/h9r8/BnMfFLZxUfj0IVXro+xdEShthlY6h0WTE0txMLM3NQK3RAC1hwIFemXAJbJRDtLcv7kpMxfz4nnDROl4V30iz2Yyfss5gSWYmqlpaoGWYq5VEGUJg4ziEe3nhztRULEjqKes0jMFixc/pWfjxdCYuNzW3qxHi5YFFfVJwe6+e8HCWfnzSbLVh1elsLDueieK6xnY1Aj3dcXv/ZNzeP1nWiRurjcX6k7lYdigDhVX19kgHpaDXaPi6u2Le4J64fUgK/Dyk50vYWA6bj5/Fz3szcOFyLTQMA2pvYAKGIbCxHLzcnDF7WE/cNjIFgd7tnyoSguModhw7h5U7M3C2sOpKxIaCu6LBshzcXXWYMTIJ88f1QrCICMKNUEqx92geVm9JR/b5cvsESK/XcHF2wpQxSZg7uRfCOvnI0jhyrABr1p1CZlYpNBoCUFyn4azTYvy4JMyZ2QcR4fzJvEKcPFaAdStP4vSJQjAMASEEHEfBMAQcx0Gr1WDMhCTMmt8fUTGBsjQyjl/E+mVHcfJgHgi5UYNCoyEYPqEnZi4ehLiEUFkaOScvYt03B3BsV479HsIQcCwFo7FrEEIwdFIyZt47XFQkpD3Ony7Eui9249CGNFD6/8+fYciV5oYUgyalYObDY5A4sKssDRXpSJm/VUfkTwbLcTBzNrhqnAS3CGwch93FF3GkrBiNZhM0hMDXxRXjo7ugf0i44GM5SmGyWeGqFdZgOQ4HiopwoOg
SGk0mEELg6+KC0TGxGBQZKbiytWvY4KLVOrQ7eqkE+y4UotFoAgWFj6srhsVG2fNJBLYwKKUwWm1wcRLWoJTi1KUy7Dl3EQ0GIyil8HZ1wYDYSIzoFgOtwIrwqoZWK7gCppQis6gcu7MK0NBigI2j8HJ1Rr+4CIxMiuU9TXSthrNW4/D15hZVYVdaPur1BlhZFl5uLkiNC8Po1DjBrRh6pfOvzoEGAOQVVWPX8fOoazLAYmXh6e6MpLgQjO7fVbD2CKUUZosNWq1G8D0FgMLiGuw8dA51Da0wm23wcHdGQlwnjBkaL1h7RIpGSWkddu3JRW2tHiazDR4ezoiLCcaYUd0F64JQSmGx2KDRMIIJrABQXlaPXduzUV3ZBJPJCg8PF0THBmH0+ER4OqiDYTHbQBh7jooQVeUN2L0pE1WXG2A0WODu4YyImCCMmZoCb193BxrWKxrCC5LaykbsXnMKFcV1MLaa4ObhgrDoQIyZ3Q++gcJOp8Vs7/KscxbWaKhuxu7lR1FWUAWD3ghXDxeERgdizG2DEBAqvG1ltdhAOQqdS8fV8fmzozoiKtdRadBjeUEGll/MRI2xxb6aBkGMlz/u6NYbM6ISeYuRiaXG2IrleVn46XwmKlr1V1fsnT19cEdCKmZ3SYS3s7IaHPVGA1aezcFPOVkoa7aXKCcAwr28sSgpGXMTEuHLU2dELE0mE9Zmn8WPGWdQ0tB4VaOTlyduT+mJ+T0TRW/h8NFiMmPDmXNYdvIMiuoawFF7xCnQ0x3zeidhbu8kBHtJjzpci8FixdaM8/jpSCYKKuuuavh5umF2v0TM7Z+EEF9lvymTxYYd6XlYfiATeZdrriYP+ri7YvrA7pg7pCfCRW4T8WGx2rDn9AWs3J2Jc0WVV3sJebm7YPLg7pg9MhmdOznOjRDCZmNx8FQBVm3PQE5+xdUjrh5uzhg/NAEzxyUjJkLcVhSvBsvh2ImLWLspDVm5ZbBdadjm5qbDqGHxmDE5FV0U1rVgWQ6nThZiw9rTyMgohu1Kkz1XVycMHR6PaTN6I15mZKMNjuOQcfISNq48gbRjF2G12J0EZxcnDBoRj6nz+qF7zwhFx2sppcg+WYhNPx7FyX3nrnNE+o1MwNRFg5DUL0axxrnThdj03UEc23YG5ivVc52cteg9ojum3jMcKUO7gemA+kd/Vm5JR+Qf//gHXnjhBTz55JP48MMPRT1GdUSU0WQx4u8nt2NbyXmA4KZGagT2THgXjRZ3deuLp3sOh1biD6/FYsYrx3Zj/cWzoBTgcLMGYC8Utig+Bc/3HQGdwOq9PYxWK944tA+rz+WC5W5U+H8dDcNgXvdEvDx0BFy00lY2ZpsN/9x/CMszs2G90ljvRh2GEBAA03sk4NWxIyVXc7WxHD7ccwQ/nMiAxcavAQATenTBa1NGw0vidg7HUXy++zi+P5AGo8V69TO+UYOCYmT3WLw2Z4zk7RxKKb7bdQrf7DyFVpPlasLkdRoMAeUoBnePwisLxiLIR5pjRSnF8l0Z+HrjMTS3msEQctP3V8MQsBxF34QIvHzPeIQESL9HbNidhS9/PoxGvfFqSL89jeSEMLzw4DhEChQ+42Pn3lx8/s1+1De0tq+hIWBZioRuIXj2yQmIiZK+1XLwwHl89sku1NboeTQYsCyH2LhgPPPsJHTtFiJZ48ShfHz23hZUljdevV57Gp1jg/DkS1PRIzlSskbG0Qv476vrUF5U264Go2HAsRzCogLw2BuzkDJQ+ORWe+SevIhP/vYTivMqBDWCI/3xyNvz0G9skmQNlVvQETl16hTmzZsHLy8vjBw5UnVEfgUqDXos2PMjSlsaRdXWIACGh8bi86Gz4awRl/dRa2zFgm0rcKGxTlS3WAJgQEgEvh07G25O4ibxZrMJi9evRk5NtSgNhhD0DArGkulz4CUy76PVYsF9q9fjdOllUY3gGELQNcAfS2+bAz83cREYs9WGx5ZvxOGCYlEaGkIQ6e+D7++
cIzo6YmVZ/G3ZVuzO5u/yep0GQxDs7YnvHpqDMD9xdTJYjsOrP+7E5pP8XV5v1PDzdMNXT8xBVLC4SZxSivd+2IvV+/i7vN6o4enmgs+enYMuEeImcUopPlt2CMs28nd5vVHD1UWHD/8+G93jxE/iS346im9FdnllGAKdTov3Xp+D5CT+Ams3snrlSXzx6W7RGhotgzfenou+AgXWbmTr2tP4+J1NAG52OtvTYBiCl/4xD4NGJojW2LshHR88u+JqXocQ9hNJBM+8Nx+jpvcSrXFkayb+8eA34FjO4fFf+5qA4PF/3Y6Ji9TiZ1KRMn//4nGnlpYWLFy4EF999RV8HdSBUOkY9FYz7tz3s2gnBLCvmg+UF+KvxzaJmvCNNivu2rkaBSKdkDaNE5VleHTfRrAiuvGaWRvu27weuSKdEMAe9cmursKDWzbAwjruxmvjODy+fjPSyspFd6PlKMWF2jrct3odTFabY3uO4tm123HkYoloDZZSlNQ34r4f1qJFoLFcG5RSvL56N/bkiHNCAHvhs6omPe7/31o0GcQ1AHt/7QHRTkibRr3egAc/WYM6kV2Fv1h7VLQT0qbRbDDh0X+tRmVd+12Nb+THDadEOyFtGgaTBU+9tQalFfwdbq9l3eYM0U4IYP+emM02PPvqahQWiesMu3N7tmgnpE3DZmXxykurkHe+QtRjDu89i4/e3nTFQRCpwXJ467mVyM4oFqVx+mAe3n92BTiOiioYSKld5/1nVyDtUJ4ojexjF/DuA1/DZmNF1SBpc4g+/ttPOLIlQ5SGijx+cUfk0UcfxeTJkzFmzBiHtmazGc3Nzdf9qUjni9xjuNhcJ7nKKAXFlpJz2HP5gkPbb3PTkFtbLVmDoxR7SwuxodDxZLY8Jxunyy9L1mApxfHLpVh5Ntuh7cbcczh4qVi0o3OtRnZlFZamO75B7T5fgB1nL0jX4Cgu1tTjm8OnHdoeu1CC9afPSi6IxXIUlxua8Pmu4w5tsy5V4Of9mdIErmjUNrfi441HHNoWlNXi280nJGtwHEVzqwn/WX7AoW15dRO++OmQLA2jyYIPvtnj0LauvgWffCneQWijLZH1/Y93OLRt0Zvw4QdbZWgArI3De+9ucjjpm4wWfPDaeska9pNEFO+9vAacg0WHzcri/b8tv3kPUaTOv/66/Go+DB8cx+H9x5fYHRAZOv9+6geYOqBDskr7/KKOyPLly5Geno53331XlP27774Lb2/vq38REeLDkyp2zKwNPxWkS5702tAQgqV5whMfy3FYcjb9pnwQsTCE4Puz6YI2lFJ8f0bYRggC4PszGQ5vtEvSMmX3daEU+CEt02F058cTmbJ74HCUYvnprKs5JXz8fCRTUvGza2E5irUnc2CwWAXtVhw8o0hj6+nzaHYQeVm9V5nG/vQC1Da2CNqt23lGdodYlqM4mVWMsspGQbstO7IgIujXLhxHkXu+HBcvVQva7dyRBauDCVhIo7ioFmdzLwvaHdiZA0OrWZYG5SiqK5qQfrxQ0O7Y7lw01bfKap1AKUVTfSuO7c4VtEvffw7VZfWgcqqxUsCgN+HghjTpj1URxS/miJSWluLJJ5/EsmXL4CKyGNULL7yApqamq3+lpaW/1NP7w7K9NA9NFnFh9vZgKcWRqiJcaq7ntdlXVogqg/DNXgiOUpypqUBObRWvzYnLZShqapTp6tgXPRcb6nGqnP9Gm11Ridwq8ds+7VGhb8GhS/zh58KaepwsKlPUA6fBYMSe8/xbLhWNeuw/V3j1RIkcDBYrtmXyh7gbWozYnpanSMPGsth44izveIvRjM1HchVpgALrD+bwDpstNmzYnaWoMRvDEGzYzb91ZGM5rNvs2AkWQsMQrN+SyTtOKcW61adl/z4Ae5LshnX8kyulFOt+Pq7odAqjYbBxpXCEa+PSI4q6GzMMwcYfhKNtG789ILv/DWCvf7Lhm32yH68izC/miKSlpaG6uhq9evWCVquFVqvFgQM
H8PHHH0Or1YJtZ//e2dkZXl5e1/2pSGNryTkwChu8awjB9tLzvONbLuUp6nLbprG1iH/i21KQBy1R9vXUMgy2FuTzjm/LuyCqFLoQGoZg23l+jZ1nLyh+rxhCsC2HX0NKXggfhEDQETmQXSgqr0cISoHtp/k1TuQUw2xxnHMjBEcpdhzn/+5mnC1Di0HeCv+qBkex8zC/xvm8CtQ3iMuH4YPlKPYc4N++vFRYg4qKRnnbGW0aLMXB/ed4nbKq8kZculClyKHiWA4nD1+A2dR+tK2pvhU5py8pcgw5jiLn1CU01bf/npsMFpzekyu7/w1gj+4U5pShqqRO9jVU+JFeFlMko0ePRnb29Xv0d999N+Lj4/Hcc89BI/EIp4o4qo162VsmbRBCUGcy8I7XGFsVd+slIKgX0KgzGsFSZRMfRynqjfwa9QajuOw7AViOos5g5B2vazXYV5RKbuaUorZF4HW0GKBhGNiU3GgpUKvnnzwbWgxXj7IqQShhtV5vaPe4sVTqm/nfqwaBMSk06fk/84bGjtFobTWDZbl2S6Q3KHR02rDZOBgMZnh43By1buSZ2KVCKUVzkwGBLjefzGqskx9ZvZGm+hZ4+91cgE3f0KLImbqWxlo9giPVrr0dzS/miHh6eiIx8fqulu7u7vD397/p31U6DpvCVSsAgAI2yr/3bOPk7UvfICH4XPnqhUjSoBRWAY0Oea8AWAXeD6UTdxtCJ4CURiraEHJk7GPK3QTBz5zl0G5REokIvR831oyQrSFwnY76PNqu1Z4j0lGvA7Anrrb777+ChpIohVgNvn+Xg81BrpaKPNSycX8wfJ2VVf1sw1vHn9fj6+wGonD7hwDwEtDwdnbugO0fRrCaq7eLs6L9b8C+beIjkAPl5aqsYm0bfu789Uo8XZwVhbbb8HHjfx2ers4OTz+IwUtIw82lQ1auHgLvuadA6XUpuAloeHSQhpOThresvqensirF19JeNAQAPLw6UIPnWh7eyiohi7mWh0/H3BPtGh13LZX/51d1RPbv3y+6mJmKPAYEd1acI2KjHPoF8VdF7B8SAcUrY8qhfyf+U1H9wyIUb//YKId+YfzdVftFhCuOinCUom8Ev0bfKOUahBD0ixLQiI1QlHAL2B2q/nH8n3mfruGKI1QahqB/N36NlK5hChXsGv2682skdg1VlBjZptEnkf+7261LJ4f9XRzBMESwqFl0TBBcXaVV9m1PI6F7KG/n2rAIf3j7Kpt4CQEiYwLhzuPs+Ad7ISjUR5EGAASF+sA/uP2cQncvV0R27aR40eHt74GwmCBF11BpHzUi8gdjXmwylPzeCIBIDx8MCo7itZkV1wM6kdVX+QhydcfoyFje8cldusJTYgn1G/FydsbkOP5um6O7xMJfZGVUPly1WszowV89cmB0JMJ9vBS5hhpCMDuVfzuzZ2QndOkUoOhzB4C5A/hLWceFBCAlJlT2UWfAvk01b2hP3vGwQG8MTIxS5CiwHMW80Sm84/4+7hjRv4vsI8JtGnMmpvKOe3q4YNyoHu1uqYiF4yhmT+WvGOri4oRJU1LAaOS/Do6jmDm7L++41kmDKXP6Kvo8KAVm3j6A1wlgGAZTFw9W5CQQQjDtjsG8fWEIIZh+30hF0TaGIZhy1zBoFTqYKu2jOiJ/MAJc3DEpMkHRtsZd3foK3hi8dM6Y06WHbA2GENzZvZdgXxsXrRNu69FTtoaGECxMTIazlt9h0jIMFvVKkT25agjB7J494OHM7zAxDMGi/vyTlkMNhmByUjf4CmzNEEKwcEiK7NQKDUMwonsMOvkId0G9fXiK7MgLQwj6do1wWOZ9/pgU2dtMhAAJUcGIjxJuHjdnQqrs3B0CICLEFykJ/BEqAJgxOUVRjkWAvwf69xEuwT51eio4Vv7k6unlgiHDugnaTJzZW1EkzMXVCSMnCPdqGTe7jyKnTaNlMHZWH0GbkbP7wllBBIkCmKCWef/FUB2RPyCPJw6BjtFKzuP
QEIJID1/MieFftbbxUM/+cHfSSZ7ENYQg2M0DixJSHNrem9Ib3s4usjR8XFxxV7JjB2BhajIC3d0lOzwMIXDX6XBfv94Obef06oFwX2/Jq3CGEOg0Wjw4rJ9D2ym9EhAb7CdZo61Z4CNjBzi0HZUSh+6RQbI0CCF4bMogh7YDk6KR2i1c1iqcgOCxuUMd2iXHh2FgarQsDQrgscXDHK7gu8Z1wqhh8bJX+g/fO8Lh5Bwe4Y9JU1JkR8Lue2AkdDrhyGZgsDdmLRgoTwDAHQ+NgqubcM6Ml687bntklGyN2x4eBS/fm0/LXIuruwvueG6qbI2ZD4xCQIiP7MerCKM6In9A4rwD8OXwOdAyjOh8EQ0h8HV2w9JRt8PDyXGyXaSnD74dNxtOjEb0JK4hBJ46Z/w4YR58nB1viQR7eGDJ9Nlw0Wolabg6OWHp9NkIcnfcLM7PzRXfz58Fd51OkoZOo8HXc2cg3NtxszgPF2d8e8cs+Li6iJ7EGUKgZRh8vmAaYgIcN4tzcdLiy/tmIcDTXZKGhmHw78VTEB/meO/bSaPBfx+eiRA/L9Eabc3J3rlrApJjHLegZxiCD56YhqgQP9GOQtvH9tLdYwXzQ/7fnuDNv0xB16ggyc7IX+4eiaF9xHV8ff7pSUjqHibZGbn/zmEYM6K7KNsn/jIeffrGSHZGbl84CJOniovU3fvEWAwZnQCp+4vT5vfDrIXinJgFj43BmJmOnfobGTOrNxY85rh9CADMfHAUpt4zXJoAAQZPScU9L8+U/NxUxPOrdN+Vi9p9VxnpNWW4/+AqNJiNYEDarS+iIQQspejqHYjvRs5HiJu09zm7thJ371yDGmNru63ar9WI9vLF0vFzEenlI0kjv64Wd21ci4oW/dVr8WmEeXrh+2mzEOcn7ax/UUMj7l25DsWNjbwaba8vyMMdX8+Zge7B0hLXyhub8eCy9bhQXedQw9fNFV8snI7kcGnt2mubW/HIt+tx9nI1b92PthOyni7O+PiuaegbK7zNcCONLUY89b+NyCws59eAPXrg5uyE9+6ZjCE9oiVptBjMeO7TTTh5tkTwdYACOict3nhgIkb16SJJw2iy4tWPt+Dw6YuCNVIIAbQaDV54aBwmDBPnILRhttjwz/9sw54D56DRELA8WymE2DvWPvXIGEybmCJJw2Zj8eEH27F96xkwDOHd2mpzuh54eDTmzHMcZbsWluXw5b+3Y8PyE2A0hHdLiGEIKAXueGgkbr/XceToWjiOw5J/78DK/+2zvw4+DY39Nc57YCTufHo8b25Ie1BKsfzD7fjhvc0gBPzvlYYBx3KYdu8IPPDGHEVbR39WpMzfqiPyB8dks2JLyTksyTuNnIbK68YIgBGhsbijax8MDYmRnSthZm3YVpSP73PTkFFzc0fPIaGdcVf3XhgVESu7kqmVZbGzsABLsjLaLdvePywcd/ZMxZjoWDjJLJZn4zjsu3gJS9MycKz45vYCqaEhuKN3CsZ1jRPMPRGC5TgcLijGspOZOHSh6CbXsEdoEBb3T8WEHl3hwnN00xEcR3G8oAQ/H8nE/nOFN+WOdOkUgEVDUzExpRvcdE6yNCilSCu4jBUHM7Ens+AmBzQ62Be3j0jF5L4JcHeRtzdPKUVWQQVW783ErlP5N+VchAd5Y/6YVEwe3B2eAseCHXG2oBJrd2Ri55FzsN1Qc6JTgBfmTEzF5BE94O0pP7H5wsUqbNiaiR27c2C5oT9MgL8HZk3thUnjkuDrI7zFIETRpRps2pCO7VuzYDZfX8nUx9cN02f0xsQpKQgIEM4FEqKsuBZb1pzGtnVpMN7QBM7L2xVT5vTFxFl9ENTJcaSQj4qSOmxdfgLblh9Hq/76dhXuni6YeNsATLqtP0IUFBarLqvHth8PY8uSQ9DfUBzO1cMZExYOxqQ7hiI8VjjfSIUf1RH5E1BvbkV2YxmaLEZoCAN/Z3f08usseJrlQlMNLrc2w2CzwNPJGbFeAQh1539
fmyxGpNeWodFiBEMI/Jzd0DcgEi5a/snrYmMdSlua0Gq1a0R7+yHCk/+m1GwxI626DA1mIwgIfJ1d0S84HG5O/JNXUWMDipua0GIxw0PnjCgfH3T29uG1b7VacKriMhqM9mqYPi4u6BMSBk8d/xZUaWMTLtU3oMVigbtOhwhvL8T482+RGK1WnLps1+AohY+LC3qFhsJboMZIeWMzCmsb0GI2w03nhFBvL8QF8d9czTYbTpdcRn2rESzHwdvVBcnhIfATOPlT1dSCi1V1aDGZ4eLkhE4+nujSyZ93pWqxsUgvvow6vQE2joOXqzOSwjshwJN/gqxtbkVBeS30RjOcnbQI8vFAt7BAXg0byyGj8DJq9QZYbTZ4ubqge2Qwgrz5t9Iamg24UFYLfasJOictAnzcEd85iFeD5ThkFZSjpqkVZqsNnm4u6BYRiBB//u97c4sR+ZeqoW81w0mrgZ+PO+Jjgnm3bziOIqegHNX1LTCZrfB0d0aXyCCEBvF/31tazci/UAl9iwlaJw18vN0Q36UT74qbUopz+RWorGmG2WyDu5sO0Z0DERHqy6thMJiRn1cJvd4IjYaBt7cbusWHQKtt30GnlCI/vxIVlU0wGS1wc3dG50h/dO4cwKthMlqQf7Yc+iYjCEPg5e2KbolhcOJxnimlKLxQhcul9TAZLXB1c0Z4Z39Ex/JHFS1mK/LOlELfZK9U6+nthm7JEdA5899/ii5UouxSLYytZri46RAa6Y+Y+BDe74nVYkNeRhH0Da3gOApPHzd0TYmCixv//afkQiVKC6pg0Jvg4qZDcIQ/uvSMUHw8+I+G6oj8QaGUIquhFMuLTmJ7ec5NJdC9nFwwt3NfzOncB2Fu/DcqR2TXl+PHgtPYWJJ7U9VQD60O82N64fbYXojydJy7wMfZ+mr8eD4Day7mwMxe31/ETeuEeV16YlG3VMT5yF/1XKivxY+5Z7DyXDaMtus1XDRazI7vgcWJKYj3D5Stcam+AT9lncHK7By0WK5fIeo0GkxPSMCilGQkBstfWZU2NGFFehZWpGej2XR9nxQtw2Byj25Y0CcZyWHyayVUNOqx6mQ2Vpw4g8YbOuRqGIKxiV2wYEAyekVJz3loo6apBWuP52DFoTOo019fBp0hBCOSYnDb0BT06yL/pl7fbMCGIzlYsfcMam7owksIMDgxGvNHpWBA986yj6U2tRix+UAuVu3MQGVt803j/ZI6Y+64VAxKiZYdAWxpNWPHvlys3pyGyxWNN42nJEZg9pReGNwvDlqZ2wYGgxm795zF2rWnUdJOD5UePcIwc0ZvDB3aTXZdFJPJin07crBhxUkUFtzc5LJrQihmzO+HYaO7Q+csLwJoMVtxcHs2Ni47hgs5N0dLY+JDMG3hQAyf1BMuMk/OWC02HNmWhY3fHcS5tEs3jUfEBWP6PcMwcmYfuPHUTPmzoToif0CMNgueT1+NfVXnoSEMbx8Whtj3aJ9MGIO7Y4dIuqGbWRueP7UJm0pyBTXa8hse6z4ET/YYLknDyrF45fgu/Jx/hjdP4lqN+3v0xQt9RkraNmI5Dm8fPYBvs9JEaSzsnozXh40WPE58I5RS/PvIUXx24oQojRkJCXh3/DjoJGwbUUrxvyOn8J99R8AIaVzJbxgXH4d/zZgoeUvnxyMZ+OeWAwDAezy3TWNIl87498IpcBc4stwea4/l4K2Vu0GpY43esWH48L5pglVY22P7ifN47fsdsLGUt2ZEm0aPqGB89MQM+HpKK9h18HQBXv7vFlhsNt7j0m15GrERAfjwudkI9HWcNH0tJzMu4eV/bIDpyvZKezptGhGhvnj/tbkICZa2FXImqwQvv7wGLS1m3qr6bRrBwV745z/nIzJC2qIg7+xl/P2pn9HUaODVIAwB5Sj8AjzwzkcLEdNFmsN+Ka8Cf3/ge9TX6K9ei0/Dy9cNb3xxF7olScuJKrtYhZcWfY7qsgbeHJy21+fu5YrXvr0fif35ayT9WVA
dkT8YRpsF9x/7HjmNlyU1tLs7dgj+0n2cKFsLy+LeQz/jeHUxqASNBbG98HqviaKcERvH4cG9a7G37KKk2gQzYrrj30OniHJGOErx1O4t2HiBvzvqjRAAY6Pj8Pn4aaJWsJRSvLRrN1bc0NTRkcagyEh8M2um6ByWf+46iG+P87dpvxGGEKSEheC7RbNFOyOf7T6GT/ccl6TRtVMAlj44T7QzsmRvGv694aBoDQ1DEBnoi6VPzRftjKw5kIV3ftwjuhuOhiEI9vPE9y/cBn8vcXkZ2w6fxeufb5Ok4evthm9fX4ggf3F5GQeP5eOVf24EBUQV4NIwBB4eLvjivYUICxEXBT11+hJefHEVKKWiarYwDIGrqw4ff7wI0VHioofZGcV4/vEfwdo40Ro6Zy3e/+JOdE1wfLoKAPJzyvDcnV/DYrGKqqfCMAQarQbvfHMPEntHidIozqvA0zM/hMlgEdUXh2EICEPw+vcPovfweFEaf1SkzN9qKvDvgJcy10p2QgDgu4uHsarolCjbl9O2SnZCAOCni+n4Jv+EKNu3T+2V7IQAwPrCs/go84go2/+cPCLJCQHsk8quSwV459gBUfb/O3VakhPSpnG0pASv7tkjyv6n02ckOSGA3QnLvFyB5zfuEGW/Mf2sJCekTSO/shZP/7RF1ES550yBJCcEsFcuLalpwJNfbRQ1iR3LLca7y+zvq9jvFstRVNXr8eTH62EV0cgsM68Mb365XbJGQ5MBT/xjNUwWq0P7vItVeO39TaDgj+i0p9HSYsLTr65Cq8Hs0L6oqBavvLIGHCfOQQDsuTBGowXPPrsCTU38XYfbqLjcgJefXi7aCWnTsJhtePGJZair0Tu0r6tuxssPfC/aCWnTYG0sXn1oCSrL6h3aN9W34MUFn4l2Qto0OI7izfu+RsmFSscPUAGgOiK3POeaKrC74qxkJ6SNT/L2wMrZBG2K9PVYU3RGshPSxse5B2CwWQRtKlv1WHIuXXaVxs+zj6PJbBK0aTAZ8XnGSVnXpwC+y0pHdatwW/JWiwWfHDsmW2Nldg6KGxsF7Sw2Gz7cJ87xuhGOUmw7m4/zVTWCdizH4d/bD8vWOJxfhMySm09IXQulFB9uOiSrvD3LUaQXXsaxvGKHtv9dK+91sBzFueJqHDhz0aHt/1Ydla1RVF6P3cfyHNp+v/wIOI5KrpDLchSV1U3YvjfXoe1PPx+DzcZK1uA4ioaGVmzenOHQdvWyYzCZrJIr5HIcRYvehPUrHf+GN/x4FC3NRsmVZTmOwmSyYu33jr8zW388ioZaveQOwZSjsFpZrPx0l6TH/ZlRHZFbnBWXTkBD5H9MjRYD9lYKRwiWXUxTVBLeYLNic4nwTfCn/EzJBZGuxcqxWF0gHIVYdS5HcQv25eeENTacO39T4qsUGELw85ksQZsd5wrQZHK8uuVDQwh+ThPWOJh3CTX6VkEbQQ2G4OdjZwRtTheUoaSmUbbzqWEIlh/KFLQ5W1SJ8yXVssvbMwzBir3CGkXldUg/V6qg9DzByh3pgjZVNc04euqioi7KqzenCUZSmpoM2LfvHG8tE0dwHMW69WmCpesNrWbs3JQpefK+VmPL2jRYLPy/MYvZim0rT8p+rziWw861aTC08v/GWBuLzUsOtZtzIlbjwIYMNNULL2xU7KiOyC1Ms9WITZfP8CaNioEBwU+X+MPvJpsVKwszFHW6JQCWXuDfArJyLH48n6G4Q+z35/hvtByl+D5bfsSl7RpLszMEu+UuychQ1MCOpRTLs7JgFnBmfjiVoay5HKVYdyYXLWb+G+1PRzMVOZ8sR7EjOx/1LQZem+WHzihuLnfo7CVU1N98MqWNVfuzFGlwHEV6/mVcqrj51Egb6/Yo06CUIr+4Bmcv8ofqN+3MUnT8k1LgckUj0rNLeG22bctS5OgAQH19K46f4I8g7dmWDbOAEyGGFr0Jh/ae4x0/vCMHLc3C0VFHmE1W7NuUyTt+cs9Z1Ffzf+/
EwHIcdq0Ut239Z0d1RG5h8poqbzo+KxUOFGcaSnkn8ILmWrQ62FZxBAVwvqn6pmO4bZTqm1Bvdry37EijtIX/OrWGVpS3ON5bdkSt0YByffs3oFaLBQV1dYqcHQDQWyworG9od4yjFFnllYqdNrONxfmqWt7x9OJyRc4nYE8+zr1885HMNtIulsluLtcGpUBWMf8WUFqecg0AOFPAr5F+tlSxBkMIsvJvPlp6VT9XfsSlDY2GQc45fo2cnDJFHWivamSX8Y7nZpUqcqLbNHIz+R2q3PRiaLTKpi6GITibwb/tl3uqEBqeuitioZQi91Shomv8WVAdkVsYvVWZ198GRymMbPvORnMHaQD2Amjtalg6UIMnT6RZYPX/e9JoNVtkbzPcSLOpfQ2W42CyKlu1ttFk5H9PWk3KHNw2mgWSMFsE9MXCMATNBv7vqL5V+feXYQj0Aq+jWa9cgxBAL7Dd0NSsbDHQRovA+9HSbFTsUFFqT8Dl1dAr1+A4Cr1A4m1rsxHi05J5oEBzvfztzz8TqiNyC+PEKPPIxVzrj6Iht6hTuxo8x2udZBanav9av93rYAhRvGr9fw3+5yu3mNfNGvzfn47oAUIphZPA6pevIqk0DTjQ6ID3yoGG3KJk10KI8Puh/RU0nJy0sjsO/78I4KT7ZV8HADg56G6sYkd1RG5h/J2lFULiw02jgxPT/g8iwEV+b4tr0RIGXk7t13wIcJVWNIoPAsDPpf1y5n4ubopyN67Fn+f5eru4KMqruJYA9/Y1XLRauMjsYyNWgxACbwW9Wa7T8OD/bP0U9Ga5Fn+BomP+Xsq/W5QKXyfAx13xd4vlOPh68b8fAX4eip1DlqPw9RZ4r/w9ZFeTbYPjKHx9+DV8/Tw6wDkk8PHjvy/5+HuAKPxENAwDH3/++6tPoKfiyCSjYeAX/OetfyUF1RG5hYn37oRQVx9F19AQBpPDe/KOR3n4oYtXoKKftYYwmBCewLsCDnbzRK/AUEU3Wg0hGBEeA3eeHjRezs4YGhGlyFFgCEHfkDAEurV/E3TSaDCuS5xCDSAhMBCR3u1XwiSEYEpiN0XJkQRApK834oP5i09NSYlXpAHYnZDkSP7iU5P7JCieXD1dndGvawTv+KQBCYpXx85OWgxKjOIdHzcoQXFekEbDYFjvON7x0UMTFOcFARTDB3XlHR0xIqFDtjRGjEzgHR8+trvgqRoxsCyHEWN78I4Pm5jUIRrDJ/LfF4dNSZV98qcNjuUwbGovRdf4s6A6IrcwDGGwILq/Iu+fpRzmdeZv+U0IwZ1d+iq60bKUw+IufQRt7krorehGy1KKO+N7C9rcmZSqKAGToxR3JQnfOBanpCjUAO5MTRU8IbGgT7Li5MhFfVMENeb376lIgyEEtw9MEdxKmjMoSfb1AXtexexBSXAWqBI7dVAPRVtAGoZg6uDu8HDlb4A4fnACXBSE2DUMwdgB3eArEHUZMbgrPNz5n4MYjUF9YxEcyL8CH9A/Fv4CUQBHMAxBcnKkYKn35N5RCA33le0cEkIQFx8iWF21a2I4YhNC5Z8yIkBoZ38k9Y3mNYmIC0bSgDgwGvn3Xr9gL/Qd1V324/9MqI7ILc70iFTZuQkaQtDTJxzdvDsJ2k3rnAg3rU6Wu8MQgjivAPTyF+7fMKFzN/g6u8pyqhgQhLl7YVgY/40DAEZERiPE3QOMDA0CAn9XV4yL5l+1AkC/8HDE+PrKWukTAB46HabEdxO06xESjKSQYFmRFwJAp9VgZrLwDTA60A/9YyNkR3cIAWb3TRS0CfbxxPDEGNmRF0op5jpwZnw9XTG+n/wIEstRzBmeLGjj7qrDlBGJsrc1WI5iztgUQRudkxYzJqYo0pg1WdiJ1mgYzJzRW/YEznEUM2cKLwYIIZh5W39Z1wfsn/mMefwLpzamLx4k+wQQATB90SCH78P0e4ZJLph2VYMhmHbXsA7JYfozoL5LtzjeOje8ljxD8uMYELhonPBGykyHtm5
aHd7vN02yBgGBE6PB+/2nO/xR6zQafDRsqmQXgcC+Evto2FSHk7+GYfDxuCn2fg8SNQgBPh47xWEfGEII/j15EpwYOe4O8MHEiXB14m9j3sa708bB2UkLqfMSBfDO1HHwcnGcA/LazDFwd9HJcqpenj4agZ6O84temDMSPu6ushyFp6cPQ3iAj0O7p+YMQ6CPhyyNB6cOQJdw/nb3bTwwZzDCgnxkaSyY1BuJXRz3T1k0ewCiIgIkaxAA08Yno09yZ4e2s2f3QXy3TpIdHkKAUSMTMHQI/9ZPG5Nm9kZy7yjJGgxDMGBoV4ye6DiSNmpqCvqPiJel0bNfDCaJcHYGjk/C8Om9QKRqaBh0S+mMGfcOl/S4PzOqI/I7YEp4Mp5PnARAXHFSDSFw1erwef87EOMprknVuPB4vNNnChiRMQsNIXDRaPH1kPlI9A0RpTEsLBofDZ8KDSGiohYMsTs6X46ciT7B4jpm9g0Jx+fjp0HLMKImWAYEGobBJ2OnYHC44xs5ACQFB+OrmTPgrNWK0iDE/lr+OWE8xsSJ68rZJSgAX90+E65OTqKiFuTK3ysTR2FKorhmW5H+Pvj63tnwcNFJmvyenjAEc/uJ23YJ9vHEl4/MhrebiySNB8b3x+IR4vbX/bzc8PnTs+Hv7S5JY+GYXrh/6gBRtl7uLvj4+TnoFOgtSWP6yCQ8dru4CcnNTYf3X5uDiDA/SRPsqKHxeOrBMaIiHc7OTnjnnXmIjQ2SpDFwYByefXayKA0nJw1efW8+4hPDREdfCLFv67z49mxRUQSNhsHzH9yGnv1ixGswBPHJEXj5k0WiTsUwDIOnP1iI/qP581VufgxBdEIoXv/+ATi7SutQ/WdG7b77O2Jf5Tm8n7sDpYZ6aAhzU8XVtn/r5x+Nl5KmIFqkE3ItBysv4u2MXbiorxXU6O0fjtd6T0SCj7S23QBwvLIEr53YjfMNNdC0096+7d+SA0LwxoCxSA4Q5+hcS0ZVBV45uBvZNVWCGt39A/Ha0NHoFyqtNTgAnK2uxqt79iK9vFxQI87fHy+PGIEhUeIcnWu5WFOH17btxcnisvY1rrS17+zng+fHDsOortLbj5fUNeLN9XtwtKBEUCPM1wvPTByK8UmOV8U3Utmgx9ur9uBQ7iUQQm7KF2rTCPbxwONTBmNqX+l763XNrfjHsr3Yn2Gv/HmjRlsLd38vNzw0fSBmDeNPVuSjqcWID5bsxZ7jeaC0HY0rr83H0xV3zxiAeeOF84Hao6XVjE++3oudB85e6T3T/uvwcHfGgln9sWBWP8mRAaPRgi++2IvtO7Lb7T1DCAGlFG5uOsyZ0xeLFw2WvM1gMdvwzad7sGXdlZLtN2nYTyy5uDph+ty+uPOhkZKPStusLJZ+sgublh2HyWi5es3rNADodFpMnNcP9zwzATqJ+T4sy+Hnj3Zg3Vf7YGgxgzDk+tLvxB4d1jppMHZeP9z/8gy4uMnP9/mjIGX+Vh2R3xmUUpysu4Tll07gRG0hDDYLGELg5eSKiWFJmBfVD9EejkPNjjTSakvxQ8FpHKosRIvVbNfQuWBieAIWxvVGV+8gxRqZtRX44Xw69pZehN5qL8TkpXPBhM5dsbBbKhL9pTs5N5JTU4Wl2RnYeakAeotdw1PnjNFRsbgjMQXJwdKdnBvJq6nBsjNZ2Jafj2azGRyl8NDpMCwqCotTU9A7VEFi3RUu1tbj57Qz2JqbjyajCRylcNc5YWB0JBb1TUG/zuGKNYprG7HqZBY2ZZ5Dk8EEG8fB3VmHPlHhWDAwBQPjIhUf/7xc14Q1R7Ox6dQ5NLQYYOM4uOl0SIkOwW3DUjA4IUpx/ZHqhhasPZiFjUfPor7ZAJuNhauLDj2igjF/VAqG9oxRXK+lrrEVG/dnY+P+bNQ1tsJqZeHq4oSunYMwZ1wqRvSJU1x/pLHJgK17srFpZxZq6lpgtdjg4uKEmM6BmDkpFSMGd4VOIJFXDHq9Cdt3ZGHz5kxUVzf
DYrHB2dkJkZH+mD69F0aNTICzs+OtRCFaW8zYsy0Lm9acRmV5IyxmK5ydnRAa4Yepc/pg1PgkuLopix4YW83Yt+UMNv98HOXFdTBf0egU7ovJt/XHqGmpcPdQdmTdbLTgwMZ0bF5yGKUFVTCZLNA5OyEozBeTFg7CmLn94SlwtPnPhuqI/M4o0F/GrqqTqDE1wsxZ4a51RYxHKMZ36gdfnafgYymloiagAn0VNl1OR4WxESbWAg+tC2I8gjA9vDcCXYTf27aviCOdQn0t1hZnorS1EQabBR5Ozojx8MfsqFSEurV/XFWqRom+ESsLsnBJX49WqwXuTjpEe/phflxPRHj6dIjG5ZZmrDifhcLGBrRYzXB30iHSywdzuyYixsevQzQqW/RYlZuLC3V1aLFY4OqkRbiXF2Z374Gu/o4dSTGfe21LK9Zk5SKvuhZ6sxmuTk7o5OWJmUndkSBwtFeKRkOrEesycnG2vBp6kxkuTloEeXlgWnICksKFk6TFajQZTNh0+iyyS6qgN5rhrNUgwMsdk3vFIzkqxOHjxWi0GM3YevI8zhSWQ28ww0mrgb+nG8b36YZeXRxvMYjRMJqt2HH0HDLyLkPfaoJWy8DXyw2j+3ZFn+6OnTwxGmazFXuP5iEtpwTNepO9Xoa3K4b164L+qdEdomGx2HDwSD5OpV2CXm8EYQi8PV0xaGAXDOgX69DJE6Nhs7E4cjAPJ48VQH+lIqynlyv6DYzD4GHdHDp5YjRYG4uTh/JxbP956BsN4CiFp7cbeg+MxeAxPRxGTkRpsBzSDp7HsR3ZaKpvBcuy8PRxR/KgLhg2ORnOLn/c7RvVEfkdQCnF/uoMrCk7gDx9CTSEAUcpKCgYEFDYw7zDA1MwN2IU4jzDZGnsqzqLHy8dxpnGmzXaGBncHXfEDEWiD3+9BiEOVF7AN/nHcKK26GYNYn8eI0O64t4ug9AnIFKWxuGKIvwv9wQOVVwCQ4g9LH5FgxB7iHxYaDQe7N4fg0KiZGmcqCjF/86cwt6Si3YN2K9LQMAQ+xHiQaGRuL9nX4yMjJGlkVFRjq/S0rDzYsHVf7Nr2D9vllL0DQ3Dvb16Y1yc8AkePnIrq/D18dPYfu7C1Wj4VY0rWyDJoZ1wV79emJTQVVYkJb+qFt8cOo2t2XlgKQcCco0GA5bjkBAShDsGpmJacoKsSMql6np8vy8Nm9LOwWZjr27pXPs64jr5Y9GwVMzoJ+8Yb1lNI37YnYaNx87CYrWBXNn2uPZ1dA72xYKRqZgxJNFhMnN7VNXpsWzbaWw8kAOj2Xp1Gwqw5zqwLIewQG/MG5eKWaN6yopy1Da0YMXG09i4OwutBku7GkH+npgzKRWzJqbCRUaUo7HRgFXrTmHjlkzoW0xXr3uthr+fO2ZM7YXZ03vDTcb2hL7ZiHWrTmLj2jQ0NRra1fD2ccO0Wb0xc24/eAoUiePD0GrGhp+OYdPyk6iv1ber4entislz+2LmokHw9pVe9NFktGDz0sPYuOQwaioaodFe0aD2ZFaO5eDm6YKJtw3ArPtGwC/ojzW/Aaojcstj41h8lL8S2ytPggEBJ1DFQwMGIMBz8QsxMlh8cRyWcvj3ua1YXnzMsQZhQCnF3xNnYHqEcD2Qa6GU4j+5e/Fl/pF2cwuu17BPIi8lT8DiWMcZ69dqfJpzDO9nHhSlwVKKZ1OH4+EeAyRNsN9kn8Zbx/ZddQYcaTyeOhBP9xksSWN5dhb+vncPCCCo0ZZncHdqL7w0bLikUy2bcs/j2Y3bAQLBOiFtGvNSEvHahNHQSpjEd58twNMrt4CjVFCjbb9+clI3vDNrHHQSKsYeOV+Ep77bBCvLCmvAngMwskcM/rl4Elx14ifYtAtlePLT9TBbbQ41AKBffCTef3Aq3CWsYnMvVuCp99ei1WhxrEGAnl1C8f5fZsDLXfw2woWiajzz5mo0iujzQghB1+ggvP/3WfD1Fj/BFpfW4a8vrEBdfYsojc4RfvjXO/M
RGCAc0b2WivIGPP/UT6isaHSowTAEnUJ88I8PFyAk1Fe0Rm1VM1586HuUFtVen+fBo+Ef5IV3vrgTEdHi8+0aa/V4+e6vcDH3ssMjxoyGgZevO95e8gBiuktfbN7KqI7ILQylFO+d/wl7qk5LLiL2So+7MTTQcYIdpRT/PLsJq0qkt6B+veccTAlLFWX7Qc4e/C//iGSNV5InYmFsX1G2n2Yfxb8yD0rWeC51BB5OFHci4vucdLx2dI9kjcdSB+CvfYeKsl2Vm4Pndu2UdH0C4M6UVLwyYqQo+23n8vHkui2SNWb17IF3Jo8V5VQdyL+ER37cAEqp6O8vIcC47l3w73mTRUVGThWU4v4v1tgjayJFGEIwqFtnfHLvdFH5H9mXKnDfv1eBZTnRhfYYhiA5JhSfPzFLVNTiQkkN7nvjZ1hsrOiKpgxD0DUyCF++NE9U1KKkvB73P/cjjCaraA0NQxAR6ocv310AdxFRi8qqJjz4+BK0tJhEF8HTaAiCAr3xxcd3wFtE1KKuVo/H7vsW9fUtomt3MBoCPz8PfPrNvfATUaitudGAJxZ+geqKJtFVUxkNAw8vF3zy08MIDvVxaN+qN+Hp2R+hrLBGkoaLmw4frX8K4THKcu9uJaTM3+rx3V+ZzeVHsVuGE0IAvHN2KSqMdQ5tt5WfkeWEAMDrWWtwUc/f2r2NPeV5spwQAHjzzDZkN5Q7tDtaUSTLCQGAf2bsx/Eq/lbibWRUl+N1GU4IAPw34zj2llx0aHe+tgYv7N4l+foUwPeZGdiUd96hbUlDI57ZsE1ybRMKYE1WLlZm5ji0rW5uwZM/b5bkhAD2qMiO3AtYeizdoW1TqwmPf7MBlEK0EwLYt56OnC/CV7sdf++NZiue+HS9JCcEsBf0yrxYjk83HnVoa7Ha8JcP1kpyQto08our8e9l+x3asiyHv729FiYJTghgj5SVlNfjvS8cO8aUUrz42hpJToj9uVFUVTfhnX9tFmX/5str0SDBCQEAjqVoqG/BG39fI8r+Xy+tRnVFo6TS7RzLoaXZhNefXCaqgNrHL65E2cVqyRomgwWv3vO14tL1v1dUR+RXhKMcVpbulfXYtnyFzeXCN0FKKZZcOii7LDwhBCtLjju0++bCUVkVTAF76folBY4njK/OnpRd+VNDGHx19qRDu2+z02T3Q2EIwZdnHGv8kJkpu0g/Qwi+Sjvt0O6n9CzJDkIbBMDXx087vNGuOp0NK8vKbgfw3ZE0sJzwjXb9qVwYLFZZ7QAogB8PZsBiswnabT15Dk2tJnkalGLVwTMwmCyCdvtOF6CmoVVWbxeOUmw5lIsmPX+begA4nnEJlysbZZXq5ziKfUfzUV2nF7Q7k12Kwks1sjVOnCpEaVm9oF3++QrkZpWClVHFlGUpcrNKcSGvQtCutKgGpw5fkFUplWM5FOZXIie9WNCupqIRh7ackfeZsxzKi2uRdsDxouOPiOqI/IpkNlxApUn4RykEBw5byo/Cwlp5bXKaylCgr4K8KcmeW7KpLAMtVhOvzYXmaqTVlQrmnTjS2FaWi3pzK69NaUsj9pcXyu7rwlIOe8sKcLm1idemxtCKrYV5sjU4SnGiogwFDfxRqmazGWvOnVWkkVNdjayqSl4bk9WGFRnZsjUogOKGRpwsKeO1sbIsfjp5RlG/oGp9Kw7mX+Id5ziKZYcyFHU9bTaasetMAe84pRQ/78tQ1CjPZLFh6ynhCWPlrgxFR51ZjsOmQ7mCNmu2ZShrXEiATbuyBE3WbUqHRkG/FYYh2LglQ9Bm07rTikqhazQMNq1LE7TZuuoUGKUay4UXT9t+Pqboe8VoGGxcelj+BX7HqI7Ir8jWiuNgFL7lrawJR2qzecfXl56GhijTsHBW7Kzk11hdlKFYg6UUG0r4b4KrLmYr7txKCMGqAv7Xse5CrvKuqoRgRR7/69iclwcryyrXyOHfOtmVX4BWi/AKXZRGBv97dfhCEepbhVfoojRO8Wucvli
GigbhFbojGEKw8ij/53GupAqFFfWKnB1CgNUHzvCOF1fUI6egQlGnW0qBNXv4Narr9DiZWaSocSHHUazbkck73qw34tCRfFmRims1Nm/Pgo1ny8FksmLPjhxFWxIsy2H39myYTO0v0Fgbi+1r0xR102VZDod350LfZGh3nFKKrcuOKvrMuStHfWsqGmVf4/eK6oj8ipQaqsBB2R6ghjAoN/GvwItba26qhipHo8zAH7kpbqnvAA2C0tZGfo3mBsVOAgFQ0sKvUdTcKHt7qQ2OUpQ080ddipsaJJ1IaQ+WUhQ1NvBr1Dd2iMaleiGNJsWOIUspimr5NUrrGhVdH7jyeQhp1PB/VmKhFCir5b9OWXWjYg0AqKxt5t0uK6/sGI3GZiPM5vYn8KqqZkUTaxtGowV6nm2mulo9rFZljjoAWK0s6mvbd2Kbm4wwGpQ56oDdqaquaP9zN5usaKrnj/CKhgJVpfKj5r9XVEfkV8TAmhVfg4DAaOPfNmm1KdcAAIPAdVo6QIOCCj7XVptF0TYAYJ+UWq38NyCD1SJ7C6sNClyt2NoerRarYocKAPRmfg2D1aLQnbLTYhZ4r8wWxY4IAMHIjcFs7RANg4V/69LIM+lKxWThz0Mx8qzMpcJRCrO1fZ2O0gDAm+9idJAHI0mDxxHoCAfh19QwGtr/HZpaO+a+CwCGVv77+x8V1RH5FXHTKCsxDNgncDct/3U8BMbEQ+Cu5T/W59kBGsSBhoeTs+xE1TYYQuDuxF/zwd1Jp7gsOoG9ZDwfHjpdhzgJns78Gm5Oug5xdjyc+d8rN2edYsfQsYZTh2i46fg1XBWWKxdzHVeXjtFgGAJnnmPCbh3YUM3Ntf3vlmsHavAdE3ZTWNr9Og33X17DjUfD1aPjesu4KSxF/3tEdUR+RTq7d7IXKFMASzmEu/IX14n2CFKcv2GjLDq785cZj/b074AcEQ5RHvzl0mO8/BRPrvTKdXg1fPwUh54ZQhDtzV9QKdrXF1YHJ0UcoSEEcX78ryPa3xc2pRoMQVyAP79GgK9iJ0HDEMQE8mtEBYkvTMUHQwhigvnfq6hg5RqEAJFBPrzjkZ06QANAWJA3r6McFuKjKDGyDX9fdzjzlDLvFOStKIm0DXd3Z3h6tj+5+gd6Sm5C1x7Ozlr4BbRfS8TTxw3uPPpS0GgYBIX4tK/vouuQ6qiEEIR2VtYr7PeI6oj8ikwOHQhWYY6Ip9YNAwMSecdnRvRRnL/hqnHC2E78bd7nRKUq1tAyGkyL5C/ONjc26aZunZKhV67Dw8wu3RU3cWMpxW3x/K9jctducJVQUZRXI5FfY2zXWHi5KFuRsRzFban879WQuM4I8pRe6vomjX78r6NXdBgi/L0VRZA4SjFvEL9Gt4ggdAsPhJKPnVJg7vBk3vGIYF+kdgtX/N2aMzqFdyzA1wMDe8UoOjVDCMHMCfwanp4uGDksXvGpmWmTUngdGmdnJ4yb1FPxqZlxk5J5m/NpNAwmzu4DRsHr0GgYDJuQCA+B4myTFw5S9JkzGgb9RnX/Q5Z7d8Qv6oi8++676Nu3Lzw9PREUFIQZM2YgLy/vl5S8penpHSsYzXAEA4IpoYPgxPBPbAneYYj3CpVdR0RDGEwL7wNXLX84M8YzAP0DomTv52sIg6nhifDR8f+oQ9y9MDo8VkEdEYJxkV0R7MZfYtrPxQ1TY+IVaQwJ64wogYiIh06H2d17yNZgCEFyp05ICOT/3ui0WsxPTZL9eRAAsf5+6BUeymujYRjc3i9ZUQ5HiLcnBsd25n8ehGDhUHFVffnwdXfFqKRYQZvbRqZCSSDMzdkJ4/t0E7SZOzZFUbRNq9Vg8pDugjazJqYqOjVDCDB1tHCl5ulTUhWfmpk6KUXQZurM3opPzUyd2VvQZtKcvrJqiFynMa+/oM2E28RVcuaDYzlMXTxY0TV+r/yijsiBAwfw6KOP4vjx49i1axesVivGjRuH1tYOyC7+HUIIwfzI0fIeC0DLaDE
ldJBD27tihslKwiRX/uZFCv/gAODergNlh+opKO6Ic6xxf/f+sjU4SnF/guMy8vf27CM78MJSigeSHWvcmZIKQuS5hhyleLCPY40FvZLhxMg7A0QB3D+wj8N8mTl9EuHspJXtjNw7pI/DFePUvgnwdHWWrXHH8F4OG9ON79MN/l5uslavBMD8ESkOe9oM7xWLTgFesiIWhAAzRibB00G/mX7JUYgK95OlwTAE44Z1h7+Dhm6J3cMQ3zVEtsaQQV0QyrOd0UZMXDBSekfJ+jwYhiC1dxSiY4VLo4dG+GHgSHnNFxkNg66JYUhIFm4K6hfkhZHTe8nWiIgLRurQrpIf+0fgF3VEtm/fjrvuugs9evRAcnIyvv/+e5SUlCAtTbj4zB+Z8Z36iXImroVc+b+v9LgLQS6O95/HhiThjmhxPVCu1aAA3kqehygPx1Gb4Z264ImEEZI02ngrdSoSfBy3iO8XHIG/95HnuL3adwx6B4U7tEsMCMa7Q8fJ0vhb36EYFh7t0C7Wzw8fjJ8gy+F5qE9fTIjr4tAuzNsLH82aDACSnZGFvZIxM0l49Q0AAR7u+GzhNBACSfkJBMD0lAQs6M+/ndGGl6sLPr1/BjQMkeSMEEIwOikW94xy7LS56LT472Mz4aTRSNJgCEH/hEg8NHWgQ1utVoOP/joLLs5OkiYmhiFIigvFE7cNE2X7r5dmw8P9/9g77/g4qqvv/+7MNvXei2VZlmQ1S5Z7773hijE1QAgBUiAhEEIJIY0AgUAahNANNu69914kWbKsZsnqvWt3tW3mvn+s5Mi2ZnaKIOR5/Xs+PJ/n8b2a7+7s7txzzzn3HL0sQ4FhCOIGBeGZR13/tggheO3lu+Dr6y6bERnhh+eeWSBp/q9eXYagEG9ZYSCWJQgK8cYLry6TNP9nv1mGiEEBsgqbMSwDX38PvPzneyQltj/52goMHhYuKwzk7Gfjht989CgYlcfw/1f1rb7rjg7nGWx/gcQ7q9WKzs7Om/77vyZCCJ4cuhxLI5yGgqs9LEsYaIgGr6R8D2MCXC8WvXoqYTa+N2SKZAZDGPxu+GrMChPOE7hVP0ychJ8mTeu5hhQGwe9GLMbymHTJjIeHjcJLI2eA9FzDFYMAeGXUTDyYKL2L8OrENPx+0mwwhEh4H87xX4yejB+mu/bq9GpRQiL+PHceWBmMp8aMwc8nTJTMmD50CN5bvggalpHMeHD0CPxq9lTJp4fGxkbjH/cuhV6jcbkw9S7yKzJT8NrS2ZIZ6THheP8Hy+Gmk86Ymx6P1++bL3nRT4gKxgdPr4Snm04yY3JaLN78wWKXHpdexYT7458vrIavp5vL19V7a0YlRePtn0lrqgcAYcE++NtraxDg5ymJQQCkxIfj7VdWwU1iF+HAAC+899a9CAnxkXR/CQHiYoPxzp/ugafAKZNb5ePrjrf//gAiowIkfU8IIYiMDsTbf38APr7ukhgeXgb86cOHERsf6rzfLjAMQxAa7ou3Pn4UARLzNgzuevz+i8eRmBEjyWDv7fD7xtdPIiRSOMn6/7q+te67PM9j8eLFaG9vx8mT/ZexfeWVV/DrX//6tn//v9R9t6/ONF/B5urjyGkvuVFxlYL2tGjnoWU0mBUyCssipyDaI0QZo6kE68pP4UxzCZiehZyn/2FoCIu54cOxNmYChnq79lL0p3NN5fj42lkcqSvuCUE4W8yzPa3mGUIwLzIZD8aNQYqfcB6CmC41VuNfBRewr6oYgNO44noZPb6GedEJ+N6wUcgMUtZOO7epHh/mXsSusiLwoDcYvc9enlLMHBSHh1NHYmy4uJtWSAVNTfh31iVsLyqEg+fBMgw4nr+x2PGUYkpMDB7KyMSkQcL5FGK61tSCjy9kYWteAewcB5ZhwFPqfChSZ0hpfEw0HhidgWlxsYoY5S1t+OxMNjZn5cNqd9zMgDMxdeSgCNw3LgOzkuIUHZOuae3A58ezsfnsFZhtdmhuYhBwPI/hg8KwdnI
G5qbHK2I0thvx5ZFsbDqRB2O3tV/GsOhgrJmWgXmjE8Eq2LG2dJjw9YEcbDp0GZ0mC1iWcbagJ84FleN4xEUFYtWsDCyYmASNRpqh01ftnWZs3puDLXtz0NZhdjJ6Hu29jEER/lg+PwMLZ6RKNnT6qstowfad2di8IwstLcZ+GeFhvli2JBOL5gknj4rJbLJi17YsbN14AY0Nnf0ygkO8sXTFKCxcmgk3BUdzrRY79my6iG3rzqKuuhWspufzAEAYAs7BIyDIC4vWjMHClaNFE1SFZLM6sH/DOWz7+Diqy5qcDEoB+h+Gb4AnFt4/AYvumwhvFyGy/0XJ6b77rRkijz/+OPbs2YOTJ08iMrJ/l7nVaoW1T+Gmzs5OREVF/U8aIpRS2KkdDBhoRJJLAaDa3ISDDRfRZG2HlbPBQ2NArGcEZoZkwkMj/iOwcjawhIWGEX941ZhbsasmG3Xd7ejm7PDQ6DHEKwQLwtPhoxPfUVg5Z6EpsSRZAKgzd2BL5WVUm9phctjgpdVjsGcA7ho0HP568R+ajXMABNC5YDSajdhYloeKrjYY7TZ4anWI8fLH8iEpCHYTbwVu5ZzFofSsOKO524TNxfkobW+F0W6Dh1aHaC8fLI9PQZincPKr831w4CmFwcVJmbbubmwpKEBJazOMVhvctFpEeHtj+bAkRPr4iP6tvYehd8HotFiw7Uohihqb0GW1wU2rQZiXF5akDkOMv3iIz85x4HgKvYYVXeBNVht25hbiam0jOi1WGLQahHh5YtHwRAwJFj6qCwAOjoed42DQakQZZqsde7OLcKWqHp3dVug1LAK9PTB/RCISwsXDiFIZVrsDB7KKcbm0Fl1mK7QaFgHe7pidmYCkQeKbAI7nYbNzMOjEGXYHhyMXS5BdWI1OkxVaDQM/L3dMHz0UKUPCRP9WKsPh4HDyQikuXalEZ5cFGpaBj7cbpowZirRhEaJ/y/MUVpsDBr04g+N4nLtQhvOXrqOrqxuEEPh4u2HCuKHIGB4tgWGHQa91Oe/S+TKcO12Crk5nVVYvbzeMGT8UmaNjRT0zlFJYLHboXYTFKKXIOV+Gs0cL0dluBqUUXt7uyBwfh1GT4kVP8lBKYbXYodNrRMMplFLkX7iOU/ty0dlqAsfx8PJxx/DxcRg7MwUarfBzm1IKa7cdWr1mQI5Rf9v6zhkiTz75JLZt24bjx49j8GDXMfVeyXkj3wU5eAdy2nNwsPEgrhmvgaPO0sVurBvG+o/FtOBpiHJXtpPuFUd5XGjNx46a47jScQ2OGww9JgVmYEH4JMR5qWPwlMe55hJsrDqDC60lsPNOhoHVYWpwMlZEjUOST5SqYmCUUpxrvo51ZedxovEabLyjh6HBtNBErBk8CiP8xR9qUhgXmqrwWfElHKotgaWPITI1bAjuG5qJcSGDVDOym+rwaUEW9lYUo7un86uOYTEpIgYPDBuBSRHKTxj1Kq+xAZ/lZmPXtWKY7c6qmlqGwbjIKNyfloGpgwYr2q33VWFjE77IvowdBUUw9lRA1TAMMiPCcX9mOmbEDVFdSr60qQVfXsjF9twCdFqsNxhpEaG4d0w6ZibGQafAI9BXFU1t2HA2F9suXUW72VmlkmUIhkUE457x6ZiTFi9YKEyqalo6sOlUHraeuYJWo3OhZBiC+PBA3D05HXMyE1wmtLpSQ2sXthzLxdbjV9DaYQLtYQwO88eqGRmYOzYR7hLDK0JqbjNix+E8bD2Yi+ZWo5NBCKLC/LBs9nDMm5IMT4FiZFLV3mHGrn252L4nBw1NnaDUGbIID/XFkgUZmDczBd5e8r0OfdXZ2Y19e3OxfVsW6mrbQXu8WsEhPli0KAPzFgyHr686r4PJaMHBnZexY+N51FS2guedjIAgb8xflom5S0cgIFB8o+JKZqMVR3ZkY8fnp1FV2ug8eUUA/0AvzFk1GvNWj0ZQqK8qxrel74whQinFU089hS1btuDo0aMYOtR10l1f/S8ZIsebjmN
j9UZ0ObrAgLmtp0zvv8V5xuF7Md9DmFuYbMbRxkv4sGwrWm0d/TJYwoCjPOI8o/Dj+HsQ6yk/RHGsMR9/LtyBBks7WDC31T35DyMUzyUvR7KPfKPnREMJfpu7G1XmthvX648R6xmIV9IXITNAfojibEMFXry4F6WdLWAJua0zbe+/DfL0w6sj52JSmHQDuVdZjbV4/tQ+FLY1iTIiPL3x67EzMCta3vcfAK42NeIXh/bhSlOjKCPEwxO/mjgFC+MTZTOutbTg+T0HkF1b1y+D6QmxBbq749mpE7EsJVk2o7K1HS9sO4ALFdWiDF83A348fTzWjHKd2Hqr6tq78PLX+3G6pFKU4WXQ4wczx+D+SSNkG6HNnSa8+uUBnLhyHaTnejczAJ46j/g+NGsUHp41WvYpinZjN37/yUEcySq5EebsK0KctUwMOg3umZ2J7y8dJ9sINZqteOPDgzh4ugigEDydptOyWD4nHY+vmSQ7ZNRtseEv/ziEfYfzwfP0tr45PdEvsCyDhXOG44ePTBMsriYkm9WBf/z9IHbtvAyO4/ptZkgIcZ4SmpOKJ56aJbtirMPB4d/vHcT2DRfg6Cm5fyun9zOeMjsFTz23AB4yq6NyHI/P/3IAWz46AavFfuMzvpVBAYyflYwfvbrsOx/O+c4YIj/84Q+xbt06bNu2DQkJ/zl37+PjAzc31xbw/4IhQinF5prN2Fm3U9J8Bgx0jA7PJDyDOM84yZwNlQfwSfkOiQwCLaPFy8nfx3A/6cfBvq48jbcKt0tmsITB79Pvw4Qg6YvflopsvJSzHVTCAWMGzpMTr2cux5wI6Yvfzoqr+OmZbaAUN/JHhOTMWSP445gFWB4rXlOhrw5WXsMPDm8FR6nLI8a9y9Cr42bh/mHS62ScqqrAIzu33gj3SNFzEybjsRGuT4706lJ1Db63cQssdsdtC7eQnhw/Bj+ZKP3k15XaBjz86SYYbTbJdS8eGDsCz82ZLNlQuFbfjO+9vxEdZotkxorRKXhp2UzJhkJlUzse/cvXaO40SWbMy0zAb+6bC41E13p9Syd+8PrXqG/plMyYnD4Ef/jhQmglGgot7SY89eoGVNa1Sap1QggwMiUarz+7FHqJXp7Orm48/cIGlF5vlMggSE4Mx59eXQF3iR4Yk8mK55/9Clev1go2B+wrhiEYMiQYr795D7wl5n1YLDa88vRXyLlQJqljM8MQRMYE4o9/ux/+Er0jdpsDv/vR5zh7uEDSfIZlEBLhiz9+9hiCXByN/m9Kzvr9jQae/v73v6OjowNTp05FWFjYjf/Wr1//TWK/Ve1v2C/ZCAEAHjysvBVvFr2Juu46SX+zt+60ZCPEyaCw8Xa8kv9PlBlrJP3N/rocyUZIL8NBOTyf8xmutFdK+pvDdYV4MWcbeIlVTpwMHj+/tAlnm8okMU7WX8dPzmxzGggSKLSH8+y5nThUUyKJcbGhBj84vBUOnpdkINCe/148cwA7yqQ9bPKbGvDwjq2wOhyyaqn84dRxbLiaJ2nutZYWfG/jFnTLMEIA4L3T5/DxxSxJc6vbOvDwp5vQZZVuhADAJ2ez8M8T5yXNbegw4pEPNskyQgBg4/kreGdv/4nzt6q1y4zvv7tRlhECAHsvFeH1jUckLZRdZgueeGOTLCMEAE5cLsVvPtovidFtsePp321ClUQjBHDuzC9dqcJL7+wCJ6GVgNXmwHOvbJJshDgZFFeLavHi77bC4XDdjdfh4PDKS5tQUCDNCAGceSelpY144fkNsFmFmxb2iuN4/OGFTbh88bokI6SXUV3Rghd+9AUs3a6b7VFK8edfbsS5I4XSAHAWPmuoaccvH/wXjJ39dzX+X9M3aohQSvv978EHH/wmsd+aWqwtWF8l36iioLDxNnxa8anLuR22Lvz92teKGA7egbeL17mca3JY8Pv8TbLrT1A4c1ZevbLB5cPAytnxQvZWmYQeDqV4PmuLy7LyDp7HM2e2S35o3Kqfn91xI6F
V7LU8fXwXOF5+yTgC4NmTe0U7Avcyfn5wH+w8p6j2yK+OHEJbt+sH1K/2HYLFLs/Q6dXvjhxHfZfR5bzf7D4Co1VZJ+W3D59GeUuby3lv7jqONlO3oiqjHx69iIKaRpfz/rbrNJo6jLIZFMCGk7nILnW9Ifj3znOobmqXz6DAnjMFOJ133eXcL3deRGlls2wGTylOXCzFkbPFLudu25WNq0W1sivL8jzFxewK7D10xeXcfXvzkHWpXBGj4GoNtm93bUifOHQVZ44VyWdwPMqvNWDj56ddzr1wrAhHtmdLNqb6Mmorm/HV3w/L+rvvqv73UnG/QzrWdExxKXUePAq7Cl16RfY3nFXc14UHRamxCiVd4h6LvbXZsPLK2tXzoKgyNyO7TfwhuK/2KjrtFsWMRksXTjSIeywO15agyWJSVFWWAmi3WbCvSrwFwam6SlR0tUvytvTHMDvs2FZ6VXTe5YZ6FDQ3Ka4q6+A5bCzIF51T0tyCi9U1sjwht2r9ZXHPS3VbB46XXFfMYAnBVxdzRee0GM3Yl1usuNQ5yxCsP3NZdE5XtxU7zl9VxzghzrDY7NhyLE9xWXiWIdhwKEd0jsPBYdO+HMXfK4Yh2LhXnMHzFJt3ZCnuE0UIsGl7lujCTCnFlk0XFDf9oxTYsvmCy3u9bf05xb1jeJ5ix9cXwLnw7mz/7JSsAms3MTiKPevPw2qxK/r775LuGCIK5eAdONJ05LaEUTliwOBo01HBcY7y2FlzQtHC2iuWMNhdK+x+ppRiQ6Vry90VY1PVGdE5X5Sdc1lYTZxBsK5M3FX/afFFxT1dAGcy4yfFF8UZBVmqGATAv69eEn3QfpaXo4pBAXySmy264KzLyVXF4CnFF9mXYeeEH7QbLuWpOpHEUYqvs66g2yb8oN18/opiDxjgrHeyPasAnd0WwTm7zhfAZncdLhBjHMwpQXOncGuLgxeKYZLgyhdjnMkrR01Th+Cck5fK0NZpVszgeYrcohqUVTULzrl0uQJ1DR2Kn1iUAmXlTSgoEt6gFVytxfXrTao+9/q6DmRnlQuOl19rwNXLVar6BbW3mnD2hLAHqa6yBZdOFoNX0WfHbLTgxB5xY/1/QXcMEYUqNhbD6HDtmhYTDx5nW84Kjpcaq9Bsa1fF4CiP403CbsgKcxMqzU2qGt1ylMfRhitw8P0/rBu6O3GlvVaRF+E/DIpTTaUw2a39jnfaLDjdUKFqh89TiuyWGjR29/+52jgOByqvqWJQACXtLSjv7D/kQCnFrpIiVQwAqOnqxNUm4ZDDjquFqhmt3d3IqhFeMHbmFSrefffKZLXh7PUqwfHdOeoZNgeHk4XlguN7s9Q36uR5iqN5pYLjB84XqTLaAGfC55FLwl7Dw2eLVHcEZhmCwyLhmaMnClXXvGBZBkdOCudMHDtWMCCMY8eE87VOHLqqqlsv4PQgHT8o7Jk8fSBf9bF+whAc333HEPn/Vl32rgG5jtFhFNwdt9vUGTq9svA22Pn+d5Vt1oFh8KAwOvrfVbZaB67JYZut/x1di1X5Tu+2a1n6v1a7tVv1oterZgFGt8MBq4iXQY5auvtn8JSiwyLsAZDFMAvf9zbzwCTStZqEGS1G9Z87IQQtIozmTpMqQx0AGIZBm1H4fjR3mGTnCdzOIGjrEma0tJtU7fAB571qF/GqtHWYVXXSBZzGeHuH8PtobzOrvlccx6O9Tfh9tLeZVBuGPE/R1iz8fG1vNaruLUN5itam//1WKHcMEYXqLVamVrzIGZKBYgCAQyDPRGn+Sf+M/l+vEHsgGVKy+VUzBvCku0Pg9Qr9uxLZBa5FqZpg380Se70Ddb+E3gcAxXkbfUXgrMAqyFC5sALO3IdvmgGIM8TGpIq6uA7nGKD3IZJbMWD3yhVjAL6+dhEGP1D3SkXY8LuiO4aIQrlrpDVaciUDY7jRA+ZWeboo7y5VLGFgYPov4uOlHRgGAHgJvF5vrbziPmLyFni9PrqBYwhdy0enrsK
kFIanTqcik+YWhr7/18syDNxUVha9wTAI3xMvvbqqn1IY3iJjUsVTCm834e+Pt4f67xbHU3iL1Mfw8VT/O6SUwlukyZyvyuqlvfISuR9eXgbV4R9CCLxECoJ5eupVeysYhsBL5H54ehrkt7HuR2IN+Ty8Dao9OwC+84XNpOiOIaJQQzyGgCXqSlEzYJDoLVwMbIhnFHSMujLRDBgkeQ8R/OHGeATDU6PuQUtAkOAVDj3b/2uNcPdFoF68D4xrBhDjEQA/gb44gQYPRHv6qn52hLh5IcLdp98xd60OSf7BqpJuAcBXb8AQ3/47bTKEYGR4hKpEUgBw02iRFBgsOD4mKko1Q8swSAsTbpQ4dnC0rNbx/YkhBCOihSsEjx2qngEAIwcLM8YkRKteXCmlyBzSf48tABg5LEp1vgDHU2TECzMykqIUnzS5weB4ZCQJM9JTo1SHfziOR3qqcMXm4emDVHtFeJ4ibXi04HhaZoxq7w4hQNqIGMHx1NGxqt8HwxAMHztE1TW+C7pjiCiUl9YLo/1H3+iaq0Q8eMwMnik47q4xYGbIGLAqGYsjJguO61ktlkaOUbW4UlCsGjRBcFzDsFgzeJTqBXxt7BhBg4oQggfiR6q6PgOC++MzRctlP5g0QlXSLUMI1iakizbeeyAtQ1VYgyUEq5JS4KET9kjcNyJdNWNRUiL8RCokrx09XFXohGUIZiQMQYi3sBG7epxKBiEYGxeNQUHCTQBXTkhTtbgyhCBlUAgSo4QNw6WTUlXtwAmAmDB/ZMQLG1TzpyRDw6rbPAUHeGHscOF2CDOmDIObQd3mycfbDZPGCVeEnjgpQXJlVCG5uWkxY6ZwtebMcXEIClFXzVujYTFrUbrgeMrIwYgcHKTaOJy3arS6C3wHdMcQUaEZwTNUHd8N0gVhmPcw0TkLwife1u9Fjny1XhgTkCI6567IMaoWVw+NHjNCxMujLx8kv69HX+kYDRZHuWAMToPWRRdiMTGEYGWseI+TxbHD4KFVHnKglOKeRHHG7Ng4+BuUP2g5SnFvqjhj0uBBCPdW3qCLoxT3ZogzMqLCERfkr3h95XiKtaPFGYnhQUiLDlXsTeAoxT0T0kXnRAX5Ymyicq8ITynWTBEv7R/k54mpGXGKvTsUwN0zM0R/Y96eBsyZmKiYQQjByrkZovfBzaDDgjlpihkMQ7Bkfjq0Il1ptVoWi5eMUPx5MCzB/AXpMIgYTCzLYPGq0YqfWSzLYPr8NHiJGEyEECx5YILiJy/DMpgwOwX+wd/N9idydMcQUaFYj1hk+GYoLmq2KmqVYH5Ir2I8wjEteJRixvdil7gMIYW7+2N51FjFC8bjcXMFwzK9CjJ44aE46f1JbmMkTIGni1wTb50BT6VMVMx4LGksAg3i8VY3jRY/z5yk6PoEwAPDRiDSs//QT6+0LIvnJgh7sVwxViQmI84/QHQeQwiem6qMwRCCOfFxomEZwPmgfXa2csaE2GiMGey6qeJP5yn7PFiGIH1QGCYnum54+MSCCWCI/F8hyxAkRgZhVrrrhoePLhkLDcvI3iGzDMGgUD/MH5/kcu79d42BXqeVbbixDEFIgBcWz0h1OXf1XaPg4aGXbSgwDIGvjzuWLRrhcu7SZSPh6+uuiOHhoceKVWNczp2/LBNBod6yjwoThkCn12D1g66fRTOXZiIyJkh2UTNCCDQaBmuemCHr776rumOIqBAhBI/FPobBHoNlGwp3R92Nkf7SQgk/jr8bqT5xshlrB83DjBBpbrufJCzC+MBE2Q/aewZNxvLocZLm/njYdMyT0byuVysHZeKRodIMjB8mjcfq2HTZjMWDkvHT1CmS5j44bAQeTpYXBiIAZkbH4cUx0yXNX5mUgh+NGiubMT4yGr+dPkvS/PmJ8XhuqrxFnCEEw8NC8eaCeZLmTx46GC8tmH7j9UllJIQE4p3ViyTtSEcNicRrq2aDEHmM6ABf/PWhpZIa0qXGhOIPD853dnKVCGEZghB
fL7z3+F3QSUgOjosMwutPLgbDMJINBZYh8Pd2x3vPLIeb3nVIJDLUD3/6xVJoNIzkRZxlCLw8DHjnVytEE1V7FRzkjdd/vRI6nUYWw82gwxuvrYKfr+vkSz8/D/zxjTVwc9OBlVjvg2EIdDoN/vD63QiW4EXw9HLDH/56Pzw89ZINBYYh0GhY/ObtexARJb4ZAACDuw6//ehh+Pp7SDZ4GIaAZRm8+Nf7MThBfhf376K+0e67avW/0H0XAKycFR+UfYBL7ZfAgBEM1xAQMITBgzEPYmKgvJ27nbfjneIvcaTxIljCCB67JT0da78/ZBkWhsvbjTp4Dm8VbseW6nOijN5cj8eHzsXaGOkdUgHnceE38w/gk9Iz/bZqv8EgBJRSPJ4wBT9MmCqLQSnF23nH8V7+KTAijF7+9xPH4tn0abJ2iZRS/D33HP6UdQKAcBv1XsZ9iel4ZexMaGTWDfj4chZeO3HU2ZzPBWN5YhJ+N302dDLzAL7OvYKX9h+Cg/KC1Sp7GXPjh+KNBXNhkHnqZldeEZ7fuu9GJdb+MCxDwPEUk+Ni8OeVC+Ah89TNkfxSPLtuNyy9rdpFGKOHROHt+xeKnpbpT2cKK/CzD3fCZLH126q9LyM1JhTvfH8J/L3knbDLLq7Gz97djk6TBQwB+ktP6WUMjQrCOz+5C0F+8pLBr16rx89f34K2DjMYQvr9bvUyBoX7463nlyEsWNyTd6tKy5vw7Etfo7nFCIYh/ebZ9P57WIgP/vSblYiK6D+JW0hVVS147tmvUF/X4ZIRGOiF37++GrGxwrk6/am+tg2/+tEXqCpvBsMS8NztDMIQUJ7C198Dv3n7HsQnCefq9KeWhk689Oi/UVZYB4Zl+q22SnqeiV4+bnjp7w8gZaRrT95/U3LW7zuGyACq3FSOQ42HcLblLBz05uZp/jp/zAiegUmBk+ClVR6bv26swe66kzjYcB62W4qU+em8sTB8EmaHjoO/Tvn9Kjc2YnP1WeyouQgLd3PZaV+tB5ZHjcXiyNEINsh7MPVVhbEFG8ov4uuKLJgcN1dL9dYacHfMKKyMyUS4u69iRrWxHeuuZWPdtWx02m8u4OWl1ePuIem4J24EBnkJJyq6Up2pC+uKcvB5QQ5arTcXYXLXaHF3fBrWJqYjztf17khITSYTvrqah89ys9F0SwExg0aDFcOScW9qOhICAhUzWsxmbMzLx2dZObc1s9OxLJYmD8M96WlICQ1RzGg3W7D18lV8di4bNe03F2HSMAwWpibgnlHpSI0IURyb7+q2YkdWAb44lY2K5vabxliGYHbqUKwZn46MmHDFDLPVht0XCrHuWDbK6ltvGmMIwbS0Ibh7cjpGDo1UzLBY7dh/vghfHcxGSVXTTWOEABPTYrFqRjpGJw1SnCthtTlw5Fwxvt6TjYLS+tvGx6QNwoq5GRibMVg0gVtMdjuHE2eKsWl7Fq4U3N74LyMtGssWjcD4MXGSPFP9iXPwOH26GFs2X8TlnNv7aiWnROKuZSMxcVKCaO6JKIPjceF0CbavP49LZ2+vkJuQHI4lq8dg0owk6CR4pvoTz/PIPlWC7Z+fxoWjRbcd7Y1NDMOSByZg8vzhMLgNzNH4b1J3DJEBFE85VJuL0OVogYO3Qc96INQQC1+dsFVtcphQYa6A2WGGhtHAS+OFwR6DBfNBeMqj3FSKNnsLbLwNbqwbItyiEaQXfuibHd0oMVbB6OgGSxj4aD0R7xUtmA9CKcU1YwWarK2w8Fa4swZEu4cj3E2MYUVhZzU67d1gCIGP1gPJPlHQCCSEUkpR0lWD2u4WdHM2eGgMiHYPQoyncC6BhbMjr60GHbZuEAL46tyR6hsBncipkuLOelSaWmByWOGh0SPaIwDx3sIMK+fA5ZZatNu6QSngp3dDmn8YDBrhB0ZJRxNKu5phctjgrtEh2sMPSb7CC6Sd55DTVIc2i7P6qq/egLTAULiLJLaWdrSgpL0ZXXYb3DVaRHr6IC0
gVJDh4HnkNtSjpdsMjqfwMeiREhwCL5H6JuUdbShsaUaXzQo3jQbhnt7ICAkTZHA8j7z6BrSYzbBzPHwMeiSHBMPbIOw5qO7owNWmJnRZrdBrNAj19MSI8HBBDxPPU+TXNaDZaIaN4+Bt0CMxNAh+7sKJffWdXbhS14guiwU6VoNgLw+MiAoXXCAppSioaURTlwlWuwNebnrEhwUhwFPYO9HUaUReTQM6uy3QsiwCPT0wIiYcWgEPE6UUxTXNaOwwwmKzw8tNjyFhAQjyEfZOtHSZkVdRhy6zFRqWgb+XO0bERkCrEV4gr1U3o6G1E91WB7zc9YgJ80eIv/Bmpt3YjdyyOnSaLGAZBn5ebsgYGgG9iBervLoFdU2d6LbY4OGuR3S4P8KChJ+5XSYL8kpq0WG0gBACXy83pCdEwCCyCFfVtKK2rh3mbicjMtwP4WG+gvPN3TbkXq1GR0+1WB8vN6QlRcJdZBGurW1DTXUbzGYr3Nx0CI/wQ2SksJfFYrEjL68KHZ3d4HkKb283pCRHOOuICKihrh3VFc0wGZ2MkHBfRA8OEpxvszqQl1OBjnZn1VkvbzckpUbB20f4+95U346q0kaYOi3Qu+kQHO6LmHjhZ5zd5sCVrAq0txjBOXh4ehuQmBYF3wB1ZRPU6I4hMgAyOzqR3XYAF1p3odN+e5OnOM9MjApYgCGeGWAU1hMxO0w403IcR5v2o8XWdNt4glcypgbNRopPuuKaJWZHN441ncfuuiOotdzeeyTJOw7zQqditP9wQQPDNcOKQw3Z2FR1AuWmhn4Y0VgWNRGTg9OgY5QV0rJwduytzcOX18+isPP2/ibDvMNxz+CxmB2eAoOLxFkhWTkH9lYX4LNrF3C5tfa28XjvINw/dBQWRafAXaNsR2LjOOyvKsbHhZdwsfH2HWKstz8eSszE0thkUQNDTA6ex8HyUnySl4Uztbf3aYn29sWDqRlYnpAMH72yGjIcz+N4eTk+zcnBifLy28Ig4V5euC89HStSUuAvcsRXTDylOF1Wic8v5uBocdltjGBPD6wdlY4V6ckI9FRW1IlSivNl1Vh3NgeHrpbeFqLw93DDmrHDsXJUKoJFjhG7YuRcr8X6k5exP+f2TsE+7gasmpCG5eNTEean7DlHKUV+eQO+PpqDveeLbqt+6ummx7JJKVg+JQ2RQb6KGABQVN6IzQdzsOfU7Y0A3Q1aLJ6airump2FQuLwQS1+VVTRh694c7D50BVbbzZ5lg16DedNTsHRuOmIHCS/+rlRZ1YLtO7KxZ+9ldHff7FnWalnMnpWCJYtGIC5OuQewtroVu7Zcwu5tWTAZb/b6ajQsps1JweLlI5EgM4zTV4217dj99XnsWn8eXbeUxWdYBpPnpGDRmrFIyohWXQROru4YIip1teMktlT/GTx1CJZfJ2BAwSPMMARrYl6Cp0aeez+vIxv/KnsXDmoXZPTmm4QZIvBE3LPw18lz7+d1FOGPhf9ENyfcV4QBAQ+KUEMgXkx6CqEGeT/uK+3leP7yh+hydIOg/7h8LyNY74s/ZTyKQR7yftxX22vwxPnP0Goz3biWECNA74m/jb4fiT7ykrhKOprw0Il1aOjuEmT0vj9fnRven7gaGQHChZ36U3lnG+4/uAGVxnbBuHzvo8JDq8P705ZhfOggWYyark7cv3MjSttbBXNwehkGjQZ/m70Y0wbFymI0Go14eMsWXG1qEs3zIXCeAPrzvHmYGy9cF6I/tZq78fj6bciprruRq9CfGOLMifrtollYmub61EhfdVms+NHnO3CurMolAwBeXDwdq8eIHyG/VWarHb/4dBeO5193yaCgeHrxZNw3Vd5Rd6vdgZf+vQ8HLxWLMtieXInHFo/DIwuEa/L0J7uDwx8+PIhdJ/JdMjie4r6Fo/D4qomywkYcx+MvHx7G5t3ZkhjL5mfgRw9Pl3Wqhecp/vXvY/hq/VnBnBLAefyW43jMnZOKp38yFxoRj9WtopTii49
O4NMPjjp/5y4YU2Yk4ecvLYVOL32TRinF5k9O4V9v7AUhcMkYPSUBv3zjbhjcv72Qzh1DRIWy2w5gR827kuczYOCpDcDDsX+Cl1baLuBi61l8VP5XABA0Qm5leGi88GzCrxGgl5YHcLE1D38s/Cdoz/9IYbhrDPh96s9FwzV9ldVagmdz/gWe8pLqkDBgYGC1eDfzCQzxCpfEuNxWiUfPfAQ75SQ1nGMJgZZo8MG4h5Dm5/roJwBcba/HmiOfwsrZJRX5YkDAMgw+mrQGY4JjJDGudbRg2Z7PYLLbpDF6jor+a9pyTIuUVjmxqrMdSzevQ7ulWxKDwJkA9+6shVgwJEESo8FoxLJ169BkMklmAMAf5szBimRpJ6ZaTWas/ugr1LR3yiq69tLcaVg7Kl3S3M5uC+795waUNbXKamT49JyJeGTKKElzzVY7HnnvaxRUN8piPDprNJ5cIFwgsK9sdgeeeHszsq/VyioXfvf0dPxstbQkcIeDw8/f2oazeeWCycz9aeHkZLzw6GxJDI7j8cqbO3DsdLHkuhoEwJTx8fj1zxZLMngopfjTG7uxd3+eRIIzH2f0qFi89uoKSQYPpRR///M+bN1wXgaDICU9Cr9/517odNKMkY/fOYCv3j8qmcEwBHFJ4Xj9o0e+NWNEzvp95/huH103XsbOmvdk/Q0PHl32FqwrfwWcQIfbmxima/i4/O+SDYRehsnRhXev/QFWEe9Gr8pN1Xij6ANQkYZ6/THMDgt+nf8XmByuO5pWmZvwy9yPwEk0QnoZFs6Gn+W8jzab6+7FteZ2PHHuM9h5aUYI4CxQZeMdeOL8Z6jv7nA5v9lixEPH10k2QgBnp2GO5/H9UxtQYWx1Ob/DasF9B9ZLNkIAZ1iCpxQ/OLYVhW23h+1ulcluw707Nko2QgCnd4dSih8f3IWchtvDXbfK6nDgwc2bJRshNxgAnt+/H2erbg8T3SoHz+Oxr7bKNkIA4Dd7j+BYyXXXr4lS/PiLnbKNEAB4a99J7MktksR4/rPdso0QAPjgwHlsPSfcPr6vXv30ALKv1cjuWfLV4Rx8dThH0ty3Pjsq2wgBgJ3H8/HxdmkL8gdfnMBRGUYI4PxeHT1djPc/Py5p/hfrzsgyQgDnqajzF8rw7l8PSJq/dcN5WUaIk0FxJacKf/7dDknz92+5JMsIAZwek2tXa/GHZ9fL+rtvS3cMkT463PA5lNRZpuDRYC1HQedpl3N31G6EkraOPHg0Wutxoc014+vqPeCo/FqpPHi02NpxqME1Y135Ydh54dCVMIOiw2bCtuozLud+fv00zJxVdtVXHhQmhwVfXHfN+OzaRbTbpC/efRlWzo4Pilwzviy5jAazUTaDwnmk+q95rhmbi66iorNdEYNSincuuv7M9xQXo7i5WXFp+D+fOuVyztGS68itbVDMeOPwSZeL8rmyKpwrq5JtIPTqrX0nXZZ8v1LZgKNXyhQz/rLzpMtuuWW1LdhzrlC2gdCrf2w/DcstORi3qrapA1sOXVbM+GjrWZjMVtE5be0mfLX1gjIAgK+2XURbu0l0jtFkwefrXH/H+xOlwI6d2ahvEN/YWC12fCLTQPgPg+LQ3jxUXhffdDjsHP791j5FDJ6nOHukEEV51Yr+/pvUHUOkR/Xd11HTXQSqsJw6AYPzLTtF5zRZG1DYdUVxWXgCgiON+0QftK22dpxvyVHMoKDYXX8UvEANEQDosptxoD5LsM6IK/Gg2Fp9Cg5euH212WHDlspLihckjlJsqryI7luOH/eVjeewrvSS4sWCoxRbynPRaRP2UnE8j08KLykuoc9Rit0VhWjqFn7QUkrxcV6W8lLqlOJo5XVUdYo/aD/JzlZcSp2nFBdra1HcfHvid199dj5bcTM+CqC4sRm5tbcfRe2rL87kqGqUV9PWibNltx8T7av1J9UxWrrMOJZfJjpn47FcVQxjtw0HLxWLztl2OA9EBcNu57DnVIHonJ0H8/qtlSJVPE+
x69AV0TkHDuTD5sLoEhMhBLt25YjOOXboKswmcaNLTAxLsGPzJdE5Z48Wor1V3OgSE8sy2PnVWcV//03pjiHSo0ute1Q1sKPgUd1dhAaLsGv4RNMhlQyKOksNykwlgnMONpxW0TXGqSZrKy63FwqO7627qNgI6VW73YSTTcIPj721uTCLGBFSZHJYsa9WmHGgphDttm7BcSmy8xy2VOQKjh+vvY46s+swlJgoBTZcE2ZcqKtBaXurqs+dIQTrrl4WHL/a2IjchgbFRhvgzN9Zd1mYUd7ShrPlVeqa8TEEX1wQZjR0GHGkoEx1M751Z4QZ7aZu7MkqUsVgCMGXJ3IEx80WG7aduqKKQQjBV4ezBcftDg6bD19W1fCPAtiwL1tw88RxPDbvFh6XxKAUm3dlCXaypZRi89aLiq8POI2d7TuzYbcLb562bTiv6mQKz1Hs25GNbrPwc2/7F2dUdYLmOB5HduWiq911+P3b1B1DpEflpjxVDex6VWUWXsBLjIWqGQQMykzCu5iCzmuywyW3iiUMCrtuL9rTq7x213F4KYwrHeWC49mtlWBd9OGRwshpFd65XmquhkYlg4Agq1nY1XmxqUY1gwfF+Qbh/IoL9TWKvQi94ijF2X6O+vbqYk2Nyt7JPYxq4XuVXX37kWnZDJ7ifIUwI7eqTpUx1cu4VC7MKKhudBlWcSW+58ivkEprW1yGVVyJUoqCCuHXWlXfhi4VO/xeVda3wSgQnmluNaK51djvmBw1iVzHaLSipqZNNaOry4Ka2v6v43BwKCmqU2VQAc66JuVlt5dZ6NXVnEpVhiHgDO+UFKj/rQ2k7hgiPbJwyt1dvWLAwMIJ/6jMA8IgMDuEr9MlMiZdRDRhtdNuVm3sAIDRIeyN6LJbVHtdeMqjyy7OUPs+eFBRr4pY2EaO2qwiDKtFccikrzoswq+102pVXF1TMsNiHZD30WUVXjw7LeoXVgAwWoV3rV3dA8OwOzjYHP0bG10u8i7kSOhaA8oQMGi6TAPz+xC7ltE4cAxjV//XurVOyDfBsNkccIh4ZOTI1Dlw92QgdMcQ6ZHSomR9RQGwRPj4ldKiZHIYmgFgOK8jzNAqLHx223VEGYzijsO9IiCir9XpqVC/8OlFertoGPndVPuTWP8YHcsOgFkIwQqivWMDcdLfFUOttwKAaD8fMf53jQFA0PhTWg69P2k13zxDI8gYuHslVJ1WiK1EGoES8d8GQ24XYCWM/5buGCI9kluQrD9R8PDQ+AqO+2j9VC+uPDh4a4V7vPjrfFQzKOXhqxMuIe2v91YdNgEAX51wpUp/vafq3TEhBP56YUagQVk1zr5iCYMAvfB1Ag0eqnIeAGe+QIibyPtwcwfHq/MeMYQg2EPkfbi7q34fBBBneMhrDqfkOoEiJd7lyE+EESCzyZ2QvN30goaIv/fAMLQaFh6G/utK+Puo/30Azu+Wj2f/1XX9fAbmfQCAr8A98fZ2U5VX0Vd+fv0z3D30A7a4+/n3f99ZloGnl7JKyLfKN2BgPtuB0h1DpEepvlOgdnfMEi3ivYSLHY30G6c6FEDAYLivcAv6CYEjByTcMDZghOD49ODhqsMmHOUxLWS44Pic8NQBYcwJTxEcnxeVNCCM+VHCFT0XDEpUvcvnKcWimGGC43Nj41WXb+YpxZI4YcaM2FhoByA0szgxUXBswpBBcNMqK8/fK4YQLE4Vfh8jB0fAR2bH3f4Yi9KF30dydAiCRfrNSBHLEMwfKfw+hoQHIDrEV9UTi2UIZo8U/u6EBXojcbDyBoS9jEkjYqEXKNTl4+2GEanRqgwFhiEYkRYNH+/+jR29XotxY+NUMxLiQxEi0IGYEIKpM5NVeS0IASIHBWCQSN+aaQuHg1HpGQkM8UZ8iryq0N+07hgiPRruN0M05OFKDFik+UyFgRV+AI3yHwc9o6x/iJPBIMN3FHy0voJzRvsPh5dG+UPQyUhCqEG4guuogAQE64Vfg2sGQYpPDGI9hcu
wZ/hFI9YzSPGDloBgqFcI0nyFq6sm+YZiuH84GIUUAiDSwxcTQoRLpMd4+2Fi2CBVyaQBBnfMjBoqOB7i4YnZg+NUMbx0OiyME66u6uvmhiXDhqli6FgWdyUJG20eOh1WZCSrYhACrMgQNj51Gg3uHpOmyttGKcWq0cKl3lmGwd2ThqticDzFqgnCDEII1kzPUHz9G4xp6aJzVs3OUBWS43iKFbPEX+fyBRmqEjB5nmL5fOGNEwAsXZKpmnHXUuENIAAsWj5S8OSOFFEAS1eOFjX8Ftw9BrwKBiEEi+8ZN6BhnoHQd+vV/BflxnoizWcqiMJbwoPDyID5onN0jB4TAqcpPsLLg8eUoFmic7SMBnNDJysOz/DgMS90iugchjBYFjVRBYNiWdRE0TmEENwzeJxi3w4FxT2Dx7rczd0/dLTiGh8AcH/cKJcLzoOJIxWHNRgQ3J8wQjQnAQAeSMlQzGAJwZphaaLdiAFg7fDhqhjLkpPhpRc3xNdkqmPMHRaPABchnpWjUxVdH3Du8CcnDEaEi+Z0d41NUbwDZxmCEbERGBIq3ltq/thh0Os0in6FDEMQHxmE5Bjxdg4zxsTDy0OvKM+JIQQRwT4YmSzeamH8qDj4+3koMtwYQhDg54Hxo8TbIGSkD0J4mK+iz4QQwNPTgKlThL1gAJCYHIHYoSGKGXq9FjPmifcyiokLQUrmIMVeEZZlMHtZpqK//SZ1xxDpoxmhD8BHG6jIUJgYtAphbq57giwIW4ZgfagixtSg2RjqJf5jAIClEbMQ4xEpm0EATAsahxF+wjvKXi2LmogUnxjZ3gQCginBaZga7Lp52F1RIzAucIhsBgOCiUFDsTjS9Y5xYVQyZkckyGawhGBkYDTuGeL6Rz0jcgiWx6bIXjBYQpAcEILvJ492OXdseBTuS05XxIj19cdTI8e5nJsWGoofjJLWZ+VWRri3N56Z4Lp/ypBAf/x0mrQ+K7cyAj098NysyS7nhvt647kF4sZ2f2IIgbfBgF8tnu5yrr+nO15cNVMRw12vw8t3i284AGdH3VcenCPbjCaEQK/R4NXvzXFpqOt1Gvz68XmyNx2EOBe9V5+Y75KhYRm88vRCECIvOE4AEIbg5acXukysZRiCF55fDJZVkjxO8MvnFrnsA0MIwbMvLYVWp5EdzqIU+PlLS+Dh4dpj/pNXl8HgplVk8PzolaXwFchB+W/qjiHSR+4ab9w3+DV4aQNkLeKj/BdgWvBaSXPdWHc8NfQXCNQHy2KM9Z+EFZH3SpprYPV4cdiTiHIPlbXAjg3IwA+G3CPpR6RjNPjd8Icw1CtSMoMAGB2QgF8mrQEjIdlVw7B4c+QapPtHS34QEhCMCIjBnzJXQyPhdA9DCN4csxQTQmIl3ykGBCl+YfjHhFXQs67DeYQQ/H7cXMyOFg6v9Pe64n0D8fGMlXBz4anoZbwycTqWxkvvQMsSgkE+vvhs4Qp46aSFDJ+ZOBH3pEnvQMsSgjAvL3y2YgX83PqP4d+qxyaMwiPjxN3gtzICPN3x8b3LEewlLSx57/gMPDXTtfF1g8EQ+LgZ8OHDy116Q3q1dEwyfrbUafBI+W6xDIGHQYe//2AZYoKlJc/PGhmPF+6d4VzEJUAYhsCg0+Cdp5ZiaKS0Ttvj02Px4mNzwDBEMkOrYfH6TxcjeYi0LtgZqdF49dleQ8E1hBDiNHR+vggZqdGSGMOGheM3v14OrYaVtIgT4nwvz/9iIcaOkdZ4MnZoCH771hroDRrJDEKAHz+3AJOnS/vtRsYE4ncfPAQ3Dz0YVsIH0jPl+8/Ox+y7xENY/y3d6b7bj0yODuyp/eeN3jG3ln0nYJwnZFgfTA5eg5H+82RbwGaHCRuqPsXFtjP9NsD7D8MTc0MXY3qwfEY3Z8FH1zfiaNM58PT2BngEztbj7qwblkTMxLKIOZIMhL6ycnb8/doO7K4939N75mb1MtxYPVZETcIDg2dJMhD6ysY58JeiA9hQfgG2nsa
CfTmk5/83sFqsHjQaTyXOhJaRl+/j4Hn8Jf8YPrl2HmaH/cY1+zIA59Hl1bEj8GzadBhYeYmVPKV4L+80Psg/jy67DQzIbWEhAufR0OWxKXhx1Ax4aOV1yqSU4v2cC/hb9nl09NQXuTVZliEEDAgWD03EyxOnw0cvL3mTUorPcnLw7tmzaO3uFmQAwLyhQ/Hy9OkIcJd/OuLr7Ct45+hpNBlNYAm5LWTDEAJKKWYkDMFL86YjRKIR0lc7cwrx530nUdfRJcqYFB+DF5fMkGyE9NXByyX48/YTqG7p6Le9fe+/jYmPxgsrp2NQkPwTfCfzruOtDcdQ0dAmysiIi8Bza6cjLkJaF+++uphfibc+O4Ky6hZRRsqQMPzswelIHCyti3df5RXW4O33D6G4rAEsS8BxtzB62trHDwnBTx6dgdTECNmM4uJ6vPPefhQU1IoyBscE4okfzsSIjBjZjOuljXj3T7txJafyxvX6Y0QOCsAPfjwbo8dL36T0qqaiGe++uh05Z0vBsMxtuSO9/xYa6YdHfjYPE2dJ6349UJKzft8xRETUZW9BVtt+5LQdhNHRDo7aoWPcEOYWh9H+CxDvPVpVgquT0YFTLcdwuvko2u1tcFA79IwBEW7RmBo0C+m+o6CRuajezjDicOMZHGg4hWZbG+y8HQZGjyj3MMwNnYLxgSOgY9SdVuiyd2N//UVsqz6DBksbbLwdekaHKI8gLI0cjxkhGTCw6tpPmxxW7Kq+jPUV51FtboWVs0PPahHl7o9VMaOxIGI4PDTKk4EBZ4+bnZX5+KL0Iq53tcLC2aFnNYj08MWa2EzcFZMKL626UxcWhx07KwrxWWE2Sjqa0e1wMsI8vHDP0HSsjEuFr16a90BIVs6BvWUl+CQvG0WtTTDb7dCxGoR4eODuYWlYlZiCQHd1Llo7x+FgaSk+y8nBlYYGmO12aFkWQR4eWJmcjNWpqQj2VHd6xMHzOFZyHZ9fyMHlmnqY7TZoGRb+Hm64Ky0Zq0ekIsxH+Ki5FPE8xcmScqw7k4OsilqYbDZoGAZ+Hm5YnD4Mq0anIdJf+Mi8FFFKca64El+dvIwLJVUwW+1gGAIfdwPmZyZi5YQ0RQbIrYzskhqsP5KDc1crYbLYwDAEXu56zB6ZgBVT0hAbLp53IoVx5VodNh7IwenL12HqtoEA8HQ3YProoVg2YziGDpLmaRFT4bV6bNmdjRPnr91omufhrsek0XG4a34GEuNCVTNKSxuwfUc2jp8ogtFkBaUU7u56jB0zBEsWj0DSsHDVp9Eqrjdh56aLOHYoH12dFlBK4eauR+boWCxeOQqp6dGqGTUVzdj51Xkc3XUZXR3d4Hkebu56pI0ejMVrxyF9TKxqhhLdMUQEZOOMuN61Gy2WfNj4TrBEB4MmAIM8ZyPIMNzlh0UpdTnHxplxtfMwasz5sHJGsIwW7qwPErynIMp9gBi8FVltp1BqLICZM4IlGnhovDDcZwzivVJdejWkMew413IR+Z0FMDpMYAgDL40nMv3Ske47MAw778Cp5lxcbC1Ap90MQgi8te4YG5CCsQHJLgvASWE4eA7HGwtwsrEIHXZn1VlvrTvGBcVjWkiSS8+JFAZHeRyrL8Hh2mK02UzgKYW3zg0TgmMxJyLJZfhGCoOnFCfqyrCnsght1m44eA4+OjeMCx2EhYOGuQzfSGFQSnGmthI7y4rQYjHDznHw0RswMjQCS+OSXHpnpDIu1NZgR3Ehmswm2DkO3noDMkLDsDQxCd4uklmlMnJq67EjvxANRiNsDg5eej1Sw0KwLDXJ5fFdKQwAyK9twLacAjR0dsFid8DLoMewsGDclZEEfxcJs1IZxXXN2HYhH3VtXbDY7PA06DE0LBB3jU5GoLe4Idn7aHfFKatrwfYz+aht7YTZaoeHQYfYsAAsHZeMED9xI08qo7K+DTtP5KO6sR1mix0ebjpEh/pi0aQUhAeJG3lSGXWNHdh5OA9VtW0wddvg7qZDRIg
vFk5PQWSYuJEnldHY1IU9+3NRXtkCs9kGNzctQkN8MG9WKgZFixt5UhmtLUbs3XUZ10sbYTZZoTdoERzigznz0zB4SLDo3/ZyXDHaW004sD0bpYV1MBkt0Bu0CAz2xoxF6Rg6LNwlQ4ruGCK3XsdWicL2L1DWtRs8tfeEC3g4AwcMKDj4aAcjwXc1Yr0XgVHg5eiw1eNC60Zcad8PB7XeCK04gywMeHDw1YZjhP9SDPebD5bI90C021pwtGkXzrYchpW39FzX6Y7r/b/9dUGYFDgXEwJnQcvI90B02Duxu24/Djceh5nrFmD4YXbINMwKmQ4DK98D0Wk3YXP1UeysPYUuh/kmBksYcJSHn9YLiyImYmnEFHho5HsgjHYLviw/ha8rz6LNZrpx3b4MX607lkePwZqYCfDRyQ8dmBw2fF56Hl+UXkCjpatfhrfWgNWDM/Fg3FgEKCigZnHY8VlxFj4puogaUyc0Pdd1Vth1hhI8tTqsjkvHo8NGI8RdvnfAxnFYV3AZH+VfQkVne78MN40WqxNS8UjqSER6yfcOOHge6/Pz8HFOFkrbWm9j8JRCx7JYNiwZj44YiRhf+d4BnlJszruKj89noaipGSzDgOedjN4Qi5ZlsTg5EQ+PyURcoHzvAKUUO3ML8cmZLOTXNoJlCHie/ofR837mpyTgexMzkRAq3ztAKcX+3BJ8diwLlyvqnAxKQSluOlkyK20oHpqWieQoZd6BY7ml+PTgJWRdqxFkTEmLxf0zRyJ9iLLF6UxeOb7YcxHn8yt7GM73x/RkplKeYlxaDO6bPwqZw8RP1wgp60ol1m2/gDNZ1515GdT5XSCEgCHOI8Sj0gZhzeKRGJM+WBHjytUafLXxHE6dLXXmyfRlMADHUQxPjcLqZaMwfmycIkZRYS2+XncWJ44WgsIZpuV52pOr4gzlJKVEYvnq0Zg0NVGRl6OsuB5ff3wSx/dduXHtWxlDk8Jx19pxmDY/TZUn5Y4h0kf15gs4XvdzcNQGCrE6/c5vV7j7BEwM/S00jHTXeI05H5uqfgU7b7ktn+R2BhDlnoalkS9Dz0pfmKrMZfhn6e/RzZlcNs4jIIh2j8Ojsc/CQyN9Yaoy1+APhX9Gp71LIiMSzyb8GL466QtTjbkJz+X+Dc3WdpfHZhkQRLgF4XdpjyPYIH1hqutuw5MXPkK1qUUSI9TNF++N+h6iPKQvTI3dXXjk1Be41tnkksESggC9J/498V7EeUtfmFotZnzvyAbkttS5PBnBEgJfnRs+mbEayf7SF6YOqwXf378F5+udTdzEOCwh8NDq8NHc5cgMkR6bN9lseGL3DpyoLJfE0Gs0eH/hUoyPkpaECABWhwNPb9+D/UXXbsvv6Y+hYRm8d9ciTI2TvjDZHBxe3HYA2y8X9JsXcxODISAgeHPlfMxOlh7/53gev9t8BBvO5EpiUAr85u7ZWDxSepIyz1O8s/UEPj14SRKDpxTPr56OlZOFCxDeKkopPth6Bv/aehZMj7EmxuB4ih+tnoy18zIlL36UUny5/SL++tmxfnNW+qr3NTy8ajweWjlO1gK7bWc23v7bATBEGuPuFaPx/YemyDrVcmBPLt74/U4QgttyVvpjLFw6Ak/+dI6seiAnDuTjj89/DUohWu+EMASUp5i5aDh+/NISaLXKUgPkrN//p0/NNHbn4EjtT+CgFhdGCND76Kozn8Gxup+Dp9I6W9Z3l+DryuckGCG9DIpqcx42Vb0ABy+tzX1ddxXeu/YqzBKMECeFospcir9dew1WTlpzowZLI35z9XVJRsh/GDV4reAN0QZ5fdVkbcczOX9Bs7VDUu0OHhQ13c14JucvaLdJ69DZajXi+2ffR425VTKjwdKBR8/+E42WDkmMDls37jv+CUq7miUxOErRYjVi7bGPUW2S1gXUZLdh7cF1uNJaL+l4JkedzfdW7/8C1zqaJTEsDjse3LsRFxtqer6ZrhlGuw1rd23AleYGSQw7x+H7O7f
iZFWFZIbF4cBD2zbhUl2NJAbH8/jRll04WOzsGC2FYXNweGzjNpwuF+7O3Fc8T/HLLfuw43KB8/93sX/jeAqO5/GT9TtxuFC4k3VfUUrx2qZD+PpMrmQGTyle+HIfdmcJd/2+VW9vcRohUhmUAr/76jA2n8yTzPjX1rP419azToaLQmK9i/tf1h/Hun1Zkhnrd17CXz87dtM1hNT7Gj7ccBoffX1GMmPH7hz8+a8HnIu3RMZXG8/jHx8elcw4vP8KXv/tDvA8FTVC+jJ2bcvCX97cI7nY3Jmjhfjds+vBcbzLomu0h3Fo52W8+eIW8CrbR0jR/1lDxMYZcazuGQA8XD+a/iMKHg3dF5HX+i+Xc+28FZuqXgBHHRKMkJsZdd2FON74ocu5HHXgg7I/wsHbZDF48KizVGFT9Ueu51IebxS9i27OIskI6ctosDTi/bKPXc6llOLVKx+i0y7NmOrLaLZ24A8Fn0qa/8ucr9Bk7ZJVup2jPNrtZjyb9YWkH/YvL21DtblNJoPC6LDiB6e/ksR48fw+FHc0yyruxVEKC2fHQ4c3wCHh4fHbc0dxualeFoOnFHaew4N7N8Ii0Bm2r/589jTOVlfJKnPPUwqOUjy8fYtoJ91evX/2Io5cK5PFcBpFFI9v3I4Ws2tDet35HOzKK5JVs6N37k/X70JdR5fL+dsuXMXGs1fk1wUB8MKX+1De5NrIPZhdgs8OXZJJcOq3Xx5CUZVwi/penckrxwdbpS/2ffXOl8dwudi1AZpbWIN3PzmqiPHhhtM4l3Pd5byS0ga89d4BRYwNmy/g2Mkil/OqK1vw+m93yL4+pcDu7TnYvyfX5dym+g787tn1N/5ODuPo3jzsWH9e9uuTq2/FEPnrX/+KmJgYGAwGjBkzBufPf/Nv7HrXbth5k6zF+z+iKO7YAAcv7k0o6jyGbq5DEYOCIrd9N6ycSXReXsdFtNmbZS3e/2HwuNR2Al128Z3+lY6rqLXUK2Lw4HGxLRtNVvFd+NXOchQbq8ApZGS3F6PCVC86r6SzDpdayxT1j+Eoj/yOalztqBadV2lsxeG6YkXVPznK41pXE842iT8EG81GbCvPV9SjhqMU1aYOHK65Jjqvw2rB+sI8xYzmbjN2Xxd/0Jrtdnyam62obi1PKbqsVmwtKhCdZ+M4fHQ+SxGDUqDbYcfGy/nir4Wn+PDkRQUEpzHi4HmsvyC+YFBK8dGRi4qqpPYaVV+dynE595MDFxWXnicE+PKoa8YXey6qqir7pQSvyFc7LoJVyGAYgq+2u/48N227BKXtlRiGYP0m1+vcts0XFfcGIwT4et1ZlxubXV9fAOfgZRkhfbXxk1PfuFfkGzdE1q9fj6effhovv/wysrKyMHz4cMyZMweNja4ta6WilKKoY72qa9h5EyqNh0TnZLVuVdXp1kHtyO84KDrnRNM+xWXnAecD6lzrEdE5+xuOKC47DziTWA83Hheds6P2BFgVDJYw2Fl7SnTOpspzqroCs4TB1xVnReesv35JVQ8RljD4ovSC6JyvruUovr6TQfBJofiDdmPxFdh5V+FKYTGE4OMr4gvGjuJCmO12xQwA+CQnS/RBe7D4Gtq6uxVfn1Lg80s5ot2LT14rR32ntNBgf+IpxVcXcmFzCN/v7Ou1KGtsVdxsgOMpNp/Lh9kqfL+LqhpxpbxecRNGjqfYfb4AnWbhDVpVQxvO51cq7uvC8RRHs66hqU34fje1GnHi/DWXoRIh8TzF+dwKVNe3C87p7OrGwSNXXYZKxBhXC+twrUx4nes227B352XwChmUAhXlzcjPE9482WwO7Np4QVWfnab6DmSdkRZeVKpv3BB566238Oijj+Khhx5CUlIS/vGPf8Dd3R3//ve/vzFmsyUXRns15IRkbhdBSccmwdEmSxkaraWqO91ebtspONZsrUeZqUChV8cpCopTzcLuxXZbB3La8xR5Q3rFg8ehhmOCC4bJ0Y3jTTmKvCG94iiPffVnYef
7DwdYOTt21mSp6qbLUR776i7D5Og/HMBTig3lWYp7ofQyDtcVo8Ui7An7oiRbVcdejlKcbqhAtbFdmFGQo/j6gPNe5DbXo6i1SXDOurzLqrrDUgBl7W3Irq8TnPNldp4qwxAA6ruMOF1eJTi+4VKeqkZ8ANDRbcHRojLB8U3n8hTv8HvVbbNj/+ViwfEtp/NVMxw8j93nhfNRdhxXzwAFdp28Kji852i+2kbpYBmCnYeEc14OHilQ1cAOcBYt27VX2BN2/GgBLBZ1hjrLMti9PVtw/PzxInR1KDfUAYBhCXZvVOYRlMz4Ji9us9lw6dIlzJz5n54LDMNg5syZOHPm9hii1WpFZ2fnTf8pUZdd3L0uTVT0Om222gFhdNiFww0ttoHxGrXbW8ALLNBN1mbVxhQAmDgzugUSY5us7aoMhF5ZeTs67P3vlFptRlgFjBQ5clAeTQJJq512C7rsrnMWXIkHRa25vd8xO8+hsVv57ruvKkUMkaqujgH41IGKTmFGRXv7gDAqO4QZ5a1tqoy2G4x2Ycb1pjZVxifg9FJVtQmHSK83tine4fdKwzCobhVmVA4Ag2UYVDe3C45XNbSr/jwIIahuFGbU1LcpasLXVzwFahpEGLVtqjvUchyPmlrhvJ26mjawGvWM6qpWYUZVq7Qy8CLiOYrqCmkJ8Er1jRoizc3N4DgOISE3l/oNCQlBff3tC/Dvf/97+Pj43PgvKkrZuXIHb4ZqkxkAR4UtSTuvzsrslYPaQAUWaaknXqTIJpDvYuXVL6y9sggwLNzAMcwC1zI7pJ1AkiKTwLW+FYZ94BhGgWs5eB72AYr5mkReb7dD3W6vV0abMENt6AdwGgkmMYbImFQRQmCyCl9HbEw6RPw6Jov63yGlgElkF2+22BTnIvSKpxRmi8jnYbFD7deXUnqjYmt/6h6A9wEARpMIo9s+AKsUYBJl2FR7DAHALMIYCH2nTs08//zz6OjouPFfVZWwu1RMGsYd6sIyPdchwrVEtDLqjIgzdCACeQ16Vl0p8b7SMf1fS8+oK4neV25s//dESdEzIXkI3BO1pd2lXMtDo65E/U3X0vbPcJfZW0ZMXgIMDcNAqzQL7xZ5ijTLc9OqaxsgheE+AAyOUnjqhO+7h179Z0IpFb2Op2FgPndRhpv63wghgIdB+J57uOlUlxNnGAJ3kfvhblDWefYmBiHwdBf57hp0qr0uAODlKcJw0w6Ix9BTlKEbEI+hhwhjIPSNGiKBgYFgWRYNDTfXHGhoaEBo6O1Fl/R6Pby9vW/6T4m8ddILIQmJgIG3LkZw3F8fqZoBEPjqhAtDBenU91MAAH9dkGBJ9mBDkKqE2155abxgEDBqgvR+0Lgo1y5FbqwePtr+e5f46zzhprKXDeBsahds6P9756U1wEer3gBlCEGku68gP9xdfTsDAmCQl3ARuBgfvwHZjQ32EWYM8fMfkN3YYD9hRlxgwMAw/EUYwQGq8x44ShETIHKvQtQzHBwv2q9mcKi/+vfB8Rgk0hl4UJif+rAJTzFIpCR7dLj/jVoXikWAqHARRqS/+hwRhiAq0l9wPDI6AJxDfR5K9CDhBoaRMYGKk2F7xbAMomNdl5ZXxfgmL67T6ZCZmYlDh/5z+oTneRw6dAjjxklvwS1XAfoUeGtjoCY8Q8FjqM9ywfFAfQzCDAkqF3GKDL9FgqP++mAM9UxRdaKFgGBi4GzBcR+tN0b6pas8NUMwM2SK4E7IQ2PAtOBMVadmGDCYFzpWsHOvjtVgcWSm6lMz88Mz4C7gEWEIwd2xmapPzcwJHwY/vXBJ+XvjR4BR8b1iCcHk8FiEewgbNPcNS1d8fcB5L0YEhyPOV7ga7drU4ap2YwRAfEAg0oKFu7iuyUhTzYjw8caYQcJh4FUjU1XnVgR4uGFyfIzg+Iqx6hkeeh1mpQlXcb1rfIpqhk7LYu6oRMHxRZNSVJ3QAACGAAsmCneKnTc1eUCMnUUzUgXHZ0xLglajbvPE8RQ
L5wpXo504JRHu7uo2TxzHY8GSDMHxURPj4eOnrrklz/GYv2Kkqmu40jcemnn66afxwQcf4JNPPkFBQQEef/xxmEwmPPTQQ98YkxCCBN9VUBOe0TFeiPKcJjonw3+JqkRPDdFjmM900TmTguaqOtHCEAaj/aeKzpkVMk0VgwKYFjRJdM6i8ImqTs3w4LEgfILonGVRo1WfmlkRPUZ0zqrBIyRXMxRirIkV/1GvjEtT9aDlKMV98SNE59w1NBk6F834xMRTigeSxRkLhibASyTk4UoUwAPDM0Rd/dOHxiLQRYM5V7ovM13UuBw7OBqRft6KTUOGEKwZPRxaVnhhS40OxdCwQMWfO8sQrBibCoNIOe4h4YFIjw1XbEizDMHCMUnwEgnxhAf5YHxajGLPC8sQTB8VD39v4c/U39cDU8fGq2KMGzEYoSLN9jw99Jg9IxmswkRPhiFITY5AjIi3wmDQYt6iDMVhJkIIYocEI0GkSZ1Gy2LhqlHKQ1kECI30Q/roWGV/L1HfuCGyevVqvPHGG3jppZeQnp6OnJwc7N2797YE1oFWjNdc6BkfxTU4EnzvBkvEH6LxXpPgqQlQyCBI91sEnYtckyTvDAToQhR5LAgIRvtPddlvJsk7EdFukYoYDAjG+o9EgF7YBQkACd7RSPIe7LJrb/8MBqP9hyHSXdw9GOsVgrGBQxU9aFnCIMMvBok+4j1UItx9MSciSTFjmE8IRgUOEp0XaPDA8tg0RV4RlhAM9vLD1PAhovO8dHrcO2y4osWVJQRhHl6YO1i8h4peo8GD6SMUMRhC4O/mhsXxwrtvwJnv8vCYTAWEnjwBvQ7LUsX7tDAMwSMTRynacjAE0GlYrMwU3n0DzkXl4emjFCVIEjjfy+oJaS7nPjh7pCoP0t1T013OWTtvpPIaH5TinjniBi4A3L1I+fvgeIo1i0e5nLdsSabivSzPU6xZIb6pAYDFyzLBsIwiA5RSitVrXffNmb98JLQ6jbLcHQqsfHCi6rwfV/pWklWffPJJVFRUwGq14ty5cxgzxvUHpFZaxgNTw98GIRrIe5sEEe6TkOzn2mOjYXRYHvVbZ8KpDAYBgyj3NEwKftDlXJaweGzIc9CzBlmGAgGDaPchWBrxgOu5hOCZhCfhqfGQxWDAIMItHA/H3i9p/ovJD8Ff5y0rfMKAQZhbAJ5NvE/S/NeGr0aEm78sBksYBOq98IeMe6QxRizCEK8gWbUlepvS/X3cGkk/6l+PmoXUgFDZDE+tHh9NXw1WQjLqL0ZPweiwSFlGFUsI9KwGn8xdAb0Ej8pTo8dhyqDBshgMIdAyLD5ashweEjwq3xudiXmJ8bIe5gwhYAjB+yuXws/ddd7PqpGpWJ6RLMuoIj3/+701ixHs3X9uU18tGJGIeycLu9mFGcCf7luAqABfl/OnpA3Bo/OUPX9/fd8cxIUL7/B7NSopGk+tFveQCunZ+2cgeUiYy3lJQ8PwzCMzXc7rTz+8bzIyU1znEcbGBOEXz8xXxLh/zThJXXjDI/zwwitLQSlkGyPLVo3G9NkpLucFBHvjpbfWgBB5DEKA2UszMH/5NxuWAb5jp2YGWgGGJMwIfw9axg0EruJ9zlsR7TkdE0N/C0ZicmWQYTBWD3oDetbTpTHSm08y2GMk7op6FSyRlvEfpA/DU3G/hqfGW4LB42QM8RyGx4b8EjpGmms8UB+Al5N/AX+dn+S8l1jPGLww7Gdwk3i6x1/njbfSf4RQQ4AkBgFBjEco3kh/Cl5aae53H5073h/zKGI9gyUzotwD8K+xj8Ff73qxAABPrR6fTrofyb5hIHCdicSAIMzNB+umPIRQiYmoBo0Wn864GyODonpepwsGIQgyeODr2feJJqn2lY5l8e85yzE5MubG6xQTSwh89QZsWLQG8f6uFyTA6bH424JFmBUbd+N1umJ46nT4YtlKpIrkhvQVQwjeWDwXS5OHSWYYNBp8dPcyjIyS1kWYEIJXFs/E6lFp0hg
MgU7D4m9rl2BCnLgHrK9+vmgKHpqaeeMarhgsy+CN+xdgRqr01vOPLxyHHywYK5nBEIJX75+DBWOGSWbcO28kfrR6smQGIcAvHpiB5dOld/i9a046fvboTBAClyGU3tfwxH1TcI8Eb0ivZk9PxvPPzAfDEMmMh+6dgIfumyiZMXFKIl78zTKwLOOydklviGXVPePw2JPSDbHM8XF45Z210Oo0khkLVo7Cj3+1+Bv3hgAAoWoC3t+w5LQRFpPJXo+ijvW41rENDmoCgQYU3I1FnYJDgD4Z8b6rEOM5W/A4rZiM9hZktW3D5bZdsPJGMGB7KqI624Hz4BCsH4IM/yVI9pkp2dDpqy57B04278Op5v0wcf0zQvSRmBw0F6P9p0LDyM8BMDpMONBwBAcajqDD3gkWLHjwTgJx5jiEGkIwJ2Q6pgZPgo6Rf3zS5OjGztpT2FZzAi22DrCEuZF3QQjpYfhjScRkzA8bD4OC0zDdDhs2VZ3DhoozqOtu75cRYvDBqkHjsCxqNDy18o9KWzkHNpRn4fNr51FhaoWGMDfcxQwhcFAeQQZPrI0dhTWxI+Gjk3/ixsZx2Fiai48KL+BaZws0hAHtyUxi4GT4691xb3wG7k8YiQCD/HwJB89jc0k+PrpyCQWtTf0yfPQGrB02HA8mj0CwuzSDra94SrG9qAAfX85GbkN9j8eqh0EIHDwPT50Oa1LS8MDwDIR7yf+9U0qxp7AYn1zIQVZNrXNhoLiJ4abVYuXwZDwwMgPRfr6KGIcLy/DpmSycL6++sfhQip428Tz0Gg2WZiTh/nEjMDhQmlF4q04UXMfnJ7JxpqjixsLAUwqGMOApDw3DYtHIYbhvcgbiQqUZhbfqXGEl1h3Owon86yDEabb/h0HBMgRzMhNw74wRSIxSdmoiu6gaX+7LwvGsUoDgJgalFIQA00fF4545IyR5QvpTfnEdNuy6iCNnip2eBYaA5ykYpqfJGwUmjY7DqgWZSE9Sdtqx5FoDvt56EYePFYDjKJhbGJRSjB01BCuWjkRmhnTDs6+ulzVi8/rzOLT/ChwODgzD9DAIKKXgeYqRo2Nx18pRGD1OuuHZV1Xlzdj2xRns354Nm80BlnUyej9/juMxfNRgLL1nLMZOTVRlhMhZv/+/MER65eAtqDAeQIslHza+CyzRwcAGIMZrNvz08aJ/SykHjtrAEoPoh+PgbSjuOoka8xVYeCNYooE764sE7ykIc0sQZfA9DI1LhgN5HRdQarwKM2cCQxh4arwx3HcMYtzjRf+Wpxwc1A4t0YvO4yiHrLbLuNJRABNnAgMGXlpPjPTLQKKXKwYPO2+HjhGvKcBRHhdbC3ChtQBdDjMICLy17hgbkIJ036Gi+SQ85WHj7dAxWpfzzjeX4mRTIdptZgAUPjp3jA9MwNigoaIhHEopLLwdekYjyqCU4nxzBQ7VFqHNZgZPKXx0bhgfPBhTQ+OhEQmTUErRzdlhYF0zLjXVYG9lIVqsZnC80zgYGzIIs6PioRU4TdSXoWc0oiEbSikuN9VjV1khWrrNsPE8fPR6jA6NxNzB8aKhGEopLJwDOoZ1GRbKb2zAjpIiNJtMsHEcvPR6jAgLx4Kh8TBohA1bSiksDge0LCt6TwGguKkZ2/ML0Wg0wepwwEuvR1pYCBYmJcJdNzCM682t2JZTgMYuI7rtDngb9EgMDcKitER4GoQTOimlsDo4sAwRTWAFgKrmdmy/eBV17V3ottnh5abH0NBALMwcBh93cePZaneAEKdnRky1LZ3Yee4qals6Ybba4Ommx+BQfywckwQ/T3Hj2Wp3OENpLhiNrV3YdeoqqhvbYe62w8NNh+hQPyyYmIQAH/FTHTa7s2KyTiQRFwBa203YczQflXVtMJutcHfTITzEF/OnJiMoQDxPzu7gwPMUep04o73DjP2H8lFR1QKTyQo3gw5hoT6YPSMZoSHCya8AYLdz4KlrRldnNw7uu4LyskYYjVYY3LQIDvHGrLl
pCI8QN2wdDg4cx0PnIifEZLTgyO5clBbVw9Rlgd6gRUCwF2YsTEdUjDLD9lbdMUQGSCZ7Fco7v0Zl13bY+HYAvfVFEhHrczciPGaDFSgUJlVd9nrkt+9EQcdudHNtNxj++sFI9b0LQ72nqy6e1mFvwcWWA7jQegBdjl4GQaA+AuMC5yPddwr0AsXIpDM6cKLpGI43H0WrrfUGI1gfjGnBMzA+YALcNeqOkXXYjThYfxZ76k+i0dIKCupkGPwxP2wSZoaMhbdWHaPTbsae2kvYXHUGNd29DCDY4IslkWOwOGI0/HTyPQJ9ZbRbsKP6Mr66fh7lxhbwPYwggxdWDMrE8kGZgrVMpMrssGF7RT4+K76Eks4mcNTJCDB4YFXscKwZkoFwD/EHpytZHHbsuF6ITwqyUNDaeKMMur/eDSuHpmJtYjqivXxVMaycA3uuleCT3GzkNdbfYPjqDViWmIS1KcMR6yeeKO1Kdo7DgWul+DQrGzm1dXD0lO300uuxJCkR96QPR3yguoezg+dxtLgMn5/PwcWKmhsMD70O85Pjcc+o4RgWqq5WA8fzOFlUjnWnc3CutAr2njoY7jotZqcOxd3jhiM1Sl1tIp6nOFdcia+O5+B0YcWNRn4GnQbTU+OwetJwDB8cpmonTSnFpaJqfH04Bydzr8PaY4jotRpMTBuMldPTkZkQqZqRV1KLTftzcPzCNVhsvcYOizFpMVgxOx0jUwapLpxWUFyHrbuzcexUMbp7KtJqNSxGDI/GsoUjMCojRnUp+WvXGrB9axaOHLkKs9lZkVajYZCaGoWly0Zi3Lg41QwlumOIqFS3owE5Tb9GY/dpELCguLVrJgOAh4Z4It7vEcT5PCD7R2F2tOFow5soN54GAemnsR0BQKElbkj3X42RAffJDhl1O4zYVvNPXOk4DYgydBgXuBAzQ9eAlRkysnDd+KLyc5xrOdPjzu//66QhWkwNmoYVkatkh4wsnA3/KtuEgw3nwFO+XwYBAUMYzAoZi0dil0EvM5xj4x34a/EubKs+Bwd1ft63UpwMgtmhGXg6cYlgvREhOXgOfyk8hHVl52Dr6YtzK6M3T2N2eDJeHL4Q3jILqPGU4t0rJ/BB4Tl0c/aeT/gWBnG6emdGxOO3o+YhwCDPeKOU4u955/C33LMw2m0939+bKSwh4CnFlIhY/HHiHIS4i+9I+2N8nJuNd86fRofVCqbnercyOEoxPjIKf5w+F5EKnhFfXc7FmydOoa27W5QxMjICv58zW7TwmZC25xbg9QPH0Ww037jeTQyGgOMp0iJC8driWYgPlm/07M8rwR92HEVDh1GUkRgehFeXz0JypPxTi8fzy/CHjUdQ09J543r9MYaEBuClu2ciPVb4WKmQzl+txB8+P4jKhnZRRnSIH56/bwZGDZNfvPJyUQ3++K8DuF7dIsoID/LG0w/OwIQR8o+uFl2rx+vv7sO1skawLLmti29vaCc40AtPPjINUyaIe8v70/XrTXjjT7tQWFAnyvD398D3fzAds2a5TmwdSN0xRFSoy3Ydp+oegY1r78cA6V9RnouREfSKZEOh01aHrVU/gcnRLLmzbqznFMwK/xVYIm0R77C34MPSl9Bqa5DMiPcagbWDfgGNxLyPTnsn3ix6HXWWWkl1SAgI4j0T8KOhP4VeYtl3o8OMl/L+imvGKkk1WwgIhnpF49WUJ+ChkbaImx1W/Dz7I1xuL5fEYEAQ6xmKdzIfha9O2iJu5ez48fmvcLrpmqQTgSxxJtF+OOEByd4RO8/hx6e3Yl91kaT5LCEIc/fGF9PWItLTV9LfcDyPn5/cg82l+ZIZgW4eWDd3NYb4CBc/6ytKKV46dgifX7ksmeGjN+DzpSsxLDBIMuNPx0/g/fPSuoqyhMBdp8PHK5dheJj0XIa/HTuLvxy9vcFnf2IIgV6jwftrl2LUIOm5DJ+cyMLrO49JZmhYBu/evxgTE2IkMzaeysVrGw7dyLlxxWAYgj89tAD
T06TnMuw5W4CXP9wLSqnLY8zOEyAEv354LuaNlZ5Ee/RCCV58Zyd4nro8/kt6/tcvHp6FJTNcH43u1fms63jhtS1wcLzk4m5PPDwNq5ZKP52Se7kSzz+3ATabQzLjoe9Nxr33iddiGkjJWb//T5+akSuLoxmn6x6TZYQAQJVxO/Jb/yyNwXVge/UzMDlaJBsIAFBmPI7jDW9LKqZl4cz4uOxVtMkwQgCgpCsbG6v+Itipt6+snBXvlLwl2QgBnKmJxcYi/LPsb5IYdt6O1/LfR6mxWnLhOAqKa11V+O3VD2CX0I3XwXP4Ve7nyJVohADO7rllpgb8PPsjWDnXTdd4yuP5rM0401QquSwBRymqzK34wZnPYHK4bjhFKcULF/Zgv0QjpJdRZ+7E/Ue/RLtVWhPH35w/ItkI6WU0d5tw794NaOo2SfqbN8+dkmyE9DI6rBbct+1r1HRJ69j9/vkLko2QXobJZsODX29GeZtwR9W+WnfhsmQjBHB6s6wOB77/xVYUN0rrdrrt0lXJRkgvw+7g8NSn23GlWrjzd18dvFyC36w/5EzKlMjgOB4/+/cuZJVK64R+Ou86Xv7XXvC8ayMEcCaI8jzFy//aizNXyiUxsguq8at3doLjeEk1SGgP5w//OoCj50skMQqK6/DL17bcyDmRqr9+eAR7D1+RNPf69SY8/9wGWK3SjRAA+Ojfx7Ft6yXJ879N3TFE+uhq6zuwci2yjJBelXZ8hjZLnst5F5o/QZe9QQGDoqBjF2rM2S5nHm/cjCZrjexqqRQUeR2nUNjp+gF9qPEAKs0Vihi5HZdxrvWsy7n76k/jamepbAYPHnkdJThQ73oR2F+fg3MtxeBlVi7iKY+CzmpsrDrlcu7hukIcqLsqm8FRHqVdTfj3tZMu555qKMem67my6y9xlKLa1I53810zshpr8XGB/AcZRykau43406XjLucWtTTjrxfPKWK0Wyz47cmjLudWtXfgjeOu3++t4imF2WbDKwcPu5zbZDTht3uPKGJYHQ68tOOgy7md3Ra8stn1vFtF4exL88v1+1xubLptdrz0xX7ZRemcizjFLz/d63KxdDg4pydEQfUwCoqX/rUHDof485TnKV792x7wvNxfoVOv/WMvLFbxTQelFH94ew84jldUmO7N9/ajy+i64/qbf9oNm82hqMLzX987iNZWo/wX9w3rjiHSIxvXjmrjHkVGCAAQsLje+bXoHDvfjYKO3bK8FLcy8tq3is5x8Hacb92vgsHgbMtu0Tk85XG48aDi8vYEBIcaDojOoZRiR430nd7tDGBH7TGXP9aNlacU9wuioNhYedplWfl118+BVcjgQbGh/KJL786nxRcV99nhKMWGshyYHeKt6D8rzFLF2Fp6FR1W8Qft53k5soq43crYV3YNjSbxB+2Xly8rLnPOUYpT5RWoaGsXnbcx64riNvI8pciprkNRQ5PovG2XrsLuYgEWY5Q2tiKnok503t5LRTBZbIp+6TylqGvrwpmiCtF5R7NL0dbVreh+UQq0dXXjaE6p6LxzueWob+5U/JmYum04eEbc23iloAblVS2Ke+3YHRz2HRb3NpZea0BBQa1iBk8p9uyW7m38tnTHEOlRZdc2xUYI4KxFUm3cAxvXLjinuPMAHNS1m12MUW48CaNd+AGV33EG3Zxyi5eCR6kxF83WWsE5uR2X0W5vV8GgKDdfR6VZ+AF1peMaai1Nijv5UADV3Q242in8gCroqEZRV42qfkGN1g6cay4WHC/rasKFlnJwKhjtNjMO1RUKjteaOnG4tkRVnx2zw46dFVcFx1stZuwoK1TFsPMcNl4Tdj932azYWJh/W6KlXH11VdgzaXU48OXlPFUMhhB8dTlXcNzB8/jiQo6qUuosQ/DlBWEGpRSfn8pRfP0bjDPCixKlFOuOZavqe8QyBF8dzxGds/5QtqpGkgxDsP6QuKd44/5sVZ2HCSHYsDdLdM6WXdnqTqdQYNOOLNHN0/b
t2Yr73wAA5Sm2bs1S3Vl4oHXHEOlRtXEv1DTJAwAKO+rNJwTHSzqPQE1HYCcDuG4UdivndZxW2RHY6RXJ7xAOnVxqu6CqWy/gLN1+sfWC4PjJ5mxVnXQBZ+n2k805guNHG/MGhHGkUXjBOFh3VfEOv1cMCPbVCi/g+2uKoPZ7RQDsrBQ2RA5WlcKhwggBnN/dHWUFguMnKitgcbjO6xETTym2FwsbbeeqqtFlVb4ZAJxeke0Fwozcmno0G83qGDzFrivCjOL6ZlS3dqh6YnE8xb68YsHddU1rJ4prmxV7EXoZJ65ev3E89la1d3Uju6RGldHG8xTZxTVo7+o/z8liteNMznVVnYcppSipaEJdU4fgazh2uljVAk8B1Na3o6xCOD/oyOGrt52OkavWFiMKC4Q3mv8N3TFEemTlWlRfg4CBjWsVHDdzrVBr7DBg0C3idemyt6na4QPO0InJ0f8PDnDWDFHTrRdw7jCMji4RRpeqhxPgfHh02oUZbTb1sVKO8qLXabWaVBuGPCiaLcKMFotJtbFDATRZhJNJW7rNqhkARBNWW7rNKu9Uz3XMwkaA2JgctXULJ/e2mgaG0WW1geP7/521qjR0euXgeJis/YfkWrsGhkEp0GHq/34NFAMA2gSu1WFUFvbpT60d/TNMZiscjoHxMrQLMDiOh8mkzoi+wWgfuPs+ELpjiPRITVjmPyLgqfCOTmxs4BgD8T4AToTBDQCDUnqjXkf/jP7rhchiADcKRwkxVCIAOE/eCI6p9CL85zpi94oOyAJuF3kfHOVVG1ROhvD9cPD8gPS1ELvnQgu7XIldR+w7N1Ach4rd/e3X6p8xUPcKEP7cBzJE4BC4lloPgjTGAL4PAYPm27hX/y3dMUR6pGXkFVzqTxQctKzweWmDyJh0Bg89K/xa3TXq3wcAuLHC1UM9NZ4DEP4h8BCptOqpcVcdNmFA4ClSEMxL46Yq/t3L8BZpyCe3IJmQxOqVeGv1sk/k9Cc/vfBr9dbpVeWH9MpXL1yJ2FuvV+0FA5yvVYwxEPIUuY63SHl3OdKxLHSa/usG+bgNDAMAvARer7ebuqrRN1+rf4aXx8AxhK7l5TFw98pbgOHpOYDvw7P/16vTaaDVyu9R1j9j4F7vQOiOIdKjILexEjr0ulagIVNwLNJ9hITuueKi4BHuJtyhMtYzZQBCARwGeyQLjid4Jar2VnDgEO8p3N8nxSdO9cLHgUeyt3BBpXS/WNUeCx4U6X7ClRdHBsSoZhAQjAqIERwfEzxIdYInQwjGBQszxoZGqzZ1WEIwIVy4Idjo8EjVPheWEEyIEq62mREerioxspcxLjpKcDwlPAQ6Fz1kpDBGDRLuDDw0NBAeevkNJ/uKIQRp0aHQCCRYRgf7uuw140oEQGyoP7wEDJFgX0+EuugDI0VhAd4I9u1/8+TprsfgiADVmw5fbzdEhfr2O6ZhGSQlhKkuCe/upkPsIOGifGnDo1QztFoW8QnqSv0PtO4YIj2K8V6pMjzDIMCQCS+d8KKU7LtI8bFapwgC9XEIcROuJJjpP0OlIULgrwtFrGeq4Iyx/uOhJeoegj5aH6T5pguOTwzMgDurzmr30LhhYlCGMCNomOq+MQZGizlhIwTHxwYNRoS7r6pPhCUEd0ULv4/0gHAk+ASpW8QpsCZOmBHvF4iRwRGqFnGOUtybmC44HuXtg8nRMapyUThKcX+q8PsI8vTA3Pihqhn3jUgXHPc2GLA4bZiqUxocpbh3jPD7cNNpsXxUqioGTynunSDM0LIsVk1MU/WZUwBrp2QIhtwYhmD19AxVRgIhwOoZ6YILNCEEK+dmqMoTYQjB8lnp0Ig091u2cITiY7WA814snJMGg0H42br0rpGqGCzLYPbs1AH14AyE7hgiPfLWDYG/YQSU3xIesd53i87w0oZikMdYFV4RilS/ZaIzPDW+SPEZr+pUy7jABaKxeneNO8YHTlDMICCYHjxTtK+NntVhTqhyBgMG80I
nQidSrl7DsFgWOe5Gfxe5YgmD+REj4SHSc4YhDO4ZPEbR9XsZ8yJS4acXDs0QQvBA/CjFHguWEMyIGIowd/HQ4QPDRigOnTCEYFxotMsy7w+kZSj27jAAUoNCkBIs3kflvhHpihkEwGA/P4yOFC/BvmZUmqpTGiFenpgcFyM6Z/VYdQwfNwNmpYiXYF8+TnhDIkVuOi3mj0wUnbNoYrLLjs1i0jAMFk4Q9uACwJyJw2Bw0fXWlRZPF78XUybEqwp58DzFknnponPGjBmCgADlmyeO47F4qfDG6b+lO4ZIHyX5P6XIm0DAwlefglCPqS7njg58CISwkHvckoCFv24whnpNdzl3WsgqsEQr+70wYOCvC0Gmn2vG3NAF0LN6RQxfrS+mBE1zOXdJxDR4atxkGwoMGHhpPbAoYorLucuixiFA7yU7H4UBgTurxz2DJrtmRI9AhLufIoaOYfFovGvGkkHJiPMOlL3TJwA0hMGPUia5nDs3Jh6pASGKGAwIfpbpmjFl0GCMDo9U5rEgBL8Y75oxMiICUwbHKN7p/2LqZJdJtclhIZifHK+Y8fNZk1wuzjFBflgxOkWxN+Gn8yYK5qD0KsTPC/dOU75w/XD+OLjrxRtQ+nq64XsLlBvrDy0YA18XISR3gw7fX6W8z8rq+SMQ7C8eQtJpNXjsQde/1f5ECLBwThoiw8WbKrIsgx887vr53D+DYNq0YYiLk9/w8JvWHUOkjwIMGRgR/Bqcj05pv24CFu6acIwNfReMhHBFkCEec8JfBgEjeREnYOGhCcCiqNehYVwnXgUbInFvzHNgCCvZ+8KAgbvGGw/Fvgw96zouHKQPwo/inoaGaCR7LRgwcGPd8HT8z+GpcW3VB+h98OvUJ6BjdLIYelaHV1N+CH+d6zb3vjoP/HnEI3Bn9ZINBYYw0DIavJHxEMLcXLeg99Qa8P64++GjdZPOAAHLMHh39D0Y7Om6G6tBo8XHU+9GkMFT8iLOgIAlDN6bsAxJfq4fTlqGxUezViDS00cyw/lLInh7ykJkBgvnPNx4TYTggwVLMMTPXyYD+MO02ZgQJZyDcmM+IfjL4oVICg6WbSi8OGMaZsYNkTT390vnICNKfk7KT6dPwMJUcS9Cr361dDomDB0k2xh5dNoorBwjzdvxk8UTMXN4nOwt2prJ6bhPohHz6OKxWDA+SSYBWDghCY8uHitp7t3zM7Fidrqs6xMA00YPxRP3SDMwFs0ZjrUr5RlVhACjMgbjpz+YKWn+9BnJePgR15usmxkEqamRePa5hbL+7tvSne67/ajBfAIXGp4FR3vLUffXdp4FBQd/QwbGhLwNHet60euralMW9ta+BBtvAvpt1v4fRpA+AQsifwd3jetFr68qTUX4rPx3MHNd/bZqB5wLNw8eIYZoPBDzK/jo5LUgrzCV4y8lf0aHo8MlI1gfgp/GP4MgfbBMRh1eyf87mq1tYED6PSHyH4Y/Xkl5HFHu8pKxqs3NeCbr36jubhFhOP89QOeFN0Z8D/Fe8tqc15nb8cNzX+BaV2O/rdr7Mvx07nhvzFqk+U5lZusAALbsSURBVEnvwgoATd1GPHJ8A6601Qsyer9t3lo9/jFpBcYEu168+6rN0o1HD23GxcYalwwPjRbvTVuCaZHyWql3Wq14Ys92nKyuFGUAgJ7V4M+z52PukKGyGGabHT/duQuHSssEGb0cDcvi93NmYWmyvMXS6nDgl9v2Y9eVon5bzveKIQQMIXhx/jSszpTe6RUA7ByHVzcfwuaL+S4ZAPCzBZPxwCR5Xg6O5/Gnzcfw5fEclwwKih/OH49HZ4+WdRyb5yn+tuUkPtl9AaSnhX2/DIaA8hQPzB+FH941UVbyJqUUH285hw82ngIhwoze97hyTgZ+fP9UWaEjSim+3nYJf//oKEAhGM7sZSyYlYqnfzhLNP+kP+3ckY2/vLO/p1GgAIMl4DiKadOT8OwvFkCnMjwlR3LW7zuGiIDsvBHVXbtQ1vk
ljPbym8YIGIR5zMRg79UIMIxQXPvAznejpPMQcts2o9V2/RYGQYznBKT6LkWEuxqGFXntp3GmeRdqLWW3jSd4ZWJs4DzEeaaDUXhc1s7bkdV2EYcaD6LMdHtJ9STvZEwPnok0n+GKGQ6ew9mWXOysPYb8fsq2p/jEYWH4FIzxT4WGUXZiwcFzONNciK8rT+FSW3+MaKyImoApISnQMcp+0BzlcarxGr68fh6nGktuM3eSfMKxNnYMZocnw8AqSwjmKcXphnJ8VnIRh2puZyT4BOHB+FFYOCgJ7hpxt7mQKKU411CFTwuysbei+LaH7RAffzyYlIm7hiTBU6vs+CSlFFn1tfgsLwe7rhXfVu8i2tsHDw4fgeWJSfAWORbsSrl19fg8Owc7Cgpvq3cR4e2N+0akY3lKMvzclJ8gKahvxJcXcrH18lXYuJuT4kO8PLF29HAsz0hBgIfwUXBXulbfjK/O5mLLxXxY7DfXAfL3dMOacelYMToFwd7KcwzKG9vw9clcbD6TB/MtTeB8PQxYOXE4VoxPRaif8pMw1U3t2Hw0F5uP5cHYfXMBL083PZZNScWyqWmIDPJVzKhv7sS2Q7nYfPAyOm9pNOdu0GLx9DTcNTMN0WHyNn991dxixM79udi6KxtttxQp0+s1mD8rFUvmpmPwIHmbv75qazNhz+7L2LrlElpabi56qNWxmD07FYuXjPivhGPuGCIDKEopOmxFsHCN4HkrNIwXvHVDYdAIJ91ZHU3osObBwXeCEC30bAB8DZmCoRtKKVqsZTA5muCgVugYD/jrY+ChEf6Cdjva0GS5CivfCQYsDKwfQt2Gg2WEF5ZGSxXabE2w8xboWXcE6SPhK+IB6eaMqDYXwNLjUXHX+CDKPRlakfBQvaUOzdYmWDgL3Fh3hBhCEKgXPo7WzZlRZiyGydHVU1vEC0M8E6AXOTFT292EekszzA4L3DUGhBkCEeYmxrCgsLMUXQ7nD9VL44EEryFw1wgvLLXdragyNcHEWeHO6hHm5o9BHsIMC2dDbnsZOuwmUErhpXVHik8MvERqjNSZ23Hd2Ayjwwo3Vodwdx8M8RL2Flk5O7JaK9BmM4GjPHy07kj1i4SfSI2RenMXrnU2o8tugYHVItzdG/E+QYKGrY3ncKm5Ei0WE+yUg4/WDWn+4Qg0CC9ejWYjitub0WmzwsBqEOLuiST/YEGGg+dxobEKTd0m2HgOPjoD0gJCEeIuvHi1dJtR2NyEDqsVepZFsIcnUoKEGRzP41JDLRpMRlg5B7x1BiQHBiPCS/g50t7djauNTei0WKBlWQR5eCAlNEQwtMJTiuy6OtR3daHb4YC3Xo9hQUGI8hH2jnZZrMiva0BHtxValoG/hztSw0MEd9yUUuTW1qOmoxMWuwOeej3igwMQ4y+cS2Cy2pBf3YCObgtYhoGfuxtSokKgFThSTClFfk0Dqts60W23w1OvQ2yQP4YECz/jum125Fc0oMNsAUMIfDwMSB0UCq3Arp5SiqLaJlQ2taPbZoe7XoeYYD8MDRN+/ljtDuSX1aPT5DQUvD0MSI4NhV4rvAm4VtOMioY2mCw2uOu1iAr2RXyk8Pfd7uBw9VodOowW8JTC28OApCGhMIgcjb5e04Ly2laYum1w02sRHuSNxMEhwt93B4fCknp0dHaD43h4eRmQMCQU7u7Cz+qq2jZcr2yG0WyFQa9BSJAPkoaGCn/fOR5FRXVobzfDYefg5WXA0PjQ/+rpmDuGyH9BlFK0WS6guvNLNJj3A7cc09Uyfoj0vhuRXqtg0Cg7w00pRZMlHwXtm1HWdei248Y6xguJPkuQ4LsEXtowpW8Ftd0luNS6C/kdR2+rsKpj3JDhNxcj/ObBXy8vNNFX1eYKnGw+iPMtJ2CnN++sdIwe4wKmYmLQTIQalDOqzHXYX38cRxpPw8rfXMZax2gxJWgs5oROwSAP17kLwowmbK85jd2152Dmbt69aQmLWaGZWBI5AfFe8kIsfVV
jbsPGygvYWHEBXY6bd28awmBueCpWDRqDVN9IxZ6zOnMH1pdlYV3ZJbTbbi7HzRKCORHDsHbIKIwMjFLMaDQb8dW1y/i0MAvNt5STZ0AwK3ooHkgYgXGhgxQzmrtN2FB4BZ9eyUb9LV14CYBp0bG4PyUDk6OUJ6u2W7qxMT8fn2TnoKar87bxSdGDcH96OqYOHqz4NEiXxYoteVfx6fkcVPbT6XfMoEjcOyodM+KHQKOQYbLasCOnAJ+fyUFZ0+2tKTKiw7B2XAZmJsVBJzNs0Ktumx17soqw7kQ2imtv76GSHBWCeyalY3Z6vKiBISar3YEDF4vx1ZEcXK1ouG08PjIId09Lx5zRCXDTKfMy2h0cjlwowYYD2cgrub1jcUy4P1bPzsCc8cPg4abMy+hwcDh5oRSbdmUhJ7/6tvHIMD8sX5CBuVOT4TmARdq+Kd0xRL5lOXgjLjf8GK2W0zfyOvqX84GREPBLRHuvlcmw4Gjdr1FpOiHKIGBAQTEy8DGk+q2V9UB38HbsrH0b+R3HwIAFL8rgMTHobkwOksfgqAMbKj/G6ZYjN/I6+lPv2PTg+VgSsUZWSIejPD4r34RddYclMWaFTMLDsatFjxPfKkop/l22F59XHARDGPACRctYwoCjPGaFZuLniauglRHSoZTi36XH8V7RQWc8WygO3MOYEZqE36avkB3S+bTkPH53eT9AROLZPYxJIUPwztjlssMt60su45dn94GCumSMCY7C+9OWw0dmuGVbSQF+dmQvOMqLMJy5IMODQvHv+csQ4CYvFHKg9Bp+vHs3rA6H4HHpXkZCYCA+vmsZQjzlhUJOllbgyU070G1zGuj9cXoZMf5++Pc9dyHSV16O2sXr1Xji8+3oslgFMtSc+R48pQj39cYHD96FwUHywhRXKuvxxPtb0WbqBiHot45HLyPI2wN/f+wuxIcLex37U3F1E578yxY0d5huXOt2BsBT5+mcd59aiuQYeRvBirpW/Oj1zahv7gQjkLvSew893fV44+klyEiQt/GobWjHz17dhKraNmFGz2PWoNfitV8swej0GFmMb1t3DJFvUQ7eiAu198JoL8GtXhAxDfH9EWL9HpfIsGBP9Y/RbCmQVRAt1e8ejAr6oaS5HG/HV5Uvo8KUJ6tq6gi/eZgb9kNJxghHOfyr9M+40pkDOU1eRvlNwL0xP5BkjPCUx7slH+Nks3Bn31tFAIz0H45nEr4v6VQLpRRvFn2NXbXnZDAIRvgNxR+GPyI5h+Wtq3vx6fVTkhkMCNL8ovCPMQ9KNkbevXoM7149Lp1BCBJ8grFu6oPwkJhf8kH+Ofz20hHJDJYQxHj5Y/O8+yQbI19cvYwXjh8QXFT7Y4R7emPzXfcgyF04tNVXWwuu4pm9ewEZjEB3d2xecw/CvKTlTOwvvIYfbdoJgEJKiRCWEHi7GfD1g3cj2t9XEuNUSQUe/3QreCpsFN7KcNNp8cVjqzE0RFo+w6XSajz2j81wcMJG4U0MhkCn0eCjJ1ciKUpaPsPV8no8+uZG2BwOSfVUGEKgYRn8/afLkREnzQtaVt2MR179CharXTKDMARvPbMUY1NjJDGq69rwg1+sg9FkkcQghIAQ4DfPLsbkMfIStL9NyVm/7xzfVSFKKS43/Fi2EQIApe1/QZ1xp6S5x+tfk22EAEBe2zoUtm+VNHd33V9lGyEAkNW2B+datkiau6X6C1zpzIbcTnMX2k5hb500xoaqnbKMEPS8mgutl/F5+WZJ87+qPCLLCHEyKLLaSvB20SZJ8zdUnJdlhADOcvO5bVV46bK097G1IleWEQI4PSZFHY348ZmNgpn6fbW3skiWEQI4q4qWd7Xi0SMbJS1ix6vK8avjBwBI/2ZxlKLW2ImHdm+GnXNdUflCTQ1+tm8fqExGs9mMBzZvgsVhdzk/v64BP92yC5RKM0J6GZ3dFjy4bhOMVtedWa81tuCpL7aLeo36Y3Tb7Xjko81oNwt3He5VdUsHnvx
gm2QjBAA4nsJqd+AH/9yMxg7XXbGb2o144i9bYJVohADO766D4/Gjd7eiplm4u3iv2ru68dQfN0k2QnoZPM/j2be343qN647uJrMVP33la8lGCOBcdyilePmNHSgqvT0U9b+oO4aICrVbL6HVchpyjZBelbS+CeqiD0mLpRjlxqOKS8NfavkAHBV/CLZaa5DbflBx/5jjTetg4y2ic9ptrTjetF/R9QHgQMN2mB3CLeQBoMtuxNYa5YzddYfRZhN/QHU7rPjkujIGBcWuunOoMd8eK+8rG+fAe0UHFTF4UOyvu4LiznrReRzl8UbeIWUMSnG8oRTZLbfHsfuKUoo/XDqqqG4tRynON1bjRO11l3NfPyfPmOrLuNLcgAPl11zO/fNpeUZhX8a11lbsLCp2OffdE2edRzEVMGraO7E596rLuR8cvQA7x8kud87xFC1GM9afz3M595Mjl2Cx22VX4eUpRafZii9P5Lic++XhbHR1W2WXO+cphcVmx2cHLrmcu+VwLlo7zLKr11LqzPf4ZMd5l3P3HM5HQ1OnIgbPU3yy4Yysv/uu6o4hokJVnetUNcqzcvVo6T4pOqegfbNKRgcqjOIP6kttu1U147PzFlztEGecaj6s+PqAM6xzrlWccaTxjGCuhhRRAIcaxBecAw1ZsPKud7dCYsBgR634w+NgfT467a53nkJiCYOvK8QfgsfqrqHR4nrnKcb4ovSi6JyzDZUo72pTVXr+06Is0Tm5jfW40tyoivHJlWzROaWtrThbXa24vD0B8HG2+Puo6+jCkeIyVc0LPz2fI+qlajN1Y09ekeKy8DylWHc2Bxwv/BszWWzYej5fFWPD6VzYHA7BOVa7A5tO5CnuucLxFNtP58NksQnOcXA8vj6Yo/gz53iKA2eL0N4l/DumlGLjriy5DuIb4nmKkxeuobG5S9kFvkO6Y4golJVrRqNpv8pGeSyqOr8QYXThWtc+VQwCBgVtwuEAO29FTtt+1c34LrRsFxzlqAMnmpR7XACnN+FY4z7BBy1PeeypO6Kasbf+KDgqfL83V51Q1VyOB4+dNWdh44SNmS/LzyrufwM4vR3bq7NhtAt7qT4rvaCy8RuPPdVX0WoV9lJ9Wpilurnc4eprqDEKe6k+z89RzThXV41rbcJu9C9yL6tiUABXm5pwuV7YS7U+O0/xSaFeRmVbO86WVwnO2XzpiuouzU1dJhwrEvZS7bxUAJtd2IiQoq5uKw5cLhEcP3ipBF1m12EoMVlsDuw5VyA4fiqnDC3t4h5YV+J4ih3HrwiOX8qrRE19u6qu1oQQbN9/WcUVvhu6Y4goVKc1T6URAgAc2izCLsJWawl4F2EVV6Lg0Wi5IriAN1urYOOV7757KY3Wcjj4/ncYLdYmmDj1VnuLrQlGR//X6bB3odnWpprRYe9Cs/X2o4yAMyxTYW5Q9eAAABNnQaW5qd8xnvK40l7Tb2VXObLyDpR0CcePLzVXqV6UHJRHXuvtRxl7da5BPYMCyG6uFRw/U6ueAQCX6oUZ56qrVTMYQnCpVphxoVK5x6VXLEOQVS3MyKqolZTXIyYNwyCrokZwPOd6rapuvTcYZcLvI6e0BhpW3dLFMAQ5pcKMy8XqGZRS5BYLM/IKasCy6u4Vz1NcvioeIv1f0B1DRKHs3O31A5SIo2bBPBEbr9x13lcUPBy0f2PDwg0MA3AWQOtPZs7c778rY/S/SzE5Bo5hFLiW0aHWYHPNMDlsqrw6fSUU3uEoD4uIR2YgGABgsqvbtfaqwybs2emyqWcwhKDDKszosIjnP0lldIow2rvVMwgIOi3C96PdrJ4BOD0WQuo0W1QbbTyloowus/zckNsYPEWnSZhhNFsH5FcoFpoxmqyKmqzeqo6ugflc/5u6Y4golJQGd1LkzP/o/8vIYGAYgPDrZcnA9R4QupZGRn0OpQw5NUBcSSPEUFg6vl+GwLWUFqfqT1oBBtPTcvGbZABQ1dpdKmMg7helVLDqqCu
+HIkyRMYGjjFAn4cYQ8Oq/mYRIv5atRpWccfhG4ye6whJw6p/HwCg04rfq4HQQF3nv6k7hohC6Vjl/QH6Ssv6CsaG3TTiLaElM4g7WAFDxGOAGM4y8/3XY/DSyCu2JCZPTf/1GLy1yntb3CofgWt5a9wHbAH3E2AYGC0MzMAYoP66/gtpEULgq1PeN6WvAvTCxboCDMr7pvRVkEG4zofUGiBiogACRQqbBXt6qP7UHTwvygjx8lAd0uApFe1VE+QtvTOzkCil8PcUZgR4uoNRbRwS+HsJM5xj6t4HwzAI8BZh+LirDmMxDEGgr/D308/XXbVnhyEEgf7Kewd9V3THEFEoX0MGtIzyhkiA0xsS5rlYcDxAHw9PheXg+zJivWcJjvvrwhGoj4aaHzYDFoneE8AIeCV8dH6I8YhTdTKHAYNk73TBHjQeGjcM900Co4pBMMwrDr66/ovvaBgWk4JSJRU9ExIBwRDPcIS79d/HgxCCueFqGUCkuz8SvIW/O4ujU1UvSoEGD2QECFeQvCs2WfXi6q3VY1yYcHfgpUOHqTYSDKwGU6MHC44vTkhU7abXMAxmDhkiOL4gKUF1jgilFHMS4wTH56XGqw6bcJRiXmq84PicjATRUzWSGDyPeRkJwoyR8QPCmD1S+H3MHJOg+ORPr3ieYuYY4fcxdVy86jAsTylmTkpUdY3vgu4YIgrFEC2ivO+BmltIwSHSa7XgOCEMkvxWQI2RQMEh0XepCINgtP9iKD5DBoAHh5EBC0XnTA2aq+pkDg8ek4Nmi86ZFzpVsJy7NAbFvLCponOWRk4Ep+qIMMWyyImiJyRWx4xRxQCANTFjRRl3DxmhalFiQHDvkFGioZE1Q9PVfK3AEoI18ekwsMLhw5UJKapCQCwhWJGQDC+dcMn6JcOGwU2jPITJEoKFCQkIcBfegc8dNhTeBuX9Q1hCMC0+FuE+whUspyQMRpCXcg8SQwhGD44ULfU+Ki4SUYG+ip9YDCEYFhksWl01KSYUiVHBisMzBEB0sC8y44WN6Jhwf4xIjATDKH/2Bvp6YHy6sIEbEuSN8ZlDwKpgeHkaMGXcd7e6qlTdMURUKMJrpeK/JWDhbxgPd2206Lyh3vPBKMzjIGAQZEhCgF78i5rsOxU6RlmXRgIGgfpoRLoNE5033HcUPFhPRclZBAT+ukAkeqeKzkv3S0aAzlcxw1vjiVH+6aLzhvvGIso9SFGIhgBwZ/WYHpIhOm+YTziSfSIUeRMIAC2jwaLIdNF5sV6BGBsUo9grQgjBysHi7yPMwxszo+IUM3hKsTZenOHv5o4lccMUMzhKcW9yuugcT50OK1NSVDHuGz5cdI5Oo8GaEWmKPUgcpbhvZLroHJZhsHZcumIGTynWjhNnEEJw72Txz8wV455J4gwAWDMjXXZRtr66e3qGy+PSq2ZnKA6dEEKwclaGSyN5+YIMxZ4XhiFYMmc4dAqbBX6XdMcQUSGDJhiJAS8q+EsGGsYTwwJfcTlTz3pjYshzyhjEgEkhv3Q5U8cYsCjiadkEAgKWaLA44mmXP2oNo8EDg5+QzQAAhjB4IOaHLnvNsITBj+MfltUgr1cEwE/iH3bZB4YQgheS1kLDsAoMHoJfJt0DA+u6R8uvh98FA6OVzaAAfp12F7y1rnNAXstcAE+NXtHC9ErGPAQZXMemfz16Fvz07ooW8RdGTke0l6/Lec+Pm4wQD2X5Dz8ZOR6JAa4brf103HhE+fgoYjySmYmMMNddpB+bMBpDgwJkMwiA1RmpGD9YfFMDAPePH4GUiBD5DALMT0vAzCTh0E+vVoxLxai4KNnfK4YQTEmOxcKR4psaAJg/Zhgmp8XKZzAEIxOisHyy+KYGAKZkxmH2uATZ9V1YhiA5NhR3z3VtkI0cPgiLZqXJ9u6wDMHg6EDct3yMvD/8juqOIaJSUd53I87vp5LnE7DQMl4YEfoh3LVRkv4mznsOxgb95MYVXDMYaBkDZke+CV99jCRGovd
4LAj/Uc/CJ42hITqsjn4ZYW6uH04AMMw7DQ/EPAEGjKQF1snQ4JHYnyDWUzjWejMjDs8kPAoNkWYoOI0pBj+JfxipvtJirQneUfhd2sPQMRpJnhHS8z/PDluNCUEpkhhxXiH46+j74cZqJS0avTOeT16IeRFpkhjRnv74aPK98NLoZS1MP0+dgdWxIyTNDfPwxhez7oav3k0W40dp4/HwsFGS5ga6eeCLhasQ7O4hi/FwWiZ+nDlO0lwfgwGfLV+BCG9vWYzVKSl4btJkSXM99Tp8uOYuDA7wk7XALkhOwMvzpktaMA1aDf7xwFIkhAXJYkxNiMXvls+WxNBqWLz98CKkDQqVzCAEGBUXhdfvny8p1MYyDH7/6HyMTIiSvIgzhCB1cCjefHyRpFNKDEPw4qNzMDEjVhqghxEXFYS3nlkKg8510jkhBE8/NhMzJkjP82AYgqgIf7z50gq4u0lrPPld1zfWfbe8vBy/+c1vcPjwYdTX1yM8PBz33nsvXnjhBeh00m7e/0L33V7VG3ejpPVNWLhaELD9FDtjAfAIMIxHYuDLko2QvqownsD5pvfQZa/pl9H7b2FuIzAu5Bn46oST/IRUaryEA3X/QoutCsz/Y++84+Mor/39zMw2rXq3LMu25N57xdjGmN4xvUPKTbs3NyG/VHJDcnPDTSC9h4TeTDO9mWKDsY1777Ykq3dpe5t5f3+sZFw0s7M7gkvZLx/xAb1n32eb5j1z3vOeg4J2EqPvd0MyxnHu4K9S6jL/R9qng969PF3/EI3Bo8jIp+R19P1umHsEV1bczLBM/SQ/fUY1/zzyBEf8iRhD+ELVVYzLSX6f9ZC3gd/tf5bdnppjLeyPV9/vhrlL+cboS5hZYM6ZOl5HvK38YtdLbOqsNmQMdRdy+/hzWVSafOLaUV8nP9n6Ku+3HDnWXr4/Rrk7l+9OXsp5Q8YnzWj0e/jxB2/wdv0hpH7atfdxB7mz+e60RVw+wpzDdrzaAn5+/N6bvNHbO0aPUZyRybdmzee68cbbJf2pOxTkzrff4eUD+xH9MPpa0ednZPCN2XO4ZVriLYCT5Q2F+Z83VvHCrn2ovQ3O+mPkuJx8ad5MvjR/VtKRgUAkyt2vrmbF5j3Hmv4dT5GkeD+TTKeDm0+bzlfPmJN0Lk44GuN3L63h6bU7iMT6YfT+f4bDzrULpvD18+cnfYw5qqr89fm1LH9nO8FI9NjzPp4B8WO0l58+mW9eviDprQxV07jvufU8/uoW/KEIssQJTQml3n/ZFIULF07gm9cuIsOV3Mk3TRM8tmIDj67YgM8fRpalU7aFJElCkSXOWjSOb35hCZnu1HOKPg4ls35/ZI7Ia6+9xvLly7n22msZOXIku3bt4ktf+hI33ngj99xzj6k5Pk2OCIAQGh3BtdR5HqUrtAlV+JGwYVfyKMu6iCHZVyfMCUnMEDQFt7C3+xkaA5uJaUEkZBxKNlXZSxmXdxm5DuuM+sAeNna+xBHfFiJaAAkZl5LFuJwFTC84jxLXcMuM2sBh3m1bye6erYTUeOGfDMXNlLxZLCheSoXbGgPgiO8orzevZkPndgK9BcncNhcz8idz7qBFjMweCEYTzzesZXXrdnyxIEII3DYXswvGcOmQBUzMHW6pfDdAta+NJ2s38HrjTjzRIJoQuG0O5hSN4Jrhc5lZYJ1R6+vkiSNbeOHoTrojQWKaRpbdwayiYdwwYibzS5MPhZ+sOl83jx/YzrNHdtERChDTVDLtDmYUl3Pz2BksGlxluf5Is8/LY3t38PT+XbQHAkR7GZOLB3HzpGmcOWyE5fojbX4/T+7axfJdO2nz+4moKm67nfElJdw0ZSpnjxxpuTZIpz/AM9t38+TWXTR7vURiKhl2O6NLirhh5hTOHTcKh4UkWoCeYIjntuzhyQ07aOrxEo7GcNltVBUXcO3cqZw/eQwuizkIvlCYlzbtZfn7O2jo6IkzHDaGFuVz9WmTOX/GWNxOa3f2gVCEVzf
s46nV2zna2k04EsPpsDG4MJcrF03mgrnjyMqwtnCHIlFWrt/P0yu3UdPURTgcxeGwMagwm8vOmMwFCyeQk5larl2fItEYq9Ye4NlXtnLkaDuhUBS7w0ZJYRYXnjWZC86cSJ7BseNPkj4Rjkh/uvvuu/nrX//KkSNHTNl/2hyRkyWEMLU4+CN7aPU9Q1htQtNC2ORs3I5xlGRdkbBeiVlGd/gQNd6X8EebUUUQu5xFjqOKqpxLyLAlZgAJOZ3hOnb3vEFPpImoFsShuClwVDAh71xy7CUDwugIt7Ch8x3aw82EtSBOOYMi5yBmFyyh0DkwjPZwB6vb1tAUbCakBnEqLkqcxSwsPo2yDOPj1OYZ3bzR/AFHA834Y0FcipNSVwFnlc5mWGaZ4WP7OInfKy8vNWzhoLcJXzSES3FQ6srlgvLpjM4ZGEZXOMCKo9vY092ENxrCpdgpcWVz8dDJTMovHxBGTzjIs9W72N7RiCcSxqnYKM7I5JLhE5heVJ7w8WYY3kiYFYf2sLmlAU8kjF1WKM5wc9GIccwZNGRAGIFolOf372VDYz2ecBibLFOY4eb8kaOZXzE0oZNnhhGKxnhl337W19bRHQyhyDIF7gzOGj2ShVXDB4QRicV4fc8h3j9cS08ghCxBnjuDJWNHsGhUZcLS6GYYUVXl7V2HeW9vNT29FWFz3S5OH1fJkokjEjp5ZhgxVeO9XdWs3nmYHn8QTYPcTBfzxg3jzKkjE0ZOzDBUTWP9zhpWbT5ElzeIqmnkZmUwc1wFS2ePTrh9Y/b6/knUJ9YRueOOO3jttdfYtKn/rp3hcJhw+MOyux6Ph4qKik+tI2IkIQQdgZdo9PwLX2RH77aKRjxYGf9DlpApdJ9Pee6XyXQkHw4XQtDgX8X+7kdpD20/gSEhI4iHFcszFzM2/yYKXRNSei3Vvg1s7niKusD23nlFLyOebyIQVGXNZWbhlZS7kw+5Axzw7mBV64sc8O1A7mWIXoaEhIbGmOwpLC6+mFHZqTH2eQ7wctNrbO/eidT7vE9mjM8Zy/ll5zAlL3GyW3/a66nhmfq3Wdu+41jYWOtlyEioaEzMqeKyIWcwv8hcrscpr6OngYer3+Ptll1oQiAdz5AkVKExIXcI1w1fwNJBk1K60B3wtPKvA+/zSn28kdqHjHhysSo0xuUO4qaRc7m4IrXTIIc9Hdy75wNWVO8iqqm9z/1ExujcYm4dO5MrqyanFEk56unm7zs38vSBXYTV2AkMRZKJCY2q3HxumzCDa8ZOTqnKaqPXwz+3bmb5np0EotFjW0THM4bm5HLLlOlcN2kyToPjynpq9fm4b8MWlm/fiS8cOYEhyxKqJijLzuLGmdO4YfoUMuzJF83r9Ad4YN0Wlm/cQU8ojNI7L3Dsv4uzMrl+9hRumDuNrBSiHD2BEI++t5Un1m6nyx/sl5GfmcE186dw/enTyHUnH4HwhyI8vmory1dvp93j75eR63ZxxemTuX7JdPKzki/+FwpHeeqtbTz55jZaOr39MjIzHFy2aBLXnTuDorxPf1Gyk/WJdEQOHTrEjBkzuOeee/jSl77Ur82dd97JT3/601N+/1lzRDQR5UjHHbT6nyLudBjVjIgnXY4q+jVFmRclwVDZ1v5bDvYsT8iIOyiCWSU/oipHv8DayRJC8H7b/WzseKLXATFixMcXl36NaQWXJsV4q3UFrzUv7zfX43j1jZ8/6FrOKLkkqQX2taaVPHrUPOOSwReybEhyjFeb1vLHg08eczj0GRIagkvLF/GlqkuTOgX0WuM2frrzaQDDWiR9jEuGzOR74y9JeFroeL3ZuI9vb3waTQhDRp8zd8GQifxi+iU4klhgVzce4SvvPkNUUw3rnfTlGSwdMoo/nHYJGTbzC+wHTXXc+sazhGLRhAyA0wYP4+9LLyXLZI4bwLbmJm554Vl8kbApxoyycv554aXkuswvsHtb2rjtyWfpCgQT1oaRJBhfUsK/rrrMsArryTrS1sl
tDz1Dm9efkCFLElVFBfzrpsspzTG/wNZ39PDlfzxDQ6cnYXE3WZIoL8jhH19expBC85WbW7t9fPWPz1DT0pWYIUsU52bx129cTuUg88UrOz0B/vPXz7L/aGvCI8aKLJGblcEfvrOM0UMTn976NOkjdUS+//3v88tf/tLQZu/evYwd+2HSXENDA4sWLWLx4sX885//1H3c5yEiIoTgUMd3aPM/h/lqT/HL1JjiP1PoPtcUY0vb3RzyPJX085tdcieVOReYsl3T+i82dixPmrGk9BtMKTDn8LzVsoJXm59ImnF+2XUsKbnElO0bzW/xcO3jSTMuHnwBV1ZcZpKxnt8eSJ5xyeCFfGXkMlO2bzbv5IfbkmNIwIXl07lj4jJTTtXq5oN8bd3jvbEiswyJsweP4zezrzAVGVnfUssNbz2OJswzZEni9LJK/rnoSlP5H1tbG7nqpSeICc10RVNFkphRWs4j511pKmqxt72NZU89RlhVk2KMLy5h+eVXm4paVHd2sezBxwhEjJ2pkxmVhfk8eeM1ZDsT5000dHu44u+P4QmFTNe8UGSJwbk5PPnla8l3J44otHl8XPP7x+nw+pNiFGZnsvyb11GUk7hQW7cvyI13P05TpycpRrbbxaPfvY7BhYnXIF8wzBf++wmONneaZsiyhNtp5/6fXM+wQfmmHvNpUDKOSNKxzNtvv529e/ca/lRVfXiSorGxkTPOOIP58+fzj3/8w3Bup9NJTk7OCT+fNbX4HqPNv4LkSk7GbQ+0fZNQtC6hda3vtZScEICNrT+jJ3w4od1h77qUnBCAt1v+THPwQEK7g95dKTkhAK80PcZh356Edod8R1JyQgBeaHyZbV07EtpV+xv5/YHUXsfzje+yqnVLQrv6QAf/tf3JpCubCODFhi08X9//dunxag16+eYHTyblhMQZgtcb9/DQofUJbbvDQb646mmESO4vRBOCdxuP8Odd7ye0DUQj3PL6M0k5IRAvGrapuYG7N72X0Dasxrj1hWeJJOGE9DF2t7Xys/feSWyraXzxyRVJOSF9jCMdXfz4tTcT2goh+NpjzyflhMSfm6Cx28P3nn3NlP3tD72clBPSx+jw+vn2Qy+Zsr/jwdeSckL6GN5AiG/9/QVTvWfuun8ltUk4IRA/MRMMR/nWb1ZYLl3/aVXSjkhxcTFjx441/Ok7ntvQ0MDixYuZMWMG999//wA0Q/p0SwiNhp6/k1rJdoFAo9n3aAKGYF/XQykyACQO9iR2YjZ1PJly7xgJma2dzya0W932Usq9Y2RkVrclvkC93vSmJcbLTYkvtC82vJdStVeIRxOeqX8rod0zRz9AQ0u5ovrD1e8mvNA+VbOZqKamzLj/0LqEpeufOrKDQCyS0isRwP37NxFWY4Z2zx3eS3c4lFJvFw3BI3u34Y9GDO1eP3yQFr8vpTL6mhA8vXc3XUH9FvIAq4/UcLS7J2XGq/sO0uTxGtptrG1gf0t7StU/VSF492AN1e1dhna761vYWtOYGkMTbK1pZE99i6FdTUsn7++pSZlxoKGNLYcaDO1aOr28ufFAStVYVU1Q39rNuh01ST/2s6CPzDPoc0KGDh3KPffcQ1tbG83NzTQ3N39UyE+8ekJrCav1pN6AQ6XF+ziaCOtadIZ30xM5lDJDoFLjfYmo5tO1aQ/X0BjcnXLvGIHKAc9qArFuXZvOSCv7vFtT7h2jobHXs4WuSLuuTU+0hw2dmywx9nkP0BBs1LXxx4K82bLBMCfESALBIV89B7xHdW1CapTn6jZaappWF+hgS2e17nhUU3msepMFVwdaQ17ebT6oO64JwYP7N1lqA9YTCfHa0f2640II7tu12VKjvGAsxnOHjKNtD2zfaumos6ppPLV3l6HNI5u3WW5c+OT2nYbjj36wzVIvFEWSeGKTcdRw+drt1hiyxPK1xoyn39thnfHudkOb51btSPmGo4/x5JvbUn78p1kfmSOycuVKDh06xFtvvcWQIUMoKys
79vN5VYtvOfHCZqlLFV46A2/ojh/xPIdkmRHhqHel7vju7tcsMwQae3v0Q8MbO1elHHH5UBIbO/VD3Gva11vufikjs7p1je746tYtxITxHXoiKci83rxOd3xVy278qr5zaoohyayo36A7vqblEJ1hv0WGxPLqzbrjH7QcpcHvscSQJYlHD+pvZe1sb+Fgd4elT10CHt67TXf8cFcnW5ubLDmGAnhkp/7C1+Txsqa61lLjQk0IHtuqv4B3B0Ks3HvIUhdaVQie3ryTmNq/Ix6MRHl5yz5rDE3w0pa9BCPRfsdjqsaKtbssM97aepAef6jfcSEEz76zw9JnrmqC9btqaOk0jlJ9FvWROSK33HILorcq4Mk/n1cFo4fhlIqryUohFNO/O/ZEavup6pqcJBR80Xrd8e5I44AweqL60bH2sPXImQR0RFp1x1tDrSlvy/RJQ6Mt3KY73hhqR5EsOoZoNAT1GfWBDpQU+uucwBAaR/360aNaf2dKjf5OZAhqfB36DJ9xCN+MNCGo8erPU+vptswQwFGv/jxHe6wzABq8Ht3rZV13j0UXOq7OQJBQtH9HubEn8ekVM/JHovQE+1/A2zz+Y1VXrSgSU2n39O8o9/iDBML9OynJSBOC5s7+HeVwJEaX13grzawa23oGZJ5Pkz7fSRsfs1Rh7Y4S4vkVqsG2SUyzzgCIiYDuWHgAGAJBRNNnRLRwyls/fdLQCGv6F4eQGrIcEQEIqPqMoBoeEIY/1v+FPM6PWAoJf8jQj6oEYhHLVVXjDP3cioFiBGL6i44RPxkFY/pRLn/U+qIH8YVPL98lMEAMAH+k//ckoPP7AWWEB5ChM9dAOCEfJyMQGrj35NOitCPyMUqRrBetEQgUWX8eu8FYMrJL+sfhnHLio3KJJCHhkPXrGDhll+WtGRkZp6x/dNCluAZkAXcr+gy34hwQRqZNv66EW3EMiLOTadM/yum2OQbk7jjLpl+DY6AYboNaIpkG/AFjpFAsrD8pkqR7TDjTREM1s9IrPJaZRL2UlBkWS7ufMJdLh6Hz+9QY/f+NuJPsLWPIsFiK/tOotCPyMcptH4XVHBGI4bJV6o7mOKoGIH8jRpZBv5p8R4VlJ0FDJd+hXwK82JW4bXoiCaDYqZ+TVOYalHKiap9kZAa5SnXHh2SUEBPWQs8KMkPd+qXlh2UWJzyNkpAhyVRl6ZfIr8wqspSoGmdIVOXoF22qyim0ND/EC7WNzNWfpyrPep0GGajM1Z+ncgAYEjA0N0+3tsuw/LwBcG+hODMTp06/msF5OZZ78gBku5zkZvTvSBfnZuG0Wb0mgtNuo1ineFpupstynxmId/wdXJDd75jLYacwdwBu0CQYUmK+QNtnRWlH5GNUafa1WM0Rscl5FLiX6o5X5VxqOX9DkTIYmnWW7vjEvHMtb5vIksK43DN1x2flLyb100V9EswqWKw7elrR3KSqlvYnDY3FJafrjp9ePA2nbO1uSUXj3EH67eoXlY4n2yBiYoohNC6rmKM7vqB0BCWu/i/C5hmCaypn6o7PKh7C0CxrC6yG4PpR03XHJxSWMqGgxFK+iwbcOG6a7vjwvHxmDx5i+UTLjZOm6o6VZGWxeGSlJYYsSVw/Xb8DcW6Gi3MnjLZ8aubqGZN0y++77DYumTXB8omWS2eO123Op8gyy06baJlxzozRZBuUlL9iyRRLW4uKLLFgStVnstx7IqUdkY9ROc7ZvdGMVL+sMqVZ1yFL+qHGAtdY8p1jU2ZIKFTlXIzNYEujwFnBEPcUC3VEFMbmLMGl6Besy3MUMj5nhqUaHxNzZ5Fr1y/NnG3PZk7BLGuMnPGUGkRE3DYXZ5XOscCQGJM9jKos/eiRQ7ZxWcXslBdXCRieWczkPP0omCLJXFs109ICXpaRy2klI/SfhyRxyxh9R8WMCpwZnF0x2tDmlonTLUV3Mu0OLh4x1tDmpslTLZ1osSsKy8YZ95e6Ybo1BsBVU4z7Ml03e7Kl0yaaEFw
907gv01XzrDFUTXDVfH2HCmDZAuuMKxcaMy5ZNDH1S3sv44ozp6Y+wadYaUfkY5QkSZTn/hup3elLyJKdQdnXJbQcl39zygyQGJl7RULLmYVXWoiKCKYVJC6Nvqj4Qks1PhYVX5jQ7ryys1LOr9DQOL/snIR2F5cvJNUbJQ3BlRX6kaM+LRs6B5uspFgqD26qWpSwxPsVw6bjVGwpOyNfGDU/4R3jsqpJ5DhcqTPGzk7YmO7iqrEUZ7hTiiZIwC3jpyXsaXN21UjKs3NSZEhcO2ESOU7jKNeCymGMKCxIiSFLEpdMGEtxlvF2wrSKwUwqL00pmiBLEmeOHUFFQZ6h3ZjBxcweWZESQ5El5oysYHSZcQfxiuI8Fk8egZwiY8KwUqZUGpeeKMrL4ty5Y1OKiiiyxPDBBcyZMCzpx34WlHZEPmaVZF5Jadb1ST4q7iCMKf4LTlvi3ImKrKWMzbsxBYZg3qD/JscxPKF1ZdZs5hXdlCQjrrPKvkWJS//OuE9VWeO4eHBqjEsH38LwzDEJ7YZnDuO2ytQYVw65jEl5iTsWV7hL+c6YZD+PuK6qWMppRcZ3YgBlGfncNfVa6O0UnIyuqJjDBYP1txr6VOTK4i/zrkWSkiNIwCUVU7iualZC2xyHi/sWX4kiy0ld0GUkzqkYzVcm6G9h9clls/PguVdgl5XkGJLEgvLhfGvGaQlt7YrCgxdfTobdnpSjIEsSM8rK+MGCRaZs/3nlpeS4nEkxFElibEkxd56d2MGVJIk/XXMxBW53Uo6CIkkML8znrssSO+oA99x4AaW52ckxZInS3GzuvtFcb6yf3XQOw4rzk2bkZ7n5zZcvNtWL6Xs3L2XU0OKkHB5FlshyO/ndty5LyVH6LCjtiHzMkiSJqoI7Kcu+pfc3iRK1FCTsjC3+G/kZZ5jmTC78BuPyb40zE3zMEgoSMvNK/4eKLP38k5M1p+h6TivuYxi/jj7G2WW3MyHP3MUJYGHxBVwy+GaAhNsbfeOXDr6FBcXnmWYsLjmd2ypvQkIyzbi6YhkXDT4/CcZ0vjv2RmRJNs24dug53DI8cVSnT6eXjOOX067HJskJ64r0LVzXDjuN28dfZLqL8NziSv4271qcii0hoy+qccWw6fx8urkLOcD04iE8vOQaMpTEi3ifI3HhsHH8/rRLTDsWEwpLWX7BNWQ7Ei/ifa9j6dAR3HvWpQkjLn0aUVDIk8uuIT8jIyHjWIffIUO5/+JlpprqAQzJy+XxG66mJCsr4WuXen+mlpfx0LXLcJs8eVOak8XjX7ya8twc04yxg4p5+NYrydY5ZXKy8jMzeOgbVzO8ON9U9FCSoLK4gIe+cTX5mYmb6gFkZzj557euZHR58bHnaSRZkhhcmMsDt19Nicm8jQynnT9/9womjSgzzSjKy+LeH17D4OLPX5Jqn5LuvvtxKpnufZ9GdQbeotFzP57wWj50SDQkFAQqsuSgOPNyynJuw21PHEHoT82B9ezvfpzmwNpjzoJAQ0JGoCFjY2j2uYzJu5Y856iUGHX+7WzpfJYjvvW9d+NS79zyMdbonEVML7ic0gzj/Xs91fj3s7rtZXb1bAT6Wsx/+DpAYlLubBYWX8DwzNQYR3w1vNa8kg2d8XLpfW3s++7/BYJpeVM4t+wsxuUkjrb0z2hgRcMqVrVuRhUasiShCYGMhCC+3TMrfzyXDlnE9HzjPAR9RgtP1KzllcatRLUYsiTHGb1XeFVozC4cyTXD5rOgJDVGja+Dhw9/wLO12wirUZRehnQcY2bhMG4cMYezBo817YQcr3pfN/fv38QTh7YRiEWxHWPEP/+Y0JhWVM6tY2Zy4bBxKTGa/V7u372Fx/ZtxxMJ98uYVFTKrROmc+mI8bpJl0ZqC/h5cPtWHt25ne5wCJvcyyB+YxLTNMYUFnHLlGksGzsBu5L8KZLOQJBHt2zj0S3b6QgEjzEgvtjFNI2qgnxumjmNKyZP0D0pYyRPMMTjG3fw6IZttHr
9/TIq8nO5cc5Urpo5WTd51Ej+UISn1u/gsTXbaOr29ssoy8vmugVTuWreZNwpHP8NRWI8+/4OHl+1jfr2nn4ZxbmZXL1oKledPtkwQVVPkWiMF97dxfKVW6lt7kKR5fgWsIh3242pGvk5GVx55lSuOHMqeVnmnKlPk5JZv9OOyCdAwWg1bf7nCMca0UQIm5yN2zGW4sxLscnGr1vTQkiSgiQZ3934og3UeF8hEG0iJoLY5UxyHSMYln0+TsXYE1e1MJIkIydgeKOt7OlZSU+kmYgWxKlkku8Ywvjcs3Db8hIwIiBJKAkYnmgXmzpX0x5pJqQGcSkZFDnLmJm/iBy7MSOmxYsO2RKcYumJeljTvo6mYBNBNYRLcVHiLOL0ovkUOPWTX+OMGAINu2x8gfRE/bzVspGjgWYCagin7KDUlc/S0tmUuoyPscY0FQ0NR4LX4Y0GebVxG4e8zfhiIVyKnVJXLucNnsbQTOM99ZimogoNh2wzXOD90TAv1e9kT3cTnmgYl2Kj1JXNRRWTGWFwVLePEdVUXIrdkBGIRXixZi87OproiYRwKgolGVlcPHwC4/L1jxzHGVovw/h1hGIxXqnez6aWBjyRMHZZpjgjk4tGjGVSkf7RaYj3hQmrMTJsxq8joqq8fvggHzTU0xMOYZcVCjIyuGDUaKaWlhk+Ns5QybAZv46oqvLWwSOsP1pHdzCETZbIz8jg7DEjmTmk3PCxmhCEojEy7MYMVdNYfbCaNYdq6QmGkCWJvAwXZ44dwZzKigFhaJpg7YFa3t1bTU8gXswv1+1i4bhK5o8eZriFIYQgGI3hstkS2m3YX8fqnYfp8cebIOa6XcwbP4wFEyoNnU4hBMFIDJc9MWPbgQbe2XyIHm8QVdPIyXQxc1wFC6eNwDYAR5c/qUo7Ip9hCaHSE3yLNu+D+MLrEcQXV1nKJN99IcXZN+F2GGepJ2ZotAbfp8bzBO2BD9CIV/pTpAzKMs9keM415DknpXQH+iFD0BDYwu7uFdT5P0AVfQwnw7NOY0LeZQzKsM6oDezhg45X2O/ZRLSXYZMcjMqeztzC86jMtM6o9h/mnda32Nq9iYjWx7AxPmciZ5QsZVzOBMvHhA/5jvJK47u8376VkBbuZShMyh3NBYMXMj1/guUy74e8TayoX8ubzdvxq6EPGXnDuKLiNE4rGo/N5LaEno5423iydgMv1m3HG+tjyEzMG8J1lXM4s2wcdjn5O+njVe3t5PFDW3imZgfdkXjVW0WSmZBfyk2jZnF+xTjTWx96qvP28NiBbTx5cAcdoUAvQ2Jsfgk3j5vORZXjEia0JlKTz8vju3fwxJ6dtAX8iF7GiPxCbp40lUtHj7NcdKzV52P5zl08sX0HLT4fgnhUYHh+HjdMncrlE8aT7bRWg6PTH+Dpbbt4YvNOGj0ehIhvrVTk5XLdzClcNmUCeTp1RsyqOxDiuc27Wb5+O3WdPccYZbk5XDV3EstmTqQgS7+Aohl5g2Fe3LiH5e9t52hb97GoVnFuFlecNonL506kOPfzd/RWT2lH5DOqTv/zNHT/nKjaQnwr5+R6IfHfuR2TGVrwK9wO4+N//anJ/za72/+XoNp8bIvoePX9LscxmslFPyHflbzTc9T/Ae+3/A5PtNGQkecYxsLS2ylzJ07WPFnVvl280Pg32sMNyMinnL7p+12Bo4yLBn+ZkdlTk2Yc8R3i4dr7aQjWJ2AUcs3QG5iap1/fQp9Rz58OPsphf10CRi63VV7O6cUzkmbU+Fu4a89T7O45iiLJpxRHk5HQEBQ4svjqyPM5b3DyR2zr/J38ZPtzbOqoQZGkU46d9jHy7Bl8feyZXD18dtKMxoCHH2x4iTUt1YaMbLuTf59wOreNnp20E9oa8PGDta/xdv1hZANGps3BVyfN4WuT5yV9iqIrFOSH76zktSMHkXq37o5XPK0cMmw2vjBlBt+aPT/pLSNvOMx/vfkWL+2LdyrujwHgsCncMHUq/+/0BUl
vGQUiUX7++js8v2MvqtA4eaXpY9gUmSunTeL7Zy1MessoHI1x98vv8vTGeGO9/hYzSYo7opdMH8/3L15sOjemT1FV5Q8vvs8T720j2tsX52RO32d87vQx/OiqJWSZzI35LCvtiHwG1dzzZxp7fmnSWkaWnIwovo9sV+IM/z5V9zzOro67+PBSl4CBwsxBv6XUvdA0Y1/PK7zb/Kve2RMx4umjZw7+MSOyzSfq7uxew9N1v0X0/pOIAHDZkG8wLX+Jacb27q387fCf0IRq+vjvdUNvYnFJ4pMKHzL28/M9fyOmxUzXvbh5+KVcPsR8wvGO7hq+s/VfhLQomsnqrLdWLuULI842zdjT3ciX1z+IPxY2XQH2xqp5fGf8uaYdhQM9bdzwziN0R4Kma2tcXTWVn88837SjUOPp4trXHqc16DPNuKRyHL8+/ULTFUobvB6ufe5JGrwe04ylw0fwl3MvwmHSUWjz+7lh+VMc6eoyVVJfAuYPG8o/Lr0El8ny9d3BELc+8gz7WtpMMWRJYmp5Gfded5luOfiT5QuF+bf7V7DjaLNpxpiyYv75xWXkmcz7CEaifPPeF9hw8OgpjpQeY3hpPvd+/QqKcqxXWv00K5n1O31q5lOgdt9jSTghABqaCHGo7VYCkT2mHtHge6XXCQFzNUg0NGJsbP4WXaHtphg1vvdZ3fyr3oXbDEMgUHmr8b9p8Ou3dj9eh33bearut2hophyEPmfl2fo/sd+z0STjIH87/EdUEUuqBsljRx9iY+cHpmyP+Or4+Z6/EU3CCQF4sOY53mxeZ8q2xt8Sd0LUiGknBOD+6jd58ugaU7b1gS6+vP5BfNFQUmXoHz6yjnsPvmvKtjng4aZVjyblhAAsP7KNe3a8Y8q2IxTgutefSMoJAXihei93fvCmqa7jPeEQN7zwdFJOCMBbNYf57tuvm2IEIlFuffpZqk06IRD/S113tI7/fPkVVC3xZxiOxfi3J55jv0knBOIRmW0NTfz7Uy8SVRNXho6qKt985CXTTkgf40BTG1974DnCOh2Hj5eqafzgoVfZeLDOlBPSx6ht7eJrf1sxoI3wPutKOyKfcEXVDuo6f5zCIwVCRKjt+E5ihuZje9udJF8WMO4obG27I+FFMKaFeafpF0nO3yeNd5r/By1BzxZVqDxd9ztSLQ3/TP0fjiW06kkIwX3V/0hq4T5eD9X8i5Cq30m3j/GHg4/0Jr4m/1r+evgJPFH9Ds19unvvs3EnJAXGnw68SFuoJ6HdXTtfxh8LpcbY/xa1vo6Edv+7/S06w4GUqoz+fd86dnc1J7T79Zb3aAl4k2YI4JH9W9nYWp/Q9s+bPuBoT3dKjOcO7GVVbXVC239t2sT+9vakGZoQrDx0mFcPHExo+/imHWxvaEqJsbb6KM/tSHzz9PzmPaw/dDTpJomqEOyoa+KJ9TsS2q7cdpB3dh5OnqEJDja289A7m5N63OdZaUfkE64O/3ILvWM0gtFdBCLGf3QN3pdQRZjUFnANf7SWjpDxH90R7yoimi8lhkDgj7VT5zeOJuz3bMIX606xUqogqPrY41lvaLXPu4e2cGvK1VjDWpgNncYRi4O+Wqr9DSmXIVeFxtstxu9Vta+F7d3Vlkqdv9BgzGgIdLGm9UDKZcgVSeKpWuMoVXvIzyt1ey0wZB49ZPzd9UTCPHN4l6XX8dBe44heKBbl8T07LDEe3LnV0Caqqjy8bXvKHY5lSeLhrcYMTQge3rg15TZREvDQhm2GNzZCCB55f2vK1YoR8OjarWgJSr4//u62lHvHaELw5JrtxFRrPbk+L0o7Ip9gCaHS5n0QLDWYU2jzPmzAEBzxPGZh/nhyaY3ncUObXV3PWOrYKyGzq+tZQ5sPOl62zFjf/rKhzTutb6bcNybOkHir5Q3DC+0rTe9aOgEjELzUtNowavNc/TpLDA3Bivp1xDR9J/np2k2WTiSpQvDM0c0EYxFdm6eObDMdNu+fobGiZieeiH6UasX
hXUTUxKF8fYbg1dr9tAb0o1QvHTqAN6L/Os0wVh+toc6jH6V6+/AROgKBlBmaEGxqaORAe7uuzbrqo9R3e1J2bwVwoLWdHY36Uaoddc0cbOlI+XMXQEOXhw8OH9W1OdjYzrbqxpSdNoAOb4DVu46k/PjPk9KOyCdYgcguomqTxVlUugIv6o76ojX4ozVY6XQrUGn2v4Um+r9Y+6NttIX3W+rYK9CoD2wkovV/IQ2qfo74d1pm1AX344129jse02Js796acv+bOEPQFGqkNdzS/7gQrGnbklQ+RX9qC3dS7W/QHV/Zss0yozvqZ2dPje74qw07LV3IAfyxMBs69LccXjy621JUByCiqaxqOqzPqN5raX6IL+Jv1h3SHX/p4H5LnVshXhzttcP6WyevHDhgmaFIEq/uP6A7/tqeAykVfTteNlnm1T36jNd3WGcosszrO/Xfqze3H7TUrRfihctWbtN/HWl9qLQj8glWTNO/80hGmgigiXC/YxE18R68GQk0opq337Gg2jUgDIBQrP87voDO71ORP+bp//eqL+UtmZPljfbPCGsRojoOXbLqifb/eWhCwxsNDgijK+JPaSwZdYb152kPWWfISHQYMFqDfsufuiLJdIb0oxGtAZ9lp02RpGM1TfpTm89vmSFJEp1B/e9Ohz9gKqHVSEIIOv36jE5/AEthMEDTNDp9+u9VpzdgKZoXZwjaPQPzN/BZV9oR+QRLJEjOTG6u/he31PNPzDMSJZkmI43+GeoAMlSd9yTVBNV+GTrP12qU4njpbZvEzywNjEMVM3jfrbaoP8Yw2P4ZEIYUr8Cqy7C4sPYpajCPET8ZGeUkDBTD+HVY/zwExs81pln/9gogavBeqZpmJUh8TEaMtD5U2hH5BEtJUN49iZmQpf6rCtrk7AFigF3n+TqVgWM4dZ5vhjJwFQ0zlP7P/7t1fp+K3Lb+58pQnEl3z9VTlq3/z1yRZFwJStCbVbZNv0dGlm1gijrlOPQZOXbrDE0Ich36dSVyndaqfh5jGMxT4LLea0QTglyXPiM/I2NAvlm5BpVWczOS6wTcn2RJIsegIFiOyzkgW0y5bn1Gdob175UE5GVa/+58HpR2RD7BcjsmIklWv8gKWU79CpLZ9hED4IzI5DrGocj9//Fm2weRoRj3aDGjXHsFLp2+OFm2PAocg0j+CPKJyrEVkGfvv0eKU3EyJGOoZUchU8lkkKus3zFZkhmXU3Ws42uqcsoOKrOG6I5PK6hCsfjnb5MUxudU6I7PKaqyXHZeRmJq/lDd8dMGVVpe+ABmFeszFpQNt8zQEMwu1X+v5g0ZanlxVYVg9mD9z3xOhf6YWcU0jdkG88weNsRylCqmacweps+YVTXEcnRHFYKZlfqMmSMrrEeQpPg8aSVW2hH5BEuRsyjMvJIPO/OmIpWS7FsNGE6GZV9h6bQJaFTmXq87Kks2JuZfZnkBn5i/TNehkiSJuYXnW5pfQmJO4fnIkv77vaT0LEvbGjIyC4uXYDdoWHfh4EWWEjBlZJaWziND0b+rWzbkNFQLSbeKJHPWoKnkOvSjRNdUzra01aRIMmcMGktphn5k8PoRMywtfIokMb9kOJXZ+o7y9WOmWmLIksSUojImFpbq2lwz3lp/KAkYkVfA7LJyXZtlE1Pr7Hu8yrKzWVRZqTt+wYSxuE1WX9VTvjuDpWNH6o6fOWGk6cqoenI77FwwVb/z9PxxwyjNsxZltSkKl8xJvs3G51FpR+QTruKsmzi1p4x52eQicjOMS34Pz7nS0uJqk7IYnHmOoc3Y3Auw8nVTJAejc4zLik/NX4Iipd7MTEJmeoFxCfZZ+XNwyalfBAWChcWLDW3mFEwhR2frxow0NM4rO93QZnbhKEpdeSkzVKFx+ZD5hjZT84dSlVWcsvupCo1rKucY2ozLL2VqweCUI0iqENw02rh3zrCcfE4fnHpURBOCW8cZ9wAqzczinKqRKTMEcOuU6YYJlrkuFxePG5syQ5Y
kbpo21TBy43bYuWLaREuMa2dMNixX77ApXD13SsoRJEWWWDZrIhkGPWcUWebahVNTTlhVZIkLZ44lx6LD9HlR2hH5hCvDMYYC9+Wk+lGV5/0QKcHi7LYPYVj2VaS6rTGu4D90t2WOMWyFTCm4JqX5AWYU3oIjQY5GhpLJGSVXpcw4vfgysmx5hjZOxcml5VekzDijZCmFziJDG5uscHPlpSnNLyGxpGQOFW7j1vWyJPP1URemzFhUPJFxucZhZ0mSuH28sYOqJxmJecUjmF2of/fdp+9OWZLSV1eRJKYXDuGMslEJbW+fdjqSlHxMT5EkJhSUcN7wMQltvzlrHjZZSYlRlZfP5WMS331/dc7seOv6JBdYRZIoy87mmsmJIze3zZ1Bdgp5HIokUZCZwQ2zpia0vX7+VAqyMpI+YitLElkuJzefnrg55LJ5kxiUl50Sw2m3cdvS5Bs3fl6VdkQ+BRpa+EuynHNJ9uMqy/02hVnmFs2JRd+jxH06yV7RR+TezPBccw7G7KIvMiLbfGO5Po3LvYipBdeZsl1YvIwZ+eabvvVpcu5ClpRea8r2jJKlLC1NfoGdkjuNKyvMMZaWzuPqivOSml9CYnLuaL420hxjSelkvjbqgqQZ43Mr+K+J5hinl47mh5Mu7H2sOclIjM4p5TczrzF1RzqnZBi/mn0RUhIMRZIYnlXAvadfZaoh3dTiwfxx4cVIkmQ6+qJIEmWZOdy/9EqcSuJI3djCYv5+3sUosmx6EVckiSK3m4cvvsLUlsjw/HzuvexS7Ekycl0uHrxyGTkGybB9KsvN5t5rL8Nps5mOjCiSRIbDzn3XLaMws/8k6+NVmOXm3i8sI8NhN+0oKHLcQfj7rZdRlpc4Ly7H7eLvX7uc7AynaYYsSdgUmT99+VKGFueZekxa6e67nxppIkxtx3fpCqwgnjOit10jAxIV+XdSnH1zkowYu9rvotb7FBKKwdHe+IV7XMF/MCL31qTCl5pQ+aDtb+zoehIJWbcAWXxMMKPwZmYU3pIUQwjB262Ps6r1KRMMjQVFl3LWoBuRk0isFELwWvPLPNfwNCDpMmRkNDQWFS/hmqE3oBjkn/SnlxpX8a8jzxp2Eu5jnFEyh6+PvBa7nNz21EsNG7ln37PxVu06DEWSUYXGGSWTuGPCNTiV5PIAXm3YyY+3PUtU67+N+vGMBSWjuGfGVbiTPHXzZsMB/nPdc4TUaELG3JJh/PW0K8gxOC3Tn95rrOar7zyHLxrR7VGtSBKqEEwrHsw/z1xGoSvxwnq8NjTW8+VXnqc7HEJG6jdfqI8xrqiYBy68nNLM5PIZdjQ188UVz9ERCCBLUr/1RfoYIwoKuG/ZZQzJ7T9RXE/7W9r50uMraPH6EjKG5OXwz+sup7IwPylGTVsXX77vWRq6PLqMvt+X5mTxt9suY/Qg44jkyWro6OHrf1tBdWsXiiyh9nNEuY9RkOXmT/92CROGGkckPw9KZv1OOyKfMgUie2n3PUyH/ynESUXKbHIJxdk3U5R1NXalJGWGN3KEWs+THPWuQBUnFhZyyPkMz7maoTnLyLDpJ98lUk+knj3dL7C350WiJ1VLdcrZjM+7hHF5F5FtT/0PuivSysbO19nY8Toh7cTCQk7ZzcyCs5hdcA4Fzv5PsJhjdPJu2ypWt72FL3ZiCW+n7GRB0SIWFp9BWcZgCwwPK5vX8nLTu3SfVAjNIds5s3Qu5w06nWGZVhg+Xm7cyDN1a2kLn1gczi4pnFM2ncuGzGNMTuonL3oiAV6o28aj1etpDHafMGaTZM4rn8Q1w+cwMa885b15byTEitqdPHhgIzW+EwvpKZLEeUPGceOomcwoGpIywx+N8NyRPTywZxMHe04sCCgjcfawUdw0djrzBg1NmRGMRnnx4D7u37mVve1tJ4xJwJLhVdw8aRoLKoalnCsRjsV49cABHtyylR3NJ1b6lYAFw4dx07RpLKocnnIl04iq8ua+Qzy8YRtb6htPGZ8
zvIIbZ03ljNFVpiJT/Smmaryz9zCPrd3GhiOnNhecNmww18+fypkTRuKwpZasq2oaa/bU8MR721i3r/YU13Di0FKuXTiNs6aOwmlPPU/ts6S0I/I5kKp5CUR2EtN6kFCwKYVkOqbo5oMIIQhFthFT69FEAFnKxmkfg8M+QpcR0wJ0h3cT7WU4lDzynBORpf7vhoUQ+CJ7CMbqULUAipxFpr2KTId+BnxMC9Ma2ktY9QASLiWHEtc4FIM6F53hQ3gjdURFALvkJttRQYHTiBGlPniQQMwLCNy2bMozRmI3yGtpC9XSEakjogVxyBnk2csodVXpLiwxLUaN/wi+mA+BhtuWxXB3JU6DkystoUaaQ/WE1CAO2UmBo5ihbn2GKlQOemvpifrQhEamzc3IrArcBrU8moOtHA00EFCDOGUHhc4CRmVVGjA09nnq6Yr4iGkq2fYMRmeXk203YnRyyNeAPxbCIdsocuYyIXe4boRJExp7e5poD/uIqDGy7S7G5paR59CPHLQEe9jracQbDcYZrmym5g/TPR4shGB3VzNtIR8hNUaO3cWYvBKKXPp5Rq1BLzu7GumJhnDICkXOLGYUVWCX+1+8hBDs7WqjJeAlGIuS43AxOq+IErd+dKI96GdbeyM9kRA2WaHI5WZWSYVhcub+jnYavR4CsSg5Ticj8wspy9LfWugKBdnS0kh3OIRNkinIyGDWoCG4bPoL5KGODup7PASiUbKdDqryCyjP1b/mekIhNjc10h0KIUkSBa4MZpWXk2GwPVTd0UVdVw/+SIQsp4PhBXlU5Ofp2vvCETbXNdAdjPcBystwMaOinCyn/rWhrqOb2o5u/KEIbqeDoYW5DCvSj7IEI1E21zbQ5Q8ihCDXncG0oWXkZOhHyho7PdS0duELhnE77QwuyKFqUKGu/edVaUckrWNSNS8e/9N0+f5FNHZqA6YMx1zys28lK+NcJB0HI5Fimp8W/0vUex7GHz21f0OOcwpDsm+kJPMcZCm1QloxLUy1dyX7ep6mM3xq/4YC5xjG5V3J8KwzsSVInNVnRNjnWcOmzhdpCu0/ZbzYOYyZBZcwPncxjhRPzsS0GDt7NvJu2+sc8Z/KKHGWsbD4XGYVLMClJBfS75MqVDZ37uC15lXs9pzKKHUWc27ZYhYVzyNTp+hZYobGxo59PNewho2d+04ZL3HmcemQBZw7aDa5jtSOQWpC44P2wyyvXc97rftPuQstdmZz1bA5XFoxk0JnagwhBB+01fLI4Y282bT/lNB+odPNdVUzuapyOqUZqdXbEUKwua2Bh/Zv5uXafaccBc5zuLhh9HSuHT2F8szktj+OZ2xva+bh3dt4/tDeU2pgZDucXDduMtePn8LQnLyUGAC7W1t5ZMc2VuzdS0Q9ces2027nqomTuH7SZKoKUq8bdKC1nce27ODZ7bsJxU6spOyy27h88gSumz6Z0SXJbbEcr+q2Tp7YsINnNu8iEImeMOawKVwydRzXzJnCuLLUI8ufd6UdkbQA8IfW0Nh+G5ro25bod0cbULHbhjOk+DEctuFJMXpCW9je+hViWg/o7prLgIZTKWNq6b/IdOhHYfpTR2gfbzbeTkjtMmDEf+9SCjir/DcUOEcnxWgL1bL86B14Yx1ISDq5EnFGhpLNlRU/pdytX4egX0a4mb8euouOSKth7gqAS87gi1W3Myp7QlKM9nAn/7PnDzSGmo/ljvQnCXDIDr41+stMy5+YFKMj7OGHO+7lkK8BWZJ1S99LSNgkhR+Ov56FJVOSYnRF/Hxr0yPs6K47ltehx1Akmf+afCkXlk9LiuGNhvj6uqdY31ZjyJCRkCT4ydTzuKYq8WmL4xWIRvjGe8/zdsNhQ4bSm2PwoxlL+MK4WUlt6YRiMW5/51VePrL/WM6FEeNbM0/j36fPTYoRUVXueOtNnt6zOyFDFYJ/mzmL/3fagqS2jVRN43/eWMUjm7fr5mIcz7hhxhR+dPbipLaNNE3wu5Vr+Od
7m4wZvWOXTZ/AnZecabn+yudRaUckLXzBlTS030Z80TZTVEpBlrIZVvqi4XbN8eoMrmN7yxd7F1RzDEVyMaPsMbIc5hbx1uBO3mj4dzQRM9VZV0JGluycM+RPFLvMLeItocM8XPP/iGmRJBgyVw/9OcMyJ5tkNPDbAz8hrAZNde+Vev/5YtV3mJBrboFtDbVzx65f4o36TTMAvjn6i8wrNLfAtod7+Mbm39ER9ppkxL+B/2/sNZxbZu44Y1fYz83r/k5TsDupgmjfG38hVw+fa8rWEwlx7eoHOOxtT6oR3HcmLuHLY04zZRuIRrhm5WPs6mxJivGNifP5zrSFpmzDaowbX36aTU0NSRXBu2XiNH4yf4kpZySqqnz5hed5t7YmqWpDV4yfwC/POtsUQ9U0vrXiFV7fd9A0QwLOGTuK311+gSmHRwjBHSveYMWWPSYJccbpo4fzp+svwaakD5kmo2TW7/Q7+xlUKLKHxvYvg2kHAUBFE17q2q5B1RJ3sg1Eq9nR+tUknJA4QxUhtjV/wVTXX1+0iTcbbzfthEC8C7AmorzZ8G380ZaE9v5YF0/U3mHaCfmQofJU3Z10Rk5NwDtZgZiPvxz6hWknJM4QaAjur/4tjcGjCe1Daoif7/m9aSekjyEQ/PHgfRzyVie0j2gxvr/973RGzDkhcUZcv963nG1dhxLaxzSV/9j0cNJOCMAv97zEmtZTt6JOeU5C8I31TyXthADcs+ttXqnfbYrxH2teSNoJAfjTrrU8eWiHKdvvrXqdjU31SVfifWDXVh7YtdWU7c9Wr0raCQF4es9u/rJxgynb36x6n9eScEIg/t16bd9Bfv3OGlP2f1+9ISknpI/x3oEafvHyO0k9Lq3klHZEPoPq8PwWQYzk20eqxNRGenyPJ7Ss7b4XISKYd0I+ZES0Thq8iRl7upcT04KmHYQ+CTSimp893U8mtN3U+SJB1ZsCQxDTInzQ/kxC23Udb9MT7TK9eB9PUYXKyubnElq+2/YBLeG2FBjxRfPp+pcT2q1u3Ua1vznlsu33V7+a0GZN2wF299SnxJCQ+MP+N0gU5F3fVsP6tpqkHYQ+3bPz7YSP3d7RxJv1h1Jm/Grr6oS9Tg52tfPcob0p10T+zaY1hGJRQ5v6nh4e27E9ZcafPliPNxw2tOnwB7hv/eYUCXD/B1vo8AcMbbyhMH9f9UFK8wtg+YYdNHR5EtqmlZrSjshnTDG1GV/wVVIvCy/o8t2HMFgIomoPzf4XDOqMJJJGvedRNKF/EYxqQQ72vJgyQ6Bx0PM8MS2ka6OKKFu7Xk7aCTmesbPnTUKqT9dGExrvtr2Rcgl9DY1t3R/giXbrPw8heLUp9Ts2DY2t3btoDbUb2q2oX5NyKXUNwa6eamr8zYZ2j9esS5khEBzytrCr59QjnMfr4cMbLTXjqw90s67VOIL08P4tlhrltYf8vFl/auL38Xpk93ZLDG8kwstHTk38Pl6P79qZ8hFkiOeWrNi319DmqW27LDSYiCdOP719l6HNC9v2Eoml3ipDkiSe2rQz5cenZay0I/IZU7fvMctzxNR6AqF3dcebfCt6Iy6pK6p10h54S3e82ruS2Ek1TJJnBKj2vqk7vt+zlqDqtcRQRYyd3fqvY69nO93RxNtQRhII1nes0h3f5z1EY8h4gU8kGZk3W97THT/kbWC/96ilZnyKJPNCw/u640f97WzsOGKZ8WSt/p1vc9DD240HLDbjk3j08Ebd8a5wkOdr9lhulPfgPv0ogT8a4cn9Oy0zHti5RXc8oqo8tmN7ylGdPj24datulErVNB7ZtM0SQwh4ZNN2VJ0IkhCCR9aZ24bSkyYET2zYbsmZSUtfaUfkM6Zg+AOS3y45WTaCEf293Z6w/sXLrCRshvO0BncgWeo6DBIKbSH9u5j64F5kywxoCOrvO1f7D1hmCARHfKceke3TPu9hZIt/yhoaezz6d8e7eo5Y7J0cv3Pd3nVYd3x7V+JcGDOMzR3
60YrtnckldfbPEGxs13+uuzqaLbeQ13qP/Oppf2c7wZi1mwFNCHa2t+g+15ruLnoSbKskkgCqu7t0t2davD5aff5+x5KR0TyeUJjajm6Lnzp4gmGOdnZbnCWt/vSxOCLhcJipU+OdDLdt2/ZxID+3UrWuxEYJJRkmrEbVbpLPPzlZgqimH42IaD4LWz99BI2IASOs+ix1HY4zhGFUJaj6LS/gAH4DRiAWsBQ+75Mvpr/P7ouFkiqBryevAcMbDaW8LXMiQ387zhu1trD2yReL6I55BogR0VTCav/Ohseig3DiXP2/XwPJ0HNoPKEBZAT7fx3e4AC+VzqMtKzpY3FEvvvd7zJ4cOrlp9MyLynFgmGnzIP+PHqVVZMlyOjPI0s2Uu0G/CEhMSP5fqqnSjF4PxRJsfoyALAZMGwJuiubZhj0qLFJCgNx0N+mU6UUwC4rlqMVEH+u+oyBueTZDJyygWIYcQaSoVcjY2AZ/c81kEdi9V7Hx8FIy5o+ckfk1Vdf5Y033uCee+75qFFpATalFOsfq4qi6FctdCjFlrdNQOBQ9KsvZigFSJZfh4TLpl/eOVPJszg/yChkGjCybbmW99glZHLsebrjufZs3aJi5hkS+Xb9s/75juyUTuScrEKHPqMgxeqopzAM5ikaIEaBU78irVEZ+WSU63DpFusqcqdWEfdkOWSFLHv/Nx1F7oF5HbIkke/qv0VA4QC9DoACd/+MPHdGyv14TlZh1sA937Q+1EfqiLS0tPClL32Jhx9+GLeJL1w4HMbj8Zzwk1ZyynZfgvUcEUG2+0Ld0dLM8wZg20SlJPN83fHh2WcOCGN41pm64+NyT0ezyNBQGZejX3xqav7clE/l9EmgMT1/vu747MLkKor2zxAsKNIvODavaLxhpMGMJODM0ukGjJFkJNnV91SGxPmD9au4zioaSq5Dv2+OGcmSxMVDJ+mOTyksY1CGNYdHkSQurRyvOz46v4jK3HxLwTZFkrho5Fjdbb3ynBwmlZRaWsQVSWJpVRVOnT43+e4M5g6rsHT6R5Ek5g6vIF/HEXHZbSweW4Uip86QJYmJ5aUMzksX1vwo9JE5IkIIbrnlFr7yla8wc+ZMU4+56667yM3NPfZTUVHxUT29z6yyM85FkVPv8wAKbtdiHLZhuhYFGafjVKy0uZbJdU4ny6Ffhr3ENZlc+3BS39eQyHOMoNilX7681DWCMtcYS9szufZSKjOn6o4XOwcxOnuSpWTSLFsOE3P1K58WOPKYVTDVEiNDcRlWV82xZ3Jm6XRLx17tso2lg/SvBW6bk0srZlpiyJLEpRX6r8Oh2LiuaoalxVUIwTWV+g6VIsvcNHaGpXwXVQhuGK3PkCSJWyfqj5tl3DhhqqHNzVOnWYroqUJw4xRjR/mGmVMtnf5RheDGmVMNba6bM0W3nLsZaUJww1xjRlqpK+m/+O9///tIkmT4s2/fPv74xz/i9Xr5wQ9+YHruH/zgB/T09Bz7qaurS/bpfe4lSQ7ysm4mdR9TJT/r1gQMmYqcm0jdSdAYknNjAobEuPyrSD0pVjAu78qESZyzCi+2kLAqMbPgYqQEC+ei4nNS3taQkDi96Ox4romBzh20OGWGjMzSktNxKMb5RZeUL0j52KuMzNmDZpFl0CkY4Iqhs1NmKJLM0kETE27xXF05PeWvlSJJLBo0kvLMPGPGyCkp34ErksSskiGMyjNu6nbZ6PE4bbaU/goVSWJcYTFTio1vKC4YPZocpzMlhixJDM3NZX6CG8olo6soznSn5BzKkkRxViZnjKoytJtbNZSKgtyUGBKQ43JyzsTk+lelZV5Jr1a33347e/fuNfypqqri7bffZt26dTidTmw2GyNHxtu0z5w5k5tvvrnfuZ1OJzk5OSf8pJW8CrK/jtM+AZLO45DIcV9Npkt/O6NPQ3JuJNc5PQWGTLH7XErc5ya0HJVzIYMzZiedKyIhU+6ey8gc/a2fPo3PWcTo7PlJR0UkZCrcE5i
er7+F1acJOdOZlX960gwZmSHuSpaUJmaMzxnN2aWLkpq/jzE4o5TLhyR+r8bkVHDt0CVJMxRkSl353FaVmFGZVczXRy9NniHJFDqy+Pa48xLaDnbn8sMpZ6fAkMixZ/CTqYkZhS43/zMn8Xf8ZMmSRKbNwS/nJX6vsh1Ofr34vKR9KlmScCo2fnPG+QkddafNxm/PPS/pU1kSYJNlfn9eYoZNlvnNZecjkdytjUT8tfzm0vOwJUislWWJX10Zt0vWFZEkiV9eeR5O+8Akhad1qj6ypndHjx49IcejsbGRc845h6effpo5c+YwZMiQhHOkm96lrpjaTn3b1YSj+zGbM5KVcSGDC/+MZPJUTFTtYVvLF/BGdptkSBRmLGRi8R9RZKc5hubnzYbv0BragbnbWIlBGdNYMvhX2GVziWVRLcyzdT/niH+LKYaERJlrNFcP+29cirlcgJgW48GaP7CjR78Q1okMmTLXEL4+8kdkGSSRHi9NaPzl0IO8126ulLWMzCBXMT8e/58UOPUTbk9m/OHAs7zYuNY0o8SVxz1Tv0pZRqGpxwgh+MP+N3jwiH6BteOlSDIFjkz+Puc2hmcVm3oMwJ/3vsvv96w2yYg7IQ+cfj3j8sxvS/5zzwZ+vvlt3Z7RJzMybQ4eWno1U4vMnzJ8bM92fvTeSjDJcCo27jvvcuYONr/1/dzePXznjdcRInH8UJYk7LLM3y66mEXDK00z3th3iP9c8TKaEAm3g2RJQpYkfnfZBZw9dqRpxnsHavj3x14gpmoJGZIU/1u/a9k5XDR1nGlGWnF9Irvv1tTUUFlZydatW5k6daqpx6QdEWvSNB8t3Xfi8T8F/faekQENWcqhIOerFGT/e8JthpOlaiEOdf2KJu9TaPSVbD+eE2coUiYVOTcxPO8bvUdzk2FE2NLxN/b3rEAVffUbjmfEL/OK5GJs7uVMK/o3wyO1/UkTKu+1PcLGjueJitCxOU9kgCLZmJp/HmeU3IrdpDP1IUNjZctzvN3yEiEtiIR0yraQhIQsKcwuOJ3Lym/CqbiSYggheLFxJc81vIZfDSAjnXIkNs6QmF84i1srrybTltxJACEEzzes4aGalfREff0y+u47FxZP4d9HX06eI/nkzefqNvGXA2/RHvYi97awP5khgEWlY/nBhIsodiV/jXjx6E7u2fU2TUFPv+3tZUlCCMHC0pHcOe28hFsy/em1o/v5xeZ3OOrr7pfR97vTBg3j53POoTIn+Ryvd44e4b/XvsORni4UST5le6uPMWtQOT8/fSljCsw7bH1aW3eUn616hwMdHYavY+qgMn62ZAkTS0qTZmypb+S/X3+H3c2tKLJ0Sl5H3+8mDCrhx+ecwfQhyZeF2N3Qws9ffJvt9c2GjFGlhfzg/MXMHTE0aUZaaUckrZOkqp30+JfT7X+UWKwBQQRJysBpH0Ne1q1kuy9ElpJb8E5WVPXQ7H+OBu/jhGKNaCKMLLlw2ysZkn0dpZkXosjWTitENT+HPa+zv2cF3mgDqgijSE5y7OWMyb2cqpyzscvWjhxGtBB7elaxpfNlOiP1REUYm+Qg1z6I6QXnMzH3TFyKVUaEbV3reK99Jc2heiJaGLvkIM9RwPzCM5lTuJhMm7VTF1EtyobObbzW9A5HAw2EtDB2yU6BI5clpQs4o2Q+uSYjLXqKaSrvt+/iuYY1HPTWE1LD2GQbBY5szh00mwsGz6PQOQCMtgM8UbOeXd31BNQINjkeAbloyHQur5jJoIw8SwxNCN5rOcwjhzayuaMOfyzOyHe4uWToJK6pmk5FprmIkZ6EELzfXMuD+zezvvko/lgERZLIc2RwSeV4rh89LSUH5GTGhqZ6Hty9lTX1tfiiEWRJItfh5MIRY7lhwhRG5RvnnZhhbG1q4uEd21hVXY0vEr8xyHG6OHfUKG6YPIVxxck7OSdrZ2Mzj27ezlsHjuDrLYaW5XRy5ugqrp8xhUmDrSTLx7WvqY0nNmznjd0H8Yb
CCAFZTgcLx1Ry3ZwpTKkoG5BCgZ9XfSIdkVSUdkQ+GgkhEv6BCREiEHyRUOg9NK0bSZKR5UIyXOfici1BSpA8aYahiQhdgdfpDq4mpnUDEnY5nzz3meRnnImUIHJijhGl0f8uTYE1hNVuAJxKLoPc8ynPPCNhcTZzDJUa3waO+NYSVHsQQsOlZDM0cwYjsxdik42TQM0xNA56t7Oz5wP8MQ+qUHHbshiRNZEpefNxJIjOmPvMBXu9+9jQsRFPzEtMi5FpczM6exTzCufiShCdMcvY7TnC6tbNdEU8REWMLJubsdnDWVI6i8wEyaymGT11vNa0jfaQh4gWI8vuYnzOEM4vn0GO3ToDYE93I88f3U5LyENQjZJjdzE2t4zLhk6lwGnsrJpl7Otu5ZkjO2j0xxlZdidj8oq5qmoKxQmOCPdd2hNxDnZ38NSBndT7eghEo2TZHYzKL+Sq0ZMpy8weEEZ1dxdP79lFbU83/miULIeDyrx8rho/kSE5uQPCqO/p4ekdu6nu7MIXiZDpcDA0L5crJk9keH7egDDSMq+0I5JWylLVNry+v+PzP4wQHuLJqH21NmxADEUuIyvrNrIyb0M2mYdxvKJqF82ef9Hqe7TXATmeEf9vu1xEafaNlObcik02vhj2z/BysOdxDnmeIqx2IqEcq0vS998OOY8RuVcwOvc6HIrxxbA/RbQg27tWsL3refyxjn4ZTjmLSXkXMq1gGW6Dwme6r0MLs7b9Dd5vf5XuaBsyyrHaJzIyGhpOOYPZhWeysPgicu3J31XHtBjvtK7mjZaVtIbbjs17IsPJwuIFnDvobIqcyd9Vq0Ll9aZ1PN+wmvpgC4oko4n4hk4fwyHbObN0NpcPWcLgjOTvqjWh8UrjFh6veZ/DvuaTGPFtMJukcM7gqVw/fCGVWSVJM4QQvFS/k4cOr2N3d1M/jPgWxXnlE7lt1HzG5CZ/5y6E4NW6ffxr3wa2tjecyJCk+I6hBOdVjOVL4+YyubAsaQbAm0cP8Y+dG/iguR5FktBEvJ7M8SdLzho6ki9Pms3M0vKUGKtra/jn1o2sqTuKIsXfH03EGVLvfy8aVsm/TZ/F3CGplWtYf7SOez/YxLtHauJbaScxVCE4bdhQvjB7BgurhqfESCt5pR2RtFJSJLqPtvZr0LR2SFjoS8ZuH0dx4WMoivkLeihazd6WG4mozaYYLlslY0sfwmkzvxfsjzbxbtPX8EXrSZREKyHjtg1i4eC/kGU3fyH0xzpYUfd9OsM1CY//xhn5XFbxKwqd+vVZTmV4uO/IXdQHDydkyMhkKFl8seoOyt3mEwT9sQC/P/hHDnjjLeeNODIyLsXJ7aP/k5HZ5hMEg2qYu/bcx+auvQmTNhVJxi7Z+a8JX2JKvvnjkmE1yk92LGdV6+5+c29OZtgkmbumXs/84rGmGREtxn9tfYEX6nb0mxdzMkMC7pl5BWeX6xcmO1mqpnHnptd59NDWfvNiTmTEF91fzbmQy6v0C6ydLE0I7tq4in/s3NhvrsfJDE0Ifj7/LG4YZ75wnhCC329Yx+83rDPFUIXgB6ct5EvTZpqOSggh+NeGzfzvqvdMM/7jtLn8+2lz05GPj0HJrN/p7rtpARCL1dDadplJJwRAIxrdR2v75WgGDfKOVzjWxJ7mq0w6IXFGKFbDnuariKodphihWCerGr+IP9qAmZM8Ao1ArIV3Gr5AMNZqjqF6ebr223SGj5qqQRJndPF07X/SE2kyxQirQf5++Kc0BI+YYmhoBFQffzv8X7SE6k0xIlqEX+//LQe9hxC9/yRiBNUQv9z/a2r9taYYMU3lZ7v+wdauePfgRK9EFRphLcJ/7fore3qOmGKoQuNH2x9jdeueXoYxRRUaES3Gd7Y8xMaOQ6YYmtD44ZbneLFuR/z/TTBUofGtjU/ydpN+5+TjJYTgxxtf47FDW3uZiRjx0yXfWf8iL9TsNsUA+MWGuBPSN0cihgB+tHYlj+/bbprxh14nxCw
D4K733+Vf2zabZty3cQv/u+q9pBh/eH89f3x/vWlGWh+P0o5IWgih0dZxI0J4Mecg9EklFquhs+vbJhiCg21fIap1Jc2IqM0cav+mKev1LT8gGGtLqjy8QCWsdrO2+f9hJkC4sulueqJNSTI0wlqAF+rvMMVY0fBPWkL1SRUpE2hEtQj3HfkFqkj83J44+iRH/NVJMgQxLcavD/yOiBZNaP9I7cvs7DmUVDM7gUATGj/d/XcCsWBC+4erV/N+276kCtOJ3p/vbn2Yrogvof1jRzbySv2upGp29Nl+e+NTNAUSO+vPVO/kicPbkq4LIgHfWfciRzyJnfVXqvdz7y5zR8hP1g/XvsHujpaEdqtra/hdrxOSrH6xZjWbGhsS2m2ub+Cud95NifGH99fz7pGalB6b1kejtCOSFqHwamKxQyTnIPRJJRh6lVjMuAquL7wFf2RHygxP6H0CkYOGVt3hg7SFNqXUo0ag0hneRVfY+M6yO9LIEd/alPrHCFQ6I7XUBbYa2nmiXWztWpMSQ0OjK9rGXo/xnaU/5md123spVZXV0OiJetjYabyghdQwLzakyhD4YkHebt1kaBfVYjxRsyalQqkCQUiN8mK98XulCY37Dr2fAiHujMSExpM1xq9DCME/9qxLqYJp3KkSPHIgcTTh7zs3pFzeXgIe2L0lod0/t25MuXeMLEncZyIqct/GLZYq19630XzkJa2PXmlHJC18/vtJvkLq8ZLx+R82tGjxPmyRodDqe8TQ4rDnKUtdgSUUDvU8aWizq/slS12BJRS2dz1vaLOh8y1SL20fz0l5v/1VQ5s17WtNRU30GRIrW94ytFnduoWQFrbAgBcaVhtGkFa37qE7GkiZIRA8fXSdYVn5Na2HaQ6m3oBTE4InqjcR0WK6Npva6jnk6Uj5U1eFYPmR7QRiEV2b3R0tbGtrSrl3jCoEKw7vpicc0rWp6e5iTd3RlHvHqELw+pFDtPj0o1QtXh8rDx5OuXeMKgRrao5S29Wd0uPTGnilHZHPuVS1lVDoTVKLVBybpfeUTf8XhpjmoTPwsmVGm+8pNNH/hVbVwtR4X7LUsVegctT3OlHN3/+40NjZ/bKlbroClWrfWgKxLl2b9e1vWOh/E9+iOezbRWdEP+fl7dZ3LDIE1f4a6gP6+SivNq2x1FBQAA3BVvZ5a3RtnqvbYKm5HEBruIdNBrkiT9VsttQdFqAnGmRV0wHd8eWHt1lq9gcQjEV55ah+PsryAzstM2KaxorDe3THn9qzy/J7BfDMPv3I5LO79PlmpUgST+3YZXmetAZGaUfkc66YWoeVu+8+CdGNEP3fxURizQj07wbNShNBYmpnv2MhtRNNpH733SdBTDdpNaz5iOg4KckxBJ5o/3vtqojhMXBSklFnWH8/vy3cPiCM1nCb7lhjsN2Ss9OnpqD+c631tyWVf6Kn+kD/3yuAam+7pe6wEF/46gL6n+sRT0fKzf76ZJNk6nz6jOqeTusMWeaoR59R09ONhSa3QPwYdG1Pt+54bVc3Vn0dARztNpdkn9ZHr7Qj8jmXGICFtU+ajiOiiYFjqDpzxUTq4flT5tL6nyuiJU6cNKuozlxhVT/snazCWv9zqUK1tC1zvIIGzzes6W8TDBQjpFpnyJJEQNV3Yo22O8xKQiIQ02f4ogPBMJ7HG7HOEMKY4Y9ELDufGgK/wXMNRKNYLTqhCXGsYmta//dKOyKfc0kWS6IfL1nqv/CYLA0cQ5H6ryZpl5IvrKYnvTLxjhSKt+lJb65ke8sYyan0X0FUkRRsSfb70VOGDgPApRhXlDUrt8F7kmFLrt9Pf9KEINOmz8i0W2cIBG6D55o9IAzIsuu/5zkO6wxJMmZkORwpJ8P2SZYkMh36DLfdPiCMbKf19yOtgVHaEfmcy6YMYyC+BrJciKTjcDhsZUgk14SuX4aUiU3pv3Ko01aIIlnrZQMgYyfD1n+BNqeciSuFKq8nS0Imx95/xU1FspFnt9YPpE9
FDv2qnqXO5KuK9qdBLv3GZkMySi3liPSp3K3/XCszSyzniAAMdeu/5yOziy3nPahCMDxLv/vwqNwi6/kbQqMyW58xMq9gQHJEqnL1q/dW5RdY/jQ0IajKN2AU5qeccNsnCagssNY/KK2BU9oR+ZxLUYrIcJ2L1VMzWZk361YrtMnZFGZebJGhUJJ1jW5vGEVyUJlzseVTM8OyL8CmE62QJJlJ+RdZPjUzMvt0Mmz6JeXnFZ5taQGXkBmdPZU8h/7iuqT0jJTnh/g+/sisEQzO0C8vfv7gBZbC9BISw9xljMrS7356WcUcSzkiElDmymd6gX412iuHz7CcI1LgzGRh6Sjd8WtGTrWcv5Flc3BuxRh9xpgplhkOxcbFI/QrxV41fuKAOAnLxk7QHb984vgBcXaunDzR4ixpDZTSjkhaZGXdirUTLZCZeb3heGn2jRYZKiXZ1xlajMi5wvKpmRG5VxjaTMy7wPKpmcn5FxvazCpYYvG0icb8onMNbU4rmoc9QcM/I2kIlpaeaWhzevE0w22VRBIILipfaFiOe0HxWAoc1joVXzFsHrJBpGBucSXl7ryUPxEZiWsrZ2GX9Z3kKYWDGZNbnDJDkSSuGTkNl03/Mx2dX8TMkvKUtzUUSWLZyAmGWzxDcnJZOGx4yhEkRZI4f+Roitz626BFmZmcO2aUJcbCquGU56bbhnxSlHZE0sLpOA27bTypRSxkMjIuxqYY94LJck4hyzkjRYZCnusMMuxVhlY5jipKM+alFBWRUChyTSPfOc6YYS9lVPailKIiEgrFzhGUZ0w2tMuy5zKjYHFKzoiMTJGjjLHZUw3tMpQMlpSkzihw5DMzf7qhnUO2c0n54qTnjzMkcuyZLC6ZaWhnkxWuH356yoxMm4sLBxu/DlmS+eKoBSnFXWQkHIqNK4cZMyRJ4isT5qfEkHqf4w2jjRkAX50yx1LE4pbxiRlfnjYr5QiSJgS3TZ2R0O62WTMs1UP54mzj71VaH6/SjkhaSJJEUeGDyHIeyTkKCnbbGAry7jZlPar4rziUkqQZLlsFI4p+a8p6bun/kGkbnJQzIqHgUoqYV/orU/ZLB91OgWNYUs6IhEKGksNFQ35uquHWpeW3MSRjRFIMGRmnksEXqn6ILCV+/VdVXMGY7NFJOSMyMnbZzu2jv4VdThxRuXbYOczMH58kQ8Im2/jZxK+SoSROKLx2+ALOLJ2UFENCQpZk7pl+M7mOxMnUVw2fweVDpyXltkm9//rjnKspyUh8933J8AncOmZWEgSOPZ8/nHYpQ7MS5zwsHTqS/5g6LylGn+5ZeD5jChJ3RZ5fMZTvn7YwJcZ/L17K1EGJuwlPGTyIn569JCXG9xafzrxhqXX6TeujUdoRSQsAm20IJcUvoChlJP5aSICEwz6V4qKnkWVzoXGHUsz4QU/isg01zXDbRzNu0JPYFP2cihMZuSwuv5ccRxWYWjYksuwVLBlyHy6bfoLciQw3y4beQ4lr1LHnaUyQyLYXc+Ww35FtN9fe3i47+eKIO6jM7OsOm4ghk2XL42sj/5tCp7nW8zbZxrdG/weTcicee55GkpHJtGXyo3HfY4jbXFt4RVL44fjbmFc42TTDbXPxi8nfYFS2fm7ICY+RZO6cfBXnDZ7aO4cxQ5FkXIqd38+4lan5w00xJEnizqkXctXwmaYZDtnGn+dcy2kl5jsV/2j6Ur48bm7vHIkZiiTzxwWXcY5BbsjJ+vb0BXxr2mkmGRKyJPGbhedz+Uj9vI2T9eVpM/lBrzNihiEBP1+8lOsnTTHNuG7aFH569hIkM4zecvDfX3w6X5ydOOKS1scrSZjpwPV/pGTaCKc1MFK1Lny++/H5H0DT2gAb8S62fQtuDJutiqzML5CVeR2SlHwOQEzz0Op9jGbvA0TVFiRsx+VdyEAMp1JBac7NlGRdhyInfxompgU57HmaQz3LCcSakFCOJU7G28SrZCiljMy9mhG5y7CbdKZOZETY1f0y27u
eozvagHwSQ0PFrRQwJf8SJudfjEtJ/sRNTIuyqWsVa9pepjV8PEMgIaOhkqnkMK/obOYXnUuWQRKsnlSh8n77Wt5ofpO6YD0K8rH+JbIkowqVTMXNGSWLOav0TPIceUkzNKGxqnUzLzau5oD3KIokI0QfQ0IVGm7Fxbll87l48CKKXcmfaBBC8FbLTp6sXcuO7tr4CRERz2fpY7gUBxeXz+TqYadR7jbneJ7MeLt5Pw8fXs+G9ppjp1CE+JDhlG1cOmwqN42Yy/Cs1E5ArWo8zP37N7Km6UhvToeEJkRvO3sNu6xwaeVEbhszm9F55pzbk7WmsZb7dm3i7brDx/JGNBFf1DUhkGWJi6vG8YUJM5lYpH86ykgbGuq5b9tmVlYfBuJ/3WofA4EEnD9yNLdNnWEqEtKftjU28cCmLby67yACjn0OsiQdqzeydNQIbp05jVkVQ1JipJW8klm/045IWv1KiBjB0BuEw++had2AjCwXkJFxHk7HPMPtBSE0ECGQMhLYqXQHV9MTWkVM7QEkbEoe+RlLyXHNRzJIIBRCQxMhZMmV0K4l+AFNgfeIqD0IBE45j0Hu+Qxyz0My2MIQQqCKIEpChqAhsJ3DvrUE1R4EGi45h6GZ06nMmmu4TSKEICZC2CRnQkZtYD87u9fjVz29C3cmI7ImMiF3FopBXRAhBFERxibZEz6XI/5qNnRuxBP1oooYblsmY7JGMbNghuFWTJwR6WUYR7sO++pY3bqF7oiXqIiRactgbPZwTi+ehtOg9ogQgogWxSYrKAm2ng57m3m9aRvtYS9hLUq2LYNxuUM4e9AUMmzGjLAWxSYp2AySSyFecfWFuu20hLyE1CjZdhfjcgdx4ZBJZNn1HfQ4I4YiyYYJrAC13i6erd5JY8BDMBYhx+5iTF4xl1ZOJNdh7KCH1RgSEg7FmFHv7eGZQ7up8/YQiEXItjsZmVfIslETKHAZ184JqbHePBhjRrPPyzN793DU040vEiHL4aAyL59l4yZQ7DbeGgur8arMTsW49k2738+zu/ZQ3dmNLxwm0+FgaH4el08cz6BsawnNaSWvtCOS1scuTW0jFHicsP8RNK2JeHklGVkZhivzZpzuK5Hl5O/Wj1dM7aTTv5wO7yNEjpWml3AoFRRm30hB5tXYFGu1AaJqD/W+F6j1PE4g9iHDpZQyNOcqKrKX4VT0azWYUUT1c8j7Onu6n6UnUtcbDZJwK4WMzbuYMbkXkWmzVkskooXY2f0uGzpepjV89FjEKcuWx7T8s5iRfw55jtTupD9kRNjUuZ5VbSupDxxFO8bIZl7h6SwsPpNii/VKolqUte1beaVpNYd8xzPcnFE8h3PKTqc8I7W79T7FNJX32nbxTN377PbUHjvimmVzcdag6VxaPo/KLHPbXUaMd1v383j1B2zprCHWy8i0OTl38CSuGjabsbmpRQT6pGoa7zYf5uFDm1jXWkNUi58gc9vsnDtkHDeMnMnkAuOk8kTShOD9phoe3LeF9xqrCatxRoZi4+yho7lp7DSmF5ebyoPSkxCC9Y11PLRzG2/XHjnBEVkyrIqbJk1l7uAKS4y0PnqlHZG0PjYJzYev5w4iwRXEF+2Tj7b2XSwcuDJvxJ3zQyQpuYqbmhakoetOuvxP9fas6e8rKyFhIz/zasrz/ws5ye0cVUTY1/Fr6rxPoh3ri3MyR0ZCYnDWhUwo/JFuvRHd1yFibGq/l93dT6OKaL+voy85tTJrMaeVfgdnkts5mtBY3bqcte0riIow8ff/RI6EjEAwNns2F5V/ncwkt3OEELze8hKvNb1ISAv2bnWdyJCR0dCYmDOFG4Z9gTxHcg6iEIKXmlbxZN0r+GIBZKRT6oX0MSbljuYbI2+gxJW8g/hiwwf84/Cr9ET9/TIUSUYVGpNzK/ne+CupcCfvvL1Uv43f7n2d9rAPuXfboz/GpLwh/GTypYzKSd6xeq1+Lz/f+gbNQW/v9k3/jPF5pfxi5oVMLEje6Xm
7/jA/+WAldb4eHUb8d6Pzirhr3jnMKEl+G+T9+lruWP0m1T1dhozK3Hz+Z9FZzB9iLo8orY9faUckrY9FmtqOp+Ma1NhBzNUIkbA55pFT8ACSyUVc1Xo43Ho9wchOTnVy+pOM2zGFqpJHUGRz35mYFmBj81fpCm/BXANAmWz7KOaU/ROHyQhMTAvzZuMPqQ9sNMWIV18dwvkVvzcdHVFFjKfr7mGvZ50pewmZXHsRN1f+nHyHucVPExoP1dzL+s41puxlZLLtOXxr9A8Z5DK3+Akh+MeR5bzW/J5pRpbNzU8n/gfDM80l0Qoh+NvhV3i8dpVphtvm4J6pX2J8rvnF7+8H3uEvB942x5AknLKNP8++iRmFw00z7tv/Ab/YvtIcAwmbLPO3BVexcNAI04zHDmzjR+teBxJ/e2UkZFniz4su4Zyho00znjuwh2+/9eqx3CEjScQTiH9z5nlcOlq/wFpa/3dKZv1On5pJKyUJLYin86YknBAAQSyyHm/XVxEmmq5pIkx1220EI7sw54QAaAQiO6hu+yKaSNzkSxMxtrR+m67wVsx3IdbwRQ+xseVrqDqN5Y6XEBqrmv/btBMC8aJknmgDr9XfTkSnCd+JDMGLDX9mr2e9qfk/ZLTzcM1PCMS8ph7zVP2jpp0QAA0Nb9TD7w78L56ouW6njx19ybQT0sfwxQL8ZNcfaAvrd9E9gVG7yrQT0scIxMJ8Z9u91AX0Ow4fr+U1H5h2QiC+7RFWY3x9w0Mc9Oh3Tj5eK2p2mHZCIJ64G9VUvrLmSXZ2Npp6zGu1+/nRutd7E5jNMVRN4+urnmdDS50pxqqj1Xz7rVfRhDBVjVcQf7++/darrD5abYqR1idXaUckrZQU8v8LNbqL5KulakTDbxEJPpfQstP3OP7wxhQYKv7wOjp9TyS0bPS9THtwDeYdnbgEKj3hXdR4HktoW+N7jxrfasw7Oh8yuiM17OhMzDji38627reTZmhodEVaWN22PDHDd4h3Wt9Iav4+hifazXMNTya0rfU38nT9aykxfLEA91c/k9C2MdjJPw6/kgJDEFQj/G7/cwlt20Nefrk7NUZYi/GzHc8ntPVEQtyxKXmGIN6X5v9teIFEAfFgLMp33k+NoQnBt957KWHhsaiqcvtbryR8Lv1yhODbb71CVB2YbtJp/d8o7YiklbSEUAn67yfZxftDyb2PN2II2rz3pTg/gES79/6EF7caz6Ok/mcgqPU8ljC6s6f7mZT70wg09vY815tToq8NHS8jW2Bs7VpJJEF0Z1XbypQZGhobOtfij/kN7V5rftcS44OO7XRGjCMvzzesS7mEvio0NnYeoCHQbmj3bN3mlBZWiC/gO7rrOOBpNmbU7CCixQxtjBiHPO1s7ag3tHuxei++aCSliq8agga/h/cajSMWb1QfoiMYTIkhgI5gkJU1h1J4dFqfFKUdkbSSVjT8NkIzFzruXxpqdBux6C5dC394PZFYNcne4X8oQTh2CH94g65Fd3gXnsgeUneoIKQ20xZ8X58RqaUpuNVSf5qw2kOtT3+roifSxn7vxmMnSlJRRAuxq1uf4Yt52dT5gSWGKlTWd+gzArEgb7eut8QQwJsta3XHw2qUFxvWW2qUJyPzQoP+FlhMU3mi5gNLDEWSebJW/7srhODBg/rj5hgSjxzabMi4f+8mSw3mFEnioX1bDG0e2Lkl5f43fYwHdmxN+fFp/d8r7YiklbTCwZew1kkXQCESfFl3tDvwEvFialZkoyegz2j2r7TUrRfipdub/PrbFdXe1Za69cYZMke8+rkGe73rLXcjBYldPfpOwo7urWgWGyMKBBs79RNpt3XvJaIZR37MMN5t26g7vr37CL5Y4rweI2lovNmyTXd8V3cDHWGfJYYqNF5t2Kk7vr+nlTp/twVXJ95z5dW6PbpbJ/W+HvZ2tVlmvF1/mFCs/8+1MxhgY1ODpf43qhBsaKqnM5g4lyqtT6bSjkhaSUtobVjt1gsSmta
hOxpTOwaAoREzYERU/TGzEqhEVP0EyZDaZdkREWgEY/oMf6zHMgMEvliX7qg35kl5y+R4eWL62yY9UWuLt5l5uiIDxdDfYuocIIYvFjpW0+RkdYSNt7jMKio0/NFwv2PtoYFZ2AXQFQ72O9YxgM5DZ7B/RlqffKUdkbSSlhCp7UufNAsYzKNfLyQ5htFzFaiWCQCaQf6GZuJ0kBkZ5YgMHEP/vVIHjKE/jyrUlHM3zDNS3/YxO4+qDQzDaK6YNnBVF6I6r2Wg3iuA2MfxOgbwfU/r41XaEUkraclyPuYayhlJQjKotKrIuQzE9o9iwLDLOQOw8Mk4DBryOZVsU8cRE8ml5BmMZQ4II0PRP+vvVtwDwnAr+uW8M20Dw8hU9IvZZdmS743Un9wGXYGz7cn3RupPdlnBoVPWPNcxMK8DIEenHP2AMpw6DGfi7spmNZBzpfXxKu2IpJW0bM65AzBLDLtjju5olnMu1rdmYmS69BkFrpm9kRcr0ihwzdQdHZQxFTEA21hl7qm6o8MzJ1lKho0TZKqyJuuOj84eZ9lJkJEZlzNRd3x8jvkCW0aMKXljdccn5A5L2Dk3kRRJZnq+fkfd8XmDE/aQScyQmFkwXHd8TG4JmQY9c8xIliSmFpRjk/tfBobn5FPgTK568MmSgFG5heTY+3cSBmVlU55lvVhleXYOg7KSbyqZ1idDaUckraTlzFgGWLsISnIJdtdS3fE894XIkrULiyLlkOe+QHe8xH0GDjn5DqwnMlyUZ12kO17unkG2rQwrESQZhTG5+q9jSMZoSpzDLDFAMKPgHN3RwRlDGJE5ylIuiobGwuIzdcdLXUVMyxtvKRdFQ+O8soW644XOHBaVTDrWNTcVqUJjWcVpuuM59gwuLJ9qkSG4tnKe7niGzc5VVdNQLJw20YTg5tGzdMftssINY6daOtEigFvHz9TtCyNLEjdPmmbpmysBt0yaZul5pvV/q7QjklbSkuUcnO4rSX3rRMaVeSuSQcdYWc6gMOs6CwyFwuwbkSX98LIs2RiWcy2p/hlIKAzJugybrL/dIEky4/OXpTR/H6Mq+0zDrRlJkphTeCGp5tRIyIzOnk2u3biU/BklZ6cceZGQGJM9LmGZ9wvKFqV8fFdCYkTmUEZkGZdgv3zIaSnnP0hAhbuIKXlVhnZXD59tKceixJXDgpJRhjbXjZh+Si+WZJTnyODscv3oEcC1o6ZYStVy2+xcUmlcgv3KcRN1ozJmZJNlrhirH2lL65OvtCOSVkrKyPoakpRJ8l8hBVkuxZV5Q0LL4uwvppgromCT8ynKvjWh5bCca3AqRSkc45VR5Ewqc29JaDkm50Ky7WUpMCQUyc7UwpsSWk7OW0SxsyKFiIWEIiksLrkmoeW0/JkMdVemFLGQkLlk8JUmGOMZnzMy5ajITcMvSWgzOa+SuYVjU9qiEcBXR16YsPPruNzBnFM2MeUcpG+POydhRKUyu5Crq1KPJnxn0hk4dXJQ+lSWmcMXxutvPSbSt6cuINNuHD3Nd2Xw9Rmpb/d+fcZc8l0Dk5eT1v+N0o5IWilJsQ0lu+BB4ls0ZhdYBUnKJqfw8d6EV2PZbYOoKnmkN6phniFLGVSVPIJdSdzIzaHkM3vQvShypmlHQUJBlhzMKv0LbnviJmsOJZPzyn+LU8lJgiEjSzbOLv9f8hyJm6zZZSc3DL+TbFu+aWdEilO4suJ7lGUY3+EDKJKNb4y8nUJnkWlHQer957bKr1KVZXyHDyBLMj8c92+Uu0uTYMT19ZHXM9kgP+SYvSRx58QbGJVdnrQz8s3Rl7CgeIIp2/+eejnTCoYmzfiPsUs5r1w/X+d43Tn9XE4fNCJpZ+QrY+dzzYjppmy/P2Mx5w0dnTTj5rHT+cJ4/a2f4/XNmfNYNsbc+3q8rhgzgW/O1N/CSuvTobQjklbKsjtnkVu0Aknua7+u93WKL76yMpTc4pdR7PqJfifL7ZjEqNLnj3Mq9Bby+O/tShmjBr1
AhsP8RS3bMYLTBj9Bhi3uVOgv5PHfO5QC5pc9Qr5rqmlGjmMwlw6995hToc+IX+6dSg4XDvkjg90zTDNy7UV8acSvGeSqNMeQ3dxU+TPG5JhbLABy7Ll8b+ydVGbGE0sTOQsO2cnXR97OzAL9pOGTlWlzc9ek25mUO9qQIfX+2GU73x37Jc4sNb8guW1O/jDjq8wrGgdgGH2QkLBLCneMv4ZlFQtMM5yKnb/NuZmzB09MyJCRsEkyP550MV8Yucg0wy4r/H3BVSyrnNLL0HcXZCRkSeKHU87iO5OXmGYossyfFl3CzWNnJGZI8RjQ7VNP587ZSxNGjvokSRJ3LzmXr06bjZSAofQyvjptNr9acq5pRlqfXEki1YYIH4OSaSOc1v+dhAgTCb5C0H8/avTUcs525+m4Mm/F7jwTSUot50OIKD3B12n33t9v2fZM51yKsm8lN+MsJMmeEkMTMdoC71LjeZSO0AenjOc5pzA853pKM5eiSKkl62pCpcG/gd3dz1AfOJVR5BzDhPwrqMw6A5uc2nFETWhU+3ewoeNl9ntP7fhb4hzG3MKLmJh3Og45tSOaQggO+vaxqvVNtnZvOiV3ZJCrjDNKzmZOwWm4DI7TJmLs91bzatNq3u/YckrOxSBXEReULeaMkjlk2lI/3bHXU8eKurW82bKV2Ek1SEpd+SwbchrnDZ5Jrl0/FyiR9vU08WTtBl6s33ZKf5gSVw7XDJ/DpRXTKXRmpcw40NPGY4c380z1doLqiXVnCp1ubhg5k6uqplGakXoS+JGeTh49sJUnDuzAHzuxu3W+M4Mbxkzl2tFTGZyZ+vX6aE83j+7ZzuO7d+CJnFhsLcfh5NoJk7l+/BSG5ualzEjro1cy63faEUlrQKXGDqPGjiKEH0nKRrFVodgqdO2F5kGLbEZoXcRrixQgO2YhyfoLSzhaTThWiyZ8yFIWTttwnPbhuvaa5icY2YDaWwFVkfNxOWehyPoX5EC0Hn+0lpjwYZMyybCVk+Wo1H/dWpCe8CaiahegYZNzyXFOx25QY8QXbaE7UktU82OTM8iylZLvNGKEaQluI6x2o6HilHMoyphomMjqiXbQFq4jpPqxy05y7UWUOIfp3kWqIkp9YDf+WDeaiOFSsijLGEOmTX8rrSfaTWOwnoAawCE7yLPnMyRjqAFDpdq3D0+sh5iI4lYyqXBXkWvXP8HUE/VS62/AFwtgl+0UOHKpyqzQZWhC44D3MF2RbiIiSqbiZnhmBUXOwn7t4+9VgIPeBryxIHbJRoEzmzHZ5cg6kQxNaOz11NIe7iakRsmyZVCVNZiyDH2GNxpiT08j3mgQm6xQ4MhkQl65brRECMGunnqagn0MJyOzSxmaqZ9Y7IuG2dXVRE8khCLJFDgzmFSgf6RYCMGu7ibq/d0E1QhZNicjsosYkVOsywjGomxvb6I7HEKWJPKdGUwpKsOh6DP2drVS4+0iEIuQaXdQlVPImDx9RigWY3trE92heEn+PJeLKSVluGxWWz+k9XEo7Yik9YmXFt1DLPAoauAZ4KQS01IGSsbV2DJvQLalXlsiHD1Aj+9BPIEnEOLE8s+S5CLHfRW5WTfjtI9LmRGIVtPofZxm71Oo4sSy2xIOSrMuZnD29WQ7k9//7pM32sjBnhUc7HmOiOY9YUzGxvDssxidu4wi14SUw9SeaBvbu15lW9fLBFXPSa9DZkzOAqblX8QQd+qMnmgXH3S8zZr2N/CeVOpdQmJi7kxOLzqHkVlWGB5Wtb7P683v0BXtPoUxNW8iZw86g8m543UdjETyRP283ryB5+vfoyV8aln8GfljuKR8AbMLx6d8hNcbDfFyw1aeqF1LXeDU8v4zCiq5Ztg8FpaMxZZizRJfNMyLdTt5+PAGDntP7SY8rWAIN46YzVnl43CkyAjGorxQs4cH9m1iX3frKeOTC8u4ecwMLhg2LmHibFqfLn1iHJGXX36Zn/3sZ+zYsQOXy8WiRYt47rnnTD8+7Yh
89iRElGjPf6EGnyCe16FX7Cs+Zsv8IrbsHyAlcUEXQqW952d0++41xcjJvJGSvP8xPE58KkNQ0/07jvb81ZAhoSBQKcm8hDFF/4OcxJaOEIJdXQ+xreNvSEi6R2f7GEMzF3PaoDuxJbndsrnjed5u+TuYYFRmzuDiIT/EqSS3FbK+422eqvsnovef/iQjo6FRlTmOL1TejtuW3DbF++0b+NvhB1CFmpAxInM43x377+TYk9umWNu+k//Z8zBRLZaQUZlZxi8m/xtFTv2oWH9a13aQ7259jKAa3/rojyIjoSEYmlnEn2fewmB34uTv47WxvZavrluONxpC0mNIEpoQDHbn8q/Trqcq2/h498na3t7Ibe88SWc4qM/ofR0lGVk8sORqxuWXJMVI65OrT4Qj8swzz/ClL32JX/ziFyxZsoRYLMauXbu46qqrTM+RdkQ+WxIiRqTrK2jht0mmOIHsuhRH3q9NOSNCaDR3/ju+4IoknplEpuscygrvNZXDIoTgQMePafY9mRQjzzWPSaX3IpvMYdnc9gf2dD+WBEGmyDWRpeV/MO2MvN/2KO+3PZIUo9hVyXXD78Yhm8v9eKf1JV5oNM+QkSlyDuKbo35m2hl5s2U1/6p+NClGobOAn034PnkOc9eWN5s38ct9j+ouqqcwJJl8exZ/nP4til15phhvN+/me1sfB0AzQVEkmWybiwfmfYWKTP0toeO1puUw/7b2cTQhTDIk3IqDxxffyqgcc47ChtY6bnrzcaJCM9VZV5EknIqNJ866nkmFxrVm0vp0KJn1+yM5NROLxfjmN7/J3XffzVe+8hVGjx7N+PHjk3JC0vrsKer5n6SdEAAt9Bwx3x9M2XZ67knSCQEQ+EOv097z36as6zz/TNIJiTO6Q+s42HGnKev93c8k5YTECRptoV2sbTH3OnZ3v5WUE/Iho5rn636BmXuYHd0bknJCIF4dtT3czL+q70EzURRsR/du7qtO7r3S0OgId/Kr/X8gpiUu87+r+wh374szzH57NaHRHfHx/R1/I6xGEtrv7Wngh9uWIzDnIEC8yqs3FuJrG+/HFw0ltD/kaePr65ejCi0JhiCgRrh1zSN0hRN3y63zdfOFd5407YT0MUJqjJvfXk5LwJv4AWl9pvSROCJbtmyhoaEBWZaZNm0aZWVlnHfeeezatcvwceFwGI/Hc8JPWp8NCbUZNfAQqZZpjPn+itD0W8gDqGonnd4/pzQ/CLp9/yKmthgzND+13X9KmdHse5pg9GgCRoRtHX9LkaFR63uLrvBBYyuhsrr1vpQIAo1q/yYag3uN7YTgxabkHIQ+aWgc8e/jgHdnQtsn6pJ1PD9kVPuPsrlre0LbB2peTYmhonE00MKq1m0Jbe899DaaMOseHMcQGk3BLl5q2JrQ9u/71xDVUmEIOkJ+lldvTmj7jz0fEIxFTTshfdKEwBMJ8eD+xIy0Plv6SByRI0eOAHDnnXdyxx138NJLL5Gfn8/ixYvp7Dw18apPd911F7m5ucd+Kir0T1uk9elSLPC4xRmiqMFnDC08geVgqYmdoMdvvHC2+F9EOynxNTnJNHqfMLQ46nvnlKTUZCShcKDnWUObI76N+GL6f4uJGTJbOl80tDns20N7uDllhozMe+2vJ2DUUO0/mnJDPhmJ15rfNrQ56m9he/ch0xGEkyUhsaLhXUOb5mA377buR7VQT/2J2rWGUarOcIBX6nenXHpeQ/DIkY2Gj/dFwzx9eEfKpedVIXj04BbCqtVmlGl9mpSUI/L9738fSZIMf/bt24emxb+oP/rRj1i2bBkzZszg/vvvR5IknnrqKd35f/CDH9DT03Psp66uztqrS+sTISGixPyPgMUOsTH/A7oXWiE0un33YakxBho9vgcQQv8i2OB5CGvN5VSavE+gaWFdi33dT1lqLidQOex5mYjq17XZ0vmiRYbGfs97BGLdujbvtb9huYHdXs9WuiKnnujo05stqy0yBPu8B2kINun
avNS41hJDIDjsa2C/Rz8S9mzdRostC6Eu0MnGziO6Ns/UbrXUmwagLeRjVZN+tO256t2WnQhPJMyrR/dZmiOtT5eS+uu6/fbb2bt3r+FPVVUVZWXxZKPx4z9sduR0OqmqquLoUf0/RqfTSU5Ozgk/aX36JdQ6EKnffffOEp9H638eVWsjpjZYZBjPo2p+AtFDWHN2QBU+ArH+FwwhNNrDu1NuLvchI0J3RH/BqA9YZ2ioNAf1GUd8e1NuYNcngaDGr8/Y49lvmQFwwHtYd2xH9yHLDAmJ3T3VuuNbO2tSjrj0SZFktnfV6o5vaa9LOXLUJ5sks7lD/xq+qbXechdcmySzqbXe0hxpfbqU1MHt4uJiiov1C9D0acaMGTidTvbv38+CBfGSyNFolJqaGoYNG5baM03r0ytt4HJ9hOhB4tTTAdoAMlSth/7OtcQsbJecMpfa//ONagGsOjp9Cqv9P19NqMSEfkQmGYU0n8GYlS2sDxU0iOwEVOsMGRl/TD8J0xsbAIYk4TOYpyeaOAk0kSTi9Uf01B2xzgDwRvW/O55IyHLURSOeK5LW50cfSQWZnJwcvvKVr/CTn/yEiooKhg0bxt133w3AlVcm7sCZ1mdMKZZD7186cyVRAySR9ErEJ1NnJFWGPIAMRe91xLuOWI6IxBn6z1eW5AHxqYwYSootA46XQGAzmMdoLBkZFR7Tq3qarIzmscsD890yZCiK6ePNepJ650nr86OPrJTd3Xffjc1m48YbbyQYDDJnzhzefvtt8vOTK7yT1qdfHzbFszwTkk7XXkUuBMuXwL65+i/cZJNzMS6QZl4OpX+GIjlRJCfqAEQsXLb+S6ZLkoRLySaoGp9CMiO3ov/3nGXLpTNyajXNZJVt1y8IlmvPoSdqLRomEOTa9beBC505NIXaLX2zVKGRZ1A8rdiZwwGaLW3PqEJQ4NCvu1KckYUiSZYiFgJBgVO/mF2RKxNFkomlmBAbl0ShM/W+Pml9+vSRdd+12+3cc889tLS04PF4WLlyJRMmpF7mOq1PrySlFMk+DWtfNwXZuRhJ7v8Cpcg5uJ0L0e/Oa0YyLsccbEr/24+yZKfYfZZlRqZ9LC6d/juSJDE8+2wkSwyJLFs5+Y5Ruhbjc8+wlKwKkKnkU+7WL48/M3+BZYZLdjMqS/+6saBoDpKlNE9wyHam5E3UHT+jZLpl91aRZOYX6TPOLptsOUcEYMkg/ffq/PIJlrdNVCE4f4g+48Jh4yw6IXGn7aLhqbddSOvTp4/MEUkrreNly7wVa6dmVGzumw0tcrNuw1q0QiMv6zZDi8E5N1hmlOfcZNhLZUzuMoTFqMvYvKsMGVPzL7C0NSMhMa3gImSDbYt5hWdiJUIlIzO/6Ezssv7W3qLi+Sn3jOljLCyej9umXyX2zNIZOOXUOjpD3AlZXDKNPINoxdJBE8m2pdYJuY+xsGQMZRl5ujaLy0ZR4kq9u68sScwuGmZY6n1u6VCGZeen7BrKSEwsKE1XV/2cKe2IpPWxSHGdA1I+qR19lUEuR3aebmiV6VqCTSkjta+1jCIXkpVxrqFVrnMWGbbKFBkSipRFSeYFhlaFrrEUOselGE2QUCQ7I3LON2Y4hzDUPcVCxEJmcv45hhZ5jkIm5s5M+eirQDCvcKmhTY49m/mFs1JmaGicVbrI0MZtc3Fu2ZyUGarQuHjwAkMbh2LjiqFzkFNcwlWhcdWweYY2iiRzw4jZKTM0IbhxxGxDG0mSuHXszJTmh3ii6i1jUn98Wp9OpR2RtD4WSZIDR/7vSN4RkQAZR/7vEvaakSSFQQV/Ib51kgxHAiQGFfxFN4n0Q4bEuOJfI2NLkhHX2OK7UUz0aJlf+mMUyZkCQzCv9A4cSuJmbucM/g+csjslZ+Tssq+TpZODcrwuL7+FTFt2Sov4xYNvoMhZmtDu+mFXkO/IS4mxbMhFDHUPSWh38/DzKMsoTKmb7hVDFjM+d3hCu1tHLKIquyRphgRcXjGLOYW
JO1XfPHIOk/IHoyR5xFZC4oIhEzhr8NiEtteOnMbc0mFJM2RJ4szykVxaqb+FldZnU2lHJK2PTYpzIfa83xN3FMx89WTAjiP/bygOc3dJGc45lBX+g3getllG3IFxu4wjLn3Kdk5kYunfkSWnSUbcmRpTdBdF7jNNMfKcVZxZ/ltsUoZJRyF+0Z9d/B0qs882xch3DOaqYb9I2hlZVHIbU/LPM2Wb5yjkqyPuwG3LSspROLv0chYVG0d1+pRrz+FH475FniM3Kcb5g5ayrPxCU7bZdje/nPIVSp35STHOK5vLl0ZcZMo20+bkTzNvYVhmUVJRi7PLJvO98RcZbsX1yaXY+fv8axmTU5oU44yyUfzvjEtMMRyKwt8XLWNq0WDTDAmYVzqMP55+KYqcXpY+b/rIuu8OhNLddz+bUsPriXp+iojto/9TKPHfSfYpOHJ+iuyYkjQjFN5Ca/cPCUd3GDIc9gmU5P2cDOecpBm+8B4Odv4UT3grEsopeR19v3PbRzKi4EcUZJyWNKMnUs0HrXfTEtxiyMi2VzCj6D+oyDLnTB2vrkgjK5v+TI1/S7/HemUUNFRy7KUsLv0CY3NSYbTzdP197PHoMWQ0NHLtBVxQdg2zChYmzeiOeLiv+lE2dW1D4tTutX2MPHsOVwy5mDNLk2d4on7+dPBZVrdtQwhxSoGwvrb2ufZMrht2FpeVLzS1eB8vbzTEr/e+zCuN29AMGNk2FzdXLeTmqtOTzpMJxCL8cudKnq3dRlSLf6eOp0hICASZNge3jpzL18YtTDpSE1Zj/HLrOzx2cBuR3mqrJzLi/59hs3PzmBl8e8rCATvGnNb/vZJZv9OOSFr/JxJCIKLbiPofjnfkFb2FsaQcFNc52DJvQLZbP2UViuygx/cgvtBrx4qeyVI2mRlnkZd1Ky7HVMsMX2Q/Td7HafO/RkzzINCwyVnku06nPOcGcpzTk16MTlZPpIYDPc9S411JWPUAApvspixjJmPyrqQ0wzqjK9LItq5X2NPzNiHViypUnLKbIe6JTC+4iOGZ0xJujyVSZ7iVdR1vsbHrPXwxD5pQccguKjNHs6DoHMblTLWUfArQGenirZb3WN32Pj1RLzERw6W4GJE5nHMGncH0/MmW6490hj282vwBrzWtpyPiIabFcClORmaVc0n5Ak4rmmxYN8SMusJ+XmjYzIq6TbSGeohoMVyKg5HZpVw1dC5LB03EoVirwNATCbKidjtPVG+mKdhDWI3hUuxUZRdxfdUsLqiYgEtJPVEXwBsJs6J6F48c2EK9r5uQGsNlszM8O58bRk/nkuETyLQPZK2htD4JSjsiaX3q1Pc1TLSYithRRPAZUGtAC4DsBmU4UsYVSDpHYpNlxGL1BAKPE4sdRmg+JDkTmzIMt/tqbHbjfXizjEismXb/kwSjB1E1L7LkxmkbQlHmFWQ4Rhs+to+TiBGKdVDrfZ6eyEGiqhebnEGGrZSh2ReR5xwzIIxgrIe9Pa/RFj5IRPVhk11k2goZk3MWpRmJ8wnMMAIxH1u6V1MfOEhQDWCT7OTY85mat4Ch7jGJvzNmXocaZF3HOg56DxJQA9gkG7n2XGYXzGZM9sAwQmqY1a2b2OU5jD8aQJEVcu3ZLCiayuS80QkdMHOMKG8172Bz52E80QCKJJPnyGRRyQTmFg0MI6LGeKNpD2tbD9MTCSJLEnkON2cMGsPC0lEJHTAzjLQ+/Uo7Iml95iTC7yP8/4LI+8TzMgTx48Ay9AbicSxAyvwiktP49ICewuH1+Hx/IRx6S4eh4nAuICvrK7hcS1Ji+MJbaPb8g67gG8f9VqMvjwRUspyzGJT9RfLdxqdS9NQV3svBrgdp8L95XFg/zohvi6jkOycyKu8GyjPPTmlR6AhXs6XjCQ563kFD7Q3ln8godo5iSsEyxuQsTSmS0hZq4N22F9jSvRpVnMiIb7OolLoqOK3wAmYWnGF4lFhPraFWXmt+jffb3yciIse2b+DDrZx
BrkGcVXoWC4sWYkuhOmlbuIvn6t/mjeZ1hLTwCQxFklGFxiBXIRcNXsx5ZadhT+GocHvYw+M1a3i+4QP8sfCxeY9nlLpyuaJiPlcMnYdLST4C0Rn289DhdSyv2YQnGuqXUezM4trK2dxQNYdMuzNpRlqfHaUdkbQ+MxJCgP9vCN9vSVzVtDe3JOt2yPxyUgusz3cvnp476XMGEjGysv+T7Oz/lxSjzfc4NZ0/os+p0ZcMaJRm30ZF3h1JLeJ13lfZ1PpjgAS1SOKM4dmXMbX4h0mVlj/iXcNrjT/rzZEwYsSzAEZlL2Fp2XdRDOqBnKwD3m08XPMrVBFL0HAuzhiXM4trh/4nDtn84rffu5/fHfgdES1iqqndhJwJfH3k18lQEp966tMBbw0/2fVXArFQQoaExLicSn48/t/IsutXLz1ZB72N/Ofm++iJBo45BkaM0dmD+c30Wylwmq8pcsTbxhfXPkxb2IuWYMmQkajMLuLeeTdSmpG+bn9elcz6nU5PTuuTrWNOCCQuJNabdOf7NfjvNY3w+f6Fp+cnxCMg5hg+7+/wen9lmtHme5Kazh8Qj0wkYsQXkxbv/Rzt+plpRr1vJRtbf4hANVEQLc6o8T7H1rafY/Z+pMa3nlcbfoImYiYY8TkPet/hjaa7ECYrbh727eKB6l8QE1ETDkKcsc+ziUdr70EV5grBHfYd5p799xDWwqY76+717OV3B35HVIuasq/2N/DDHX8kEAuaYggE+zw1/HjXnwipEVOMo/42vrrxH3RH/AmdkD7GIV8T39j0D/wxc43lGgPd3LTmftrDvoROCMSThGt8Hdy05v4Ba7SX1mdbaUckrU+sRHjdcU5Iko/13YOIbEhoF4lswdPzXykxfN7fEwq9mdAuENlHTef3UyAIWn0P0OF/IfFzidaxseWHpFJ3pNb7PDXeFSYY7bzacGcKtVIFh72r2db1TELLQMzLQzW/RHDqaRFjgmC/dyvvtCZmhNUwvz3wWzShJcXQ0DjoO8gz9YkZUS3Knbv+SlSLJVW6XUPjsK+efx5JzFCFxre33E9IjSTFUIVGrb+N/939bEJbIQRf/+BxPNGgKUfneEZTsJvvb07MSCuttCOS1idWwn8fqfd1UXofbyyf715S/zNQ8Hn/mtCq1fcgqVWUBZBp9vwjoVV1z1PEowOp7LRKHOh+MGFUZE/Py2giliIDtnU+iZYgYrGp6x0iWigpB+F4vd/+MrEEEYt1Hevwq37TkZDjJRC80/YOIdU4mrC2fTudkZ6UGW+2fIAn6je0W9e+n4ZgZ1IOQp80BG+37KQ1ZNz4cFNHLQc8LSn1qFGF4L3WQ9T42pN+bFqfL6UdkbQ+kRKxeoi8S+p9XVQIv4NQG/Ut1DZCwZcsMSKRdUSjB3UtYpqHdt+zFhgagegu/OEd+s9CC1HtedZCfxqBP3qU9tBmfYaIsbPreUv9afyxDmp9H+iOa0JjbfurKTshAEHVz66e9brjQghWtqy01CgvokVY17HO0OalxtUpl1KH+HvxZov+6wB4+ujalCq9Hq/n642jho9Vb7DEUCSJ5TWbUn58Wp8PpR2RtD6REsFnsP71lCCoH+IOBp7GSlO2uJT/3969B0dR7XkA/57uyTwzk5BJJg/IE7jJKq8QIEiweMUgAhrh4rpGl9dCSQWEVQsirItVgggLWmu0AKUWULBARUUUdSPyiBQskQglYAhciEICJDzyIJNkMjNn/wiJ5F47M5nusZPh96nKH5nT6d/v5DH9y+k+58Bu/1Cy9Wb9HnB4d7+/oxhV9TskWyvq98PJO/7v2RMGERdrpb9Xv90+hgZXtcwYAk5V75Fsv1h/BtXNVbJjHL3xv5LtZfYyVDRWyCp2GBj2V+6XbL9sv4aSujJZu+lycOy9UijZXtlYg/+7cc6n0ZBWbnB8ekm62Kl22PHdlV9kxXBxjl2/FsPplreJIwlsVIiQrsn1G+QXCQzceUmy1eksg/w/ATdczt8kW5u
cv4JB3qJTgAuNzWWSrbebf5Mdg8OF245fJdtrmstlbJDXGsONasdlyfYbTVdlnb81xnXHFcn2ysZKBWJwVDVJF0xXG5W5FVHZeFPydlm5/YYiMaqb69Ho+uNbWVfsNV49nOpJvdOBmuYG2echgYsKEdI1cTsg4zZACzfQwUgB5/WQX+zwthVb/4iL18v677vtPB3EcHJl3uSbW1e3/aM2d6Os2xm/n0c6V4e7UXax03oeKU3uJtnn93SeBpcyMTg4HBLPuzR4OavGG3bnH+drVzBGvVO5c5HAQ4UI6ZqYCfJ/PYU755EIIQTD94dI284CQZCeIy+yYEUu4GIHMTTM+3UtOhLEpNeVCBL0ihRUWkF6fQytoJf1DEornaDvoE2ZRbY6Oo9BVCaGAAFaicXNjBrlFgszSZzLqFFu2fVgBfMlgYcKEdIlMU2iAmfhHZ5Ho+kN+aMuAjSaJMlWfVASOLxbd0KaCENQH8lWszYBHE5ZERhEmLXS/QjVxsouEhhE9NDFS7ZH6GNknb8lhoAIXS/J9mhDtAIxGKL0UZLtMQab7BgAEGWwSi6Y18toVaTAtWrN0EnsJRNjCIVG5sOwAGDW6BCiVaZYJoGJChHSNRmmKnSeKdJNhinwfXpwKxeMpqckW8OMEyHIHrFwISL4ScnWaNMYBAlmWRE4XEiy/FWyPc40FCaNVXaMfqGTJdsTjP+EMG0k5IxScbgx3Jol2R5njEOcMU7WRZyDY4xtjGR7jCEC91t6y5o1wwBMjJbeHThcZ8GI8GRZM1oEMEyNGy7ZHqI1YHzM/fJiMIZpCUNkz+4hgY1+O0iXxMQoQDcGctYRgS4TTIyUPkK0wmB4VFYMne5BaDoYdRGFYISb/iojhgCTdhCM2vukYzAtEi1TZTxfwWAOSkSYfqB0FkxE/9DHZF3AgzU2xJmGSGfBGDLCH/H5/ABgFM24P2RYh8dk2jJl3WbSC3qkh6V3eMykmFGyZs1omAZjIzvux9S4B2TNaAGAR3t2HOPJxKGyYnDO8URCms9fT+4NVIiQLouZZsH3WyfuO1/fMVPwHPj+wKoLpuB5Ho+ymaffuYD7chF3I8ryrMejkizTwJjGxxgcfwmd4XHfnPtCH4HIdD4XI4Ot/+xx35zBPUbDIBp9LqoejJgM0cO+OenWdFg0Fgg+xGBgGBc5DjoPz4EMtw6ATRfmc4zxUSMQrOl4v5l0a18kmGw+jTYIYHg4JhVWXccjaalhsegXGgPRh40RBTCMjUpBrCms019L7i1UiJAui2mHgplf8u1rzcvAtIM9HqfVDkBIqPd7xtzNbMmDXj/a43GGoD5ItL4BXwqeKMs8hBkf9nicMSgG6ZGt/ejcRSPJ8gTizNK3TNpiaMIwqdcK/L4bsbcYUixZ6B+a7fFIg2jCjISlEJjQqWKEgeF+yzCMivAcQyto8ULyC9AImk4VCgwM91nuQ3aM5xgaQcQr/eZBL2o7FUMAQ4olEbOSPMcQmIA3Bs9AsEbfqWJEAEMfczReTPEcgzGG/GFPIkxr6lQxIjKG+GArVg72HIMQKkRIl8ZMM8DMrTvWerq9IQJgYOb/ADP9q9cxTKanEBL6X2j5c/AmBmC2LEVw8AKvY1hNjyLJ+t93vt67GDGW59ArZLHXMaJNozA8ah0EaMA8xGht7xOSg4Hhi73eRbiXaTAm9XoNGqb1IkbL28t9IY9gbLT3OxXHm5IxO/E/oRU8X8RbYwwIGYF/ift3CF5ekOOMcViSvAQG0eBFjJa8B4UOwoI+C6ARvFuzJdYYhdcHLoIlyOTxeZHWGANDk/HK/fMQJDFb5u9FG8KwYdizCNeZvYjR8tE/NB75Q/4NBi9nxdgMFmx7cDZiDKFex0i2RGHryBkwB0nPYCKkFePebrupgs5sI0wCG3cUg9dvBpoK7rzC0HLbRkDbSINufEvhok31KYbDcRK3b7+LxoY9d53bjd//+3dDp89CcPBc6HQP+BTD7jiDq7X/g5v23Xd
muohoWf699WLoRoh+NCLNsxBieNCnGLWOv+F89Yf47faXcPNmMIjgcLdd7DhcsBnS0TskB9Em32JUOy7j5M1d+KXmGzi5405B0Po9a4kRYxiAgWFTkBT8oNdFyN1uOipx+PpXKLq5Dw53I4R2/WBww4U441+QEf4IBoRk+BTjluMWCq4V4EDVATS4GiBChPuu75UbbiQYE5AZmYkHrA94Xei0j1GLLysO4esrhahz2iEyAW7OwcDAWMsGcfHGGEzuOQrjbOnQCJ1/nqjaUY9dl45g16UjuOWoh+ZODKBlVMPF3Yg3RWBa7AhM7jUUWi+LqbvVNjdg58Ufsf3iMVQ11rWLITAGJ3cj1tgDOUnpmJaQBr3EbBxyb+jM9ZsKEdKtcFcl0PAZuOvXlsXKmAlMTAAMj4OJEYrEcLmuo8H+MZzO83Dz2xCYCaImHkbjNIii/CmmAOB03cL1+k/R0HwOLl4HkRmh1fREuGkqdJpYRWI4XHW4dPsr1DSdQ7P7NkRBD6MmEnHBExGslZ5G27kYdpTW7kNV0zk0uW5Dw3QwBYUj2ZKJsA6m6nYqhrsRJ6sP47L9PBpc9dAIQTBremBQ6EhEGxIUidHsbkbRzSKcu30OdpcdGqZBSFAIhoUNQ4JJqRhOHLl+EqdqzuO20w6NICIkKBgZ4alINif4VEj9PafbhcKqM/jx5t9Q19wAkQkICTJidGQ/DAxVJoaLu3Ho2jkcrjyPGkcDBMYQqjViTFQy0sMTFYlBuj8qRAghhBCims5cv+kZEUIIIYSohgoRQgghhKiGChFCCCGEqIYKEUIIIYSohgoRQgghhKim85PJ/0StE3pqa2tVzoQQQggh3mq9bnszMbdLFyJ1dXUAgNhYZdZVIIQQQsifp66uDiEhIR0e06XXEXG73aioqIDZbFZ8kZza2lrExsbi0qVLAblGCfWv+wv0PgZ6/4DA7yP1r/vzVx8556irq0NMTAwEoeOnQLr0iIggCOjVq5dfY1gsloD9BQOof4Eg0PsY6P0DAr+P1L/uzx999DQS0ooeViWEEEKIaqgQIYQQQohq7tlCRKfTYfny5dDpdGqn4hfUv+4v0PsY6P0DAr+P1L/uryv0sUs/rEoIIYSQwHbPjogQQgghRH1UiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiAAoLS3FY489hvDwcFgsFowcORL79+9XOy1FffXVV0hPT4fBYECPHj2QnZ2tdkp+0dTUhEGDBoExhhMnTqidjiLKysowe/ZsJCYmwmAwoHfv3li+fDkcDofaqcnyzjvvICEhAXq9Hunp6Th27JjaKSli1apVGDp0KMxmM2w2G7Kzs3H27Fm10/Kb119/HYwxLFq0SO1UFFVeXo6nn34aVqsVBoMB/fv3x48//qh2WopwuVx4+eWX272nvPrqq17tC+MPVIgAmDRpEpxOJ77//nscP34cAwcOxKRJk3D16lW1U1PErl278Mwzz2DmzJk4efIkDh8+jKeeekrttPxi8eLFiImJUTsNRZWUlMDtdmPjxo04ffo03nzzTWzYsAFLly5VOzWf7dy5E88//zyWL1+O4uJiDBw4EOPHj0dlZaXaqcl28OBB5Obm4ujRoygoKEBzczOysrJQX1+vdmqKKyoqwsaNGzFgwAC1U1HUrVu3kJGRgaCgIHz99dc4c+YM1q1bhx49eqidmiJWr16N9evX4+2338Yvv/yC1atXY82aNcjPz1cnIX6Pq6qq4gD4oUOH2l6rra3lAHhBQYGKmSmjubmZ9+zZk2/atEntVPxu7969PCUlhZ8+fZoD4D/99JPaKfnNmjVreGJiotpp+GzYsGE8Nze37XOXy8VjYmL4qlWrVMzKPyorKzkAfvDgQbVTUVRdXR3v27cvLygo4KNGjeILFy5UOyXFLFmyhI8cOVLtNPxm4sSJfNasWe1emzJlCs/JyVEln3t+RMRqtSI5ORnvv/8+6uvr4XQ6sXHjRthsNqSlpamdnmz
FxcUoLy+HIAhITU1FdHQ0JkyYgFOnTqmdmqKuXbuGOXPm4IMPPoDRaFQ7Hb+rqalBWFiY2mn4xOFw4Pjx48jMzGx7TRAEZGZm4siRIypm5h81NTUA0G1/XlJyc3MxceLEdj/HQPHFF19gyJAhmDZtGmw2G1JTU/Hee++pnZZiRowYgX379qG0tBQAcPLkSfzwww+YMGGCKvl06U3v/gyMMXz33XfIzs6G2WyGIAiw2Wz45ptvAmIY7sKFCwCAV155BW+88QYSEhKwbt06jB49GqWlpQHx5sg5x4wZM/Dss89iyJAhKCsrUzslvzp//jzy8/Oxdu1atVPxyfXr1+FyuRAZGdnu9cjISJSUlKiUlX+43W4sWrQIGRkZ6Nevn9rpKGbHjh0oLi5GUVGR2qn4xYULF7B+/Xo8//zzWLp0KYqKivDcc89Bq9Vi+vTpaqcnW15eHmpra5GSkgJRFOFyubBy5Urk5OSokk/Ajojk5eWBMdbhR0lJCTjnyM3Nhc1mQ2FhIY4dO4bs7GxMnjwZV65cUbsbkrztn9vtBgAsW7YMU6dORVpaGjZv3gzGGD7++GOVe9Exb/uYn5+Puro6vPTSS2qn3Cne9u9u5eXlePjhhzFt2jTMmTNHpcyJt3Jzc3Hq1Cns2LFD7VQUc+nSJSxcuBDbt2+HXq9XOx2/cLvdGDx4MF577TWkpqZi7ty5mDNnDjZs2KB2aor46KOPsH37dnz44YcoLi7G1q1bsXbtWmzdulWVfAJ2ifeqqircuHGjw2OSkpJQWFiIrKws3Lp1q90WyH379sXs2bORl5fn71R94m3/Dh8+jLFjx6KwsBAjR45sa0tPT0dmZiZWrlzp71R95m0fn3jiCezZsweMsbbXXS4XRFFETk6Oan9cnnjbP61WCwCoqKjA6NGjMXz4cGzZsgWC0D3/j3A4HDAajfjkk0/azd6aPn06qqursXv3bvWSU9D8+fOxe/duHDp0CImJiWqno5jPP/8cjz/+OERRbHvN5XKBMQZBENDU1NSurTuKj4/HQw89hE2bNrW9tn79eqxYsQLl5eUqZqaM2NhY5OXlITc3t+21FStWYNu2baqMSgbsrZmIiAhERER4PM5utwPAP7ypC4LQNprQFXnbv7S0NOh0Opw9e7atEGlubkZZWRni4+P9naYs3vbxrbfewooVK9o+r6iowPjx47Fz506kp6f7M0VZvO0f0DISMmbMmLYRre5ahACAVqtFWloa9u3b11aIuN1u7Nu3D/Pnz1c3OQVwzrFgwQJ89tlnOHDgQEAVIQAwbtw4/Pzzz+1emzlzJlJSUrBkyZJuX4QAQEZGxj9MuS4tLe3y75nestvt//AeIoqietc8VR6R7UKqqqq41WrlU6ZM4SdOnOBnz57lL774Ig8KCuInTpxQOz1FLFy4kPfs2ZN/++23vKSkhM+ePZvbbDZ+8+ZNtVPzi4sXLwbUrJnLly/zPn368HHjxvHLly/zK1eutH10Vzt27OA6nY5v2bKFnzlzhs+dO5eHhobyq1evqp2abPPmzeMhISH8wIED7X5Wdrtd7dT8JtBmzRw7doxrNBq+cuVKfu7cOb59+3ZuNBr5tm3b1E5NEdOnT+c9e/bkX375Jb948SL/9NNPeXh4OF+8eLEq+dzzhQjnnBcVFfGsrCweFhbGzWYzHz58ON+7d6/aaSnG4XDwF154gdtsNm42m3lmZiY/deqU2mn5TaAVIps3b+YA/vCjO8vPz+dxcXFcq9XyYcOG8aNHj6qdkiKkflabN29WOzW/CbRChHPO9+zZw/v168d1Oh1PSUnh7777rtopKaa2tpYvXLiQx8XFcb1ez5OSkviyZct4U1OTKvkE7DMihBBCCOn6uu+NZkIIIYR0e1SIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgg
hRDX/D5jrWwVsUUF3AAAAAElFTkSuQmCC", "text/plain": [ "<Figure size 640x480 with 1 Axes>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plotPinPow(fuelBlock)" ] }, { "cell_type": "markdown", "id": "89553e58", "metadata": {}, "source": [ "As expected, our pin powers have rotated 180 degrees, with the maxima now in the south west direction. So what changed: the locations of pins or the pin power data array?\n", "\n", "This introduces the second key concept: with limited and documented exceptions, `Block` parameter data are **not** modified during rotation, the **locations** of objects within the `Block` are updated. See a discussion in \n", "\n", "If we compare the post-rotation pin powers and pin locations, this is confirmed." ] }, { "cell_type": "code", "execution_count": 15, "id": "5971c84d", "metadata": {}, "outputs": [], "source": [ "assert (fuelBlock.p.linPowByPin == pinPowerBefore).all()" ] }, { "cell_type": "code", "execution_count": 16, "id": "6bd98651", "metadata": {}, "outputs": [], "source": [ "assert (getPinRingPos(fuelBlock) != ringPosBefore).any()" ] }, { "cell_type": "markdown", "id": "1dd38ce3", "metadata": {}, "source": [ "## Component-level powers\n", "This gets a little trickier to explain because, in our example here, one fuel `Component` occupies the entire fuel lattice. Cases where that may not be the case can follow a similar pattern.\n", "\n", "The connection between block level pin powers and the related components is the `Circle.getPinIndices()` method. For a block with `N` pins, a given pin component will have a multiplicity of `M <= N`. `Circle.getPinIndices` will return an `(M, )` vector of integers that translate between the component and block level data.\n", "\n", "For the `k`-th pin reflected in `Circle`, with `0 <= k < M`, `kx = Circle.getPinIndices()[k]` is the index in parameters like `Block.p.linPowByPin[kx]` for that particular instance of the pin. 
And this `k`-th instance of the pin is spatially located in `Block.getPinLocations()[kx]`.\n", "\n", "To demonstrate, we'll present the trivial case for a singular fuel `Circle` occupying every lattice site in the grid. Here, we would expect the `.getIndices()` to return what is essentially a `numpy.arange` vector, since every position `[0, N)` is held by this fuel pin." ] }, { "cell_type": "code", "execution_count": 17, "id": "39131a19", "metadata": {}, "outputs": [], "source": [ "from armi.reactor.components import Circle" ] }, { "cell_type": "code", "execution_count": 18, "id": "9fed4c75", "metadata": {}, "outputs": [], "source": [ "fuelPin: Circle = fuelBlock.getComponent(Flags.FUEL)" ] }, { "cell_type": "code", "execution_count": 19, "id": "fb292c1d", "metadata": {}, "outputs": [], "source": [ "fpIndices = fuelPin.getPinIndices()\n", "assert (fpIndices == np.arange(0, fuelPin.getDimension(\"mult\"))).all(), fpIndices" ] }, { "cell_type": "markdown", "id": "5164c1ec", "metadata": {}, "source": [ "To help illustrate how to map between component data -> block data -> spatial data, let's plot pin power assigned to just this component." 
] }, { "cell_type": "code", "execution_count": 20, "id": "cf7c081c", "metadata": {}, "outputs": [], "source": [ "from matplotlib.colors import Normalize\n", "\n", "\n", "def plotCompPinPow(c: Circle, **kwargs):\n", " blockLinPowByPin = c.parent.p.linPowByPin\n", " xs = []\n", " ys = []\n", " ps = []\n", " myIndices = c.getPinIndices()\n", " for k, loc in enumerate(c.spatialLocator):\n", " x, y, _z = loc.getLocalCoordinates()\n", " xs.append(x)\n", " ys.append(y)\n", " kx = myIndices[k]\n", " ps.append(blockLinPowByPin[kx])\n", " # normalize the color scheme against all the pin powers in the block\n", " # not just those for this pin\n", " norm = Normalize(vmin=blockLinPowByPin.min(), vmax=blockLinPowByPin.max())\n", " kwargs.setdefault(\"s\", 150)\n", " return pyplot.scatter(xs, ys, c=ps, norm=norm, **kwargs)" ] }, { "cell_type": "code", "execution_count": 21, "id": "1cd268d1", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "<matplotlib.collections.PathCollection at 0x1baf958fc50>" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnWV4HEe2hr/qGY2YyUKLbEu2ZElmZmZ2Yggzb7Ib3DDtZpPdwIY2aCdOzMzMLLDAtmRZFlnMIw131/0xlq9B3dOgJE7S7/PoZq/rTH+DXadOnTqHUEopVFRUVFRUVFR+A5jf+gmoqKioqKio/HlRHREVFRUVFRWV3wzVEVFRUVFRUVH5zVAdERUVFRUVFZXfDNURUVFRUVFRUfnNUB0RFRUVFRUVld8M1RFRUVFRUVFR+c1QHREVFRUVFRWV3wztb/0EhOA4DuXl5fD09AQh5Ld+OioqKioqKioioJRCr9cjNDQUDCMc87ilHZHy8nJERET81k9DRUVFRUVFRQalpaUIDw8XtLmlHRFPT08A9hfi5eX1Gz8bFRUVFRUVFTE0NzcjIiLi6jwuxC3tiLRtx3h5eamOiIqKioqKyu8MMWkVarKqioqKioqKym+G6oioqKioqKio/GaojoiKioqKiorKb4bqiKioqKioqKj8ZtzSyaoqKjdSVtuIdSdzUVLTiFazBe7OOkQG+mBm/0SE+3t3iEZFfTPWH85BcVUDWk0WuDk7ITzQB9MG9UDnYN8O0aiu12PzgVxculyHFqMFrs5ahAR4YfKwHogJD+gQjfrGVmzdk4OLxTVoaTXDxdkJQQGemDiyB+KigzpEo6nJgO07s5FfUIXWFjOcnbXw9/fEuDE9EN8tpEM09M1G7N6WhfNny9HaYoJOp4WfvwdGjktE96TwDqkx1Npiwt7NmTibWYKWZiOcdFr4+ntg+ISeSOoT1SEaJoMZ+9anI+fERbQ0G6DRauDj74Ghk1OQPLiLw1oLYjAbLTi4/hTOHDyP5oZWaDQMvAM8MWhyKvqMSewQDYvZisNrjuP0rjPQ17WAMATe/p4YOK0v+k/uBY1Wo1hD5c8FoZTS3/pJ8NHc3Axvb280NTWpp2b+5BzPK8b3+9JwLK8YDENAKcBRCoYQEAJwHMWg+M64a1Qf9O8SKUsjLb8MS3eexuGcS/aJ54oGIQBDCFiOom98BO4Y2xuDE6NlaeQUlOPHLWk4eLrg6r/dqJHSLQy3T+yN4X3iZGnkXazCz+tPYv/RfHAUINdqMAxYlkP3LiGYN603Rg3uJmuSvXSpBj+vOoF9+8+BZTkQQsBx12t0iQvG7Jl9MHZ0DzCMdI3S4lqsWnYMe7Znw2Zj29WIignEzPn9MW5yMjQa6ZNsRWk91iw5hJ3rM2CxWMG0oxEeFYAZiwZhwqw+0DpJn2Rryhuw5n/7sGP5cZgMFjAaBhzLAQTQaBiwNg6dIv0x/e5hmLRoMHTO0teHdZWNWPvpLmxbehCGZmO7GoFhfpj2wChMvW8kXNycJWs01jRhzX+2YMuXO6FvaIVGa78uAGi0GrA2Fn4hvpj2yHjMfGIS3DxdJWuo/HGQMn+rjojKLQ2lFF/tPon/bj0KDWOfqPloG39y8mDcM7qvpAl22e50fLDqgEMNhrFPVPdN6o+Hpw2UpLF+Xxbe+3YPCIGwBiHgKMVtE3rhiQXDJU3iuw6ew9sfbQUIwLKOX8fUsUl4+sGx0EqYxA8fycfrb28EpRQsy/HaEUJAKcWoEQl47q+ToNOJn2BPH7+I159fCZuNFXwdhACUAgOHdsULb8yCi4uTaI3s05fwymNLYTHZHLwO+39TBsTi5f8shJu7+Ek8L7MYf1/8JQwtJrtjwCtidxi794nBq9/cB08fN9EahdmleGnOf9BU1yKsAYAwBLFJkXhr1ZPwCRR/Ty05fxnPjXsT9RUNojQi48Pwjx1/R0CYv2gNlT8WUuZvNUdE5Zbm692n8N+tRwEIT97Xjn+05Qi
+23tatMbyvRn4YNUBURrclfGvt57AZxuOitbYfCAH//hmNzhKHWtcWRss356OD3/cL1pj35E8vPGfLWA5Kjh5A///Ojbvzsa/PtsJseuR4ycv4pU31l9xEIQnpLZr7jtwDu+8t/mqpiPOpBfh5b8uh8Vic/g62p72iSMX8OaLq66u0B1xPqsULz74HUxGq4jXYf87c6IQrzyyFBaLTZTGpXPleG7+pzDojQ4nb1zROJdehJcWfQ6T0SJKo6ygEn+d8p4oJwQAKEdRmFOKZ6e9j9ZmoyiNquIaPD3sZTRUOnZC2jTK8svx9PBX0VynF6Wh8udGdURUbllOXCjBJ1uPyHrsh5sP43RBmUO77EsV+NeK/bI0vtl2EoeyCx3aFZTW4J1vdsnSWLkzA7uOnXdod7miEW/8ZwukboBQCmzdm4NNu7Id2tbW6fHqG+sBSAuiUgocOJiH1WtPObRtbjLilb+tAMdRSInVchzF6eMX8dOSQw5tTQYLXn5kCVgbByrSOWrTyM0oxpKPHX+WFrMNL9/xJawWm2gHDAA4lsPFnDJ8+dpah7Ysy+HleR/B1GoW5SBcq1F2oRIf/2WpQ1tKKV6Z/k+0NLaKdvIAgLVxqCquwT/u+ET0Y1T+vKiOiMoty9J9adDIyC0A7Ns0S/anObT7aXe6rPwFwL69sXSnY43VOzMlOwhtEELw4xbH0Z31OzLBUSrRRfh/fl5/ymFUZMvWM7DZWEkOwrWsXHPKYfRh55ZMmIxW0RGaa6EUWLfipMOIxd4tmdA3GSU5CP+vQbF5xQkYDWZBu6Pbz6CuqkmSg9AGx1HsXnUKzQ2tgnand2Wj4lKNPA2Ww8H1p1FzuV7QLuvgWRRmFUtyQq7VOLUtA2X55ZIfq/LnQnVEVG5JLtc34fC5IofbGHywHMXBs4WoaGjmtalrbsXu9AuyNTiOIi2/DIUVdbw2LQYzth4+K1uDUoq8omqcK6zktTGbrdi4M0vWxNpGWUUDMnNLecdtNhbrN2Uo0qira8GJk/wRJI6jWL/KsUMkRIvehMP7zvGOU0qxftkxKDkEYzZZsXfzGUGbDd8dku3gAvZox86VJwRtNn61F4yMBN02CIDtS4UjSBs/3Q6NVr4Go2Gw+Yudsh+v8udAdURUbknWn8hVdCMHAAKC9Sdyece3HD8ne3XfhoYh2HCEX2PXsTxYbaxijY37c3jHD54ogEFkToGgxs4s3vGTpy6hsdGgSINhCDZtyeAdz8ooRnVlk2KNzev4o1QXzl5GycVqRZ87IcAWASeh7GIVzqcXKXLaKKXY8gP/tmTN5Xqk7cuVFQ1pg+MoNn+7n3e8uV6Pw+tOyoqGXNVgOWz9Zg9Yhb8BlT82qiOicktSWtuk2EkgBCir45/YSmsaFTs7HEdRVsOvUVbVAI3C2g0sR1FS2cCvUdEg6+jqTRqX+TUulzd0yHtVWsavUSGgL0Xjcin/dkOFwJhYKBW+Tnkxf4RMCtVl9bzRocqiWqmpOu3SVKuHmceJrS6uVeTotGHUm9Bc36L4Oip/XFRHROWWxGCxXD09IheOozCYrfwaJquiVStgnwtajPz5AkL6UmgRyEkwmqyyc1CuxSDwOoxGS4cU9TIKRG6MBotiZwcATCb+99xoUBY5asMsoGFqFc4fEQvHUVh4vj/GVlOHaACAsaX9a/H9uywNvbgTOip/TlRHROWWxN1ZB0bhxMcwBG7O/HUl3F10HbD9A3i48teVcHPRKbp+G54CBahcXZw6YnEMdyENV52i3I023Nz43w9XN51ixxCwP1chjY7AWUDDRUKdESEYDQMdz/fXzcOlQzQA8BYec/XsQA0vtbiZCj+qI6JySxIVpLyUOqVAZ4HrdA72lZ1E2gbDEHQO9uHXCPGFTWF4W8MQRIX58Y5Hhvk5PI0iSiOCv/hURISfYidBwxB0juQvXx8eqbz4FcMQRETxXyc8Snn5fEIIwjsLaMQEKtYAAUIi/XmjUCExQR0SofIL9oa
OpwhccOfADinX7u7tBk8/D8XXUfnjojoiKrck0/v16JDrzBC4zqQBCbKPB7fBchQzhiTxjo/u3w0uEiqK8mlMH9GTd3xo/zh4KFyFsxzFtHHJvON9e0fD31/ZZMJyFNOmpPCOJyZHICTMV9GJFo6jmDqzD+94bHwoYuNDQBR87pRSTLmtP+94aFQgEvvHgtHI1yAAptwxhHfcv5MP+o1LUnZqhiGYct9I3nFPXw8MnzdQ8amZyQ+MhUaj9p9R4Ud1RFRuSTr5eGJYj2hFdURGJcUiyJt/8vT1cMW4Pt0UafRPiERkkA+vjburDpOH9ZCtwRCCHrGd0KUz/ypb56TF9HHJsreZCAE6h/shKT6U10ajYTBjaqqi7bKgIC/06c3fo4cQghnz+sm+PgB4+bhh0PBugjbTFwyUVMjsRlzddBg+gd8xBIBpdw4F56AqrBBaJy3GzOkraDP1vlGKkkkJIZiweKiwxsPjlZ2a4ThMeXCs7Mer/DlQHRGVW5Y7R/SWvR3AcRR3jOjt0G7hmFTZp3NYjuKOcfyr7zbmjksFCJGVUMpRikVThCckAJgxIRlaDSMrmkApsHBmP4eh/kkTk6Fz1sreErhtbj+HztLYiT3h7uEi26mafVt/aB1sJwyf2BO+/h6yNAgBpi0YCBeBHBEAGDg+CUHhvrIiFoQhmLhgIDy8hfvN9BrVHZHdQmRpMAzBqHkD4Bcs3LG6x6Bu6NY3DoyMqAijYTB4ej+ExARLfqzKnwvVEVG5ZekdG46/zhgu67HPzRyBlGj+FX4bCZHB+PuiMbI0Hp0+GAO7d3ZoFxXqh9cemiArofSOqX0xsm8Xh3adgrzxxt+mAYBkZ2TmxBRMGOl4K8zP1x1vvz4bDCNNgxBg3JhEzJjWy6Gth6cL3vrgdjAaRpKjQAjB4OHdMH/xYIe2zi5OeOuLO+Gk00rSYBiC1IFxWPzIaIe2WicN3lr6EFxcdZIcBYYhSOgVhXtfmi7ClsEbK5+Eh7ebNA0Ng+jECDz2r4UObQkheG3d3+AT6C3JGWE0DMK6hOBv3z0i+jEqf15UR0Tllmbx8F54dsZwEMDh9oaGsUcdnp85AguGpYrWmDEkEX9fNAYMIaI0AOCJmUNwz0THkYo2xg6Mx+uPTISGEa9xz4z+eHgef57AjQzuG4u3npsOjYZxqNE2Ac+b2htP3jtKdJSjV2pnvPPmHOh0Wmgc5EC0aUya0BPPPjNRtEb3pHD846OFcHZxcphn0aYxfEx3vPDGLNGORWx8KN779l64ebg41GjLJ+k/PB6vfLgQWidx+Q4RccF4f80T8PJ1F62RMrgr3lz6IHTO4vKKOkUG4INtz8G/k4/D106I/S+hbwz+seEZ0ad7AkL98OHhN9Gpc6Dj95fYnZfY5Ch8sP91uHu7i9JQ+XNDaEecyfuFkNJGWOWPTealcvywPx17sgsA2G+oHKVgCLm6tTImuQsWD09FcpTjSEh7nC2qxI970rHr9AVQSkEIAUc5uwbsSYrDe8Zi4Zhe6N01XJbGheIaLN+ehh3HzoNlOTAMA477fw2OoxiUHIX5E3qjf5LjaEt7XCqtxapN6dixPxdWG3tFg17J77B3/+2T3Blzp/TCoD6xsjTKLtdjzbo0bNuRBYvZBkbTpgGAACxL0TMpArNn9sbQwV1lbedUVjRi3YqT2LYxAyajBZo2DQYACFiWQ0JiOGbM64sRY3rI0qitasKGZcewdfUptOpN0GjtGoTYK/OyLIcu3cMwfeFAjJycLKtwXEONHhu/P4gtPxyBvtFwvQYhYG0couJDMP3uYRgzp59oR+damur02PT1Pmz+Zh8aa/TQaDWgHGffEiT2JnQRXTph2gOjMX7xEN5jwUK0NLZi0+c7seHTbagrb7BrXPnx2V8Hi5DYYMx4bCImPzAGzgLH2lX++EiZv1VHROWWwGKzASDQOdjfr2lqwcZTZ1FS24RWkxnuLs7oHOiDaX27I8BLePVl1wB0WuHVZn2
zAZuPn0VRZQNaTWa4uegQFuCNqQO7I9jXU/CxVhsLjlI4OwlrNOmN2HbkHArLatFqtMDV2QmdArwwaWh3hAYK79vbbCxYjsLZwWkcfasJOw+cxcWiWrQazHB21iLI3xPjR/RARKjw8WibjQXLctDphHNCDAYzdu89hwsFlWhpNcNZp0VAgCfGju4ueFQXAGwsB5uVhbODvBOj0YL9u3KRf64cLXoTnHRa+Ad4YOS4RMTECecfsDYOVptjDYvZioM7c3A2oxitehO0Thr4Bnhi+PgkdOkRJqzBcrBabHB2cRLUsFpsOLI9C9nHC9DSZISTkwbe/h4YMjkF8amdBR/LshysZhucXYU1bFYbjm87g8yD56FvaIVGy8Db3wODp/RCj4FdBB/LcRwsRiuc3XQOnguLk1szcHpHJvQNLWAYBp5+Hhg0vS9SRiY61DAbLXBxc+6Q48cqty6qI6Jyy0MpxfHCUiw7mYlDF4pgudKLwkWrxaj4GCzon4JekaGKblaUUqQVXsbyo2dw4GwhTFa7I+Ks1WBIfDRuH5yMfnERijWyiyqx8sAZ7DlzAaYrnV+dtBoMjI/E/OEpGBDfWXHhtHNFVVi9JxO7T+bDeKXappOWQe/4CMwdk4pBPaMUl5IvKK7B2h2Z2H34PFqvVEDVahgkxYdhzsRUDOkTC63CUvLFJXXYsCUDO/fmoqXFXoFUo2GQ0C0Es6b1wtBBXeEkIyJwLWWl9di8IR07t2ehudl4VSOuSzBmzO6L4SMSRG998FFZ3oCtq09jx4YMNNbbu+QyDEF0106YPr8fho9PdJjQ6oiaikZsW3ECO1acQH2tHqAAoyGIiA3GtMWDMHJaL7gqPLZdX9WE7cuOYNvSQ6itbLRrMAShMUGYcvdwjJk3AO4Ki5E11jRj+3f7seWr3agurQPlKAhDEBIdhCkPjsG4O4bDS60z8odDdURUbmkOXSjCm1v2orS+CRqG3FRUrO3fYgP98NrU0egTJX0b5FRBKd5cuweXqhsENSL9ffDS7FEY1FX6NkjWpQq8+dNuFJTXCmqE+Hni2bkjMaKn9G2Q/OJqvPXtTpwvrm5Xg2EIOI4i0NcDT902HGP7Cx9dbY+isjq8+/kO5ORXCGr4ebvh4UXDMGmE9Bovl8sb8N6H23Emu/Tq9drT8PJyxb2Lh2D6FPE5Pm1UVzXjg/c2I+3UJUENdw9nLL5zKGbPc3xS6Ebqa/X48M2NOHEoHwy5WYMwBJSjcHXTYd7dQ3DbPUPBSHQQmxta8fHLa3B0R459C/JGDWI/6eTs6oSZdw/DoifHSd4yam024tPnf8aB9acBerNG2xEvnbMTptw9HHe/NEPylpGx1YTP/rIEu388DI7jbj4yfWX7S+OkwcR7RuLB9xZC10GViFV+e1RHROWWZW16Ll7esAuUUoenSBgCMITBv+ZMxITErqI1tmfm4fmftoFSCkenf9tyAd6YNxbT+4qfYA9kX8TfvtoMlqMOe+LYMzOAF+aPwrxh/EXDbuTk2RI88+F6+3aPyGPMj88bisWTxCfRZp2/jGfeXgOTxSZa4+45A3DffMenU9rIu1CJv764EgaDWXQl27kz++CR+0eKdhSKLtXgr0/9CH2zEazI+h2TpqbgqWcmiY5WXS6pw7MPfI/62hbR9TtGTkzC396YKbpCaXV5A55b+AWqyxtFawwY3R0vfrIYTiIL59VXN+H5WR/icmGVqFonhBCkDO2GV5c+LFja/lqa61vw/IR3UJhVLOp7RRiC7gO64O1Nz/GWnFf5fSFl/lZPzaj8auw9fxF/X78TnAgnBAA4Ctg4Dn9dvRXHC0tEaRzLL8Zzy7ZdcRAc21NqT3p9eeVOHDhbKEojs7Acf/1qM2wsJ6oxX5vFuyv2YkdaniiNvOJqPPOfdbBYxTsIAPDJykPYeDBHlG1RWR2eeXstTGZpGt+tPo6VW9JF2VZUNuKvL65EqwQnBABWrTuNH5cfF2VbW9OMZ/+yDM1N4p0
QANi6KRPf/G+fKNvG+lY8/+AS1NfqJRUR27c9G5+9t01Un56WZiNevPMr1EhwQgDgxN5z+M8Lq0RpmFrNePm2/+JyYbXogmuUUpw5nId/PvStqFYCFpMFL8/4FwqzS0R/ryhHce5EAd6Y9x/Yrmyhqvx5UB0RlV8Fs9WGF9bukPVYSoHn1+wAywnfBG0shxd/3g5xbs6NIsBLy3dcTWjlfy4ULy/ZDo6TrkIAvP7jLhhMwh1gKaV44+sdsLKcrGJr/1y6G40tjrudvve/XTBZrLK6HH+yZD9q6vQO7T76bDcMBrOswnTfLD2Essv1Du2+/GwPmpoMsjRW/HQMF/IrHdot+WwvamuapVdLpcDmVaeQk+HYkV7+2R5UlNRK7htEKcW+Dek4deC8Q9s1X+xG0bnLkiuychzFse1ncHiTYwd005e7cf5kgXQNlkP6nhzs+uGQpMep/P5RHRGVX4XtufloNpllFfXiKEWVvgUHLxQJ2h04V4havUHW5E0BNBlM2H3leDAfJ/NKUVbbJGvypgCMFiu2nRaeMM5eqsSF0hrZVWVtLIcth3IFbS6V1uHMucvyG9kRYOOebEGTiqomHD9VKLuxIMMQbNyaKWjT0NCKg/vPSYqEXItGw2DThjRBm1a9Cbs3Z8ou2a7RMNi04qSgjdlkxbblJ2RrMBoGm344Imhjs7LY/O0B2Z85wxBs+na/oA3Hcdjw6Q7ZnZoJQ7D+v9s7pNOzyu8H1RFR+VX48Ximoj4lGkKw7ESmoM3Ph5VpMITgp8PCGisOZipqlEcI8NO+DMEb7ao9ZxRpUAqs2J0pOOGs26nsdXAcxdodmbBdOe3UHpu3nVF0WojjKDZvz4LJZOW12bYlEw4CZYKwLIddO7LRojfx2uzecgYWi/ztApblcGjPWfvJFx4Obj0DQwv/c3AEx3I4fTAPlaX8EaQTO7PQKPAcHGpwFLknLqL4fDmvTcbeXFQW1UDWigP2LZpLOaU4f+qizGep8ntEdURUfnGqmluQU14lK4rQBkspjhQUo9Xc/rZGs9GEEwWlijQ4SnGmuAI1zS3tjlttLA5kyV/hA3YnobCyHiU1jTzjFLtP5inSAIDKumbkl1Tzju86fF6xRmOzEdl5/JPS7n1n5UdcrmAwWJB+pph3fN/uXMWrZ6uFxckT/BPf/h3CkR8xcByH4wf484MObjmjqCMwYHekjwg810Mb0xV1BAbskZdDAtszB9ccV9StFwA0Wg0OrhaXH6Tyx0B1RFR+cepaDR12rQZD+7kPDSJyIsRSz3OtplaTIkfnWhr07b8nJosNFit/lEGSRnP7GhxHoW+Vv/oWowEATc0d85k0NvJrNDS0Kr4+IQSNAtdpqG2RvcJvQ8Mwghr1Nc2KOgID9t4zTfUCGtVNijoCA/atkyaB3KDG6mZF3XoBuzPeVNOs6Boqvy9+cUfk8uXLWLRoEfz9/eHq6oqkpCScPn36l5ZVuYVwlGQqBStPAlxHath4rtWhGnyvQ0Fbd7EalFLZHYdv0hCYdJS0qL9OQ+A6cnNDroUQ4dfRIZ/JlTLrvBoKJ+//vw6/E8t2hINLKWxWodfRURod44yr/D74RR2RhoYGDB48GE5OTti2bRvOnj2LDz74AL6+wuWlVf5YeLp0XM8Jb57+FV6uLh2m4cWn4dZxGp4813Jz0Ununsur4d6+hkbDwEVGr5F2NTz43xM3hVU/xWh4eCjX4DgKT09+DU+FlUUBgGMpPLz4Nbx83ZRrUAoPb/7n6uXvcbVQmXwIPHz4n6unr4ekTsDtKjAMPHzVZnl/Jn5RR+Sf//wnIiIi8N1336Ffv36Ijo7GuHHjEBsrr9GWyu+TcB9vBHgou9ESAFH+PvB1a/9G6+/phgh/b8X32SAvD4T6tl98x9XZCV3DAqFwKx/ebi6IDm7fGWcYguQuYYpLwrs6O6FrZCDveK8e4YqSVQFAq2XQPa4T73jv5M6ymsRdCyEEid35e7307hPtsAuwGJK
SI3nHUvvHKP48KKVI6hXFO548IE6xBsdySOwbwzvec2BXEIW/ENbGImlgF36NYQmKI2GsjUXPoQmKrqHy++IXdUQ2btyIPn36YO7cuQgKCkJqaiq++uorXnuz2Yzm5ubr/lR+/2g1DBb0S1F0ogUAFg1I5a20SQjBgiEpiq7PEILbhyQL9my5fUSKqEJpQhpzhvaETqAp3rwxqYqSPDUMwdShPeAmUC579oRURcmqGoZg3JAEeAtUwZwxNVXRtoZGQzBkYBwCA/gbDU6d0VvR9gzDEPTqHYXwCD9em8lz+ir6PAhD0K1HGOLiQ3htJszrJ/v6gH17KSI2CIl9o3ltxtw2QFZn32sJDPVFn1HdecdHzB8IF4VRKq8ATwyeIb46sMrvn1/UESksLMTnn3+OLl26YMeOHXj44YfxxBNPYMmSJe3av/vuu/D29r76FxER8Us+PZVfkTm9ExVtOei0GkxPFl4lTevTHU4iS2m3B0MIZvZNFLQZ36cb3BRsa1BQzBqcJGgzolcsfDzkbwewHMXsUcKl5PslRyFYYIIXozFrfIqgTWL3MERF+sv+3FmWYubUXoI2sXHBSOguvzkix1FMn9VH0CY0wg+9BsTKjlhQjmLGggGCNv7B3hg0LlH2tgalwPQ7hwi+D54+7hg5u6/sKBVhCKbdN0Kwd46ruwsm3j1S9utgNAymPjBGdLl6lT8Gv6gjwnEcevXqhXfeeQepqal44IEHcP/99+OLL75o1/6FF15AU1PT1b/S0tJf8ump/IoEerrjnsHCN3whHh0xAB4Ock28XF3w0FjhG74Qd4/sA39P4S0kV50THpsmvs/KtRAAtw1PQai/cN8FrVaDx+cPladBgMlDuiM61F/QjmEIHrtjuCwNhhAM798FCQLbMvbnQvDwfSPlaTAEfXpFIVVgy6SN+x4aJcvZYRiCHonhGCCw1dDGnY+OAsMQyTqMhkFst04YMoY/itDGgsfHQqtlJGtoNAzCYwIxekZvh7bzn5wAnauT5KPCjIZBYKgvJiwa4tB29lOT4e7tJtkZYTQMvAO9MO2RcZIep/L75xd1REJCQtC9+/U/wISEBJSUtF/u2NnZGV5eXtf9qfxxeGr0YEyU0LyujXm9k3DfUHGh2vtG9cXsfsJRjfaYnBqPx8YPEmV72/AULBwlvFK/EQJgeM8YPD1L3OQ/dWgi7psuzakiBOiTEIkX7xoryn7UwG54dPEwSRoMIejepRNefXyiKPv+fWPw1CPins9VDYYgJjoQb7w0XVSkIzmlM/72wlR7A0OR8yvDEISF++HNf8wTVfciPjEcz787F4QQ0ZM4oyEIDPbCW/9dBJ2IFX50txC8/NmdYDSM6OiLRsPA298Db39/P1zcHDekC40Owms/PAKtk0Z0TRFGw8DD2w3vrHoSHt6Oc72CIvzx9qZnoXNxEu2MMFoGru7O+MeW5+Eb5C3qMSp/HH5RR2Tw4MHIy7u+iE9+fj46d5becl3l9w/DEPxrzkTcNcg+iWsEZg2G2Fefj44YgNemjRYdeieE4NW5Y/DgmP4gV67DR1uy5t0jeuOd2yeIvvkTQvDMrGF4fPpgMISI0pgztCf+dd9UaCWsEh+YOQjPLBwJhhGnMWlQd3z49ExJ21MLpvXFCw+Pg1bDCL7HbRrD+8fh41fmwlnC9tT0Kal4+bmpcHLSCDoKbVsGfXtH4+N/LYCbm/hcg7Hjk/D6246fV5tGz+RIfPL5XfCScCJm6JjueOvTxXC90oGW77W0aXTtHoaPf3gAfhK2wPoMj8c/fngQ7ldO8fA5PW0TfOeuwfh43RMIChV/ErHnoK7414Zn4OXrIUojLDoIH+94HmExQaI14vvG4aODr8Ovk8911+LTCAr3xydH30J0kuMImMofD0J/waL+p06dwqBBg/D6669j3rx5OHnyJO6//37873//w8KFCx0+XkobYZXfF0V1DVh5Khsr07Jvqpbq5eqM2/smY26fJIT5yP/cL9c
3YdXxbKw8lgW90XzdmIeLDnP6J2HewJ6ICPCRrVHVoMeaI9lYdTALja3XF/BydXbCzEGJmDOkJ6I78SdDOqK2sRUbDmZj9Z5M1DVdX9zLWafF1CE9MHtUMmLDA2RrNDQZsGVfDlZvz0BN3fWVZZ20GkwYloCZ41PQLSZYtkaz3ogdu3OwZkMaKquuT0TXahmMHp6AGVN7Ib5rJ9k5Hy0tJuzekY11a07hclnDdWMaDcHQ4QmYPqsPEpPCZWsYDWbs3ZqF9T+fQElhzXVjhCEYPDIeU+f3R3KfKNkaJqMFBzZnYuPSwyg8V3G9BiHoNzIBUxcPQurgLoI5G0JYTFYc3pyODV/vQ37GDdVrCdB7eHdMvXcE+ozuITuvxGqx4cj6U9jw2Q7kHs2/aTx5RHdMf2Q8Bk7pBY2C/C6VWw8p8/cv6ogAwObNm/HCCy/gwoULiI6OxtNPP437779f1GNVR+T3BaUUudXVKG1sQqvVCk+dDjF+fugSwJ+vYLLakFVWiSajCYQAPq4u6BneCTotfyj7fFUNiusb0Wq2wN1Zh85+PogP5j+qarHZkF1SiUaDCaD247OJkZ3gInBypaCqFoXVDWg1W+Dm7IRIPx/EhwbyTixWlkVuUSUaWkyglMLL3QU9IoPhKrBCL6qqR0FlHVpNFrjqnBDq54UekcG8GjaWw9lLlWjUG8GyHDzdnREfFQwPnronAFBa1YCCy7VoMVrgotOik58nEmNCeDVYlsP5wio0NBlgtbHwcndB15gg3pokAFBR04T8omq0GC1wdtIg0NcDSV35jyBzHEV+QSXqG1phtbLwcHdBXGwQvAWiE9W1euQVVKKl1Qydkwb+fh5ISgjjnSAppSjIr0RdfQssZhvcPVwQExsEX4H6FHV1Lcg7XwF9ixFOWg18/TyQlBQOLc8ESSlFYX4V6qqbYTZZ4e7pgs6xQfAP5I+ANNa34nxOGfR6E7RaBj6+7khM7QwngdMsRfmVqC5vgNlggbuXKyJigxAY4sNr39zQinOZJdA3GaHRMvD2dUdinyjoBL6LJfkVqCqtg6nVDDcvV4THBiM4gv9329JkwNnTl6BvaAVhCLz9PNCjfyxcXPm3h8ryK1BxqRpGvRFuXq4Ii+uEEAHH1qA3IvdYPprr7Y6xl58HegzsCjeBk1oqtw63lCOiBNUR+X3QarFg47nzWJKegQt1dTeNp4SE4M5eKRjfpQucBRwMIUxWG7bk5uHHk5k4W3lzD5UeIUFY3C8FE7t3E3QwhDBbbdiZcwHLjmYiu/Tm1vBdgv2xcHAqJqfEw00n7+SM1cZib1YBfj6YiYzCm/u0RAX5YsHwVEzuEy/oYAhhYzkcOnMRK/Zm4vT5mxO+wwO9MX90KqYM6s5bWM0RLMfh+JkirN6RgeNZRTeNB/t7Ys64VEwZ3gM+XvJqyHAcxekzRVi7OQPHTl+8qRqsv58HZk1KxeSxSfCTWQCLUorMzBJsWJ+GI0fybzqm6+PjhunTe2HS5BQEyDxlRCnF2axSbFx5Cgf3nL2pzoanlyumzOmDSTN7I6iTvPwISinys8uw+efj2L8586ZKse6eLpg4tx8m3dYfIQJHlR1RkFOKLUsPY8+aU7Car28E6OrujPG3D8TkxYMRHis/claUW4bNX+/Fzh8PwWy8Plrq7KbDuIVDMeW+UYjqES5bQ+WXR3VEVH410i6X4/5169FkMoGg/ZYcDCHgKEWIpye+nzMLcf7CJzpuJKe8Cg/8vB51rQYwBO3W8WjTCHB3w9cLZyKhk/j9bMAeAXngm3Woam65eq0bIcR+TNLbzQWf3zUDyZH8dSHao6SmEQ9/thZldU38Glf+6+aiw4f3TUO/rtKOsFfWNeOx/6xFUWU9GIa0W/+iTcNZp8U/H56KwUn8tSfao7ahBU+/txYXimugYQhvPRJCAK1Gg9cfm4SR/aQlKTc2G/DCm+uQm1fuQINAoyF47vEJGD+yhySNlhYTXn1lLTIzi6HREN5
6JG2RnSeeHI+pU1MlaZiMFrzz4mqcOHwBGg3DW1eFYQgoBe5/cixmLRggaUvHYrbi/edX4dD2bGENjf37sPixMbj94VGSNKwWGz55fjl2rTzpQIMBx3KY+8gY3PX8FEnbRizL4Ytnf8TGL/eI0pj24Gg89N4ixUXzVH4ZVEdE5VfhaEkJ7l69FhyloprBaQiBi5MTVt4+H/GB/Fsp15JRWo47f1gNK8uJ1nDSaLD0jjlIDhfnKJwrr8YdX6yE2WYTVeSLIQQahuB/98xCv1hxjsKlqnos/vdyGMwW0RqEAB/dPx1De4hzFC7XNOHud35GU6tRlAYhAAHBOw9Oxpg+4hyFmoYW3PfyMtQ1torWoBT4+4PjMXm4uNNMjU0GPPy3ZaiobpJUSOypB0dj1mRxp5laWkx48okfUFJSJ0njvvtH4PbbB4qyNRkt+NtDS1BwvkKSxu33DMVdD48SZWux2PDSvd8gN71YUtO8aYsG4qEXp4pyRmxWFq/f8z+k7T8vqdPx2Pn98Zf3F4jSYFkO7971GQ6tPyW+wSABhs7oixeXPCI7T0bll0PK/K1+eiqyuFTfgAfWbRDthAAASylMVivuXLUGtSI68l5ubMb9P60X7YS0aVhYFvf/tA4VTfxdQtuo1bfigW/WwmQV54QA9p4eLEfx6JINKK5tdGjfbDDhwU/XiHZC2jQ4juKZbzfhQnmtQ3uDyYJH/71atBMC2B0ESin+/tVW5BRWOLS3WG146h9rRDshbRoA8M5XO5F+1nFdIBvL4bk31kh2QgDgwy/34NjpQhHPieLVV9ZKdkIA4Ouv9mP//nOiNN79+xrJTggA/PztIezYmCHK9sO/r0FumjQnBAA2/ngMG388Ksr2i1fXIG3/OUlOCADsWnECyz/ZKcr2+9dXS3NCAIACh9adwnevrZb0vFRuPVRHREUWX5w8CYvNJtpBaIOlFPVGI346c8ah7fcn0mGwWCRrcJSixWzB0hOOb+Y/HTuDRoNJlobZZsN3Bx13kl5zNBvVTeIn7zYo7BPz1ztPOLTdeuwcymqaZGlwlOKrTccc2u49kY/C0lp5peEp8OWqIw7Njp26iHMXKmWVVCcE+HLJAYcTZmZGMTIzi2WXbf/qf/scPjbvbDmOH7w550Qs3366x2FH3uKCKuzblCnZQWhj6ce7YDZZBW2qSuuw5YfDsrs1L/9oJ1r1RkGbxupmrPlomzQn5BrWfLwdjdVqO5DfM6ojoiKZJpMJG86eAyvz7sRRih8yMmFl+Vt9GyxWrM7Ika3BUooV6dkwWvlvtBYbixXHz0h2Qq5qcBQb0s+i2WgSsOHw80H5kwXLUezKvIC65lZeG0oplu/JkN3OjOMojmYXoby2SdBu1Y4M2f2COEqRlXcZhWXC0Z01m9Pll1KnQGFxLc7lC0d31q1PU5RXUFnZhIyMIkGbTatOKdJorG/F8UN5gjZblp9Q1OnW0GLGoe3Zgjbblh1V1IzPYrFi75pTgjbblx6Q/RsE7BW8d/xwUPbjVX57VEdERTJrcnJh45R12Kw3GrG74CLv+Jac8zBYhFdrjmi1WLA19+baBW3syS2wH+lVgNXGYmM6f6j+6LliVDW28I6LgVJg3fFc3vHMC5dRVFkvd0EJwF7/Yu2BLN7x/KJqnL1YqWjC0DAEa3fxR8JKyxuQnlWirOGfhmDdVv5IWE2NHkePXFDUjI9hCDasT+Mdb240YP+ObOUaK0/yjhtbzdi55rSiTreEIYLbM1aLDVt+OAJOQVNBANjwLX+UimU5bPxyt+StpWuhHMWGL3crer9VfltUR0RFMmmXbz52KhUtwyCtnP866aXlitvUaxiC9FJ+jYzicmgVJrkRYr8OH5mXlGtwlCL9Yhm/RoHy94rjKNLy+DXO5F1W1LQQsEd30s/x54nknLusTAD2RnkZ2fwa58+Vy45OtcFxFFlZ/BoX8ipuOj4rR+PsGX6NogtVDrdVHEE5igu5l8Ha2o9Mll+qQUu
T41wuYRHgcmENWpvb356pK29AXUWjMo2r12lwbKhyS6I6IiqSaTQZFa2+AXsX2maTmXe82WRW1KYesN/M9Sb+iIfeaFY+KVGgSSCqojeYIXvP5BoaW4U0TLIreF5Ls4BGi8HcIScT9C0CGq3mDnkdrQb+71WLwGuUgsFg4R1r1XeMhtXKwmKxtTvmKO9CCi08z7eFx3mQpdHU/rVamvi3HCVrNHbctVR+XVRHREUyThrlpZgJCJwE9rftvU8Ualw5yiuk0RFOgk6gNLWU3jJyNaT0lhFC6DpOWgayMxavga9KaZtGR1QT0Ah95h30Xgnlf2gFqqR2lE5HlkPne74d9V4Ja8grPti+RsddS+XXRXVEVCQT6O4u2LBODJRS+LvxV9wMcHeHhijdNiHwc+fX8PeQV/HzWjQMEbyOv6ebopwHwF5TJNCLv3Kon5cbWIU5O4QQBHjza/h6uyuOUAFAgEAFVF8fedVRb8TPh//zECrxLgVvIQ0/jw7R8PB04XVEfP07RsNJp4Gbe/sVfH0EStVLgTAEXjzvu09gx9WH8glQa039XlEdERXJTO7WTfZpljZYSjG5Wzfe8Yk9uipOiGU5DpO68xfqGt+zq+LJleUoJvTkfx1jU7sqSvAE7Dki43vza4zq1QVEYWiHUooJ/eN5x4f0ioFWqzyfZtygBN7xvqlRcJHQ1bd9DYJxI7rzjvfsGQFPT3ll7dtgGIIxY/iruHbtHirYb0aUhoZg1MQk3vHOXYIRFhWgKGqo0TAYNrEn73ZYcLgfuvSM4O3OKwZGw2DAuCToXNr/XL38PZA8LEHR6R9GwyBleAK8Osg5U/n1UR0RFckMi45CiKf8Gy1DCHqHhqJbIH+32N4RoYgN8JM9vRIAXYMCkCJQXTUhNAhJEZ1kH0klAMJ8vTAwjr91eWSgDwZ0i1R0BNLPwxUjEmN5xwN9PDAiNU5Rwqq7qw5j+vI7bd4erhg/OEGRhpNWg4lD+Z0EN1cdJo9LUqTBEGDyWP4JXKfTYtq0Xoo+D46jmDqFv9S7RsNg+rx+ivJdOJZiyuy+vOOEEExfNEhRrhbLcpi6QLhK7LR7his60cKxHKbeNVTQZvpDYxSd/uFYDtMfGiv78Sq/PaojoiIZhhDc2StVft0KSnFHL+GeHYQQ3NE/VfaNlgK4o1+Kw8lg0eBURRGLhYNSHU5qtw9Lkb09wxCC+UNTHOaazBuVIju6wzAEs4YlwcVBI7/ZY+VraBiCScN6wMNNuJHfjInKNEYM6eZwi2fylBRZ1wfs71X//rEIdtCcbvz0VGg08n4hjIYgMSUSnWOE2yCMmp4KZ2cnWXlODEMQEx+CrknCjeOGTUmFh7errMgLwxCEdA5AymDh9gEDJqXCN9hbVuSFMAR+nXzQf2KK9CeocsugOiIqsrgjNQW9w0Il54owhGBi1y6Y1M1xb5M5qYkYHBMpOWLBEIJhcVGYmeK4Cdqknt0wpkesZA0NIegdHYbbB/Z0aDs8MQZT+yZIvplrGIL48CDcObq3Q9ve3cIxd2Sy5DlJwxBEdfLDvVMGOLRNiOmExdP6SVSwawQHeOGheUMc2nYO98f9i4RX0Hwavr7uePSekQ5tg4O98cgjYyRrMAyBp6cLnnxyvENbH193PPHCFFkarq46/OXv0xzaunu44Jl350quSEoYAp2zE/76j7kOHXWdixP+9vEdkPrlJcSeUPvcf+90qKHRavDCdw/b7aTIEIBhGDz/7UMdmryr8uujOiIqsnDWavG/mTPQIzhI9CROAAyL6owPJk0U9Rgtw+DjuVPRKyJU9H2QEKBv5zB8OGeyqPodDEPwz9smYVCXSNH3QIYQ9AgPxid3TINORNY/IQSv3j4WI5P4t1fa04jt5I9PH5oBVweRijaNv94+EhMH8udg3KTBEIQH+eC/f5kFD1fhSEUbD80bgpljkkVraBiCIH9PfPLiXHh7uop6zKK5/XH7LPEOj4Yh8PVxx4dvzkeAyETRmbP
64K67xTs8bU7Iv96/3WE0pI3x01Lx4F/GidfQELi6OeOd/y5GeGdxHaqHTkjC46/PtDcwFPEFZjQEzi5OeP2LOxHdTVxTyH6je+Cv/1kEhiGitpsYhsBJ54RXvrkP3VI7i9JIHpaAv//wKLRajajICGEItFoNXlr6CJKHif/Oq9yaqN13VRRhslrxjwMHsSI752rJ9mu/UAyx19pw1znhrl698MSggZILfFlsNvx77xH8fDoLZpvtJg1y5f93cdJiQZ9k/GXUYOgkHjG2sRw+3X0MPx7JgMFivdo19loNwJ7nMLdfEp6eOBQuEo8LchzFVztPYOneNLSYLGAIuWlbiBC7Aza1X3f8bdZwuDnrJGlQSvHDjtP4futJNBvM7WrYO/sSjO/fDX+7fSQ83aQlb1JKsXpnJr5ddwyNzUYwDLlp64m5srod2a8rnrlrFHy9pJ9Q2rwzC98sO4y6htb2NRgCSimG9I/DXx4ciwAZyYp79uTi66/2o7q6mVeD4yj69Y/FU0+OF+2EXMvhvefw1ce7UHm5od329oyGgGMpUvtG4/HnJyMsUpwTci0nD5zHV//cgrJLtTwaDDiWQ2LvKDz6ynREde0kWSPzSD6+eGUNivMq2tVo+7f4XlF49O25iEsS15n6WnKPX8Bnf/0BBZnF0GiZm/rttP1bXEoUHnl/EXoM6CJZQ+XXQcr8rToiKh1Cs8mEtWfP4qfMLFxubobZZoOLkxOifX2xODUZU+Pj4eqk7EREi9mCjdnn8PPpLJQ0NMJstcHZSYvOvj64vU9PTE1KgIfEiftGDBYrtmaex/LjZ1BU0wDTFY0wXy/M798T03p3h6eLuOgBHyaLDTsz8rDi0BlcrKyHyWKFzkmLTj4emD24J2b07wFvd2UnOyxWG/amF2Dl3gwUlNXCaLZC56RBoI8HZgxNwrQhifCT4Rxci83G4mDaRazemYG8S1UwmqxwctLAz9sdU0YkYvrIJAT4KjvJYGM5HD9diLWb03E2vwJGkwVarQY+3m6YOCoRU8f3RLDCI6AcR3HqVCHWr09DTnYpjEYLNBoNvL1dMXZsIqZMTUVIiI8iDUopMk5ewsZVJ5GVVgSjwWKPsni7YuT4JEyZ3UeWA3KjRs7pImxadgzpRy/A2GoGYQg8vFwxbGJPTL6tPzrHBSvWOJ9ehE3fH8KpvbkwtJhBALh7u2LIpBRMvmMIYrqHKdIAgPz0S9j0vz04tiX9alVWdy9XDJzcC1MfGI2uvaIVa6j8sqiOiIpiTDYbthbm4fDlYjSZTGAYBn4urhgXFYcREdHQOIhqUEodhnEtLIsdBRdwsKgIjSYTCAAfV1eMjonB6JhYh5ETMRpWlsXeC4XYf+ESGoz2G5qPqwuGxkZhbLc4h5ETMRosx+Fg/iXsPVeIhlYDOAp4uzpjYFxnjO/RBc4OIidiNDiO4mheMXZnXUBjqxE2loOXuwv6xUVgfEpXh9s3YjQopTiVV4pdafmo1xths7HwdHdGSmwYJvWLh5uLsJMnViMz/zJ2HctDXVMrLFYWXu7OSIwLwcTB3R0ms4rVyM2vwK6D51Db0AKLhYWHuzMS4jphwsge8PIQdvLEaABA/oVK7Nydg5paPcxmG9zdndElLhgTxibBR6DOiBSNSxersWPLGVRXNcNsssDN3RnRsUGYMCUFfg4iQG23dkc6JYXV2Lk+HVXljTAZzHDzcEFkTCDGzeiNQAcRILEaly/VYNeqk6goroWh1a4RFhOI8XP7IzjCr0M0KktqsfPHI7h8sQoGvRGuHi4IiQ7E+IVDEBoTJPhYlY5HdURUZFNjaMXXWafx87ksNFvM0BBytWaIljCwUQ6d3D1wZ2Iq7uyRCjcn6RGIBqMR36anY1nWGTSaTNdpaAgDlnIIcHPD4uQU3JWaCk9n6REIvcmMJacysOx0JuoMRmgYcvU0Rtv/9nV1xYLePXFnv17wcZUegWg1W7DseCZ+Op6Jan1ruxpeLs6Y168n7hz
US1YBNZPFhhVHz+CnQxmoaNBDwzDgOA70Gg13Zx1mD0jEHSN6I8hbegTCamOx5lAWftqbgbKapus02rYmXHVOmD64BxaN6Y1Qf+m/RRvLYeP+bCzfkYHiinporly3TYNyFE5OGkwe2gMLJ/VGRLCvZA2Oo9i2LwcrN6fhYrF9i4LjOFD6/9s4Wo0G44Yl4PYZfREVLj0CQSnF7n1nsWbtaeRdqGxHw/7fUSMSMH9OP8TKmAAppTi47xzWLD+BczmXb9aAfatw6Ih4zF04EN0SQiVrAMDx/eexZslhZKcVXdGgdgeJIVe3OweOiMecu4aiewr/EXUh0g6cx5qv9iHjcD4Yjb1yLuWuaBACjuPQd0QC5jwwCj0HxsnSOHM4D6s/2Y5Tu3PAMO1osBxSRyRg9qPj0Gd0oiwNFemojoiKLPLqa7B4y2rUGQ0OC5YxIIj3D8D3k+YgyE18tcpLDQ24Y+0aVOr1jjUIQbSvL5bMmo1QCXVLLjc14+6f1qKkodHh0VyGEIR6e+K722ejs5+PaI0afQvu/34dCqrrHGpoiL366td3z0ZckPjJr6HFiEe/Xofc0iqH1dU1DIGXmwu+eGAWEsLFT356gwl/+XwjMgouA1T4AIaGIXBz1uHjx2cgOUb85GcwWfDCJ5txPKvo6gQnpKFz0uL9v0xHnx7iJz+zxYY3PtyCA8cv3JTf056GRqvB23+bhoG9Y0RrWK0s/vWfbdi1JxeEEMFy9BqNfRL8+/PTMHwofzG6G2FZDv/993ZsXpfebs7KjRqUAn99aSrGTnR8eqsNjuPwzX92Ys2Sww41GA0DynF47KVpmDxPfAIxpRTLPtyBZR/tuJqfIqTBsRzufXEaZt8/QnT9FUop1vx3J75+dbVojUXPTcXCZ6d2SE8jFWGkzN/qqRkVAEBxUyPmbVwhygkBAA4UefW1uG3jcjSZxTX5qtDrMX/lClFOCGCvN1LU0ID5K1agziCuC2hdqwELl65EqQgnpE2jokmP25euQGVziyiNJqMJd3y9ChdrHDshgL2KbF2LAYu/Womy+iZRGgazBfd9vhrnyqpFtXhhOYqmVhPu+XQlCqvqRGmYLDY89sk6ZF4sB3XghLRptJoseOg/a3C+pFqUhs3G4q//3oCT2cUAxGmYLDY8+a+1yMoX1+WZZTm88v4mHDxRYNdwIMJyFFarDc+9uw6ns4pFaXAcxT/e34Lde3OvaAiLsCwFy3J4/e31OHLsgigNSik+fn8btqxPv6rpSIPjKN57cyP27swRpQEA3/x7B9YsOSxKg2PtkZhP3tqIbatPidb46SO7E9J2DUcaAPDNOxux7psDojXWfrYLX7+6WpLGj//chGXvbRKtofLroDoiKuAoxT3b16LFYpZUup2lFMXNjXj2wA6HtpRSPLRpIxqMRskalS16PLVtqyj7p9ZtQZW+RbJGg8GIx9ZsEtV07aU1O1BW3ySp8BZLKVrMZjz8w3pRGm+t3ouLlXWSNDhKYbLa8MhX62ETUanywzUHkVtUJanYGkcprCyLx/+7DmZr+51hr+XLNUeRfr5UUtE4Su0T7NMfrEOLkb+TbhvL1p3E0dMXJTXMo9T+98K769EgotX9+k3p2Lv/nKS+f222r7+9AdXVzQ7td27NwtYNGdJ7CxLgvTc3oqzEsQN6eFcu1iw9IlHAzsdvbcTF8xUO7dIOnMePHzq+J7THV29vQO7pQod2uccL8NXLq2Rp/PjPTTi9R7zjpvLLozoiKjhUVoSLjfWy+sewlGLnpQso1Quv9NMrKpBdVSVb40hJCS7UCd9oz1fV4ERxmWyNrPJKZJVXCdqV1DVi3/lCeRocxcWaehwvLBW0q2luwdb087IqvrIcRXl9Mw6eFb6ZN7easO5IjiwNjqOoazZgV1q+oJ3RZMWqXZmymvZylKLFYMb2I+cE7axWFis2pcmqwEupPfqyZU+28HPhKJavOiFDwe6MsCyHTVszHT6XlcuOyesdQwEKig1
rTjs0XfX9Idm9YxhCsOGnYw7t1ny1T3bvGA3DYL2IqMjaz3dBI7P3EaNhsPazXbIeq/LLoDoiKliak6Gomy5DCH4+myVo88OZTEUaGkKwLOuMoM3P6VmK+pRoGIJlaZmCNitPZcnuTdOm8dNxYY21x5Wt1hhC8NNhYY1Nx8/CdqXui1yN5fuENXYdPw+j2SpbAwBW7swQjHQcPHkBTXqj7OtTSrFmW8ZNNTGu5VRaIWpq9LI1OI5i4+YMWK3873duVilKimplOW2AvTfN9k2ZMBotvDYXz1cgL7tMdu8YluWwd8sZ6Jv53+/yohpkHM6X3TuGZTkc3ZGNuir+hU1dRSOObsm4qcaIWDiWQ/q+syi/JG57UeWXR3VE/uRUG1qxt0TeCr8NllIsO3eGd8JoNpuxNT9fscaqnBxYeCZPs82GtVm5irrpshzF5tw8tJjbv5lzHMWq0znKXgdHse98Iepa+LcDVh7NUtT/hqMUJy+U4rJAPsrqg1mSS4PfqHG2uAoFl2t5bdbuzVLUHZYCKK5oQE4B/3bAhp1nFDWwA4CauhakZZfwjm/eqlyjWW/C0eMFvONbN2ZCo6ADLQCYTFYc3MsfQdqxLk2xBmtjsW8L/4Jg58qTijrptrFbIB9l1/KjCntN26MiO348rPAqKh2F6oj8ySnTNynq4NlGk9mEFmv7E3ilXg8bJ7+7ZhtGmw31xvYn8NpWA8w2+Sv8Nmwchyp9+0mrzSYz9CbHOQuO4ChFeWP7OQNWlkVNc6tiDQAoq+N3RC7XdsznXlbDr1Fa1Sh7hX+dRnUjv0Z5g+yGgtdyuZJfo6S0XrEGwxBUCGiUFtcKRmXEoNEyqChv4B2/3BEaGgYVpfW84+XFNYq69QL28u0VJfwObnlhteztpTYopai4VKPoGiodh+qI/MkxWJWFzq+llccR6VANS/vXMlj4Q9LSNXheR0dq8ERdlG5lXIuB51o2lhOVzCoGvtcBAGaez0oqBiP/dUwm5RoMQ2AQ2NIQ2u4QrUEIDAb+6wiNiYUAMApcp7VFuRNNKWBo5b+OocUsKWm4PTiOwijwXE2tZsWOIeUoDHpxp/1UfnlUR+RPjpvCsuvX4uHUfuGxDtXQtV9AzZ3n32Vp8JSJd1dYPv46DZ4qpa7OHfde8T1frYaBUweEzwH+1wEALiKa9YnB3ZVfw9VBtVcxcByFuxv/ddwExkRrUCp4HXd3ZW0D2nAV0PDwVNY2ALBXN3UTeK7uHi6KoxUMQ+Dqwa/h4u6seKuMMATuXuKaMKr88qiOyJ+cSC9vRcmXbfi5uMKdx+EI8fSEk8RGd+3h7uQEP9f2bx4B7m6Ke9kAgE6jQbBn+9VJPZ2d4S2jAuuNMIQgzKf90tlOGg06+Ygv3sYHARAZ4MM7HhHkq3ifHQAiBSqgdg7165DCUZEh/BpREX6KJyUAiAjlLzMe1TlAsQbHUYSH8Wt0jg5QnL9hs3EIj+AvmBcRHag8R4RlER4VwDseFhukKC8IuPJeCVSkjejSSXFEhBCCsFhlfXdUOg7VEfmTE+DqjrFRcYpPzSzqnsI76Xg6O2NafLziUzPzk5LgxNMbRqfVYk5yD2UaDMH0pATe6ArDEMzv11PxqZlxPbrA151/NTZ/sHKNQfGd0cmX36GZO1x8Jc72YAhBz5gQRHfin1xnj05WFKYnBIgN90dCNP+EMX1ciqJJiQDoFOSF1B78nWKnTlKmAQA+Pm4Y0I+/iuuk6b0U52+4uekwdGQ87/j4Wb0VazjptBghUMV1/Nz+HeIkjJndl3d8zG2DFDu4HMdh/OIhiq6h0nGojogK7uyRqugkCADcniA8sS1KTlF8amZBz2Th59Crp+ITLQt6Cb+OuX2SFE2uLEdxe3/h1zGjX6KiVSXLUdw2OEXQZnL/BOichBv+CcFRittGCmuM7tdVcFvFEZQCc8emCk46g/vGws9BgzlBCDB7YqpgxCM1pTNCHDR/E4JhCGZMTYVWy/9+x3cPRbS
CaALDEEya3gvOAlt7UXHB6J4SKTu6o9EwGD01Be4CWzzBEX7oMzxBfh0RDYOhk5LhE8DvRPsGeWHItN6yozuMhkHfMYkIFogeqfy6qI6ICgaGRiDeL1BWNIEhBJNjuiHEQ3g7IblTJ/QODZWloSEEI6OjEeMr3AgtLtAfQ2M6y9boExGGHiHC4dowXy+MS+wiK2KhYQjiQwLRJ0q4Tbq/pxum9e0hW6NzgA+GJEQJ2nm4OmPOsGRZE5+GIQj29cCoFOEmZc46LW4b30vWFhDDEPh4umL8IP4VPmDPd7ltGv/qWVCDELi56jBplHAjNIYhuG3eAFkahBB7M7+Jws4nIQS3LR4k65QRIfbJddqs3g5t590zTHbEggKYfvtAh3azHxgpu44Ix1HMvHe4Y41Hxsp+HRzLYc5j42U9VuWXQXVEVEAIwTcTZsLH2UXSJK4hBF19/fHusHGi7D+bMhWB7u6SNSK8vfHvCRNF2X8wYxLCfbwlawR5euCT2VNE2b81cxxig/ykaTAEPm6u+GzRdFFh5RdmjkT3iCBJBdo0DIG7sw6fPTATGhE5OU/MHIJeceGSHB6GIXB20uK/j8+Ezknr0P6eGQMwMDlKmgYhcNJo8OHfZsFNRDLqbdP6YOSgbpKcKkIIGIbgvZdmwdvTcdLi1EnJmDg+SaKG/e/NV2chwN9x3s+ocYmYNV98Y7k2DUqBl96YiZAwxx2LB4yIx4IHR0jSaOOZN2chqovjvIqUQV1w7wtTZWk8+uZsdEvp7NCuW+9oPPb+Alka9742B8lDhR1clV8X1RFRAQCEeXph9YwF6OTu6XDSIFf+egZ2ws9T5/OeZLmRQHd3rJp/GyJ9fERrdA0IwIp58+HtIi5J1MfVBcsWz0VcoL+olThDCDr7+WL5nfPh7y4uxO/urMP398xF97Cgq8/TkUaItyd+vH8+OnmLS0R10WnxxYOzkBptj56I0fD3dMeSx+cjQiBJ9VqctBp89Oh0DOze+co1HGgwBN5uLvj6mXmIDeVPWLwWrYbBu09MxfDesVefpxAahsDdVYdPX5gjmBty4/N65clJGD+8h2gNF2ct/v3KHCQnhIvSIITgmScnYOrklKuaghoaAicnLd5+fTb69o4WpQEADz4+FvMWDhSpwUCjYfDyW7MxZLj4iXXxI6Ox6OFRdg0H2xsaDQOGIfjrW7MxekqKaI3ZD4zEvS9Ou3oNRxqEEDz21hxMXjRYtMbku0fgsfcXghDisNx72/h9r8/BnMfFLZxUfj0IVXro+xdEShthlY6h0WTE0txMLM3NQK3RAC1hwIFemXAJbJRDtLcv7kpMxfz4nnDROl4V30iz2Yyfss5gSWYmqlpaoGWYq5VEGUJg4ziEe3nhztRULEjqKes0jMFixc/pWfjxdCYuNzW3qxHi5YFFfVJwe6+e8HCWfnzSbLVh1elsLDueieK6xnY1Aj3dcXv/ZNzeP1nWiRurjcX6k7lYdigDhVX19kgHpaDXaPi6u2Le4J64fUgK/Dyk50vYWA6bj5/Fz3szcOFyLTQMA2pvYAKGIbCxHLzcnDF7WE/cNjIFgd7tnyoSguModhw7h5U7M3C2sOpKxIaCu6LBshzcXXWYMTIJ88f1QrCICMKNUEqx92geVm9JR/b5cvsESK/XcHF2wpQxSZg7uRfCOvnI0jhyrABr1p1CZlYpNBoCUFyn4azTYvy4JMyZ2QcR4fzJvEKcPFaAdStP4vSJQjAMASEEHEfBMAQcx0Gr1WDMhCTMmt8fUTGBsjQyjl/E+mVHcfJgHgi5UYNCoyEYPqEnZi4ehLiEUFkaOScvYt03B3BsV479HsIQcCwFo7FrEEIwdFIyZt47XFQkpD3Ony7Eui9249CGNFD6/8+fYciV5oYUgyalYObDY5A4sKssDRXpSJm/VUfkTwbLcTBzNrhqnAS3CGwch93FF3GkrBiNZhM0hMDXxRXjo7ugf0i44GM5SmGyWeGqFdZgOQ4HiopwoOg
SGk0mEELg6+KC0TGxGBQZKbiytWvY4KLVOrQ7eqkE+y4UotFoAgWFj6srhsVG2fNJBLYwKKUwWm1wcRLWoJTi1KUy7Dl3EQ0GIyil8HZ1wYDYSIzoFgOtwIrwqoZWK7gCppQis6gcu7MK0NBigI2j8HJ1Rr+4CIxMiuU9TXSthrNW4/D15hZVYVdaPur1BlhZFl5uLkiNC8Po1DjBrRh6pfOvzoEGAOQVVWPX8fOoazLAYmXh6e6MpLgQjO7fVbD2CKUUZosNWq1G8D0FgMLiGuw8dA51Da0wm23wcHdGQlwnjBkaL1h7RIpGSWkddu3JRW2tHiazDR4ezoiLCcaYUd0F64JQSmGx2KDRMIIJrABQXlaPXduzUV3ZBJPJCg8PF0THBmH0+ER4OqiDYTHbQBh7jooQVeUN2L0pE1WXG2A0WODu4YyImCCMmZoCb193BxrWKxrCC5LaykbsXnMKFcV1MLaa4ObhgrDoQIyZ3Q++gcJOp8Vs7/KscxbWaKhuxu7lR1FWUAWD3ghXDxeERgdizG2DEBAqvG1ltdhAOQqdS8fV8fmzozoiKtdRadBjeUEGll/MRI2xxb6aBkGMlz/u6NYbM6ISeYuRiaXG2IrleVn46XwmKlr1V1fsnT19cEdCKmZ3SYS3s7IaHPVGA1aezcFPOVkoa7aXKCcAwr28sSgpGXMTEuHLU2dELE0mE9Zmn8WPGWdQ0tB4VaOTlyduT+mJ+T0TRW/h8NFiMmPDmXNYdvIMiuoawFF7xCnQ0x3zeidhbu8kBHtJjzpci8FixdaM8/jpSCYKKuuuavh5umF2v0TM7Z+EEF9lvymTxYYd6XlYfiATeZdrriYP+ri7YvrA7pg7pCfCRW4T8WGx2rDn9AWs3J2Jc0WVV3sJebm7YPLg7pg9MhmdOznOjRDCZmNx8FQBVm3PQE5+xdUjrh5uzhg/NAEzxyUjJkLcVhSvBsvh2ImLWLspDVm5ZbBdadjm5qbDqGHxmDE5FV0U1rVgWQ6nThZiw9rTyMgohu1Kkz1XVycMHR6PaTN6I15mZKMNjuOQcfISNq48gbRjF2G12J0EZxcnDBoRj6nz+qF7zwhFx2sppcg+WYhNPx7FyX3nrnNE+o1MwNRFg5DUL0axxrnThdj03UEc23YG5ivVc52cteg9ojum3jMcKUO7gemA+kd/Vm5JR+Qf//gHXnjhBTz55JP48MMPRT1GdUSU0WQx4u8nt2NbyXmA4KZGagT2THgXjRZ3deuLp3sOh1biD6/FYsYrx3Zj/cWzoBTgcLMGYC8Utig+Bc/3HQGdwOq9PYxWK944tA+rz+WC5W5U+H8dDcNgXvdEvDx0BFy00lY2ZpsN/9x/CMszs2G90ljvRh2GEBAA03sk4NWxIyVXc7WxHD7ccwQ/nMiAxcavAQATenTBa1NGw0vidg7HUXy++zi+P5AGo8V69TO+UYOCYmT3WLw2Z4zk7RxKKb7bdQrf7DyFVpPlasLkdRoMAeUoBnePwisLxiLIR5pjRSnF8l0Z+HrjMTS3msEQctP3V8MQsBxF34QIvHzPeIQESL9HbNidhS9/PoxGvfFqSL89jeSEMLzw4DhEChQ+42Pn3lx8/s1+1De0tq+hIWBZioRuIXj2yQmIiZK+1XLwwHl89sku1NboeTQYsCyH2LhgPPPsJHTtFiJZ48ShfHz23hZUljdevV57Gp1jg/DkS1PRIzlSskbG0Qv476vrUF5U264Go2HAsRzCogLw2BuzkDJQ+ORWe+SevIhP/vYTivMqBDWCI/3xyNvz0G9skmQNlVvQETl16hTmzZsHLy8vjBw5UnVEfgUqDXos2PMjSlsaRdXWIACGh8bi86Gz4awRl/dRa2zFgm0rcKGxTlS3WAJgQEgEvh07G25O4ibxZrMJi9evRk5NtSgNhhD0DArGkulz4CUy76PVYsF9q9fjdOllUY3gGELQNcAfS2+bAz83cREYs9WGx5ZvxOGCYlEaGkIQ6e+D7++
cIzo6YmVZ/G3ZVuzO5u/yep0GQxDs7YnvHpqDMD9xdTJYjsOrP+7E5pP8XV5v1PDzdMNXT8xBVLC4SZxSivd+2IvV+/i7vN6o4enmgs+enYMuEeImcUopPlt2CMs28nd5vVHD1UWHD/8+G93jxE/iS346im9FdnllGAKdTov3Xp+D5CT+Ams3snrlSXzx6W7RGhotgzfenou+AgXWbmTr2tP4+J1NAG52OtvTYBiCl/4xD4NGJojW2LshHR88u+JqXocQ9hNJBM+8Nx+jpvcSrXFkayb+8eA34FjO4fFf+5qA4PF/3Y6Ji9TiZ1KRMn//4nGnlpYWLFy4EF999RV8HdSBUOkY9FYz7tz3s2gnBLCvmg+UF+KvxzaJmvCNNivu2rkaBSKdkDaNE5VleHTfRrAiuvGaWRvu27weuSKdEMAe9cmursKDWzbAwjruxmvjODy+fjPSyspFd6PlKMWF2jrct3odTFabY3uO4tm123HkYoloDZZSlNQ34r4f1qJFoLFcG5RSvL56N/bkiHNCAHvhs6omPe7/31o0GcQ1AHt/7QHRTkibRr3egAc/WYM6kV2Fv1h7VLQT0qbRbDDh0X+tRmVd+12Nb+THDadEOyFtGgaTBU+9tQalFfwdbq9l3eYM0U4IYP+emM02PPvqahQWiesMu3N7tmgnpE3DZmXxykurkHe+QtRjDu89i4/e3nTFQRCpwXJ467mVyM4oFqVx+mAe3n92BTiOiioYSKld5/1nVyDtUJ4ojexjF/DuA1/DZmNF1SBpc4g+/ttPOLIlQ5SGijx+cUfk0UcfxeTJkzFmzBiHtmazGc3Nzdf9qUjni9xjuNhcJ7nKKAXFlpJz2HP5gkPbb3PTkFtbLVmDoxR7SwuxodDxZLY8Jxunyy9L1mApxfHLpVh5Ntuh7cbcczh4qVi0o3OtRnZlFZamO75B7T5fgB1nL0jX4Cgu1tTjm8OnHdoeu1CC9afPSi6IxXIUlxua8Pmu4w5tsy5V4Of9mdIErmjUNrfi441HHNoWlNXi280nJGtwHEVzqwn/WX7AoW15dRO++OmQLA2jyYIPvtnj0LauvgWffCneQWijLZH1/Y93OLRt0Zvw4QdbZWgArI3De+9ucjjpm4wWfPDaeska9pNEFO+9vAacg0WHzcri/b8tv3kPUaTOv/66/Go+DB8cx+H9x5fYHRAZOv9+6geYOqBDskr7/KKOyPLly5Geno53331XlP27774Lb2/vq38REeLDkyp2zKwNPxWkS5702tAQgqV5whMfy3FYcjb9pnwQsTCE4Puz6YI2lFJ8f0bYRggC4PszGQ5vtEvSMmX3daEU+CEt02F058cTmbJ74HCUYvnprKs5JXz8fCRTUvGza2E5irUnc2CwWAXtVhw8o0hj6+nzaHYQeVm9V5nG/vQC1Da2CNqt23lGdodYlqM4mVWMsspGQbstO7IgIujXLhxHkXu+HBcvVQva7dyRBauDCVhIo7ioFmdzLwvaHdiZA0OrWZYG5SiqK5qQfrxQ0O7Y7lw01bfKap1AKUVTfSuO7c4VtEvffw7VZfWgcqqxUsCgN+HghjTpj1URxS/miJSWluLJJ5/EsmXL4CKyGNULL7yApqamq3+lpaW/1NP7w7K9NA9NFnFh9vZgKcWRqiJcaq7ntdlXVogqg/DNXgiOUpypqUBObRWvzYnLZShqapTp6tgXPRcb6nGqnP9Gm11Ridwq8ds+7VGhb8GhS/zh58KaepwsKlPUA6fBYMSe8/xbLhWNeuw/V3j1RIkcDBYrtmXyh7gbWozYnpanSMPGsth44izveIvRjM1HchVpgALrD+bwDpstNmzYnaWoMRvDEGzYzb91ZGM5rNvs2AkWQsMQrN+SyTtOKcW61adl/z4Ae5LshnX8kyulFOt+Pq7odAqjYbBxpXCEa+PSI4q6GzMMwcYfhKNtG789ILv/DWCvf7Lhm32yH68izC/miKSlpaG6uhq9evWCVquFVqvFgQM
H8PHHH0Or1YJtZ//e2dkZXl5e1/2pSGNryTkwChu8awjB9tLzvONbLuUp6nLbprG1iH/i21KQBy1R9vXUMgy2FuTzjm/LuyCqFLoQGoZg23l+jZ1nLyh+rxhCsC2HX0NKXggfhEDQETmQXSgqr0cISoHtp/k1TuQUw2xxnHMjBEcpdhzn/+5mnC1Di0HeCv+qBkex8zC/xvm8CtQ3iMuH4YPlKPYc4N++vFRYg4qKRnnbGW0aLMXB/ed4nbKq8kZculClyKHiWA4nD1+A2dR+tK2pvhU5py8pcgw5jiLn1CU01bf/npsMFpzekyu7/w1gj+4U5pShqqRO9jVU+JFeFlMko0ePRnb29Xv0d999N+Lj4/Hcc89BI/EIp4o4qo162VsmbRBCUGcy8I7XGFsVd+slIKgX0KgzGsFSZRMfRynqjfwa9QajuOw7AViOos5g5B2vazXYV5RKbuaUorZF4HW0GKBhGNiU3GgpUKvnnzwbWgxXj7IqQShhtV5vaPe4sVTqm/nfqwaBMSk06fk/84bGjtFobTWDZbl2S6Q3KHR02rDZOBgMZnh43By1buSZ2KVCKUVzkwGBLjefzGqskx9ZvZGm+hZ4+91cgE3f0KLImbqWxlo9giPVrr0dzS/miHh6eiIx8fqulu7u7vD397/p31U6DpvCVSsAgAI2yr/3bOPk7UvfICH4XPnqhUjSoBRWAY0Oea8AWAXeD6UTdxtCJ4CURiraEHJk7GPK3QTBz5zl0G5REokIvR831oyQrSFwnY76PNqu1Z4j0lGvA7Anrrb777+ChpIohVgNvn+Xg81BrpaKPNSycX8wfJ2VVf1sw1vHn9fj6+wGonD7hwDwEtDwdnbugO0fRrCaq7eLs6L9b8C+beIjkAPl5aqsYm0bfu789Uo8XZwVhbbb8HHjfx2ers4OTz+IwUtIw82lQ1auHgLvuadA6XUpuAloeHSQhpOThresvqensirF19JeNAQAPLw6UIPnWh7eyiohi7mWh0/H3BPtGh13LZX/51d1RPbv3y+6mJmKPAYEd1acI2KjHPoF8VdF7B8SAcUrY8qhfyf+U1H9wyIUb//YKId+YfzdVftFhCuOinCUom8Ev0bfKOUahBD0ixLQiI1QlHAL2B2q/nH8n3mfruGKI1QahqB/N36NlK5hChXsGv2682skdg1VlBjZptEnkf+7261LJ4f9XRzBMESwqFl0TBBcXaVV9m1PI6F7KG/n2rAIf3j7Kpt4CQEiYwLhzuPs+Ad7ISjUR5EGAASF+sA/uP2cQncvV0R27aR40eHt74GwmCBF11BpHzUi8gdjXmwylPzeCIBIDx8MCo7itZkV1wM6kdVX+QhydcfoyFje8cldusJTYgn1G/FydsbkOP5um6O7xMJfZGVUPly1WszowV89cmB0JMJ9vBS5hhpCMDuVfzuzZ2QndOkUoOhzB4C5A/hLWceFBCAlJlT2UWfAvk01b2hP3vGwQG8MTIxS5CiwHMW80Sm84/4+7hjRv4vsI8JtGnMmpvKOe3q4YNyoHu1uqYiF4yhmT+WvGOri4oRJU1LAaOS/Do6jmDm7L++41kmDKXP6Kvo8KAVm3j6A1wlgGAZTFw9W5CQQQjDtjsG8fWEIIZh+30hF0TaGIZhy1zBoFTqYKu2jOiJ/MAJc3DEpMkHRtsZd3foK3hi8dM6Y06WHbA2GENzZvZdgXxsXrRNu69FTtoaGECxMTIazlt9h0jIMFvVKkT25agjB7J494OHM7zAxDMGi/vyTlkMNhmByUjf4CmzNEEKwcEiK7NQKDUMwonsMOvkId0G9fXiK7MgLQwj6do1wWOZ9/pgU2dtMhAAJUcGIjxJuHjdnQqrs3B0CICLEFykJ/BEqAJgxOUVRjkWAvwf69xEuwT51eio4Vv7k6unlgiHDugnaTJzZW1EkzMXVCSMnCPdqGTe7jyKnTaNlMHZWH0GbkbP7wllBBIkCmKCWef/FUB2RPyCPJw6BjtFKzuP
QEIJID1/MieFftbbxUM/+cHfSSZ7ENYQg2M0DixJSHNrem9Ib3s4usjR8XFxxV7JjB2BhajIC3d0lOzwMIXDX6XBfv94Obef06oFwX2/Jq3CGEOg0Wjw4rJ9D2ym9EhAb7CdZo61Z4CNjBzi0HZUSh+6RQbI0CCF4bMogh7YDk6KR2i1c1iqcgOCxuUMd2iXHh2FgarQsDQrgscXDHK7gu8Z1wqhh8bJX+g/fO8Lh5Bwe4Y9JU1JkR8Lue2AkdDrhyGZgsDdmLRgoTwDAHQ+NgqubcM6Ml687bntklGyN2x4eBS/fm0/LXIuruwvueG6qbI2ZD4xCQIiP7MerCKM6In9A4rwD8OXwOdAyjOh8EQ0h8HV2w9JRt8PDyXGyXaSnD74dNxtOjEb0JK4hBJ46Z/w4YR58nB1viQR7eGDJ9Nlw0Wolabg6OWHp9NkIcnfcLM7PzRXfz58Fd51OkoZOo8HXc2cg3NtxszgPF2d8e8cs+Li6iJ7EGUKgZRh8vmAaYgIcN4tzcdLiy/tmIcDTXZKGhmHw78VTEB/meO/bSaPBfx+eiRA/L9Eabc3J3rlrApJjHLegZxiCD56YhqgQP9GOQtvH9tLdYwXzQ/7fnuDNv0xB16ggyc7IX+4eiaF9xHV8ff7pSUjqHibZGbn/zmEYM6K7KNsn/jIeffrGSHZGbl84CJOniovU3fvEWAwZnQCp+4vT5vfDrIXinJgFj43BmJmOnfobGTOrNxY85rh9CADMfHAUpt4zXJoAAQZPScU9L8+U/NxUxPOrdN+Vi9p9VxnpNWW4/+AqNJiNYEDarS+iIQQspejqHYjvRs5HiJu09zm7thJ371yDGmNru63ar9WI9vLF0vFzEenlI0kjv64Wd21ci4oW/dVr8WmEeXrh+2mzEOcn7ax/UUMj7l25DsWNjbwaba8vyMMdX8+Zge7B0hLXyhub8eCy9bhQXedQw9fNFV8snI7kcGnt2mubW/HIt+tx9nI1b92PthOyni7O+PiuaegbK7zNcCONLUY89b+NyCws59eAPXrg5uyE9+6ZjCE9oiVptBjMeO7TTTh5tkTwdYACOict3nhgIkb16SJJw2iy4tWPt+Dw6YuCNVIIAbQaDV54aBwmDBPnILRhttjwz/9sw54D56DRELA8WymE2DvWPvXIGEybmCJJw2Zj8eEH27F96xkwDOHd2mpzuh54eDTmzHMcZbsWluXw5b+3Y8PyE2A0hHdLiGEIKAXueGgkbr/XceToWjiOw5J/78DK/+2zvw4+DY39Nc57YCTufHo8b25Ie1BKsfzD7fjhvc0gBPzvlYYBx3KYdu8IPPDGHEVbR39WpMzfqiPyB8dks2JLyTksyTuNnIbK68YIgBGhsbijax8MDYmRnSthZm3YVpSP73PTkFFzc0fPIaGdcVf3XhgVESu7kqmVZbGzsABLsjLaLdvePywcd/ZMxZjoWDjJLJZn4zjsu3gJS9MycKz45vYCqaEhuKN3CsZ1jRPMPRGC5TgcLijGspOZOHSh6CbXsEdoEBb3T8WEHl3hwnN00xEcR3G8oAQ/H8nE/nOFN+WOdOkUgEVDUzExpRvcdE6yNCilSCu4jBUHM7Ens+AmBzQ62Be3j0jF5L4JcHeRtzdPKUVWQQVW783ErlP5N+VchAd5Y/6YVEwe3B2eAseCHXG2oBJrd2Ri55FzsN1Qc6JTgBfmTEzF5BE94O0pP7H5wsUqbNiaiR27c2C5oT9MgL8HZk3thUnjkuDrI7zFIETRpRps2pCO7VuzYDZfX8nUx9cN02f0xsQpKQgIEM4FEqKsuBZb1pzGtnVpMN7QBM7L2xVT5vTFxFl9ENTJcaSQj4qSOmxdfgLblh9Hq/76dhXuni6YeNsATLqtP0IUFBarLqvHth8PY8uSQ9DfUBzO1cMZExYOxqQ7hiI8VjjfSIUf1RH5E1BvbkV2YxmaLEZoCAN/Z3f08usseJrlQlMNLrc2w2CzwNPJGbFeAQh1539
fmyxGpNeWodFiBEMI/Jzd0DcgEi5a/snrYmMdSlua0Gq1a0R7+yHCk/+m1GwxI626DA1mIwgIfJ1d0S84HG5O/JNXUWMDipua0GIxw0PnjCgfH3T29uG1b7VacKriMhqM9mqYPi4u6BMSBk8d/xZUaWMTLtU3oMVigbtOhwhvL8T482+RGK1WnLps1+AohY+LC3qFhsJboMZIeWMzCmsb0GI2w03nhFBvL8QF8d9czTYbTpdcRn2rESzHwdvVBcnhIfATOPlT1dSCi1V1aDGZ4eLkhE4+nujSyZ93pWqxsUgvvow6vQE2joOXqzOSwjshwJN/gqxtbkVBeS30RjOcnbQI8vFAt7BAXg0byyGj8DJq9QZYbTZ4ubqge2Qwgrz5t9Iamg24UFYLfasJOictAnzcEd85iFeD5ThkFZSjpqkVZqsNnm4u6BYRiBB//u97c4sR+ZeqoW81w0mrgZ+PO+Jjgnm3bziOIqegHNX1LTCZrfB0d0aXyCCEBvF/31tazci/UAl9iwlaJw18vN0Q36UT74qbUopz+RWorGmG2WyDu5sO0Z0DERHqy6thMJiRn1cJvd4IjYaBt7cbusWHQKtt30GnlCI/vxIVlU0wGS1wc3dG50h/dO4cwKthMlqQf7Yc+iYjCEPg5e2KbolhcOJxnimlKLxQhcul9TAZLXB1c0Z4Z39Ex/JHFS1mK/LOlELfZK9U6+nthm7JEdA5899/ii5UouxSLYytZri46RAa6Y+Y+BDe74nVYkNeRhH0Da3gOApPHzd0TYmCixv//afkQiVKC6pg0Jvg4qZDcIQ/uvSMUHw8+I+G6oj8QaGUIquhFMuLTmJ7ec5NJdC9nFwwt3NfzOncB2Fu/DcqR2TXl+PHgtPYWJJ7U9VQD60O82N64fbYXojydJy7wMfZ+mr8eD4Day7mwMxe31/ETeuEeV16YlG3VMT5yF/1XKivxY+5Z7DyXDaMtus1XDRazI7vgcWJKYj3D5Stcam+AT9lncHK7By0WK5fIeo0GkxPSMCilGQkBstfWZU2NGFFehZWpGej2XR9nxQtw2Byj25Y0CcZyWHyayVUNOqx6mQ2Vpw4g8YbOuRqGIKxiV2wYEAyekVJz3loo6apBWuP52DFoTOo019fBp0hBCOSYnDb0BT06yL/pl7fbMCGIzlYsfcMam7owksIMDgxGvNHpWBA986yj6U2tRix+UAuVu3MQGVt803j/ZI6Y+64VAxKiZYdAWxpNWPHvlys3pyGyxWNN42nJEZg9pReGNwvDlqZ2wYGgxm795zF2rWnUdJOD5UePcIwc0ZvDB3aTXZdFJPJin07crBhxUkUFtzc5LJrQihmzO+HYaO7Q+csLwJoMVtxcHs2Ni47hgs5N0dLY+JDMG3hQAyf1BMuMk/OWC02HNmWhY3fHcS5tEs3jUfEBWP6PcMwcmYfuPHUTPmzoToif0CMNgueT1+NfVXnoSEMbx8Whtj3aJ9MGIO7Y4dIuqGbWRueP7UJm0pyBTXa8hse6z4ET/YYLknDyrF45fgu/Jx/hjdP4lqN+3v0xQt9RkraNmI5Dm8fPYBvs9JEaSzsnozXh40WPE58I5RS/PvIUXx24oQojRkJCXh3/DjoJGwbUUrxvyOn8J99R8AIaVzJbxgXH4d/zZgoeUvnxyMZ+OeWAwDAezy3TWNIl87498IpcBc4stwea4/l4K2Vu0GpY43esWH48L5pglVY22P7ifN47fsdsLGUt2ZEm0aPqGB89MQM+HpKK9h18HQBXv7vFlhsNt7j0m15GrERAfjwudkI9HWcNH0tJzMu4eV/bIDpyvZKezptGhGhvnj/tbkICZa2FXImqwQvv7wGLS1m3qr6bRrBwV745z/nIzJC2qIg7+xl/P2pn9HUaODVIAwB5Sj8AjzwzkcLEdNFmsN+Ka8Cf3/ge9TX6K9ei0/Dy9cNb3xxF7olScuJKrtYhZcWfY7qsgbeHJy21+fu5YrXvr0fif35ayT9WVA
dkT8YRpsF9x/7HjmNlyU1tLs7dgj+0n2cKFsLy+LeQz/jeHUxqASNBbG98HqviaKcERvH4cG9a7G37KKk2gQzYrrj30OniHJGOErx1O4t2HiBvzvqjRAAY6Pj8Pn4aaJWsJRSvLRrN1bc0NTRkcagyEh8M2um6ByWf+46iG+P87dpvxGGEKSEheC7RbNFOyOf7T6GT/ccl6TRtVMAlj44T7QzsmRvGv694aBoDQ1DEBnoi6VPzRftjKw5kIV3ftwjuhuOhiEI9vPE9y/cBn8vcXkZ2w6fxeufb5Ok4evthm9fX4ggf3F5GQeP5eOVf24EBUQV4NIwBB4eLvjivYUICxEXBT11+hJefHEVKKWiarYwDIGrqw4ff7wI0VHioofZGcV4/vEfwdo40Ro6Zy3e/+JOdE1wfLoKAPJzyvDcnV/DYrGKqqfCMAQarQbvfHMPEntHidIozqvA0zM/hMlgEdUXh2EICEPw+vcPovfweFEaf1SkzN9qKvDvgJcy10p2QgDgu4uHsarolCjbl9O2SnZCAOCni+n4Jv+EKNu3T+2V7IQAwPrCs/go84go2/+cPCLJCQHsk8quSwV459gBUfb/O3VakhPSpnG0pASv7tkjyv6n02ckOSGA3QnLvFyB5zfuEGW/Mf2sJCekTSO/shZP/7RF1ES550yBJCcEsFcuLalpwJNfbRQ1iR3LLca7y+zvq9jvFstRVNXr8eTH62EV0cgsM68Mb365XbJGQ5MBT/xjNUwWq0P7vItVeO39TaDgj+i0p9HSYsLTr65Cq8Hs0L6oqBavvLIGHCfOQQDsuTBGowXPPrsCTU38XYfbqLjcgJefXi7aCWnTsJhtePGJZair0Tu0r6tuxssPfC/aCWnTYG0sXn1oCSrL6h3aN9W34MUFn4l2Qto0OI7izfu+RsmFSscPUAGgOiK3POeaKrC74qxkJ6SNT/L2wMrZBG2K9PVYU3RGshPSxse5B2CwWQRtKlv1WHIuXXaVxs+zj6PJbBK0aTAZ8XnGSVnXpwC+y0pHdatwW/JWiwWfHDsmW2Nldg6KGxsF7Sw2Gz7cJ87xuhGOUmw7m4/zVTWCdizH4d/bD8vWOJxfhMySm09IXQulFB9uOiSrvD3LUaQXXsaxvGKHtv9dK+91sBzFueJqHDhz0aHt/1Ydla1RVF6P3cfyHNp+v/wIOI5KrpDLchSV1U3YvjfXoe1PPx+DzcZK1uA4ioaGVmzenOHQdvWyYzCZrJIr5HIcRYvehPUrHf+GN/x4FC3NRsmVZTmOwmSyYu33jr8zW388ioZaveQOwZSjsFpZrPx0l6TH/ZlRHZFbnBWXTkBD5H9MjRYD9lYKRwiWXUxTVBLeYLNic4nwTfCn/EzJBZGuxcqxWF0gHIVYdS5HcQv25eeENTacO39T4qsUGELw85ksQZsd5wrQZHK8uuVDQwh+ThPWOJh3CTX6VkEbQQ2G4OdjZwRtTheUoaSmUbbzqWEIlh/KFLQ5W1SJ8yXVssvbMwzBir3CGkXldUg/V6qg9DzByh3pgjZVNc04euqioi7KqzenCUZSmpoM2LfvHG8tE0dwHMW69WmCpesNrWbs3JQpefK+VmPL2jRYLPy/MYvZim0rT8p+rziWw861aTC08v/GWBuLzUsOtZtzIlbjwIYMNNULL2xU7KiOyC1Ms9WITZfP8CaNioEBwU+X+MPvJpsVKwszFHW6JQCWXuDfArJyLH48n6G4Q+z35/hvtByl+D5bfsSl7RpLszMEu+UuychQ1MCOpRTLs7JgFnBmfjiVoay5HKVYdyYXLWb+G+1PRzMVOZ8sR7EjOx/1LQZem+WHzihuLnfo7CVU1N98MqWNVfuzFGlwHEV6/mVcqrj51Egb6/Yo06CUIr+4Bmcv8ofqN+3MUnT8k1LgckUj0rNLeG22bctS5OgAQH19K46f4I8g7dmWDbOAEyGGFr0Jh/ae4x0/vCMHLc3C0VFHmE1W7NuUyTt+cs9Z1Ffzf+/
EwHIcdq0Ut239Z0d1RG5h8poqbzo+KxUOFGcaSnkn8ILmWrQ62FZxBAVwvqn6pmO4bZTqm1Bvdry37EijtIX/OrWGVpS3ON5bdkSt0YByffs3oFaLBQV1dYqcHQDQWyworG9od4yjFFnllYqdNrONxfmqWt7x9OJyRc4nYE8+zr1885HMNtIulsluLtcGpUBWMf8WUFqecg0AOFPAr5F+tlSxBkMIsvJvPlp6VT9XfsSlDY2GQc45fo2cnDJFHWivamSX8Y7nZpUqcqLbNHIz+R2q3PRiaLTKpi6GITibwb/tl3uqEBqeuitioZQi91Shomv8WVAdkVsYvVWZ198GRymMbPvORnMHaQD2Amjtalg6UIMnT6RZYPX/e9JoNVtkbzPcSLOpfQ2W42CyKlu1ttFk5H9PWk3KHNw2mgWSMFsE9MXCMATNBv7vqL5V+feXYQj0Aq+jWa9cgxBAL7Dd0NSsbDHQRovA+9HSbFTsUFFqT8Dl1dAr1+A4Cr1A4m1rsxHi05J5oEBzvfztzz8TqiNyC+PEKPPIxVzrj6Iht6hTuxo8x2udZBanav9av93rYAhRvGr9fw3+5yu3mNfNGvzfn47oAUIphZPA6pevIqk0DTjQ6ID3yoGG3KJk10KI8Puh/RU0nJy0sjsO/78I4KT7ZV8HADg56G6sYkd1RG5h/J2lFULiw02jgxPT/g8iwEV+b4tr0RIGXk7t13wIcJVWNIoPAsDPpf1y5n4ubopyN67Fn+f5eru4KMqruJYA9/Y1XLRauMjsYyNWgxACbwW9Wa7T8OD/bP0U9Ga5Fn+BomP+Xsq/W5QKXyfAx13xd4vlOPh68b8fAX4eip1DlqPw9RZ4r/w9ZFeTbYPjKHx9+DV8/Tw6wDkk8PHjvy/5+HuAKPxENAwDH3/++6tPoKfiyCSjYeAX/OetfyUF1RG5hYn37oRQVx9F19AQBpPDe/KOR3n4oYtXoKKftYYwmBCewLsCDnbzRK/AUEU3Wg0hGBEeA3eeHjRezs4YGhGlyFFgCEHfkDAEurV/E3TSaDCuS5xCDSAhMBCR3u1XwiSEYEpiN0XJkQRApK834oP5i09NSYlXpAHYnZDkSP7iU5P7JCieXD1dndGvawTv+KQBCYpXx85OWgxKjOIdHzcoQXFekEbDYFjvON7x0UMTFOcFARTDB3XlHR0xIqFDtjRGjEzgHR8+trvgqRoxsCyHEWN78I4Pm5jUIRrDJ/LfF4dNSZV98qcNjuUwbGovRdf4s6A6IrcwDGGwILq/Iu+fpRzmdeZv+U0IwZ1d+iq60bKUw+IufQRt7krorehGy1KKO+N7C9rcmZSqKAGToxR3JQnfOBanpCjUAO5MTRU8IbGgT7Li5MhFfVMENeb376lIgyEEtw9MEdxKmjMoSfb1AXtexexBSXAWqBI7dVAPRVtAGoZg6uDu8HDlb4A4fnACXBSE2DUMwdgB3eArEHUZMbgrPNz5n4MYjUF9YxEcyL8CH9A/Fv4CUQBHMAxBcnKkYKn35N5RCA33le0cEkIQFx8iWF21a2I4YhNC5Z8yIkBoZ38k9Y3mNYmIC0bSgDgwGvn3Xr9gL/Qd1V324/9MqI7ILc70iFTZuQkaQtDTJxzdvDsJ2k3rnAg3rU6Wu8MQgjivAPTyF+7fMKFzN/g6u8pyqhgQhLl7YVgY/40DAEZERiPE3QOMDA0CAn9XV4yL5l+1AkC/8HDE+PrKWukTAB46HabEdxO06xESjKSQYFmRFwJAp9VgZrLwDTA60A/9YyNkR3cIAWb3TRS0CfbxxPDEGNmRF0op5jpwZnw9XTG+n/wIEstRzBmeLGjj7qrDlBGJsrc1WI5iztgUQRudkxYzJqYo0pg1WdiJ1mgYzJzRW/YEznEUM2cKLwYIIZh5W39Z1wfsn/mMefwLpzamLx4k+wQQATB90SCH78P0e4ZJLph2VYMhmHbXsA7JYfozoL5LtzjeOje8ljxD8uMYELhonPBGykyHtm5
aHd7vN02yBgGBE6PB+/2nO/xR6zQafDRsqmQXgcC+Evto2FSHk7+GYfDxuCn2fg8SNQgBPh47xWEfGEII/j15EpwYOe4O8MHEiXB14m9j3sa708bB2UkLqfMSBfDO1HHwcnGcA/LazDFwd9HJcqpenj4agZ6O84temDMSPu6ushyFp6cPQ3iAj0O7p+YMQ6CPhyyNB6cOQJdw/nb3bTwwZzDCgnxkaSyY1BuJXRz3T1k0ewCiIgIkaxAA08Yno09yZ4e2s2f3QXy3TpIdHkKAUSMTMHQI/9ZPG5Nm9kZy7yjJGgxDMGBoV4ye6DiSNmpqCvqPiJel0bNfDCaJcHYGjk/C8Om9QKRqaBh0S+mMGfcOl/S4PzOqI/I7YEp4Mp5PnARAXHFSDSFw1erwef87EOMprknVuPB4vNNnChiRMQsNIXDRaPH1kPlI9A0RpTEsLBofDZ8KDSGiohYMsTs6X46ciT7B4jpm9g0Jx+fjp0HLMKImWAYEGobBJ2OnYHC44xs5ACQFB+OrmTPgrNWK0iDE/lr+OWE8xsSJ68rZJSgAX90+E65OTqKiFuTK3ysTR2FKorhmW5H+Pvj63tnwcNFJmvyenjAEc/uJ23YJ9vHEl4/MhrebiySNB8b3x+IR4vbX/bzc8PnTs+Hv7S5JY+GYXrh/6gBRtl7uLvj4+TnoFOgtSWP6yCQ8dru4CcnNTYf3X5uDiDA/SRPsqKHxeOrBMaIiHc7OTnjnnXmIjQ2SpDFwYByefXayKA0nJw1efW8+4hPDREdfCLFv67z49mxRUQSNhsHzH9yGnv1ixGswBPHJEXj5k0WiTsUwDIOnP1iI/qP581VufgxBdEIoXv/+ATi7SutQ/WdG7b77O2Jf5Tm8n7sDpYZ6aAhzU8XVtn/r5x+Nl5KmIFqkE3ItBysv4u2MXbiorxXU6O0fjtd6T0SCj7S23QBwvLIEr53YjfMNNdC0096+7d+SA0LwxoCxSA4Q5+hcS0ZVBV45uBvZNVWCGt39A/Ha0NHoFyqtNTgAnK2uxqt79iK9vFxQI87fHy+PGIEhUeIcnWu5WFOH17btxcnisvY1rrS17+zng+fHDsOortLbj5fUNeLN9XtwtKBEUCPM1wvPTByK8UmOV8U3Utmgx9ur9uBQ7iUQQm7KF2rTCPbxwONTBmNqX+l763XNrfjHsr3Yn2Gv/HmjRlsLd38vNzw0fSBmDeNPVuSjqcWID5bsxZ7jeaC0HY0rr83H0xV3zxiAeeOF84Hao6XVjE++3oudB85e6T3T/uvwcHfGgln9sWBWP8mRAaPRgi++2IvtO7Lb7T1DCAGlFG5uOsyZ0xeLFw2WvM1gMdvwzad7sGXdlZLtN2nYTyy5uDph+ty+uPOhkZKPStusLJZ+sgublh2HyWi5es3rNADodFpMnNcP9zwzATqJ+T4sy+Hnj3Zg3Vf7YGgxgzDk+tLvxB4d1jppMHZeP9z/8gy4uMnP9/mjIGX+Vh2R3xmUUpysu4Tll07gRG0hDDYLGELg5eSKiWFJmBfVD9EejkPNjjTSakvxQ8FpHKosRIvVbNfQuWBieAIWxvVGV+8gxRqZtRX44Xw69pZehN5qL8TkpXPBhM5dsbBbKhL9pTs5N5JTU4Wl2RnYeakAeotdw1PnjNFRsbgjMQXJwdKdnBvJq6nBsjNZ2Jafj2azGRyl8NDpMCwqCotTU9A7VEFi3RUu1tbj57Qz2JqbjyajCRylcNc5YWB0JBb1TUG/zuGKNYprG7HqZBY2ZZ5Dk8EEG8fB3VmHPlHhWDAwBQPjIhUf/7xc14Q1R7Ox6dQ5NLQYYOM4uOl0SIkOwW3DUjA4IUpx/ZHqhhasPZiFjUfPor7ZAJuNhauLDj2igjF/VAqG9oxRXK+lrrEVG/dnY+P+bNQ1tsJqZeHq4oSunYMwZ1wqRvSJU1x/pLHJgK17srFpZxZq6lpgtdjg4uKEmM6BmDkpFSMGd4VOIJFXDHq9Cdt3ZGHz5kxUVzf
DYrHB2dkJkZH+mD69F0aNTICzs+OtRCFaW8zYsy0Lm9acRmV5IyxmK5ydnRAa4Yepc/pg1PgkuLopix4YW83Yt+UMNv98HOXFdTBf0egU7ovJt/XHqGmpcPdQdmTdbLTgwMZ0bF5yGKUFVTCZLNA5OyEozBeTFg7CmLn94SlwtPnPhuqI/M4o0F/GrqqTqDE1wsxZ4a51RYxHKMZ36gdfnafgYymloiagAn0VNl1OR4WxESbWAg+tC2I8gjA9vDcCXYTf27aviCOdQn0t1hZnorS1EQabBR5Ozojx8MfsqFSEurV/XFWqRom+ESsLsnBJX49WqwXuTjpEe/phflxPRHj6dIjG5ZZmrDifhcLGBrRYzXB30iHSywdzuyYixsevQzQqW/RYlZuLC3V1aLFY4OqkRbiXF2Z374Gu/o4dSTGfe21LK9Zk5SKvuhZ6sxmuTk7o5OWJmUndkSBwtFeKRkOrEesycnG2vBp6kxkuTloEeXlgWnICksKFk6TFajQZTNh0+iyyS6qgN5rhrNUgwMsdk3vFIzkqxOHjxWi0GM3YevI8zhSWQ28ww0mrgb+nG8b36YZeXRxvMYjRMJqt2HH0HDLyLkPfaoJWy8DXyw2j+3ZFn+6OnTwxGmazFXuP5iEtpwTNepO9Xoa3K4b164L+qdEdomGx2HDwSD5OpV2CXm8EYQi8PV0xaGAXDOgX69DJE6Nhs7E4cjAPJ48VQH+lIqynlyv6DYzD4GHdHDp5YjRYG4uTh/JxbP956BsN4CiFp7cbeg+MxeAxPRxGTkRpsBzSDp7HsR3ZaKpvBcuy8PRxR/KgLhg2ORnOLn/c7RvVEfkdQCnF/uoMrCk7gDx9CTSEAUcpKCgYEFDYw7zDA1MwN2IU4jzDZGnsqzqLHy8dxpnGmzXaGBncHXfEDEWiD3+9BiEOVF7AN/nHcKK26GYNYn8eI0O64t4ug9AnIFKWxuGKIvwv9wQOVVwCQ4g9LH5FgxB7iHxYaDQe7N4fg0KiZGmcqCjF/86cwt6Si3YN2K9LQMAQ+xHiQaGRuL9nX4yMjJGlkVFRjq/S0rDzYsHVf7Nr2D9vllL0DQ3Dvb16Y1yc8AkePnIrq/D18dPYfu7C1Wj4VY0rWyDJoZ1wV79emJTQVVYkJb+qFt8cOo2t2XlgKQcCco0GA5bjkBAShDsGpmJacoKsSMql6np8vy8Nm9LOwWZjr27pXPs64jr5Y9GwVMzoJ+8Yb1lNI37YnYaNx87CYrWBXNn2uPZ1dA72xYKRqZgxJNFhMnN7VNXpsWzbaWw8kAOj2Xp1Gwqw5zqwLIewQG/MG5eKWaN6yopy1Da0YMXG09i4OwutBku7GkH+npgzKRWzJqbCRUaUo7HRgFXrTmHjlkzoW0xXr3uthr+fO2ZM7YXZ03vDTcb2hL7ZiHWrTmLj2jQ0NRra1fD2ccO0Wb0xc24/eAoUiePD0GrGhp+OYdPyk6iv1ber4entislz+2LmokHw9pVe9NFktGDz0sPYuOQwaioaodFe0aD2ZFaO5eDm6YKJtw3ArPtGwC/ojzW/Aaojcstj41h8lL8S2ytPggEBJ1DFQwMGIMBz8QsxMlh8cRyWcvj3ua1YXnzMsQZhQCnF3xNnYHqEcD2Qa6GU4j+5e/Fl/pF2cwuu17BPIi8lT8DiWMcZ69dqfJpzDO9nHhSlwVKKZ1OH4+EeAyRNsN9kn8Zbx/ZddQYcaTyeOhBP9xksSWN5dhb+vncPCCCo0ZZncHdqL7w0bLikUy2bcs/j2Y3bAQLBOiFtGvNSEvHahNHQSpjEd58twNMrt4CjVFCjbb9+clI3vDNrHHQSKsYeOV+Ep77bBCvLCmvAngMwskcM/rl4Elx14ifYtAtlePLT9TBbbQ41AKBffCTef3Aq3CWsYnMvVuCp99ei1WhxrEGAnl1C8f5fZsDLXfw2woWiajzz5mo0iujzQghB1+ggvP/3WfD1Fj/BFpfW4a8vrEBdfYsojc4RfvjXO/M
RGCAc0b2WivIGPP/UT6isaHSowTAEnUJ88I8PFyAk1Fe0Rm1VM1586HuUFtVen+fBo+Ef5IV3vrgTEdHi8+0aa/V4+e6vcDH3ssMjxoyGgZevO95e8gBiuktfbN7KqI7ILQylFO+d/wl7qk5LLiL2So+7MTTQcYIdpRT/PLsJq0qkt6B+veccTAlLFWX7Qc4e/C//iGSNV5InYmFsX1G2n2Yfxb8yD0rWeC51BB5OFHci4vucdLx2dI9kjcdSB+CvfYeKsl2Vm4Pndu2UdH0C4M6UVLwyYqQo+23n8vHkui2SNWb17IF3Jo8V5VQdyL+ER37cAEqp6O8vIcC47l3w73mTRUVGThWU4v4v1tgjayJFGEIwqFtnfHLvdFH5H9mXKnDfv1eBZTnRhfYYhiA5JhSfPzFLVNTiQkkN7nvjZ1hsrOiKpgxD0DUyCF++NE9U1KKkvB73P/cjjCaraA0NQxAR6ocv310AdxFRi8qqJjz4+BK0tJhEF8HTaAiCAr3xxcd3wFtE1KKuVo/H7vsW9fUtomt3MBoCPz8PfPrNvfATUaitudGAJxZ+geqKJtFVUxkNAw8vF3zy08MIDvVxaN+qN+Hp2R+hrLBGkoaLmw4frX8K4THKcu9uJaTM3+rx3V+ZzeVHsVuGE0IAvHN2KSqMdQ5tt5WfkeWEAMDrWWtwUc/f2r2NPeV5spwQAHjzzDZkN5Q7tDtaUSTLCQGAf2bsx/Eq/lbibWRUl+N1GU4IAPw34zj2llx0aHe+tgYv7N4l+foUwPeZGdiUd96hbUlDI57ZsE1ybRMKYE1WLlZm5ji0rW5uwZM/b5bkhAD2qMiO3AtYeizdoW1TqwmPf7MBlEK0EwLYt56OnC/CV7sdf++NZiue+HS9JCcEsBf0yrxYjk83HnVoa7Ha8JcP1kpyQto08our8e9l+x3asiyHv729FiYJTghgj5SVlNfjvS8cO8aUUrz42hpJToj9uVFUVTfhnX9tFmX/5str0SDBCQEAjqVoqG/BG39fI8r+Xy+tRnVFo6TS7RzLoaXZhNefXCaqgNrHL65E2cVqyRomgwWv3vO14tL1v1dUR+RXhKMcVpbulfXYtnyFzeXCN0FKKZZcOii7LDwhBCtLjju0++bCUVkVTAF76folBY4njK/OnpRd+VNDGHx19qRDu2+z02T3Q2EIwZdnHGv8kJkpu0g/Qwi+Sjvt0O6n9CzJDkIbBMDXx087vNGuOp0NK8vKbgfw3ZE0sJzwjXb9qVwYLFZZ7QAogB8PZsBiswnabT15Dk2tJnkalGLVwTMwmCyCdvtOF6CmoVVWbxeOUmw5lIsmPX+begA4nnEJlysbZZXq5ziKfUfzUV2nF7Q7k12Kwks1sjVOnCpEaVm9oF3++QrkZpWClVHFlGUpcrNKcSGvQtCutKgGpw5fkFUplWM5FOZXIie9WNCupqIRh7ackfeZsxzKi2uRdsDxouOPiOqI/IpkNlxApUn4RykEBw5byo/Cwlp5bXKaylCgr4K8KcmeW7KpLAMtVhOvzYXmaqTVlQrmnTjS2FaWi3pzK69NaUsj9pcXyu7rwlIOe8sKcLm1idemxtCKrYV5sjU4SnGiogwFDfxRqmazGWvOnVWkkVNdjayqSl4bk9WGFRnZsjUogOKGRpwsKeO1sbIsfjp5RlG/oGp9Kw7mX+Id5ziKZYcyFHU9bTaasetMAe84pRQ/78tQ1CjPZLFh6ynhCWPlrgxFR51ZjsOmQ7mCNmu2ZShrXEiATbuyBE3WbUqHRkG/FYYh2LglQ9Bm07rTikqhazQMNq1LE7TZuuoUGKUay4UXT9t+Pqboe8VoGGxcelj+BX7HqI7Ir8jWiuNgFL7lrawJR2qzecfXl56GhijTsHBW7Kzk11hdlKFYg6UUG0r4b4KrLmYr7txKCMGqAv7Xse5CrvKuqoRgRR7/69iclwcryyrXyOHfOtmVX4BWi/AKXZRGBv97dfhCEepbhVfoojRO8Wucvli
GigbhFbojGEKw8ij/53GupAqFFfWKnB1CgNUHzvCOF1fUI6egQlGnW0qBNXv4Narr9DiZWaSocSHHUazbkck73qw34tCRfFmRims1Nm/Pgo1ny8FksmLPjhxFWxIsy2H39myYTO0v0Fgbi+1r0xR102VZDod350LfZGh3nFKKrcuOKvrMuStHfWsqGmVf4/eK6oj8ipQaqsBB2R6ghjAoN/GvwItba26qhipHo8zAH7kpbqnvAA2C0tZGfo3mBsVOAgFQ0sKvUdTcKHt7qQ2OUpQ080ddipsaJJ1IaQ+WUhQ1NvBr1Dd2iMaleiGNJsWOIUspimr5NUrrGhVdH7jyeQhp1PB/VmKhFCir5b9OWXWjYg0AqKxt5t0uK6/sGI3GZiPM5vYn8KqqZkUTaxtGowV6nm2mulo9rFZljjoAWK0s6mvbd2Kbm4wwGpQ56oDdqaquaP9zN5usaKrnj/CKhgJVpfKj5r9XVEfkV8TAmhVfg4DAaOPfNmm1KdcAAIPAdVo6QIOCCj7XVptF0TYAYJ+UWq38NyCD1SJ7C6sNClyt2NoerRarYocKAPRmfg2D1aLQnbLTYhZ4r8wWxY4IAMHIjcFs7RANg4V/69LIM+lKxWThz0Mx8qzMpcJRCrO1fZ2O0gDAm+9idJAHI0mDxxHoCAfh19QwGtr/HZpaO+a+CwCGVv77+x8V1RH5FXHTKCsxDNgncDct/3U8BMbEQ+Cu5T/W59kBGsSBhoeTs+xE1TYYQuDuxF/zwd1Jp7gsOoG9ZDwfHjpdhzgJns78Gm5Oug5xdjyc+d8rN2edYsfQsYZTh2i46fg1XBWWKxdzHVeXjtFgGAJnnmPCbh3YUM3Ntf3vlmsHavAdE3ZTWNr9Og33X17DjUfD1aPjesu4KSxF/3tEdUR+RTq7d7IXKFMASzmEu/IX14n2CFKcv2GjLDq785cZj/b074AcEQ5RHvzl0mO8/BRPrvTKdXg1fPwUh54ZQhDtzV9QKdrXF1YHJ0UcoSEEcX78ryPa3xc2pRoMQVyAP79GgK9iJ0HDEMQE8mtEBYkvTMUHQwhigvnfq6hg5RqEAJFBPrzjkZ06QANAWJA3r6McFuKjKDGyDX9fdzjzlDLvFOStKIm0DXd3Z3h6tj+5+gd6Sm5C1x7Ozlr4BbRfS8TTxw3uPPpS0GgYBIX4tK/vouuQ6qiEEIR2VtYr7PeI6oj8ikwOHQhWYY6Ip9YNAwMSecdnRvRRnL/hqnHC2E78bd7nRKUq1tAyGkyL5C/ONjc26aZunZKhV67Dw8wu3RU3cWMpxW3x/K9jctducJVQUZRXI5FfY2zXWHi5KFuRsRzFban879WQuM4I8pRe6vomjX78r6NXdBgi/L0VRZA4SjFvEL9Gt4ggdAsPhJKPnVJg7vBk3vGIYF+kdgtX/N2aMzqFdyzA1wMDe8UoOjVDCMHMCfwanp4uGDksXvGpmWmTUngdGmdnJ4yb1FPxqZlxk5J5m/NpNAwmzu4DRsHr0GgYDJuQCA+B4myTFw5S9JkzGgb9RnX/Q5Z7d8Qv6oi8++676Nu3Lzw9PREUFIQZM2YgLy/vl5S8penpHSsYzXAEA4IpoYPgxPBPbAneYYj3CpVdR0RDGEwL7wNXLX84M8YzAP0DomTv52sIg6nhifDR8f+oQ9y9MDo8VkEdEYJxkV0R7MZfYtrPxQ1TY+IVaQwJ64wogYiIh06H2d17yNZgCEFyp05ICOT/3ui0WsxPTZL9eRAAsf5+6BUeymujYRjc3i9ZUQ5HiLcnBsd25n8ehGDhUHFVffnwdXfFqKRYQZvbRqZCSSDMzdkJ4/t0E7SZOzZFUbRNq9Vg8pDugjazJqYqOjVDCDB1tHCl5ulTUhWfmpk6KUXQZurM3opPzUyd2VvQZtKcvrJqiFynMa+/oM2E28RVcuaDYzlMXTxY0TV+r/yijsiBAwfw6KOP4vjx49i1axesVivGjRuH1tYOyC7+HUIIwfzI0fIeC0DLaDE
ldJBD27tihslKwiRX/uZFCv/gAODergNlh+opKO6Ic6xxf/f+sjU4SnF/guMy8vf27CM78MJSigeSHWvcmZIKQuS5hhyleLCPY40FvZLhxMg7A0QB3D+wj8N8mTl9EuHspJXtjNw7pI/DFePUvgnwdHWWrXHH8F4OG9ON79MN/l5uslavBMD8ESkOe9oM7xWLTgFesiIWhAAzRibB00G/mX7JUYgK95OlwTAE44Z1h7+Dhm6J3cMQ3zVEtsaQQV0QyrOd0UZMXDBSekfJ+jwYhiC1dxSiY4VLo4dG+GHgSHnNFxkNg66JYUhIFm4K6hfkhZHTe8nWiIgLRurQrpIf+0fgF3VEtm/fjrvuugs9evRAcnIyvv/+e5SUlCAtTbj4zB+Z8Z36iXImroVc+b+v9LgLQS6O95/HhiThjmhxPVCu1aAA3kqehygPx1Gb4Z264ImEEZI02ngrdSoSfBy3iO8XHIG/95HnuL3adwx6B4U7tEsMCMa7Q8fJ0vhb36EYFh7t0C7Wzw8fjJ8gy+F5qE9fTIjr4tAuzNsLH82aDACSnZGFvZIxM0l49Q0AAR7u+GzhNBACSfkJBMD0lAQs6M+/ndGGl6sLPr1/BjQMkeSMEEIwOikW94xy7LS56LT472Mz4aTRSNJgCEH/hEg8NHWgQ1utVoOP/joLLs5OkiYmhiFIigvFE7cNE2X7r5dmw8P9/9g77/g4qqvv/+7MNvXei2VZlmQ1S5Z7773hijE1QAgBUiAhEEIJIY0AgUAahNANNu69914kWbKsZsnqvWt3tW3mvn+s5Mi2ZnaKIOR5/Xs+PJ/n8b2a7+7s7txzzzn3HL0sQ4FhCOIGBeGZR13/tggheO3lu+Dr6y6bERnhh+eeWSBp/q9eXYagEG9ZYSCWJQgK8cYLry6TNP9nv1mGiEEBsgqbMSwDX38PvPzneyQltj/52goMHhYuKwzk7Gfjht989CgYlcfw/1f1rb7rjg7nGWx/gcQ7q9WKzs7Om/77vyZCCJ4cuhxLI5yGgqs9LEsYaIgGr6R8D2MCXC8WvXoqYTa+N2SKZAZDGPxu+GrMChPOE7hVP0ychJ8mTeu5hhQGwe9GLMbymHTJjIeHjcJLI2eA9FzDFYMAeGXUTDyYKL2L8OrENPx+0mwwhEh4H87xX4yejB+mu/bq9GpRQiL+PHceWBmMp8aMwc8nTJTMmD50CN5bvggalpHMeHD0CPxq9lTJp4fGxkbjH/cuhV6jcbkw9S7yKzJT8NrS2ZIZ6THheP8Hy+Gmk86Ymx6P1++bL3nRT4gKxgdPr4Snm04yY3JaLN78wWKXHpdexYT7458vrIavp5vL19V7a0YlRePtn0lrqgcAYcE++NtraxDg5ymJQQCkxIfj7VdWwU1iF+HAAC+899a9CAnxkXR/CQHiYoPxzp/ugafAKZNb5ePrjrf//gAiowIkfU8IIYiMDsTbf38APr7ukhgeXgb86cOHERsf6rzfLjAMQxAa7ou3Pn4UARLzNgzuevz+i8eRmBEjyWDv7fD7xtdPIiRSOMn6/7q+te67PM9j8eLFaG9vx8mT/ZexfeWVV/DrX//6tn//v9R9t6/ONF/B5urjyGkvuVFxlYL2tGjnoWU0mBUyCssipyDaI0QZo6kE68pP4UxzCZiehZyn/2FoCIu54cOxNmYChnq79lL0p3NN5fj42lkcqSvuCUE4W8yzPa3mGUIwLzIZD8aNQYqfcB6CmC41VuNfBRewr6oYgNO44noZPb6GedEJ+N6wUcgMUtZOO7epHh/mXsSusiLwoDcYvc9enlLMHBSHh1NHYmy4uJtWSAVNTfh31iVsLyqEg+fBMgw4nr+x2PGUYkpMDB7KyMSkQcL5FGK61tSCjy9kYWteAewcB5ZhwFPqfChSZ0hpfEw0HhidgWlxsYoY5S1t+OxMNjZn5cNqd9zMgDMxdeSgCNw3LgOzkuIUHZOuae3A58ezsfnsFZhtdmhuYhBwPI/hg8KwdnI
G5qbHK2I0thvx5ZFsbDqRB2O3tV/GsOhgrJmWgXmjE8Eq2LG2dJjw9YEcbDp0GZ0mC1iWcbagJ84FleN4xEUFYtWsDCyYmASNRpqh01ftnWZs3puDLXtz0NZhdjJ6Hu29jEER/lg+PwMLZ6RKNnT6qstowfad2di8IwstLcZ+GeFhvli2JBOL5gknj4rJbLJi17YsbN14AY0Nnf0ygkO8sXTFKCxcmgk3BUdzrRY79my6iG3rzqKuuhWspufzAEAYAs7BIyDIC4vWjMHClaNFE1SFZLM6sH/DOWz7+Diqy5qcDEoB+h+Gb4AnFt4/AYvumwhvFyGy/0XJ6b77rRkijz/+OPbs2YOTJ08iMrJ/l7nVaoW1T+Gmzs5OREVF/U8aIpRS2KkdDBhoRJJLAaDa3ISDDRfRZG2HlbPBQ2NArGcEZoZkwkMj/iOwcjawhIWGEX941ZhbsasmG3Xd7ejm7PDQ6DHEKwQLwtPhoxPfUVg5Z6EpsSRZAKgzd2BL5WVUm9phctjgpdVjsGcA7ho0HP568R+ajXMABNC5YDSajdhYloeKrjYY7TZ4anWI8fLH8iEpCHYTbwVu5ZzFofSsOKO524TNxfkobW+F0W6Dh1aHaC8fLI9PQZincPKr831w4CmFwcVJmbbubmwpKEBJazOMVhvctFpEeHtj+bAkRPr4iP6tvYehd8HotFiw7Uohihqb0GW1wU2rQZiXF5akDkOMv3iIz85x4HgKvYYVXeBNVht25hbiam0jOi1WGLQahHh5YtHwRAwJFj6qCwAOjoed42DQakQZZqsde7OLcKWqHp3dVug1LAK9PTB/RCISwsXDiFIZVrsDB7KKcbm0Fl1mK7QaFgHe7pidmYCkQeKbAI7nYbNzMOjEGXYHhyMXS5BdWI1OkxVaDQM/L3dMHz0UKUPCRP9WKsPh4HDyQikuXalEZ5cFGpaBj7cbpowZirRhEaJ/y/MUVpsDBr04g+N4nLtQhvOXrqOrqxuEEPh4u2HCuKHIGB4tgWGHQa91Oe/S+TKcO12Crk5nVVYvbzeMGT8UmaNjRT0zlFJYLHboXYTFKKXIOV+Gs0cL0dluBqUUXt7uyBwfh1GT4kVP8lBKYbXYodNrRMMplFLkX7iOU/ty0dlqAsfx8PJxx/DxcRg7MwUarfBzm1IKa7cdWr1mQI5Rf9v6zhkiTz75JLZt24bjx49j8GDXMfVeyXkj3wU5eAdy2nNwsPEgrhmvgaPO0sVurBvG+o/FtOBpiHJXtpPuFUd5XGjNx46a47jScQ2OGww9JgVmYEH4JMR5qWPwlMe55hJsrDqDC60lsPNOhoHVYWpwMlZEjUOST5SqYmCUUpxrvo51ZedxovEabLyjh6HBtNBErBk8CiP8xR9qUhgXmqrwWfElHKotgaWPITI1bAjuG5qJcSGDVDOym+rwaUEW9lYUo7un86uOYTEpIgYPDBuBSRHKTxj1Kq+xAZ/lZmPXtWKY7c6qmlqGwbjIKNyfloGpgwYr2q33VWFjE77IvowdBUUw9lRA1TAMMiPCcX9mOmbEDVFdSr60qQVfXsjF9twCdFqsNxhpEaG4d0w6ZibGQafAI9BXFU1t2HA2F9suXUW72VmlkmUIhkUE457x6ZiTFi9YKEyqalo6sOlUHraeuYJWo3OhZBiC+PBA3D05HXMyE1wmtLpSQ2sXthzLxdbjV9DaYQLtYQwO88eqGRmYOzYR7hLDK0JqbjNix+E8bD2Yi+ZWo5NBCKLC/LBs9nDMm5IMT4FiZFLV3mHGrn252L4nBw1NnaDUGbIID/XFkgUZmDczBd5e8r0OfdXZ2Y19e3OxfVsW6mrbQXu8WsEhPli0KAPzFgyHr686r4PJaMHBnZexY+N51FS2guedjIAgb8xflom5S0cgIFB8o+JKZqMVR3ZkY8fnp1FV2ug8eUUA/0AvzFk1GvNWj0ZQqK8qxrel74whQinFU089hS1btuDo0aMYOtR10l1f/S8ZIsebjmN
j9UZ0ObrAgLmtp0zvv8V5xuF7Md9DmFuYbMbRxkv4sGwrWm0d/TJYwoCjPOI8o/Dj+HsQ6yk/RHGsMR9/LtyBBks7WDC31T35DyMUzyUvR7KPfKPnREMJfpu7G1XmthvX648R6xmIV9IXITNAfojibEMFXry4F6WdLWAJua0zbe+/DfL0w6sj52JSmHQDuVdZjbV4/tQ+FLY1iTIiPL3x67EzMCta3vcfAK42NeIXh/bhSlOjKCPEwxO/mjgFC+MTZTOutbTg+T0HkF1b1y+D6QmxBbq749mpE7EsJVk2o7K1HS9sO4ALFdWiDF83A348fTzWjHKd2Hqr6tq78PLX+3G6pFKU4WXQ4wczx+D+SSNkG6HNnSa8+uUBnLhyHaTnejczAJ46j/g+NGsUHp41WvYpinZjN37/yUEcySq5EebsK0KctUwMOg3umZ2J7y8dJ9sINZqteOPDgzh4ugigEDydptOyWD4nHY+vmSQ7ZNRtseEv/ziEfYfzwfP0tr45PdEvsCyDhXOG44ePTBMsriYkm9WBf/z9IHbtvAyO4/ptZkgIcZ4SmpOKJ56aJbtirMPB4d/vHcT2DRfg6Cm5fyun9zOeMjsFTz23AB4yq6NyHI/P/3IAWz46AavFfuMzvpVBAYyflYwfvbrsOx/O+c4YIj/84Q+xbt06bNu2DQkJ/zl37+PjAzc31xbw/4IhQinF5prN2Fm3U9J8Bgx0jA7PJDyDOM84yZwNlQfwSfkOiQwCLaPFy8nfx3A/6cfBvq48jbcKt0tmsITB79Pvw4Qg6YvflopsvJSzHVTCAWMGzpMTr2cux5wI6Yvfzoqr+OmZbaAUN/JHhOTMWSP445gFWB4rXlOhrw5WXsMPDm8FR6nLI8a9y9Cr42bh/mHS62ScqqrAIzu33gj3SNFzEybjsRGuT4706lJ1Db63cQssdsdtC7eQnhw/Bj+ZKP3k15XaBjz86SYYbTbJdS8eGDsCz82ZLNlQuFbfjO+9vxEdZotkxorRKXhp2UzJhkJlUzse/cvXaO40SWbMy0zAb+6bC41E13p9Syd+8PrXqG/plMyYnD4Ef/jhQmglGgot7SY89eoGVNa1Sap1QggwMiUarz+7FHqJXp7Orm48/cIGlF5vlMggSE4Mx59eXQF3iR4Yk8mK55/9Clev1go2B+wrhiEYMiQYr795D7wl5n1YLDa88vRXyLlQJqljM8MQRMYE4o9/ux/+Er0jdpsDv/vR5zh7uEDSfIZlEBLhiz9+9hiCXByN/m9Kzvr9jQae/v73v6OjowNTp05FWFjYjf/Wr1//TWK/Ve1v2C/ZCAEAHjysvBVvFr2Juu46SX+zt+60ZCPEyaCw8Xa8kv9PlBlrJP3N/rocyUZIL8NBOTyf8xmutFdK+pvDdYV4MWcbeIlVTpwMHj+/tAlnm8okMU7WX8dPzmxzGggSKLSH8+y5nThUUyKJcbGhBj84vBUOnpdkINCe/148cwA7yqQ9bPKbGvDwjq2wOhyyaqn84dRxbLiaJ2nutZYWfG/jFnTLMEIA4L3T5/DxxSxJc6vbOvDwp5vQZZVuhADAJ2ez8M8T5yXNbegw4pEPNskyQgBg4/kreGdv/4nzt6q1y4zvv7tRlhECAHsvFeH1jUckLZRdZgueeGOTLCMEAE5cLsVvPtovidFtsePp321ClUQjBHDuzC9dqcJL7+wCJ6GVgNXmwHOvbJJshDgZFFeLavHi77bC4XDdjdfh4PDKS5tQUCDNCAGceSelpY144fkNsFmFmxb2iuN4/OGFTbh88bokI6SXUV3Rghd+9AUs3a6b7VFK8edfbsS5I4XSAHAWPmuoaccvH/wXjJ39dzX+X9M3aohQSvv978EHH/wmsd+aWqwtWF8l36iioLDxNnxa8anLuR22Lvz92teKGA7egbeL17mca3JY8Pv8TbLrT1A4c1ZevbLB5cPAytnxQvZWmYQeDqV4PmuLy7LyDp7HM2e2S35o3Kqfn91xI6F
V7LU8fXwXOF5+yTgC4NmTe0U7Avcyfn5wH+w8p6j2yK+OHEJbt+sH1K/2HYLFLs/Q6dXvjhxHfZfR5bzf7D4Co1VZJ+W3D59GeUuby3lv7jqONlO3oiqjHx69iIKaRpfz/rbrNJo6jLIZFMCGk7nILnW9Ifj3znOobmqXz6DAnjMFOJ133eXcL3deRGlls2wGTylOXCzFkbPFLudu25WNq0W1sivL8jzFxewK7D10xeXcfXvzkHWpXBGj4GoNtm93bUifOHQVZ44VyWdwPMqvNWDj56ddzr1wrAhHtmdLNqb6Mmorm/HV3w/L+rvvqv73UnG/QzrWdExxKXUePAq7Cl16RfY3nFXc14UHRamxCiVd4h6LvbXZsPLK2tXzoKgyNyO7TfwhuK/2KjrtFsWMRksXTjSIeywO15agyWJSVFWWAmi3WbCvSrwFwam6SlR0tUvytvTHMDvs2FZ6VXTe5YZ6FDQ3Ka4q6+A5bCzIF51T0tyCi9U1sjwht2r9ZXHPS3VbB46XXFfMYAnBVxdzRee0GM3Yl1usuNQ5yxCsP3NZdE5XtxU7zl9VxzghzrDY7NhyLE9xWXiWIdhwKEd0jsPBYdO+HMXfK4Yh2LhXnMHzFJt3ZCnuE0UIsGl7lujCTCnFlk0XFDf9oxTYsvmCy3u9bf05xb1jeJ5ix9cXwLnw7mz/7JSsAms3MTiKPevPw2qxK/r775LuGCIK5eAdONJ05LaEUTliwOBo01HBcY7y2FlzQtHC2iuWMNhdK+x+ppRiQ6Vry90VY1PVGdE5X5Sdc1lYTZxBsK5M3FX/afFFxT1dAGcy4yfFF8UZBVmqGATAv69eEn3QfpaXo4pBAXySmy264KzLyVXF4CnFF9mXYeeEH7QbLuWpOpHEUYqvs66g2yb8oN18/opiDxjgrHeyPasAnd0WwTm7zhfAZncdLhBjHMwpQXOncGuLgxeKYZLgyhdjnMkrR01Th+Cck5fK0NZpVszgeYrcohqUVTULzrl0uQJ1DR2Kn1iUAmXlTSgoEt6gFVytxfXrTao+9/q6DmRnlQuOl19rwNXLVar6BbW3mnD2hLAHqa6yBZdOFoNX0WfHbLTgxB5xY/1/QXcMEYUqNhbD6HDtmhYTDx5nW84Kjpcaq9Bsa1fF4CiP403CbsgKcxMqzU2qGt1ylMfRhitw8P0/rBu6O3GlvVaRF+E/DIpTTaUw2a39jnfaLDjdUKFqh89TiuyWGjR29/+52jgOByqvqWJQACXtLSjv7D/kQCnFrpIiVQwAqOnqxNUm4ZDDjquFqhmt3d3IqhFeMHbmFSrefffKZLXh7PUqwfHdOeoZNgeHk4XlguN7s9Q36uR5iqN5pYLjB84XqTLaAGfC55FLwl7Dw2eLVHcEZhmCwyLhmaMnClXXvGBZBkdOCudMHDtWMCCMY8eE87VOHLqqqlsv4PQgHT8o7Jk8fSBf9bF+whAc333HEPn/Vl32rgG5jtFhFNwdt9vUGTq9svA22Pn+d5Vt1oFh8KAwOvrfVbZaB67JYZut/x1di1X5Tu+2a1n6v1a7tVv1oterZgFGt8MBq4iXQY5auvtn8JSiwyLsAZDFMAvf9zbzwCTStZqEGS1G9Z87IQQtIozmTpMqQx0AGIZBm1H4fjR3mGTnCdzOIGjrEma0tJtU7fAB571qF/GqtHWYVXXSBZzGeHuH8PtobzOrvlccx6O9Tfh9tLeZVBuGPE/R1iz8fG1vNaruLUN5itam//1WKHcMEYXqLVamVrzIGZKBYgCAQyDPRGn+Sf+M/l+vEHsgGVKy+VUzBvCku0Pg9Qr9uxLZBa5FqZpg380Se70Ddb+E3gcAxXkbfUXgrMAqyFC5sALO3IdvmgGIM8TGpIq6uA7nGKD3IZJbMWD3yhVjAL6+dhEGP1D3SkXY8LuiO4aIQrlrpDVaciUDY7jRA+ZWeboo7y5VLGFgYPov4uOlHRgGAHgJvF5vrbziPmLyFni9PrqBYwhdy0enrsK
kFIanTqcik+YWhr7/18syDNxUVha9wTAI3xMvvbqqn1IY3iJjUsVTCm834e+Pt4f67xbHU3iL1Mfw8VT/O6SUwlukyZyvyuqlvfISuR9eXgbV4R9CCLxECoJ5eupVeysYhsBL5H54ehrkt7HuR2IN+Ty8Dao9OwC+84XNpOiOIaJQQzyGgCXqSlEzYJDoLVwMbIhnFHSMujLRDBgkeQ8R/OHGeATDU6PuQUtAkOAVDj3b/2uNcPdFoF68D4xrBhDjEQA/gb44gQYPRHv6qn52hLh5IcLdp98xd60OSf7BqpJuAcBXb8AQ3/47bTKEYGR4hKpEUgBw02iRFBgsOD4mKko1Q8swSAsTbpQ4dnC0rNbx/YkhBCOihSsEjx2qngEAIwcLM8YkRKteXCmlyBzSf48tABg5LEp1vgDHU2TECzMykqIUnzS5weB4ZCQJM9JTo1SHfziOR3qqcMXm4emDVHtFeJ4ibXi04HhaZoxq7w4hQNqIGMHx1NGxqt8HwxAMHztE1TW+C7pjiCiUl9YLo/1H3+iaq0Q8eMwMnik47q4xYGbIGLAqGYsjJguO61ktlkaOUbW4UlCsGjRBcFzDsFgzeJTqBXxt7BhBg4oQggfiR6q6PgOC++MzRctlP5g0QlXSLUMI1iakizbeeyAtQ1VYgyUEq5JS4KET9kjcNyJdNWNRUiL8RCokrx09XFXohGUIZiQMQYi3sBG7epxKBiEYGxeNQUHCTQBXTkhTtbgyhCBlUAgSo4QNw6WTUlXtwAmAmDB/ZMQLG1TzpyRDw6rbPAUHeGHscOF2CDOmDIObQd3mycfbDZPGCVeEnjgpQXJlVCG5uWkxY6ZwtebMcXEIClFXzVujYTFrUbrgeMrIwYgcHKTaOJy3arS6C3wHdMcQUaEZwTNUHd8N0gVhmPcw0TkLwife1u9Fjny1XhgTkCI6567IMaoWVw+NHjNCxMujLx8kv69HX+kYDRZHuWAMToPWRRdiMTGEYGWseI+TxbHD4KFVHnKglOKeRHHG7Ng4+BuUP2g5SnFvqjhj0uBBCPdW3qCLoxT3ZogzMqLCERfkr3h95XiKtaPFGYnhQUiLDlXsTeAoxT0T0kXnRAX5Ymyicq8ITynWTBEv7R/k54mpGXGKvTsUwN0zM0R/Y96eBsyZmKiYQQjByrkZovfBzaDDgjlpihkMQ7Bkfjq0Il1ptVoWi5eMUPx5MCzB/AXpMIgYTCzLYPGq0YqfWSzLYPr8NHiJGEyEECx5YILiJy/DMpgwOwX+wd/N9idydMcQUaFYj1hk+GYoLmq2KmqVYH5Ir2I8wjEteJRixvdil7gMIYW7+2N51FjFC8bjcXMFwzK9CjJ44aE46f1JbmMkTIGni1wTb50BT6VMVMx4LGksAg3i8VY3jRY/z5yk6PoEwAPDRiDSs//QT6+0LIvnJgh7sVwxViQmI84/QHQeQwiem6qMwRCCOfFxomEZwPmgfXa2csaE2GiMGey6qeJP5yn7PFiGIH1QGCYnum54+MSCCWCI/F8hyxAkRgZhVrrrhoePLhkLDcvI3iGzDMGgUD/MH5/kcu79d42BXqeVbbixDEFIgBcWz0h1OXf1XaPg4aGXbSgwDIGvjzuWLRrhcu7SZSPh6+uuiOHhoceKVWNczp2/LBNBod6yjwoThkCn12D1g66fRTOXZiIyJkh2UTNCCDQaBmuemCHr776rumOIqBAhBI/FPobBHoNlGwp3R92Nkf7SQgk/jr8bqT5xshlrB83DjBBpbrufJCzC+MBE2Q/aewZNxvLocZLm/njYdMyT0byuVysHZeKRodIMjB8mjcfq2HTZjMWDkvHT1CmS5j44bAQeTpYXBiIAZkbH4cUx0yXNX5mUgh+NGiubMT4yGr+dPkvS/PmJ8XhuqrxFnCEEw8NC8eaCeZLmTx46GC8tmH7j9UllJIQE4p3ViyTtSEcNicRrq2aDEHmM6ABf/PWhpZIa0qXGhOIPD853dnKVCGEZghB
fL7z3+F3QSUgOjosMwutPLgbDMJINBZYh8Pd2x3vPLIeb3nVIJDLUD3/6xVJoNIzkRZxlCLw8DHjnVytEE1V7FRzkjdd/vRI6nUYWw82gwxuvrYKfr+vkSz8/D/zxjTVwc9OBlVjvg2EIdDoN/vD63QiW4EXw9HLDH/56Pzw89ZINBYYh0GhY/ObtexARJb4ZAACDuw6//ehh+Pp7SDZ4GIaAZRm8+Nf7MThBfhf376K+0e67avW/0H0XAKycFR+UfYBL7ZfAgBEM1xAQMITBgzEPYmKgvJ27nbfjneIvcaTxIljCCB67JT0da78/ZBkWhsvbjTp4Dm8VbseW6nOijN5cj8eHzsXaGOkdUgHnceE38w/gk9Iz/bZqv8EgBJRSPJ4wBT9MmCqLQSnF23nH8V7+KTAijF7+9xPH4tn0abJ2iZRS/D33HP6UdQKAcBv1XsZ9iel4ZexMaGTWDfj4chZeO3HU2ZzPBWN5YhJ+N302dDLzAL7OvYKX9h+Cg/KC1Sp7GXPjh+KNBXNhkHnqZldeEZ7fuu9GJdb+MCxDwPEUk+Ni8OeVC+Ah89TNkfxSPLtuNyy9rdpFGKOHROHt+xeKnpbpT2cKK/CzD3fCZLH126q9LyM1JhTvfH8J/L3knbDLLq7Gz97djk6TBQwB+ktP6WUMjQrCOz+5C0F+8pLBr16rx89f34K2DjMYQvr9bvUyBoX7463nlyEsWNyTd6tKy5vw7Etfo7nFCIYh/ebZ9P57WIgP/vSblYiK6D+JW0hVVS147tmvUF/X4ZIRGOiF37++GrGxwrk6/am+tg2/+tEXqCpvBsMS8NztDMIQUJ7C198Dv3n7HsQnCefq9KeWhk689Oi/UVZYB4Zl+q22SnqeiV4+bnjp7w8gZaRrT95/U3LW7zuGyACq3FSOQ42HcLblLBz05uZp/jp/zAiegUmBk+ClVR6bv26swe66kzjYcB62W4qU+em8sTB8EmaHjoO/Tvn9Kjc2YnP1WeyouQgLd3PZaV+tB5ZHjcXiyNEINsh7MPVVhbEFG8ov4uuKLJgcN1dL9dYacHfMKKyMyUS4u69iRrWxHeuuZWPdtWx02m8u4OWl1ePuIem4J24EBnkJJyq6Up2pC+uKcvB5QQ5arTcXYXLXaHF3fBrWJqYjztf17khITSYTvrqah89ys9F0SwExg0aDFcOScW9qOhICAhUzWsxmbMzLx2dZObc1s9OxLJYmD8M96WlICQ1RzGg3W7D18lV8di4bNe03F2HSMAwWpibgnlHpSI0IURyb7+q2YkdWAb44lY2K5vabxliGYHbqUKwZn46MmHDFDLPVht0XCrHuWDbK6ltvGmMIwbS0Ibh7cjpGDo1UzLBY7dh/vghfHcxGSVXTTWOEABPTYrFqRjpGJw1SnCthtTlw5Fwxvt6TjYLS+tvGx6QNwoq5GRibMVg0gVtMdjuHE2eKsWl7Fq4U3N74LyMtGssWjcD4MXGSPFP9iXPwOH26GFs2X8TlnNv7aiWnROKuZSMxcVKCaO6JKIPjceF0CbavP49LZ2+vkJuQHI4lq8dg0owk6CR4pvoTz/PIPlWC7Z+fxoWjRbcd7Y1NDMOSByZg8vzhMLgNzNH4b1J3DJEBFE85VJuL0OVogYO3Qc96INQQC1+dsFVtcphQYa6A2WGGhtHAS+OFwR6DBfNBeMqj3FSKNnsLbLwNbqwbItyiEaQXfuibHd0oMVbB6OgGSxj4aD0R7xUtmA9CKcU1YwWarK2w8Fa4swZEu4cj3E2MYUVhZzU67d1gCIGP1gPJPlHQCCSEUkpR0lWD2u4WdHM2eGgMiHYPQoyncC6BhbMjr60GHbZuEAL46tyR6hsBncipkuLOelSaWmByWOGh0SPaIwDx3sIMK+fA5ZZatNu6QSngp3dDmn8YDBrhB0ZJRxNKu5phctjgrtEh2sMPSb7CC6Sd55DTVIc2i7P6qq/egLTAULiLJLaWdrSgpL0ZXXYb3DVaRHr6IC0
gVJDh4HnkNtSjpdsMjqfwMeiREhwCL5H6JuUdbShsaUaXzQo3jQbhnt7ICAkTZHA8j7z6BrSYzbBzPHwMeiSHBMPbIOw5qO7owNWmJnRZrdBrNAj19MSI8HBBDxPPU+TXNaDZaIaN4+Bt0CMxNAh+7sKJffWdXbhS14guiwU6VoNgLw+MiAoXXCAppSioaURTlwlWuwNebnrEhwUhwFPYO9HUaUReTQM6uy3QsiwCPT0wIiYcWgEPE6UUxTXNaOwwwmKzw8tNjyFhAQjyEfZOtHSZkVdRhy6zFRqWgb+XO0bERkCrEV4gr1U3o6G1E91WB7zc9YgJ80eIv/Bmpt3YjdyyOnSaLGAZBn5ebsgYGgG9iBervLoFdU2d6LbY4OGuR3S4P8KChJ+5XSYL8kpq0WG0gBACXy83pCdEwCCyCFfVtKK2rh3mbicjMtwP4WG+gvPN3TbkXq1GR0+1WB8vN6QlRcJdZBGurW1DTXUbzGYr3Nx0CI/wQ2SksJfFYrEjL68KHZ3d4HkKb283pCRHOOuICKihrh3VFc0wGZ2MkHBfRA8OEpxvszqQl1OBjnZn1VkvbzckpUbB20f4+95U346q0kaYOi3Qu+kQHO6LmHjhZ5zd5sCVrAq0txjBOXh4ehuQmBYF3wB1ZRPU6I4hMgAyOzqR3XYAF1p3odN+e5OnOM9MjApYgCGeGWAU1hMxO0w403IcR5v2o8XWdNt4glcypgbNRopPuuKaJWZHN441ncfuuiOotdzeeyTJOw7zQqditP9wQQPDNcOKQw3Z2FR1AuWmhn4Y0VgWNRGTg9OgY5QV0rJwduytzcOX18+isPP2/ibDvMNxz+CxmB2eAoOLxFkhWTkH9lYX4LNrF3C5tfa28XjvINw/dBQWRafAXaNsR2LjOOyvKsbHhZdwsfH2HWKstz8eSszE0thkUQNDTA6ex8HyUnySl4Uztbf3aYn29sWDqRlYnpAMH72yGjIcz+N4eTk+zcnBifLy28Ig4V5euC89HStSUuAvcsRXTDylOF1Wic8v5uBocdltjGBPD6wdlY4V6ckI9FRW1IlSivNl1Vh3NgeHrpbeFqLw93DDmrHDsXJUKoJFjhG7YuRcr8X6k5exP+f2TsE+7gasmpCG5eNTEean7DlHKUV+eQO+PpqDveeLbqt+6ummx7JJKVg+JQ2RQb6KGABQVN6IzQdzsOfU7Y0A3Q1aLJ6airump2FQuLwQS1+VVTRh694c7D50BVbbzZ5lg16DedNTsHRuOmIHCS/+rlRZ1YLtO7KxZ+9ldHff7FnWalnMnpWCJYtGIC5OuQewtroVu7Zcwu5tWTAZb/b6ajQsps1JweLlI5EgM4zTV4217dj99XnsWn8eXbeUxWdYBpPnpGDRmrFIyohWXQROru4YIip1teMktlT/GTx1CJZfJ2BAwSPMMARrYl6Cp0aeez+vIxv/KnsXDmoXZPTmm4QZIvBE3LPw18lz7+d1FOGPhf9ENyfcV4QBAQ+KUEMgXkx6CqEGeT/uK+3leP7yh+hydIOg/7h8LyNY74s/ZTyKQR7yftxX22vwxPnP0Goz3biWECNA74m/jb4fiT7ykrhKOprw0Il1aOjuEmT0vj9fnRven7gaGQHChZ36U3lnG+4/uAGVxnbBuHzvo8JDq8P705ZhfOggWYyark7cv3MjSttbBXNwehkGjQZ/m70Y0wbFymI0Go14eMsWXG1qEs3zIXCeAPrzvHmYGy9cF6I/tZq78fj6bciprruRq9CfGOLMifrtollYmub61EhfdVms+NHnO3CurMolAwBeXDwdq8eIHyG/VWarHb/4dBeO5193yaCgeHrxZNw3Vd5Rd6vdgZf+vQ8HLxWLMtieXInHFo/DIwuEa/L0J7uDwx8+PIhdJ/JdMjie4r6Fo/D4qomywkYcx+MvHx7G5t3ZkhjL5mfgRw9Pl3Wqhecp/vXvY/hq/VnBnBLAefyW43jMnZOKp38yFxoRj9WtopTii49
O4NMPjjp/5y4YU2Yk4ecvLYVOL32TRinF5k9O4V9v7AUhcMkYPSUBv3zjbhjcv72Qzh1DRIWy2w5gR827kuczYOCpDcDDsX+Cl1baLuBi61l8VP5XABA0Qm5leGi88GzCrxGgl5YHcLE1D38s/Cdoz/9IYbhrDPh96s9FwzV9ldVagmdz/gWe8pLqkDBgYGC1eDfzCQzxCpfEuNxWiUfPfAQ75SQ1nGMJgZZo8MG4h5Dm5/roJwBcba/HmiOfwsrZJRX5YkDAMgw+mrQGY4JjJDGudbRg2Z7PYLLbpDF6jor+a9pyTIuUVjmxqrMdSzevQ7ulWxKDwJkA9+6shVgwJEESo8FoxLJ169BkMklmAMAf5szBimRpJ6ZaTWas/ugr1LR3yiq69tLcaVg7Kl3S3M5uC+795waUNbXKamT49JyJeGTKKElzzVY7HnnvaxRUN8piPDprNJ5cIFwgsK9sdgeeeHszsq/VyioXfvf0dPxstbQkcIeDw8/f2oazeeWCycz9aeHkZLzw6GxJDI7j8cqbO3DsdLHkuhoEwJTx8fj1zxZLMngopfjTG7uxd3+eRIIzH2f0qFi89uoKSQYPpRR///M+bN1wXgaDICU9Cr9/517odNKMkY/fOYCv3j8qmcEwBHFJ4Xj9o0e+NWNEzvp95/huH103XsbOmvdk/Q0PHl32FqwrfwWcQIfbmxima/i4/O+SDYRehsnRhXev/QFWEe9Gr8pN1Xij6ANQkYZ6/THMDgt+nf8XmByuO5pWmZvwy9yPwEk0QnoZFs6Gn+W8jzab6+7FteZ2PHHuM9h5aUYI4CxQZeMdeOL8Z6jv7nA5v9lixEPH10k2QgBnp2GO5/H9UxtQYWx1Ob/DasF9B9ZLNkIAZ1iCpxQ/OLYVhW23h+1ulcluw707Nko2QgCnd4dSih8f3IWchtvDXbfK6nDgwc2bJRshNxgAnt+/H2erbg8T3SoHz+Oxr7bKNkIA4Dd7j+BYyXXXr4lS/PiLnbKNEAB4a99J7MktksR4/rPdso0QAPjgwHlsPSfcPr6vXv30ALKv1cjuWfLV4Rx8dThH0ty3Pjsq2wgBgJ3H8/HxdmkL8gdfnMBRGUYI4PxeHT1djPc/Py5p/hfrzsgyQgDnqajzF8rw7l8PSJq/dcN5WUaIk0FxJacKf/7dDknz92+5JMsIAZwek2tXa/GHZ9fL+rtvS3cMkT463PA5lNRZpuDRYC1HQedpl3N31G6EkraOPHg0Wutxoc014+vqPeCo/FqpPHi02NpxqME1Y135Ydh54dCVMIOiw2bCtuozLud+fv00zJxVdtVXHhQmhwVfXHfN+OzaRbTbpC/efRlWzo4Pilwzviy5jAazUTaDwnmk+q95rhmbi66iorNdEYNSincuuv7M9xQXo7i5WXFp+D+fOuVyztGS68itbVDMeOPwSZeL8rmyKpwrq5JtIPTqrX0nXZZ8v1LZgKNXyhQz/rLzpMtuuWW1LdhzrlC2gdCrf2w/DcstORi3qrapA1sOXVbM+GjrWZjMVtE5be0mfLX1gjIAgK+2XURbu0l0jtFkwefrXH/H+xOlwI6d2ahvEN/YWC12fCLTQPgPg+LQ3jxUXhffdDjsHP791j5FDJ6nOHukEEV51Yr+/pvUHUOkR/Xd11HTXQSqsJw6AYPzLTtF5zRZG1DYdUVxWXgCgiON+0QftK22dpxvyVHMoKDYXX8UvEANEQDosptxoD5LsM6IK/Gg2Fp9Cg5euH212WHDlspLihckjlJsqryI7luOH/eVjeewrvSS4sWCoxRbynPRaRP2UnE8j08KLykuoc9Rit0VhWjqFn7QUkrxcV6W8lLqlOJo5XVUdYo/aD/JzlZcSp2nFBdra1HcfHvid199dj5bcTM+CqC4sRm5tbcfRe2rL87kqGqUV9PWibNltx8T7av1J9UxWrrMOJZfJjpn47FcVQxjtw0HLxWLztl2OA9EBcNu57DnVIHonJ0H8/qtlSJVPE+
x69AV0TkHDuTD5sLoEhMhBLt25YjOOXboKswmcaNLTAxLsGPzJdE5Z48Wor1V3OgSE8sy2PnVWcV//03pjiHSo0ute1Q1sKPgUd1dhAaLsGv4RNMhlQyKOksNykwlgnMONpxW0TXGqSZrKy63FwqO7627qNgI6VW73YSTTcIPj721uTCLGBFSZHJYsa9WmHGgphDttm7BcSmy8xy2VOQKjh+vvY46s+swlJgoBTZcE2ZcqKtBaXurqs+dIQTrrl4WHL/a2IjchgbFRhvgzN9Zd1mYUd7ShrPlVeqa8TEEX1wQZjR0GHGkoEx1M751Z4QZ7aZu7MkqUsVgCMGXJ3IEx80WG7aduqKKQQjBV4ezBcftDg6bD19W1fCPAtiwL1tw88RxPDbvFh6XxKAUm3dlCXaypZRi89aLiq8POI2d7TuzYbcLb562bTiv6mQKz1Hs25GNbrPwc2/7F2dUdYLmOB5HduWiq911+P3b1B1DpEflpjxVDex6VWUWXsBLjIWqGQQMykzCu5iCzmuywyW3iiUMCrtuL9rTq7x213F4KYwrHeWC49mtlWBd9OGRwshpFd65XmquhkYlg4Agq1nY1XmxqUY1gwfF+Qbh/IoL9TWKvQi94ijF2X6O+vbqYk2Nyt7JPYxq4XuVXX37kWnZDJ7ifIUwI7eqTpUx1cu4VC7MKKhudBlWcSW+58ivkEprW1yGVVyJUoqCCuHXWlXfhi4VO/xeVda3wSgQnmluNaK51djvmBw1iVzHaLSipqZNNaOry4Ka2v6v43BwKCmqU2VQAc66JuVlt5dZ6NXVnEpVhiHgDO+UFKj/rQ2k7hgiPbJwyt1dvWLAwMIJ/6jMA8IgMDuEr9MlMiZdRDRhtdNuVm3sAIDRIeyN6LJbVHtdeMqjyy7OUPs+eFBRr4pY2EaO2qwiDKtFccikrzoswq+102pVXF1TMsNiHZD30WUVXjw7LeoXVgAwWoV3rV3dA8OwOzjYHP0bG10u8i7kSOhaA8oQMGi6TAPz+xC7ltE4cAxjV//XurVOyDfBsNkccIh4ZOTI1Dlw92QgdMcQ6ZHSomR9RQGwRPj4ldKiZHIYmgFgOK8jzNAqLHx223VEGYzijsO9IiCir9XpqVC/8OlFertoGPndVPuTWP8YHcsOgFkIwQqivWMDcdLfFUOttwKAaD8fMf53jQFA0PhTWg69P2k13zxDI8gYuHslVJ1WiK1EGoES8d8GQ24XYCWM/5buGCI9kluQrD9R8PDQ+AqO+2j9VC+uPDh4a4V7vPjrfFQzKOXhqxMuIe2v91YdNgEAX51wpUp/vafq3TEhBP56YUagQVk1zr5iCYMAvfB1Ag0eqnIeAGe+QIibyPtwcwfHq/MeMYQg2EPkfbi7q34fBBBneMhrDqfkOoEiJd7lyE+EESCzyZ2QvN30goaIv/fAMLQaFh6G/utK+Puo/30Azu+Wj2f/1XX9fAbmfQCAr8A98fZ2U5VX0Vd+fv0z3D30A7a4+/n3f99ZloGnl7JKyLfKN2BgPtuB0h1DpEepvlOgdnfMEi3ivYSLHY30G6c6FEDAYLivcAv6CYEjByTcMDZghOD49ODhqsMmHOUxLWS44Pic8NQBYcwJTxEcnxeVNCCM+VHCFT0XDEpUvcvnKcWimGGC43Nj41WXb+YpxZI4YcaM2FhoByA0szgxUXBswpBBcNMqK8/fK4YQLE4Vfh8jB0fAR2bH3f4Yi9KF30dydAiCRfrNSBHLEMwfKfw+hoQHIDrEV9UTi2UIZo8U/u6EBXojcbDyBoS9jEkjYqEXKNTl4+2GEanRqgwFhiEYkRYNH+/+jR29XotxY+NUMxLiQxEi0IGYEIKpM5NVeS0IASIHBWCQSN+aaQuHg1HpGQkM8UZ8iryq0N+07hgiPRruN0M05OFKDFik+UyFgRV+AI3yHwc9o6x/iJPBIMN3FHy0voJzRvsPh5dG+UPQyUhCqEG4guuogAQE64Vfg2sGQYpPDGI9hcu
wZ/hFI9YzSPGDloBgqFcI0nyFq6sm+YZiuH84GIUUAiDSwxcTQoRLpMd4+2Fi2CBVyaQBBnfMjBoqOB7i4YnZg+NUMbx0OiyME66u6uvmhiXDhqli6FgWdyUJG20eOh1WZCSrYhACrMgQNj51Gg3uHpOmyttGKcWq0cKl3lmGwd2ThqticDzFqgnCDEII1kzPUHz9G4xp6aJzVs3OUBWS43iKFbPEX+fyBRmqEjB5nmL5fOGNEwAsXZKpmnHXUuENIAAsWj5S8OSOFFEAS1eOFjX8Ftw9BrwKBiEEi+8ZN6BhnoHQd+vV/BflxnoizWcqiMJbwoPDyID5onN0jB4TAqcpPsLLg8eUoFmic7SMBnNDJysOz/DgMS90iugchjBYFjVRBYNiWdRE0TmEENwzeJxi3w4FxT2Dx7rczd0/dLTiGh8AcH/cKJcLzoOJIxWHNRgQ3J8wQjQnAQAeSMlQzGAJwZphaaLdiAFg7fDhqhjLkpPhpRc3xNdkqmPMHRaPABchnpWjUxVdH3Du8CcnDEaEi+Z0d41NUbwDZxmCEbERGBIq3ltq/thh0Os0in6FDEMQHxmE5Bjxdg4zxsTDy0OvKM+JIQQRwT4YmSzeamH8qDj4+3koMtwYQhDg54Hxo8TbIGSkD0J4mK+iz4QQwNPTgKlThL1gAJCYHIHYoSGKGXq9FjPmifcyiokLQUrmIMVeEZZlMHtZpqK//SZ1xxDpoxmhD8BHG6jIUJgYtAphbq57giwIW4ZgfagixtSg2RjqJf5jAIClEbMQ4xEpm0EATAsahxF+wjvKXi2LmogUnxjZ3gQCginBaZga7Lp52F1RIzAucIhsBgOCiUFDsTjS9Y5xYVQyZkckyGawhGBkYDTuGeL6Rz0jcgiWx6bIXjBYQpAcEILvJ492OXdseBTuS05XxIj19cdTI8e5nJsWGoofjJLWZ+VWRri3N56Z4Lp/ypBAf/x0mrQ+K7cyAj098NysyS7nhvt647kF4sZ2f2IIgbfBgF8tnu5yrr+nO15cNVMRw12vw8t3i284AGdH3VcenCPbjCaEQK/R4NXvzXFpqOt1Gvz68XmyNx2EOBe9V5+Y75KhYRm88vRCECIvOE4AEIbg5acXukysZRiCF55fDJZVkjxO8MvnFrnsA0MIwbMvLYVWp5EdzqIU+PlLS+Dh4dpj/pNXl8HgplVk8PzolaXwFchB+W/qjiHSR+4ab9w3+DV4aQNkLeKj/BdgWvBaSXPdWHc8NfQXCNQHy2KM9Z+EFZH3SpprYPV4cdiTiHIPlbXAjg3IwA+G3CPpR6RjNPjd8Icw1CtSMoMAGB2QgF8mrQEjIdlVw7B4c+QapPtHS34QEhCMCIjBnzJXQyPhdA9DCN4csxQTQmIl3ykGBCl+YfjHhFXQs67DeYQQ/H7cXMyOFg6v9Pe64n0D8fGMlXBz4anoZbwycTqWxkvvQMsSgkE+vvhs4Qp46aSFDJ+ZOBH3pEnvQMsSgjAvL3y2YgX83PqP4d+qxyaMwiPjxN3gtzICPN3x8b3LEewlLSx57/gMPDXTtfF1g8EQ+LgZ8OHDy116Q3q1dEwyfrbUafBI+W6xDIGHQYe//2AZYoKlJc/PGhmPF+6d4VzEJUAYhsCg0+Cdp5ZiaKS0Ttvj02Px4mNzwDBEMkOrYfH6TxcjeYi0LtgZqdF49dleQ8E1hBDiNHR+vggZqdGSGMOGheM3v14OrYaVtIgT4nwvz/9iIcaOkdZ4MnZoCH771hroDRrJDEKAHz+3AJOnS/vtRsYE4ncfPAQ3Dz0YVsIH0jPl+8/Ox+y7xENY/y3d6b7bj0yODuyp/eeN3jG3ln0nYJwnZFgfTA5eg5H+82RbwGaHCRuqPsXFtjP9NsD7D8MTc0MXY3qwfEY3Z8FH1zfiaNM58PT2BngEztbj7qwblkTMxLKIOZIMhL6ycnb8/doO7K4939N75mb1MtxYPVZETcIDg2dJMhD6ysY58JeiA9hQfgG2nsa
CfTmk5/83sFqsHjQaTyXOhJaRl+/j4Hn8Jf8YPrl2HmaH/cY1+zIA59Hl1bEj8GzadBhYeYmVPKV4L+80Psg/jy67DQzIbWEhAufR0OWxKXhx1Ax4aOV1yqSU4v2cC/hb9nl09NQXuTVZliEEDAgWD03EyxOnw0cvL3mTUorPcnLw7tmzaO3uFmQAwLyhQ/Hy9OkIcJd/OuLr7Ct45+hpNBlNYAm5LWTDEAJKKWYkDMFL86YjRKIR0lc7cwrx530nUdfRJcqYFB+DF5fMkGyE9NXByyX48/YTqG7p6Le9fe+/jYmPxgsrp2NQkPwTfCfzruOtDcdQ0dAmysiIi8Bza6cjLkJaF+++uphfibc+O4Ky6hZRRsqQMPzswelIHCyti3df5RXW4O33D6G4rAEsS8BxtzB62trHDwnBTx6dgdTECNmM4uJ6vPPefhQU1IoyBscE4okfzsSIjBjZjOuljXj3T7txJafyxvX6Y0QOCsAPfjwbo8dL36T0qqaiGe++uh05Z0vBsMxtuSO9/xYa6YdHfjYPE2dJ6349UJKzft8xRETUZW9BVtt+5LQdhNHRDo7aoWPcEOYWh9H+CxDvPVpVgquT0YFTLcdwuvko2u1tcFA79IwBEW7RmBo0C+m+o6CRuajezjDicOMZHGg4hWZbG+y8HQZGjyj3MMwNnYLxgSOgY9SdVuiyd2N//UVsqz6DBksbbLwdekaHKI8gLI0cjxkhGTCw6tpPmxxW7Kq+jPUV51FtboWVs0PPahHl7o9VMaOxIGI4PDTKk4EBZ4+bnZX5+KL0Iq53tcLC2aFnNYj08MWa2EzcFZMKL626UxcWhx07KwrxWWE2Sjqa0e1wMsI8vHDP0HSsjEuFr16a90BIVs6BvWUl+CQvG0WtTTDb7dCxGoR4eODuYWlYlZiCQHd1Llo7x+FgaSk+y8nBlYYGmO12aFkWQR4eWJmcjNWpqQj2VHd6xMHzOFZyHZ9fyMHlmnqY7TZoGRb+Hm64Ky0Zq0ekIsxH+Ki5FPE8xcmScqw7k4OsilqYbDZoGAZ+Hm5YnD4Mq0anIdJf+Mi8FFFKca64El+dvIwLJVUwW+1gGAIfdwPmZyZi5YQ0RQbIrYzskhqsP5KDc1crYbLYwDAEXu56zB6ZgBVT0hAbLp53IoVx5VodNh7IwenL12HqtoEA8HQ3YProoVg2YziGDpLmaRFT4bV6bNmdjRPnr91omufhrsek0XG4a34GEuNCVTNKSxuwfUc2jp8ogtFkBaUU7u56jB0zBEsWj0DSsHDVp9Eqrjdh56aLOHYoH12dFlBK4eauR+boWCxeOQqp6dGqGTUVzdj51Xkc3XUZXR3d4Hkebu56pI0ejMVrxyF9TKxqhhLdMUQEZOOMuN61Gy2WfNj4TrBEB4MmAIM8ZyPIMNzlh0UpdTnHxplxtfMwasz5sHJGsIwW7qwPErynIMp9gBi8FVltp1BqLICZM4IlGnhovDDcZwzivVJdejWkMew413IR+Z0FMDpMYAgDL40nMv3Ske47MAw778Cp5lxcbC1Ap90MQgi8te4YG5CCsQHJLgvASWE4eA7HGwtwsrEIHXZn1VlvrTvGBcVjWkiSS8+JFAZHeRyrL8Hh2mK02UzgKYW3zg0TgmMxJyLJZfhGCoOnFCfqyrCnsght1m44eA4+OjeMCx2EhYOGuQzfSGFQSnGmthI7y4rQYjHDznHw0RswMjQCS+OSXHpnpDIu1NZgR3Ehmswm2DkO3noDMkLDsDQxCd4uklmlMnJq67EjvxANRiNsDg5eej1Sw0KwLDXJ5fFdKQwAyK9twLacAjR0dsFid8DLoMewsGDclZEEfxcJs1IZxXXN2HYhH3VtXbDY7PA06DE0LBB3jU5GoLe4Idn7aHfFKatrwfYz+aht7YTZaoeHQYfYsAAsHZeMED9xI08qo7K+DTtP5KO6sR1mix0ebjpEh/pi0aQUhAeJG3lSGXWNHdh5OA9VtW0wddvg7qZDRIg
vFk5PQWSYuJEnldHY1IU9+3NRXtkCs9kGNzctQkN8MG9WKgZFixt5UhmtLUbs3XUZ10sbYTZZoTdoERzigznz0zB4SLDo3/ZyXDHaW004sD0bpYV1MBkt0Bu0CAz2xoxF6Rg6LNwlQ4ruGCK3XsdWicL2L1DWtRs8tfeEC3g4AwcMKDj4aAcjwXc1Yr0XgVHg5eiw1eNC60Zcad8PB7XeCK04gywMeHDw1YZjhP9SDPebD5bI90C021pwtGkXzrYchpW39FzX6Y7r/b/9dUGYFDgXEwJnQcvI90B02Duxu24/Djceh5nrFmD4YXbINMwKmQ4DK98D0Wk3YXP1UeysPYUuh/kmBksYcJSHn9YLiyImYmnEFHho5HsgjHYLviw/ha8rz6LNZrpx3b4MX607lkePwZqYCfDRyQ8dmBw2fF56Hl+UXkCjpatfhrfWgNWDM/Fg3FgEKCigZnHY8VlxFj4puogaUyc0Pdd1Vth1hhI8tTqsjkvHo8NGI8RdvnfAxnFYV3AZH+VfQkVne78MN40WqxNS8UjqSER6yfcOOHge6/Pz8HFOFkrbWm9j8JRCx7JYNiwZj44YiRhf+d4BnlJszruKj89noaipGSzDgOedjN4Qi5ZlsTg5EQ+PyURcoHzvAKUUO3ML8cmZLOTXNoJlCHie/ofR837mpyTgexMzkRAq3ztAKcX+3BJ8diwLlyvqnAxKQSluOlkyK20oHpqWieQoZd6BY7ml+PTgJWRdqxFkTEmLxf0zRyJ9iLLF6UxeOb7YcxHn8yt7GM73x/RkplKeYlxaDO6bPwqZw8RP1wgp60ol1m2/gDNZ1515GdT5XSCEgCHOI8Sj0gZhzeKRGJM+WBHjytUafLXxHE6dLXXmyfRlMADHUQxPjcLqZaMwfmycIkZRYS2+XncWJ44WgsIZpuV52pOr4gzlJKVEYvnq0Zg0NVGRl6OsuB5ff3wSx/dduXHtWxlDk8Jx19pxmDY/TZUn5Y4h0kf15gs4XvdzcNQGCrE6/c5vV7j7BEwM/S00jHTXeI05H5uqfgU7b7ktn+R2BhDlnoalkS9Dz0pfmKrMZfhn6e/RzZlcNs4jIIh2j8Ojsc/CQyN9Yaoy1+APhX9Gp71LIiMSzyb8GL466QtTjbkJz+X+Dc3WdpfHZhkQRLgF4XdpjyPYIH1hqutuw5MXPkK1qUUSI9TNF++N+h6iPKQvTI3dXXjk1Be41tnkksESggC9J/498V7EeUtfmFotZnzvyAbkttS5PBnBEgJfnRs+mbEayf7SF6YOqwXf378F5+udTdzEOCwh8NDq8NHc5cgMkR6bN9lseGL3DpyoLJfE0Gs0eH/hUoyPkpaECABWhwNPb9+D/UXXbsvv6Y+hYRm8d9ciTI2TvjDZHBxe3HYA2y8X9JsXcxODISAgeHPlfMxOlh7/53gev9t8BBvO5EpiUAr85u7ZWDxSepIyz1O8s/UEPj14SRKDpxTPr56OlZOFCxDeKkopPth6Bv/aehZMj7EmxuB4ih+tnoy18zIlL36UUny5/SL++tmxfnNW+qr3NTy8ajweWjlO1gK7bWc23v7bATBEGuPuFaPx/YemyDrVcmBPLt74/U4QgttyVvpjLFw6Ak/+dI6seiAnDuTjj89/DUohWu+EMASUp5i5aDh+/NISaLXKUgPkrN//p0/NNHbn4EjtT+CgFhdGCND76Kozn8Gxup+Dp9I6W9Z3l+DryuckGCG9DIpqcx42Vb0ABy+tzX1ddxXeu/YqzBKMECeFospcir9dew1WTlpzowZLI35z9XVJRsh/GDV4reAN0QZ5fdVkbcczOX9Bs7VDUu0OHhQ13c14JucvaLdJ69DZajXi+2ffR425VTKjwdKBR8/+E42WDkmMDls37jv+CUq7miUxOErRYjVi7bGPUW2S1gXUZLdh7cF1uNJaL+l4JkedzfdW7/8C1zqaJTEsDjse3LsRFxtqer6ZrhlGuw1rd23AleYGSQw7x+H7O7f
iZFWFZIbF4cBD2zbhUl2NJAbH8/jRll04WOzsGC2FYXNweGzjNpwuF+7O3Fc8T/HLLfuw43KB8/93sX/jeAqO5/GT9TtxuFC4k3VfUUrx2qZD+PpMrmQGTyle+HIfdmcJd/2+VW9vcRohUhmUAr/76jA2n8yTzPjX1rP419azToaLQmK9i/tf1h/Hun1Zkhnrd17CXz87dtM1hNT7Gj7ccBoffX1GMmPH7hz8+a8HnIu3RMZXG8/jHx8elcw4vP8KXv/tDvA8FTVC+jJ2bcvCX97cI7nY3Jmjhfjds+vBcbzLomu0h3Fo52W8+eIW8CrbR0jR/1lDxMYZcazuGQA8XD+a/iMKHg3dF5HX+i+Xc+28FZuqXgBHHRKMkJsZdd2FON74ocu5HHXgg7I/wsHbZDF48KizVGFT9Ueu51IebxS9i27OIskI6ctosDTi/bKPXc6llOLVKx+i0y7NmOrLaLZ24A8Fn0qa/8ucr9Bk7ZJVup2jPNrtZjyb9YWkH/YvL21DtblNJoPC6LDiB6e/ksR48fw+FHc0yyruxVEKC2fHQ4c3wCHh4fHbc0dxualeFoOnFHaew4N7N8Ii0Bm2r/589jTOVlfJKnPPUwqOUjy8fYtoJ91evX/2Io5cK5PFcBpFFI9v3I4Ws2tDet35HOzKK5JVs6N37k/X70JdR5fL+dsuXMXGs1fk1wUB8MKX+1De5NrIPZhdgs8OXZJJcOq3Xx5CUZVwi/penckrxwdbpS/2ffXOl8dwudi1AZpbWIN3PzmqiPHhhtM4l3Pd5byS0ga89d4BRYwNmy/g2Mkil/OqK1vw+m93yL4+pcDu7TnYvyfX5dym+g787tn1N/5ODuPo3jzsWH9e9uuTq2/FEPnrX/+KmJgYGAwGjBkzBufPf/Nv7HrXbth5k6zF+z+iKO7YAAcv7k0o6jyGbq5DEYOCIrd9N6ycSXReXsdFtNmbZS3e/2HwuNR2Al128Z3+lY6rqLXUK2Lw4HGxLRtNVvFd+NXOchQbq8ApZGS3F6PCVC86r6SzDpdayxT1j+Eoj/yOalztqBadV2lsxeG6YkXVPznK41pXE842iT8EG81GbCvPV9SjhqMU1aYOHK65Jjqvw2rB+sI8xYzmbjN2Xxd/0Jrtdnyam62obi1PKbqsVmwtKhCdZ+M4fHQ+SxGDUqDbYcfGy/nir4Wn+PDkRQUEpzHi4HmsvyC+YFBK8dGRi4qqpPYaVV+dynE595MDFxWXnicE+PKoa8YXey6qqir7pQSvyFc7LoJVyGAYgq+2u/48N227BKXtlRiGYP0m1+vcts0XFfcGIwT4et1ZlxubXV9fAOfgZRkhfbXxk1PfuFfkGzdE1q9fj6effhovv/wysrKyMHz4cMyZMweNja4ta6WilKKoY72qa9h5EyqNh0TnZLVuVdXp1kHtyO84KDrnRNM+xWXnAecD6lzrEdE5+xuOKC47DziTWA83Hheds6P2BFgVDJYw2Fl7SnTOpspzqroCs4TB1xVnReesv35JVQ8RljD4ovSC6JyvruUovr6TQfBJofiDdmPxFdh5V+FKYTGE4OMr4gvGjuJCmO12xQwA+CQnS/RBe7D4Gtq6uxVfn1Lg80s5ot2LT14rR32ntNBgf+IpxVcXcmFzCN/v7Ou1KGtsVdxsgOMpNp/Lh9kqfL+LqhpxpbxecRNGjqfYfb4AnWbhDVpVQxvO51cq7uvC8RRHs66hqU34fje1GnHi/DWXoRIh8TzF+dwKVNe3C87p7OrGwSNXXYZKxBhXC+twrUx4nes227B352XwChmUAhXlzcjPE9482WwO7Np4QVWfnab6DmSdkRZeVKpv3BB566238Oijj+Khhx5CUlIS/vGPf8Dd3R3//ve/vzFmsyUXRns15IRkbhdBSccmwdEmSxkaraWqO91ebtspONZsrUeZqUChV8cpCopTzcLuxXZbB3La8xR5Q3rFg8ehhmOCC4bJ0Y3jTTmKvCG94iiPffVnYef
7DwdYOTt21mSp6qbLUR776i7D5Og/HMBTig3lWYp7ofQyDtcVo8Ui7An7oiRbVcdejlKcbqhAtbFdmFGQo/j6gPNe5DbXo6i1SXDOurzLqrrDUgBl7W3Irq8TnPNldp4qwxAA6ruMOF1eJTi+4VKeqkZ8ANDRbcHRojLB8U3n8hTv8HvVbbNj/+ViwfEtp/NVMxw8j93nhfNRdhxXzwAFdp28Kji852i+2kbpYBmCnYeEc14OHilQ1cAOcBYt27VX2BN2/GgBLBZ1hjrLMti9PVtw/PzxInR1KDfUAYBhCXZvVOYRlMz4Ji9us9lw6dIlzJz5n54LDMNg5syZOHPm9hii1WpFZ2fnTf8pUZdd3L0uTVT0Om222gFhdNiFww0ttoHxGrXbW8ALLNBN1mbVxhQAmDgzugUSY5us7aoMhF5ZeTs67P3vlFptRlgFjBQ5clAeTQJJq512C7rsrnMWXIkHRa25vd8xO8+hsVv57ruvKkUMkaqujgH41IGKTmFGRXv7gDAqO4QZ5a1tqoy2G4x2Ycb1pjZVxifg9FJVtQmHSK83tine4fdKwzCobhVmVA4Ag2UYVDe3C45XNbSr/jwIIahuFGbU1LcpasLXVzwFahpEGLVtqjvUchyPmlrhvJ26mjawGvWM6qpWYUZVq7Qy8CLiOYrqCmkJ8Er1jRoizc3N4DgOISE3l/oNCQlBff3tC/Dvf/97+Pj43PgvKkrZuXIHb4ZqkxkAR4UtSTuvzsrslYPaQAUWaaknXqTIJpDvYuXVL6y9sggwLNzAMcwC1zI7pJ1AkiKTwLW+FYZ94BhGgWs5eB72AYr5mkReb7dD3W6vV0abMENt6AdwGgkmMYbImFQRQmCyCl9HbEw6RPw6Jov63yGlgElkF2+22BTnIvSKpxRmi8jnYbFD7deXUnqjYmt/6h6A9wEARpMIo9s+AKsUYBJl2FR7DAHALMIYCH2nTs08//zz6OjouPFfVZWwu1RMGsYd6sIyPdchwrVEtDLqjIgzdCACeQ16Vl0p8b7SMf1fS8+oK4neV25s//dESdEzIXkI3BO1pd2lXMtDo65E/U3X0vbPcJfZW0ZMXgIMDcNAqzQL7xZ5ijTLc9OqaxsgheE+AAyOUnjqhO+7h179Z0IpFb2Op2FgPndRhpv63wghgIdB+J57uOlUlxNnGAJ3kfvhblDWefYmBiHwdBf57hp0qr0uAODlKcJw0w6Ix9BTlKEbEI+hhwhjIPSNGiKBgYFgWRYNDTfXHGhoaEBo6O1Fl/R6Pby9vW/6T4m8ddILIQmJgIG3LkZw3F8fqZoBEPjqhAtDBenU91MAAH9dkGBJ9mBDkKqE2155abxgEDBqgvR+0Lgo1y5FbqwePtr+e5f46zzhprKXDeBsahds6P9756U1wEer3gBlCEGku68gP9xdfTsDAmCQl3ARuBgfvwHZjQ32EWYM8fMfkN3YYD9hRlxgwMAw/EUYwQGq8x44ShETIHKvQtQzHBwv2q9mcKi/+vfB8Rgk0hl4UJif+rAJTzFIpCR7dLj/jVoXikWAqHARRqS/+hwRhiAq0l9wPDI6AJxDfR5K9CDhBoaRMYGKk2F7xbAMomNdl5ZXxfgmL67T6ZCZmYlDh/5z+oTneRw6dAjjxklvwS1XAfoUeGtjoCY8Q8FjqM9ywfFAfQzCDAkqF3GKDL9FgqP++mAM9UxRdaKFgGBi4GzBcR+tN0b6pas8NUMwM2SK4E7IQ2PAtOBMVadmGDCYFzpWsHOvjtVgcWSm6lMz88Mz4C7gEWEIwd2xmapPzcwJHwY/vXBJ+XvjR4BR8b1iCcHk8FiEewgbNPcNS1d8fcB5L0YEhyPOV7ga7drU4ap2YwRAfEAg0oKFu7iuyUhTzYjw8caYQcJh4FUjU1XnVgR4uGFyfIzg+Iqx6hkeeh1mpQlXcb1rfIpqhk7LYu6oRMHxRZNSVJ3QAACGAAsmCneKnTc1eUCMnUUzUgXHZ0xLglajbvPE8RQ
L5wpXo504JRHu7uo2TxzHY8GSDMHxURPj4eOnrrklz/GYv2Kkqmu40jcemnn66afxwQcf4JNPPkFBQQEef/xxmEwmPPTQQ98YkxCCBN9VUBOe0TFeiPKcJjonw3+JqkRPDdFjmM900TmTguaqOtHCEAaj/aeKzpkVMk0VgwKYFjRJdM6i8ImqTs3w4LEgfILonGVRo1WfmlkRPUZ0zqrBIyRXMxRirIkV/1GvjEtT9aDlKMV98SNE59w1NBk6F834xMRTigeSxRkLhibASyTk4UoUwAPDM0Rd/dOHxiLQRYM5V7ovM13UuBw7OBqRft6KTUOGEKwZPRxaVnhhS40OxdCwQMWfO8sQrBibCoNIOe4h4YFIjw1XbEizDMHCMUnwEgnxhAf5YHxajGLPC8sQTB8VD39v4c/U39cDU8fGq2KMGzEYoSLN9jw99Jg9IxmswkRPhiFITY5AjIi3wmDQYt6iDMVhJkIIYocEI0GkSZ1Gy2LhqlHKQ1kECI30Q/roWGV/L1HfuCGyevVqvPHGG3jppZeQnp6OnJwc7N2797YE1oFWjNdc6BkfxTU4EnzvBkvEH6LxXpPgqQlQyCBI91sEnYtckyTvDAToQhR5LAgIRvtPddlvJsk7EdFukYoYDAjG+o9EgF7YBQkACd7RSPIe7LJrb/8MBqP9hyHSXdw9GOsVgrGBQxU9aFnCIMMvBok+4j1UItx9MSciSTFjmE8IRgUOEp0XaPDA8tg0RV4RlhAM9vLD1PAhovO8dHrcO2y4osWVJQRhHl6YO1i8h4peo8GD6SMUMRhC4O/mhsXxwrtvwJnv8vCYTAWEnjwBvQ7LUsX7tDAMwSMTRynacjAE0GlYrMwU3n0DzkXl4emjFCVIEjjfy+oJaS7nPjh7pCoP0t1T013OWTtvpPIaH5TinjniBi4A3L1I+fvgeIo1i0e5nLdsSabivSzPU6xZIb6pAYDFyzLBsIwiA5RSitVrXffNmb98JLQ6jbLcHQqsfHCi6rwfV/pWklWffPJJVFRUwGq14ty5cxgzxvUHpFZaxgNTw98GIRrIe5sEEe6TkOzn2mOjYXRYHvVbZ8KpDAYBgyj3NEwKftDlXJaweGzIc9CzBlmGAgGDaPchWBrxgOu5hOCZhCfhqfGQxWDAIMItHA/H3i9p/ovJD8Ff5y0rfMKAQZhbAJ5NvE/S/NeGr0aEm78sBksYBOq98IeMe6QxRizCEK8gWbUlepvS/X3cGkk/6l+PmoXUgFDZDE+tHh9NXw1WQjLqL0ZPweiwSFlGFUsI9KwGn8xdAb0Ej8pTo8dhyqDBshgMIdAyLD5ashweEjwq3xudiXmJ8bIe5gwhYAjB+yuXws/ddd7PqpGpWJ6RLMuoIj3/+701ixHs3X9uU18tGJGIeycLu9mFGcCf7luAqABfl/OnpA3Bo/OUPX9/fd8cxIUL7/B7NSopGk+tFveQCunZ+2cgeUiYy3lJQ8PwzCMzXc7rTz+8bzIyU1znEcbGBOEXz8xXxLh/zThJXXjDI/zwwitLQSlkGyPLVo3G9NkpLucFBHvjpbfWgBB5DEKA2UszMH/5NxuWAb5jp2YGWgGGJMwIfw9axg0EruJ9zlsR7TkdE0N/C0ZicmWQYTBWD3oDetbTpTHSm08y2GMk7op6FSyRlvEfpA/DU3G/hqfGW4LB42QM8RyGx4b8EjpGmms8UB+Al5N/AX+dn+S8l1jPGLww7Gdwk3i6x1/njbfSf4RQQ4AkBgFBjEco3kh/Cl5aae53H5073h/zKGI9gyUzotwD8K+xj8Ff73qxAABPrR6fTrofyb5hIHCdicSAIMzNB+umPIRQiYmoBo0Wn864GyODonpepwsGIQgyeODr2feJJqn2lY5l8e85yzE5MubG6xQTSwh89QZsWLQG8f6uFyTA6bH424JFmBUbd+N1umJ46nT4YtlKpIrkhvQVQwjeWDwXS5OHSWYYNBp8dPcyjIyS1kWYEIJXFs/E6lFp0hg
MgU7D4m9rl2BCnLgHrK9+vmgKHpqaeeMarhgsy+CN+xdgRqr01vOPLxyHHywYK5nBEIJX75+DBWOGSWbcO28kfrR6smQGIcAvHpiB5dOld/i9a046fvboTBAClyGU3tfwxH1TcI8Eb0ivZk9PxvPPzAfDEMmMh+6dgIfumyiZMXFKIl78zTKwLOOydklviGXVPePw2JPSDbHM8XF45Z210Oo0khkLVo7Cj3+1+Bv3hgAAoWoC3t+w5LQRFpPJXo+ijvW41rENDmoCgQYU3I1FnYJDgD4Z8b6rEOM5W/A4rZiM9hZktW3D5bZdsPJGMGB7KqI624Hz4BCsH4IM/yVI9pkp2dDpqy57B04278Op5v0wcf0zQvSRmBw0F6P9p0LDyM8BMDpMONBwBAcajqDD3gkWLHjwTgJx5jiEGkIwJ2Q6pgZPgo6Rf3zS5OjGztpT2FZzAi22DrCEuZF3QQjpYfhjScRkzA8bD4OC0zDdDhs2VZ3DhoozqOtu75cRYvDBqkHjsCxqNDy18o9KWzkHNpRn4fNr51FhaoWGMDfcxQwhcFAeQQZPrI0dhTWxI+Gjk3/ixsZx2Fiai48KL+BaZws0hAHtyUxi4GT4691xb3wG7k8YiQCD/HwJB89jc0k+PrpyCQWtTf0yfPQGrB02HA8mj0CwuzSDra94SrG9qAAfX85GbkN9j8eqh0EIHDwPT50Oa1LS8MDwDIR7yf+9U0qxp7AYn1zIQVZNrXNhoLiJ4abVYuXwZDwwMgPRfr6KGIcLy/DpmSycL6++sfhQip428Tz0Gg2WZiTh/nEjMDhQmlF4q04UXMfnJ7JxpqjixsLAUwqGMOApDw3DYtHIYbhvcgbiQqUZhbfqXGEl1h3Owon86yDEabb/h0HBMgRzMhNw74wRSIxSdmoiu6gaX+7LwvGsUoDgJgalFIQA00fF4545IyR5QvpTfnEdNuy6iCNnip2eBYaA5ykYpqfJGwUmjY7DqgWZSE9Sdtqx5FoDvt56EYePFYDjKJhbGJRSjB01BCuWjkRmhnTDs6+ulzVi8/rzOLT/ChwODgzD9DAIKKXgeYqRo2Nx18pRGD1OuuHZV1Xlzdj2xRns354Nm80BlnUyej9/juMxfNRgLL1nLMZOTVRlhMhZv/+/MER65eAtqDAeQIslHza+CyzRwcAGIMZrNvz08aJ/SykHjtrAEoPoh+PgbSjuOoka8xVYeCNYooE764sE7ykIc0sQZfA9DI1LhgN5HRdQarwKM2cCQxh4arwx3HcMYtzjRf+Wpxwc1A4t0YvO4yiHrLbLuNJRABNnAgMGXlpPjPTLQKKXKwYPO2+HjhGvKcBRHhdbC3ChtQBdDjMICLy17hgbkIJ036Gi+SQ85WHj7dAxWpfzzjeX4mRTIdptZgAUPjp3jA9MwNigoaIhHEopLLwdekYjyqCU4nxzBQ7VFqHNZgZPKXx0bhgfPBhTQ+OhEQmTUErRzdlhYF0zLjXVYG9lIVqsZnC80zgYGzIIs6PioRU4TdSXoWc0oiEbSikuN9VjV1khWrrNsPE8fPR6jA6NxNzB8aKhGEopLJwDOoZ1GRbKb2zAjpIiNJtMsHEcvPR6jAgLx4Kh8TBohA1bSiksDge0LCt6TwGguKkZ2/ML0Wg0wepwwEuvR1pYCBYmJcJdNzCM682t2JZTgMYuI7rtDngb9EgMDcKitER4GoQTOimlsDo4sAwRTWAFgKrmdmy/eBV17V3ottnh5abH0NBALMwcBh93cePZaneAEKdnRky1LZ3Yee4qals6Ybba4Ommx+BQfywckwQ/T3Hj2Wp3OENpLhiNrV3YdeoqqhvbYe62w8NNh+hQPyyYmIQAH/FTHTa7s2KyTiQRFwBa203YczQflXVtMJutcHfTITzEF/OnJiMoQDxPzu7gwPMUep04o73DjP2H8lFR1QKTyQo3gw5hoT6YPSMZoSHCya8AYLdz4KlrRldnNw7uu4LyskYYjVYY3LQIDvHGrLl
pCI8QN2wdDg4cx0PnIifEZLTgyO5clBbVw9Rlgd6gRUCwF2YsTEdUjDLD9lbdMUQGSCZ7Fco7v0Zl13bY+HYAvfVFEhHrczciPGaDFSgUJlVd9nrkt+9EQcdudHNtNxj++sFI9b0LQ72nqy6e1mFvwcWWA7jQegBdjl4GQaA+AuMC5yPddwr0AsXIpDM6cKLpGI43H0WrrfUGI1gfjGnBMzA+YALcNeqOkXXYjThYfxZ76k+i0dIKCupkGPwxP2wSZoaMhbdWHaPTbsae2kvYXHUGNd29DCDY4IslkWOwOGI0/HTyPQJ9ZbRbsKP6Mr66fh7lxhbwPYwggxdWDMrE8kGZgrVMpMrssGF7RT4+K76Eks4mcNTJCDB4YFXscKwZkoFwD/EHpytZHHbsuF6ITwqyUNDaeKMMur/eDSuHpmJtYjqivXxVMaycA3uuleCT3GzkNdbfYPjqDViWmIS1KcMR6yeeKO1Kdo7DgWul+DQrGzm1dXD0lO300uuxJCkR96QPR3yguoezg+dxtLgMn5/PwcWKmhsMD70O85Pjcc+o4RgWqq5WA8fzOFlUjnWnc3CutAr2njoY7jotZqcOxd3jhiM1Sl1tIp6nOFdcia+O5+B0YcWNRn4GnQbTU+OwetJwDB8cpmonTSnFpaJqfH04Bydzr8PaY4jotRpMTBuMldPTkZkQqZqRV1KLTftzcPzCNVhsvcYOizFpMVgxOx0jUwapLpxWUFyHrbuzcexUMbp7KtJqNSxGDI/GsoUjMCojRnUp+WvXGrB9axaOHLkKs9lZkVajYZCaGoWly0Zi3Lg41QwlumOIqFS3owE5Tb9GY/dpELCguLVrJgOAh4Z4It7vEcT5PCD7R2F2tOFow5soN54GAemnsR0BQKElbkj3X42RAffJDhl1O4zYVvNPXOk4DYgydBgXuBAzQ9eAlRkysnDd+KLyc5xrOdPjzu//66QhWkwNmoYVkatkh4wsnA3/KtuEgw3nwFO+XwYBAUMYzAoZi0dil0EvM5xj4x34a/EubKs+Bwd1ft63UpwMgtmhGXg6cYlgvREhOXgOfyk8hHVl52Dr6YtzK6M3T2N2eDJeHL4Q3jILqPGU4t0rJ/BB4Tl0c/aeT/gWBnG6emdGxOO3o+YhwCDPeKOU4u955/C33LMw2m0939+bKSwh4CnFlIhY/HHiHIS4i+9I+2N8nJuNd86fRofVCqbnercyOEoxPjIKf5w+F5EKnhFfXc7FmydOoa27W5QxMjICv58zW7TwmZC25xbg9QPH0Ww037jeTQyGgOMp0iJC8driWYgPlm/07M8rwR92HEVDh1GUkRgehFeXz0JypPxTi8fzy/CHjUdQ09J543r9MYaEBuClu2ciPVb4WKmQzl+txB8+P4jKhnZRRnSIH56/bwZGDZNfvPJyUQ3++K8DuF7dIsoID/LG0w/OwIQR8o+uFl2rx+vv7sO1skawLLmti29vaCc40AtPPjINUyaIe8v70/XrTXjjT7tQWFAnyvD398D3fzAds2a5TmwdSN0xRFSoy3Ydp+oegY1r78cA6V9RnouREfSKZEOh01aHrVU/gcnRLLmzbqznFMwK/xVYIm0R77C34MPSl9Bqa5DMiPcagbWDfgGNxLyPTnsn3ix6HXWWWkl1SAgI4j0T8KOhP4VeYtl3o8OMl/L+imvGKkk1WwgIhnpF49WUJ+ChkbaImx1W/Dz7I1xuL5fEYEAQ6xmKdzIfha9O2iJu5ez48fmvcLrpmqQTgSxxJtF+OOEByd4RO8/hx6e3Yl91kaT5LCEIc/fGF9PWItLTV9LfcDyPn5/cg82l+ZIZgW4eWDd3NYb4CBc/6ytKKV46dgifX7ksmeGjN+DzpSsxLDBIMuNPx0/g/fPSuoqyhMBdp8PHK5dheJj0XIa/HTuLvxy9vcFnf2IIgV6jwftrl2LUIOm5DJ+cyMLrO49JZmhYBu/evxgTE2IkMzaeysVrGw7dyLlxxWAYgj89tAD
T06TnMuw5W4CXP9wLSqnLY8zOEyAEv354LuaNlZ5Ee/RCCV58Zyd4nro8/kt6/tcvHp6FJTNcH43u1fms63jhtS1wcLzk4m5PPDwNq5ZKP52Se7kSzz+3ATabQzLjoe9Nxr33iddiGkjJWb//T5+akSuLoxmn6x6TZYQAQJVxO/Jb/yyNwXVge/UzMDlaJBsIAFBmPI7jDW9LKqZl4cz4uOxVtMkwQgCgpCsbG6v+Itipt6+snBXvlLwl2QgBnKmJxcYi/LPsb5IYdt6O1/LfR6mxWnLhOAqKa11V+O3VD2CX0I3XwXP4Ve7nyJVohADO7rllpgb8PPsjWDnXTdd4yuP5rM0401QquSwBRymqzK34wZnPYHK4bjhFKcULF/Zgv0QjpJdRZ+7E/Ue/RLtVWhPH35w/ItkI6WU0d5tw794NaOo2SfqbN8+dkmyE9DI6rBbct+1r1HRJ69j9/vkLko2QXobJZsODX29GeZtwR9W+WnfhsmQjBHB6s6wOB77/xVYUN0rrdrrt0lXJRkgvw+7g8NSn23GlWrjzd18dvFyC36w/5EzKlMjgOB4/+/cuZJVK64R+Ou86Xv7XXvC8ayMEcCaI8jzFy//aizNXyiUxsguq8at3doLjeEk1SGgP5w//OoCj50skMQqK6/DL17bcyDmRqr9+eAR7D1+RNPf69SY8/9wGWK3SjRAA+Ojfx7Ft6yXJ879N3TFE+uhq6zuwci2yjJBelXZ8hjZLnst5F5o/QZe9QQGDoqBjF2rM2S5nHm/cjCZrjexqqRQUeR2nUNjp+gF9qPEAKs0Vihi5HZdxrvWsy7n76k/jamepbAYPHnkdJThQ73oR2F+fg3MtxeBlVi7iKY+CzmpsrDrlcu7hukIcqLsqm8FRHqVdTfj3tZMu555qKMem67my6y9xlKLa1I53810zshpr8XGB/AcZRykau43406XjLucWtTTjrxfPKWK0Wyz47cmjLudWtXfgjeOu3++t4imF2WbDKwcPu5zbZDTht3uPKGJYHQ68tOOgy7md3Ra8stn1vFtF4exL88v1+1xubLptdrz0xX7ZRemcizjFLz/d63KxdDg4pydEQfUwCoqX/rUHDof485TnKV792x7wvNxfoVOv/WMvLFbxTQelFH94ew84jldUmO7N9/ajy+i64/qbf9oNm82hqMLzX987iNZWo/wX9w3rjiHSIxvXjmrjHkVGCAAQsLje+bXoHDvfjYKO3bK8FLcy8tq3is5x8Hacb92vgsHgbMtu0Tk85XG48aDi8vYEBIcaDojOoZRiR430nd7tDGBH7TGXP9aNlacU9wuioNhYedplWfl118+BVcjgQbGh/KJL786nxRcV99nhKMWGshyYHeKt6D8rzFLF2Fp6FR1W8Qft53k5soq43crYV3YNjSbxB+2Xly8rLnPOUYpT5RWoaGsXnbcx64riNvI8pciprkNRQ5PovG2XrsLuYgEWY5Q2tiKnok503t5LRTBZbIp+6TylqGvrwpmiCtF5R7NL0dbVreh+UQq0dXXjaE6p6LxzueWob+5U/JmYum04eEbc23iloAblVS2Ke+3YHRz2HRb3NpZea0BBQa1iBk8p9uyW7m38tnTHEOlRZdc2xUYI4KxFUm3cAxvXLjinuPMAHNS1m12MUW48CaNd+AGV33EG3Zxyi5eCR6kxF83WWsE5uR2X0W5vV8GgKDdfR6VZ+AF1peMaai1Nijv5UADV3Q242in8gCroqEZRV42qfkGN1g6cay4WHC/rasKFlnJwKhjtNjMO1RUKjteaOnG4tkRVnx2zw46dFVcFx1stZuwoK1TFsPMcNl4Tdj932azYWJh/W6KlXH11VdgzaXU48OXlPFUMhhB8dTlXcNzB8/jiQo6qUuosQ/DlBWEGpRSfn8pRfP0bjDPCixKlFOuOZavqe8QyBF8dzxGds/5QtqpGkgxDsP6QuKd44/5sVZ2HCSHYsDdLdM6WXdnqTqdQYNOOLNHN0/b
t2Yr73wAA5Sm2bs1S3Vl4oHXHEOlRtXEv1DTJAwAKO+rNJwTHSzqPQE1HYCcDuG4UdivndZxW2RHY6RXJ7xAOnVxqu6CqWy/gLN1+sfWC4PjJ5mxVnXQBZ+n2k805guNHG/MGhHGkUXjBOFh3VfEOv1cMCPbVCi/g+2uKoPZ7RQDsrBQ2RA5WlcKhwggBnN/dHWUFguMnKitgcbjO6xETTym2FwsbbeeqqtFlVb4ZAJxeke0Fwozcmno0G83qGDzFrivCjOL6ZlS3dqh6YnE8xb68YsHddU1rJ4prmxV7EXoZJ65ev3E89la1d3Uju6RGldHG8xTZxTVo7+o/z8liteNMznVVnYcppSipaEJdU4fgazh2uljVAk8B1Na3o6xCOD/oyOGrt52OkavWFiMKC4Q3mv8N3TFEemTlWlRfg4CBjWsVHDdzrVBr7DBg0C3idemyt6na4QPO0InJ0f8PDnDWDFHTrRdw7jCMji4RRpeqhxPgfHh02oUZbTb1sVKO8qLXabWaVBuGPCiaLcKMFotJtbFDATRZhJNJW7rNqhkARBNWW7rNKu9Uz3XMwkaA2JgctXULJ/e2mgaG0WW1geP7/521qjR0euXgeJis/YfkWrsGhkEp0GHq/34NFAMA2gSu1WFUFvbpT60d/TNMZiscjoHxMrQLMDiOh8mkzoi+wWgfuPs+ELpjiPRITVjmPyLgqfCOTmxs4BgD8T4AToTBDQCDUnqjXkf/jP7rhchiADcKRwkxVCIAOE/eCI6p9CL85zpi94oOyAJuF3kfHOVVG1ROhvD9cPD8gPS1ELvnQgu7XIldR+w7N1Ach4rd/e3X6p8xUPcKEP7cBzJE4BC4lloPgjTGAL4PAYPm27hX/y3dMUR6pGXkFVzqTxQctKzweWmDyJh0Bg89K/xa3TXq3wcAuLHC1UM9NZ4DEP4h8BCptOqpcVcdNmFA4ClSEMxL46Yq/t3L8BZpyCe3IJmQxOqVeGv1sk/k9Cc/vfBr9dbpVeWH9MpXL1yJ2FuvV+0FA5yvVYwxEPIUuY63SHl3OdKxLHSa/usG+bgNDAMAvARer7ebuqrRN1+rf4aXx8AxhK7l5TFw98pbgOHpOYDvw7P/16vTaaDVyu9R1j9j4F7vQOiOIdKjILexEjr0ulagIVNwLNJ9hITuueKi4BHuJtyhMtYzZQBCARwGeyQLjid4Jar2VnDgEO8p3N8nxSdO9cLHgUeyt3BBpXS/WNUeCx4U6X7ClRdHBsSoZhAQjAqIERwfEzxIdYInQwjGBQszxoZGqzZ1WEIwIVy4Idjo8EjVPheWEEyIEq62mREerioxspcxLjpKcDwlPAQ6Fz1kpDBGDRLuDDw0NBAeevkNJ/uKIQRp0aHQCCRYRgf7uuw140oEQGyoP7wEDJFgX0+EuugDI0VhAd4I9u1/8+TprsfgiADVmw5fbzdEhfr2O6ZhGSQlhKkuCe/upkPsIOGifGnDo1QztFoW8QnqSv0PtO4YIj2K8V6pMjzDIMCQCS+d8KKU7LtI8bFapwgC9XEIcROuJJjpP0OlIULgrwtFrGeq4Iyx/uOhJeoegj5aH6T5pguOTwzMgDurzmr30LhhYlCGMCNomOq+MQZGizlhIwTHxwYNRoS7r6pPhCUEd0ULv4/0gHAk+ASpW8QpsCZOmBHvF4iRwRGqFnGOUtybmC44HuXtg8nRMapyUThKcX+q8PsI8vTA3Pihqhn3jUgXHPc2GLA4bZiqUxocpbh3jPD7cNNpsXxUqioGTynunSDM0LIsVk1MU/WZUwBrp2QIhtwYhmD19AxVRgIhwOoZ6YILNCEEK+dmqMoTYQjB8lnp0Ig091u2cITiY7WA814snJMGg0H42br0rpGqGCzLYPbs1AH14AyE7hgiPfLWDYG/YQSU3xIesd53i87w0oZikMdYFV4RilS/ZaIzPDW+SPEZr+pUy7jABaKxeneNO8YHTlDMICCYHjxTtK+NntVhTqhyBgMG80I
nQidSrl7DsFgWOe5Gfxe5YgmD+REj4SHSc4YhDO4ZPEbR9XsZ8yJS4acXDs0QQvBA/CjFHguWEMyIGIowd/HQ4QPDRigOnTCEYFxotMsy7w+kZSj27jAAUoNCkBIs3kflvhHpihkEwGA/P4yOFC/BvmZUmqpTGiFenpgcFyM6Z/VYdQwfNwNmpYiXYF8+TnhDIkVuOi3mj0wUnbNoYrLLjs1i0jAMFk4Q9uACwJyJw2Bw0fXWlRZPF78XUybEqwp58DzFknnponPGjBmCgADlmyeO47F4qfDG6b+lO4ZIHyX5P6XIm0DAwlefglCPqS7njg58CISwkHvckoCFv24whnpNdzl3WsgqsEQr+70wYOCvC0Gmn2vG3NAF0LN6RQxfrS+mBE1zOXdJxDR4atxkGwoMGHhpPbAoYorLucuixiFA7yU7H4UBgTurxz2DJrtmRI9AhLufIoaOYfFovGvGkkHJiPMOlL3TJwA0hMGPUia5nDs3Jh6pASGKGAwIfpbpmjFl0GCMDo9U5rEgBL8Y75oxMiICUwbHKN7p/2LqZJdJtclhIZifHK+Y8fNZk1wuzjFBflgxOkWxN+Gn8yYK5qD0KsTPC/dOU75w/XD+OLjrxRtQ+nq64XsLlBvrDy0YA18XISR3gw7fX6W8z8rq+SMQ7C8eQtJpNXjsQde/1f5ECLBwThoiw8WbKrIsgx887vr53D+DYNq0YYiLk9/w8JvWHUOkjwIMGRgR/Bqcj05pv24CFu6acIwNfReMhHBFkCEec8JfBgEjeREnYOGhCcCiqNehYVwnXgUbInFvzHNgCCvZ+8KAgbvGGw/Fvgw96zouHKQPwo/inoaGaCR7LRgwcGPd8HT8z+GpcW3VB+h98OvUJ6BjdLIYelaHV1N+CH+d6zb3vjoP/HnEI3Bn9ZINBYYw0DIavJHxEMLcXLeg99Qa8P64++GjdZPOAAHLMHh39D0Y7Om6G6tBo8XHU+9GkMFT8iLOgIAlDN6bsAxJfq4fTlqGxUezViDS00cyw/lLInh7ykJkBgvnPNx4TYTggwVLMMTPXyYD+MO02ZgQJZyDcmM+IfjL4oVICg6WbSi8OGMaZsYNkTT390vnICNKfk7KT6dPwMJUcS9Cr361dDomDB0k2xh5dNoorBwjzdvxk8UTMXN4nOwt2prJ6bhPohHz6OKxWDA+SSYBWDghCY8uHitp7t3zM7Fidrqs6xMA00YPxRP3SDMwFs0ZjrUr5RlVhACjMgbjpz+YKWn+9BnJePgR15usmxkEqamRePa5hbL+7tvSne67/ajBfAIXGp4FR3vLUffXdp4FBQd/QwbGhLwNHet60euralMW9ta+BBtvAvpt1v4fRpA+AQsifwd3jetFr68qTUX4rPx3MHNd/bZqB5wLNw8eIYZoPBDzK/jo5LUgrzCV4y8lf0aHo8MlI1gfgp/GP4MgfbBMRh1eyf87mq1tYED6PSHyH4Y/Xkl5HFHu8pKxqs3NeCbr36jubhFhOP89QOeFN0Z8D/Fe8tqc15nb8cNzX+BaV2O/rdr7Mvx07nhvzFqk+U5lZusAALbsSURBVEnvwgoATd1GPHJ8A6601Qsyer9t3lo9/jFpBcYEu168+6rN0o1HD23GxcYalwwPjRbvTVuCaZHyWql3Wq14Ys92nKyuFGUAgJ7V4M+z52PukKGyGGabHT/duQuHSssEGb0cDcvi93NmYWmyvMXS6nDgl9v2Y9eVon5bzveKIQQMIXhx/jSszpTe6RUA7ByHVzcfwuaL+S4ZAPCzBZPxwCR5Xg6O5/Gnzcfw5fEclwwKih/OH49HZ4+WdRyb5yn+tuUkPtl9AaSnhX2/DIaA8hQPzB+FH941UVbyJqUUH285hw82ngIhwoze97hyTgZ+fP9UWaEjSim+3nYJf//oKEAhGM7sZSyYlYqnfzhLNP+kP+3ckY2/vLO/p1GgAIMl4DiKadOT8OwvFkCnMjwlR3LW7zuGiIDsvBHVXbtQ1vk
ljPbym8YIGIR5zMRg79UIMIxQXPvAznejpPMQcts2o9V2/RYGQYznBKT6LkWEuxqGFXntp3GmeRdqLWW3jSd4ZWJs4DzEeaaDUXhc1s7bkdV2EYcaD6LMdHtJ9STvZEwPnok0n+GKGQ6ew9mWXOysPYb8fsq2p/jEYWH4FIzxT4WGUXZiwcFzONNciK8rT+FSW3+MaKyImoApISnQMcp+0BzlcarxGr68fh6nGktuM3eSfMKxNnYMZocnw8AqSwjmKcXphnJ8VnIRh2puZyT4BOHB+FFYOCgJ7hpxt7mQKKU411CFTwuysbei+LaH7RAffzyYlIm7hiTBU6vs+CSlFFn1tfgsLwe7rhXfVu8i2tsHDw4fgeWJSfAWORbsSrl19fg8Owc7Cgpvq3cR4e2N+0akY3lKMvzclJ8gKahvxJcXcrH18lXYuJuT4kO8PLF29HAsz0hBgIfwUXBXulbfjK/O5mLLxXxY7DfXAfL3dMOacelYMToFwd7KcwzKG9vw9clcbD6TB/MtTeB8PQxYOXE4VoxPRaif8pMw1U3t2Hw0F5uP5cHYfXMBL083PZZNScWyqWmIDPJVzKhv7sS2Q7nYfPAyOm9pNOdu0GLx9DTcNTMN0WHyNn991dxixM79udi6KxtttxQp0+s1mD8rFUvmpmPwIHmbv75qazNhz+7L2LrlElpabi56qNWxmD07FYuXjPivhGPuGCIDKEopOmxFsHCN4HkrNIwXvHVDYdAIJ91ZHU3osObBwXeCEC30bAB8DZmCoRtKKVqsZTA5muCgVugYD/jrY+ChEf6Cdjva0GS5CivfCQYsDKwfQt2Gg2WEF5ZGSxXabE2w8xboWXcE6SPhK+IB6eaMqDYXwNLjUXHX+CDKPRlakfBQvaUOzdYmWDgL3Fh3hBhCEKgXPo7WzZlRZiyGydHVU1vEC0M8E6AXOTFT292EekszzA4L3DUGhBkCEeYmxrCgsLMUXQ7nD9VL44EEryFw1wgvLLXdragyNcHEWeHO6hHm5o9BHsIMC2dDbnsZOuwmUErhpXVHik8MvERqjNSZ23Hd2Ayjwwo3Vodwdx8M8RL2Flk5O7JaK9BmM4GjPHy07kj1i4SfSI2RenMXrnU2o8tugYHVItzdG/E+QYKGrY3ncKm5Ei0WE+yUg4/WDWn+4Qg0CC9ejWYjitub0WmzwsBqEOLuiST/YEGGg+dxobEKTd0m2HgOPjoD0gJCEeIuvHi1dJtR2NyEDqsVepZFsIcnUoKEGRzP41JDLRpMRlg5B7x1BiQHBiPCS/g50t7djauNTei0WKBlWQR5eCAlNEQwtMJTiuy6OtR3daHb4YC3Xo9hQUGI8hH2jnZZrMiva0BHtxValoG/hztSw0MEd9yUUuTW1qOmoxMWuwOeej3igwMQ4y+cS2Cy2pBf3YCObgtYhoGfuxtSokKgFThSTClFfk0Dqts60W23w1OvQ2yQP4YECz/jum125Fc0oMNsAUMIfDwMSB0UCq3Arp5SiqLaJlQ2taPbZoe7XoeYYD8MDRN+/ljtDuSX1aPT5DQUvD0MSI4NhV4rvAm4VtOMioY2mCw2uOu1iAr2RXyk8Pfd7uBw9VodOowW8JTC28OApCGhMIgcjb5e04Ly2laYum1w02sRHuSNxMEhwt93B4fCknp0dHaD43h4eRmQMCQU7u7Cz+qq2jZcr2yG0WyFQa9BSJAPkoaGCn/fOR5FRXVobzfDYefg5WXA0PjQ/+rpmDuGyH9BlFK0WS6guvNLNJj3A7cc09Uyfoj0vhuRXqtg0Cg7w00pRZMlHwXtm1HWdei248Y6xguJPkuQ4LsEXtowpW8Ftd0luNS6C/kdR2+rsKpj3JDhNxcj/ObBXy8vNNFX1eYKnGw+iPMtJ2CnN++sdIwe4wKmYmLQTIQalDOqzHXYX38cRxpPw8rfXMZax2gxJWgs5oROwSAP17kLwowmbK85jd2152Dmbt69aQmLWaGZWBI5AfFe8kIsfVV
jbsPGygvYWHEBXY6bd28awmBueCpWDRqDVN9IxZ6zOnMH1pdlYV3ZJbTbbi7HzRKCORHDsHbIKIwMjFLMaDQb8dW1y/i0MAvNt5STZ0AwK3ooHkgYgXGhgxQzmrtN2FB4BZ9eyUb9LV14CYBp0bG4PyUDk6OUJ6u2W7qxMT8fn2TnoKar87bxSdGDcH96OqYOHqz4NEiXxYoteVfx6fkcVPbT6XfMoEjcOyodM+KHQKOQYbLasCOnAJ+fyUFZ0+2tKTKiw7B2XAZmJsVBJzNs0Ktumx17soqw7kQ2imtv76GSHBWCeyalY3Z6vKiBISar3YEDF4vx1ZEcXK1ouG08PjIId09Lx5zRCXDTKfMy2h0cjlwowYYD2cgrub1jcUy4P1bPzsCc8cPg4abMy+hwcDh5oRSbdmUhJ7/6tvHIMD8sX5CBuVOT4TmARdq+Kd0xRL5lOXgjLjf8GK2W0zfyOvqX84GREPBLRHuvlcmw4Gjdr1FpOiHKIGBAQTEy8DGk+q2V9UB38HbsrH0b+R3HwIAFL8rgMTHobkwOksfgqAMbKj/G6ZYjN/I6+lPv2PTg+VgSsUZWSIejPD4r34RddYclMWaFTMLDsatFjxPfKkop/l22F59XHARDGPACRctYwoCjPGaFZuLniauglRHSoZTi36XH8V7RQWc8WygO3MOYEZqE36avkB3S+bTkPH53eT9AROLZPYxJIUPwztjlssMt60su45dn94GCumSMCY7C+9OWw0dmuGVbSQF+dmQvOMqLMJy5IMODQvHv+csQ4CYvFHKg9Bp+vHs3rA6H4HHpXkZCYCA+vmsZQjzlhUJOllbgyU070G1zGuj9cXoZMf5++Pc9dyHSV16O2sXr1Xji8+3oslgFMtSc+R48pQj39cYHD96FwUHywhRXKuvxxPtb0WbqBiHot45HLyPI2wN/f+wuxIcLex37U3F1E578yxY0d5huXOt2BsBT5+mcd59aiuQYeRvBirpW/Oj1zahv7gQjkLvSew893fV44+klyEiQt/GobWjHz17dhKraNmFGz2PWoNfitV8swej0GFmMb1t3DJFvUQ7eiAu198JoL8GtXhAxDfH9EWL9HpfIsGBP9Y/RbCmQVRAt1e8ejAr6oaS5HG/HV5Uvo8KUJ6tq6gi/eZgb9kNJxghHOfyr9M+40pkDOU1eRvlNwL0xP5BkjPCUx7slH+Nks3Bn31tFAIz0H45nEr4v6VQLpRRvFn2NXbXnZDAIRvgNxR+GPyI5h+Wtq3vx6fVTkhkMCNL8ovCPMQ9KNkbevXoM7149Lp1BCBJ8grFu6oPwkJhf8kH+Ofz20hHJDJYQxHj5Y/O8+yQbI19cvYwXjh8QXFT7Y4R7emPzXfcgyF04tNVXWwuu4pm9ewEZjEB3d2xecw/CvKTlTOwvvIYfbdoJgEJKiRCWEHi7GfD1g3cj2t9XEuNUSQUe/3QreCpsFN7KcNNp8cVjqzE0RFo+w6XSajz2j81wcMJG4U0MhkCn0eCjJ1ciKUpaPsPV8no8+uZG2BwOSfVUGEKgYRn8/afLkREnzQtaVt2MR179CharXTKDMARvPbMUY1NjJDGq69rwg1+sg9FkkcQghIAQ4DfPLsbkMfIStL9NyVm/7xzfVSFKKS43/Fi2EQIApe1/QZ1xp6S5x+tfk22EAEBe2zoUtm+VNHd33V9lGyEAkNW2B+datkiau6X6C1zpzIbcTnMX2k5hb500xoaqnbKMEPS8mgutl/F5+WZJ87+qPCLLCHEyKLLaSvB20SZJ8zdUnJdlhADOcvO5bVV46bK097G1IleWEQI4PSZFHY348ZmNgpn6fbW3skiWEQI4q4qWd7Xi0SMbJS1ix6vK8avjBwBI/2ZxlKLW2ImHdm+GnXNdUflCTQ1+tm8fqExGs9mMBzZvgsVhdzk/v64BP92yC5RKM0J6GZ3dFjy4bhOMVtedWa81tuCpL7aLeo36Y3Tb7Xjko81oNwt3He5VdUsHnvx
gm2QjBAA4nsJqd+AH/9yMxg7XXbGb2o144i9bYJVohADO766D4/Gjd7eiplm4u3iv2ru68dQfN0k2QnoZPM/j2be343qN647uJrMVP33la8lGCOBcdyilePmNHSgqvT0U9b+oO4aICrVbL6HVchpyjZBelbS+CeqiD0mLpRjlxqOKS8NfavkAHBV/CLZaa5DbflBx/5jjTetg4y2ic9ptrTjetF/R9QHgQMN2mB3CLeQBoMtuxNYa5YzddYfRZhN/QHU7rPjkujIGBcWuunOoMd8eK+8rG+fAe0UHFTF4UOyvu4LiznrReRzl8UbeIWUMSnG8oRTZLbfHsfuKUoo/XDqqqG4tRynON1bjRO11l3NfPyfPmOrLuNLcgAPl11zO/fNpeUZhX8a11lbsLCp2OffdE2edRzEVMGraO7E596rLuR8cvQA7x8kud87xFC1GM9afz3M595Mjl2Cx22VX4eUpRafZii9P5Lic++XhbHR1W2WXO+cphcVmx2cHLrmcu+VwLlo7zLKr11LqzPf4ZMd5l3P3HM5HQ1OnIgbPU3yy4Yysv/uu6o4hokJVnetUNcqzcvVo6T4pOqegfbNKRgcqjOIP6kttu1U147PzFlztEGecaj6s+PqAM6xzrlWccaTxjGCuhhRRAIcaxBecAw1ZsPKud7dCYsBgR634w+NgfT467a53nkJiCYOvK8QfgsfqrqHR4nrnKcb4ovSi6JyzDZUo72pTVXr+06Is0Tm5jfW40tyoivHJlWzROaWtrThbXa24vD0B8HG2+Puo6+jCkeIyVc0LPz2fI+qlajN1Y09ekeKy8DylWHc2Bxwv/BszWWzYej5fFWPD6VzYHA7BOVa7A5tO5CnuucLxFNtP58NksQnOcXA8vj6Yo/gz53iKA2eL0N4l/DumlGLjriy5DuIb4nmKkxeuobG5S9kFvkO6Y4golJVrRqNpv8pGeSyqOr8QYXThWtc+VQwCBgVtwuEAO29FTtt+1c34LrRsFxzlqAMnmpR7XACnN+FY4z7BBy1PeeypO6Kasbf+KDgqfL83V51Q1VyOB4+dNWdh44SNmS/LzyrufwM4vR3bq7NhtAt7qT4rvaCy8RuPPdVX0WoV9lJ9Wpilurnc4eprqDEKe6k+z89RzThXV41rbcJu9C9yL6tiUABXm5pwuV7YS7U+O0/xSaFeRmVbO86WVwnO2XzpiuouzU1dJhwrEvZS7bxUAJtd2IiQoq5uKw5cLhEcP3ipBF1m12EoMVlsDuw5VyA4fiqnDC3t4h5YV+J4ih3HrwiOX8qrRE19u6qu1oQQbN9/WcUVvhu6Y4goVKc1T6URAgAc2izCLsJWawl4F2EVV6Lg0Wi5IriAN1urYOOV7757KY3Wcjj4/ncYLdYmmDj1VnuLrQlGR//X6bB3odnWpprRYe9Cs/X2o4yAMyxTYW5Q9eAAABNnQaW5qd8xnvK40l7Tb2VXObLyDpR0CcePLzVXqV6UHJRHXuvtRxl7da5BPYMCyG6uFRw/U6ueAQCX6oUZ56qrVTMYQnCpVphxoVK5x6VXLEOQVS3MyKqolZTXIyYNwyCrokZwPOd6rapuvTcYZcLvI6e0BhpW3dLFMAQ5pcKMy8XqGZRS5BYLM/IKasCy6u4Vz1NcvioeIv1f0B1DRKHs3O31A5SIo2bBPBEbr9x13lcUPBy0f2PDwg0MA3AWQOtPZs7c778rY/S/SzE5Bo5hFLiW0aHWYHPNMDlsqrw6fSUU3uEoD4uIR2YgGABgsqvbtfaqwybs2emyqWcwhKDDKszosIjnP0lldIow2rvVMwgIOi3C96PdrJ4BOD0WQuo0W1QbbTyloowus/zckNsYPEWnSZhhNFsH5FcoFpoxmqyKmqzeqo6ugflc/5u6Y4golJQGd1LkzP/o/8vIYGAYgPDrZcnA9R4QupZGRn0OpQw5NUBcSSPEUFg6vl+GwLWUFqfqT1oBBtPTcvGbZABQ1dpdKmMg7helVLDqqCu
+HIkyRMYGjjFAn4cYQ8Oq/mYRIv5atRpWccfhG4ye6whJw6p/HwCg04rfq4HQQF3nv6k7hohC6Vjl/QH6Ssv6CsaG3TTiLaElM4g7WAFDxGOAGM4y8/3XY/DSyCu2JCZPTf/1GLy1yntb3CofgWt5a9wHbAH3E2AYGC0MzMAYoP66/gtpEULgq1PeN6WvAvTCxboCDMr7pvRVkEG4zofUGiBiogACRQqbBXt6qP7UHTwvygjx8lAd0uApFe1VE+QtvTOzkCil8PcUZgR4uoNRbRwS+HsJM5xj6t4HwzAI8BZh+LirDmMxDEGgr/D308/XXbVnhyEEgf7Kewd9V3THEFEoX0MGtIzyhkiA0xsS5rlYcDxAHw9PheXg+zJivWcJjvvrwhGoj4aaHzYDFoneE8AIeCV8dH6I8YhTdTKHAYNk73TBHjQeGjcM900Co4pBMMwrDr66/ovvaBgWk4JSJRU9ExIBwRDPcIS79d/HgxCCueFqGUCkuz8SvIW/O4ujU1UvSoEGD2QECFeQvCs2WfXi6q3VY1yYcHfgpUOHqTYSDKwGU6MHC44vTkhU7abXMAxmDhkiOL4gKUF1jgilFHMS4wTH56XGqw6bcJRiXmq84PicjATRUzWSGDyPeRkJwoyR8QPCmD1S+H3MHJOg+ORPr3ieYuYY4fcxdVy86jAsTylmTkpUdY3vgu4YIgrFEC2ivO+BmltIwSHSa7XgOCEMkvxWQI2RQMEh0XepCINgtP9iKD5DBoAHh5EBC0XnTA2aq+pkDg8ek4Nmi86ZFzpVsJy7NAbFvLCponOWRk4Ep+qIMMWyyImiJyRWx4xRxQCANTFjRRl3DxmhalFiQHDvkFGioZE1Q9PVfK3AEoI18ekwsMLhw5UJKapCQCwhWJGQDC+dcMn6JcOGwU2jPITJEoKFCQkIcBfegc8dNhTeBuX9Q1hCMC0+FuE+whUspyQMRpCXcg8SQwhGD44ULfU+Ki4SUYG+ip9YDCEYFhksWl01KSYUiVHBisMzBEB0sC8y44WN6Jhwf4xIjATDKH/2Bvp6YHy6sIEbEuSN8ZlDwKpgeHkaMGXcd7e6qlTdMURUKMJrpeK/JWDhbxgPd2206Lyh3vPBKMzjIGAQZEhCgF78i5rsOxU6RlmXRgIGgfpoRLoNE5033HcUPFhPRclZBAT+ukAkeqeKzkv3S0aAzlcxw1vjiVH+6aLzhvvGIso9SFGIhgBwZ/WYHpIhOm+YTziSfSIUeRMIAC2jwaLIdNF5sV6BGBsUo9grQgjBysHi7yPMwxszo+IUM3hKsTZenOHv5o4lccMUMzhKcW9yuugcT50OK1NSVDHuGz5cdI5Oo8GaEWmKPUgcpbhvZLroHJZhsHZcumIGTynWjhNnEEJw72Txz8wV455J4gwAWDMjXXZRtr66e3qGy+PSq2ZnKA6dEEKwclaGSyN5+YIMxZ4XhiFYMmc4dAqbBX6XdMcQUSGDJhiJAS8q+EsGGsYTwwJfcTlTz3pjYshzyhjEgEkhv3Q5U8cYsCjiadkEAgKWaLA44mmXP2oNo8EDg5+QzQAAhjB4IOaHLnvNsITBj+MfltUgr1cEwE/iH3bZB4YQgheS1kLDsAoMHoJfJt0DA+u6R8uvh98FA6OVzaAAfp12F7y1rnNAXstcAE+NXtHC9ErGPAQZXMemfz16Fvz07ooW8RdGTke0l6/Lec+Pm4wQD2X5Dz8ZOR6JAa4brf103HhE+fgoYjySmYmMMNddpB+bMBpDgwJkMwiA1RmpGD9YfFMDAPePH4GUiBD5DALMT0vAzCTh0E+vVoxLxai4KNnfK4YQTEmOxcKR4psaAJg/Zhgmp8XKZzAEIxOisHyy+KYGAKZkxmH2uATZ9V1YhiA5NhR3z3VtkI0cPgiLZqXJ9u6wDMHg6EDct3yMvD/8juqOIaJSUd53I87vp5LnE7DQMl4YEfoh3LVRkv4mznsOxgb95MYVXDMYaBkDZke+CV99jCRGovd
4LAj/Uc/CJ42hITqsjn4ZYW6uH04AMMw7DQ/EPAEGjKQF1snQ4JHYnyDWUzjWejMjDs8kPAoNkWYoOI0pBj+JfxipvtJirQneUfhd2sPQMRpJnhHS8z/PDluNCUEpkhhxXiH46+j74cZqJS0avTOeT16IeRFpkhjRnv74aPK98NLoZS1MP0+dgdWxIyTNDfPwxhez7oav3k0W40dp4/HwsFGS5ga6eeCLhasQ7O4hi/FwWiZ+nDlO0lwfgwGfLV+BCG9vWYzVKSl4btJkSXM99Tp8uOYuDA7wk7XALkhOwMvzpktaMA1aDf7xwFIkhAXJYkxNiMXvls+WxNBqWLz98CKkDQqVzCAEGBUXhdfvny8p1MYyDH7/6HyMTIiSvIgzhCB1cCjefHyRpFNKDEPw4qNzMDEjVhqghxEXFYS3nlkKg8510jkhBE8/NhMzJkjP82AYgqgIf7z50gq4u0lrPPld1zfWfbe8vBy/+c1vcPjwYdTX1yM8PBz33nsvXnjhBeh00m7e/0L33V7VG3ejpPVNWLhaELD9FDtjAfAIMIxHYuDLko2QvqownsD5pvfQZa/pl9H7b2FuIzAu5Bn46oST/IRUaryEA3X/QoutCsz/Y++84+Mor/39zMw2rXq3LMu25N57xdjGmN4xvUPKTbs3NyG/VHJDcnPDTSC9h4TeTDO9mWKDsY1777Ykq3dpe5t5f3+sZFw0s7M7gkvZLx/xAb1n32eb5j1z3vOeg4J2EqPvd0MyxnHu4K9S6jL/R9qng969PF3/EI3Bo8jIp+R19P1umHsEV1bczLBM/SQ/fUY1/zzyBEf8iRhD+ELVVYzLSX6f9ZC3gd/tf5bdnppjLeyPV9/vhrlL+cboS5hZYM6ZOl5HvK38YtdLbOqsNmQMdRdy+/hzWVSafOLaUV8nP9n6Ku+3HDnWXr4/Rrk7l+9OXsp5Q8YnzWj0e/jxB2/wdv0hpH7atfdxB7mz+e60RVw+wpzDdrzaAn5+/N6bvNHbO0aPUZyRybdmzee68cbbJf2pOxTkzrff4eUD+xH9MPpa0ednZPCN2XO4ZVriLYCT5Q2F+Z83VvHCrn2ovQ3O+mPkuJx8ad5MvjR/VtKRgUAkyt2vrmbF5j3Hmv4dT5GkeD+TTKeDm0+bzlfPmJN0Lk44GuN3L63h6bU7iMT6YfT+f4bDzrULpvD18+cnfYw5qqr89fm1LH9nO8FI9NjzPp4B8WO0l58+mW9eviDprQxV07jvufU8/uoW/KEIssQJTQml3n/ZFIULF07gm9cuIsOV3Mk3TRM8tmIDj67YgM8fRpalU7aFJElCkSXOWjSOb35hCZnu1HOKPg4ls35/ZI7Ia6+9xvLly7n22msZOXIku3bt4ktf+hI33ngj99xzj6k5Pk2OCIAQGh3BtdR5HqUrtAlV+JGwYVfyKMu6iCHZVyfMCUnMEDQFt7C3+xkaA5uJaUEkZBxKNlXZSxmXdxm5DuuM+sAeNna+xBHfFiJaAAkZl5LFuJwFTC84jxLXcMuM2sBh3m1bye6erYTUeOGfDMXNlLxZLCheSoXbGgPgiO8orzevZkPndgK9BcncNhcz8idz7qBFjMweCEYTzzesZXXrdnyxIEII3DYXswvGcOmQBUzMHW6pfDdAta+NJ2s38HrjTjzRIJoQuG0O5hSN4Jrhc5lZYJ1R6+vkiSNbeOHoTrojQWKaRpbdwayiYdwwYibzS5MPhZ+sOl83jx/YzrNHdtERChDTVDLtDmYUl3Pz2BksGlxluf5Is8/LY3t38PT+XbQHAkR7GZOLB3HzpGmcOWyE5fojbX4/T+7axfJdO2nz+4moKm67nfElJdw0ZSpnjxxpuTZIpz/AM9t38+TWXTR7vURiKhl2O6NLirhh5hTOHTcKh4UkWoCeYIjntuzhyQ07aOrxEo7GcNltVBUXcO3cqZw/eQwuizkIvlCYlzbtZfn7O2jo6IkzHDaGFuVz9WmTOX/GWNxOa3f2gVCEVzf
s46nV2zna2k04EsPpsDG4MJcrF03mgrnjyMqwtnCHIlFWrt/P0yu3UdPURTgcxeGwMagwm8vOmMwFCyeQk5larl2fItEYq9Ye4NlXtnLkaDuhUBS7w0ZJYRYXnjWZC86cSJ7BseNPkj4Rjkh/uvvuu/nrX//KkSNHTNl/2hyRkyWEMLU4+CN7aPU9Q1htQtNC2ORs3I5xlGRdkbBeiVlGd/gQNd6X8EebUUUQu5xFjqOKqpxLyLAlZgAJOZ3hOnb3vEFPpImoFsShuClwVDAh71xy7CUDwugIt7Ch8x3aw82EtSBOOYMi5yBmFyyh0DkwjPZwB6vb1tAUbCakBnEqLkqcxSwsPo2yDOPj1OYZ3bzR/AFHA834Y0FcipNSVwFnlc5mWGaZ4WP7OInfKy8vNWzhoLcJXzSES3FQ6srlgvLpjM4ZGEZXOMCKo9vY092ENxrCpdgpcWVz8dDJTMovHxBGTzjIs9W72N7RiCcSxqnYKM7I5JLhE5heVJ7w8WYY3kiYFYf2sLmlAU8kjF1WKM5wc9GIccwZNGRAGIFolOf372VDYz2ecBibLFOY4eb8kaOZXzE0oZNnhhGKxnhl337W19bRHQyhyDIF7gzOGj2ShVXDB4QRicV4fc8h3j9cS08ghCxBnjuDJWNHsGhUZcLS6GYYUVXl7V2HeW9vNT29FWFz3S5OH1fJkokjEjp5ZhgxVeO9XdWs3nmYHn8QTYPcTBfzxg3jzKkjE0ZOzDBUTWP9zhpWbT5ElzeIqmnkZmUwc1wFS2ePTrh9Y/b6/knUJ9YRueOOO3jttdfYtKn/rp3hcJhw+MOyux6Ph4qKik+tI2IkIQQdgZdo9PwLX2RH77aKRjxYGf9DlpApdJ9Pee6XyXQkHw4XQtDgX8X+7kdpD20/gSEhI4iHFcszFzM2/yYKXRNSei3Vvg1s7niKusD23nlFLyOebyIQVGXNZWbhlZS7kw+5Axzw7mBV64sc8O1A7mWIXoaEhIbGmOwpLC6+mFHZqTH2eQ7wctNrbO/eidT7vE9mjM8Zy/ll5zAlL3GyW3/a66nhmfq3Wdu+41jYWOtlyEioaEzMqeKyIWcwv8hcrscpr6OngYer3+Ptll1oQiAdz5AkVKExIXcI1w1fwNJBk1K60B3wtPKvA+/zSn28kdqHjHhysSo0xuUO4qaRc7m4IrXTIIc9Hdy75wNWVO8iqqm9z/1ExujcYm4dO5MrqyanFEk56unm7zs38vSBXYTV2AkMRZKJCY2q3HxumzCDa8ZOTqnKaqPXwz+3bmb5np0EotFjW0THM4bm5HLLlOlcN2kyToPjynpq9fm4b8MWlm/fiS8cOYEhyxKqJijLzuLGmdO4YfoUMuzJF83r9Ad4YN0Wlm/cQU8ojNI7L3Dsv4uzMrl+9hRumDuNrBSiHD2BEI++t5Un1m6nyx/sl5GfmcE186dw/enTyHUnH4HwhyI8vmory1dvp93j75eR63ZxxemTuX7JdPKzki/+FwpHeeqtbTz55jZaOr39MjIzHFy2aBLXnTuDorxPf1Gyk/WJdEQOHTrEjBkzuOeee/jSl77Ur82dd97JT3/601N+/1lzRDQR5UjHHbT6nyLudBjVjIgnXY4q+jVFmRclwVDZ1v5bDvYsT8iIOyiCWSU/oipHv8DayRJC8H7b/WzseKLXATFixMcXl36NaQWXJsV4q3UFrzUv7zfX43j1jZ8/6FrOKLkkqQX2taaVPHrUPOOSwReybEhyjFeb1vLHg08eczj0GRIagkvLF/GlqkuTOgX0WuM2frrzaQDDWiR9jEuGzOR74y9JeFroeL3ZuI9vb3waTQhDRp8zd8GQifxi+iU4klhgVzce4SvvPkNUUw3rnfTlGSwdMoo/nHYJGTbzC+wHTXXc+sazhGLRhAyA0wYP4+9LLyXLZI4bwLbmJm554Vl8kbApxoyycv554aXkuswvsHtb2rjtyWfpCgQT1oaRJBhfUsK/rrrMsArryTrS1sl
tDz1Dm9efkCFLElVFBfzrpsspzTG/wNZ39PDlfzxDQ6cnYXE3WZIoL8jhH19expBC85WbW7t9fPWPz1DT0pWYIUsU52bx129cTuUg88UrOz0B/vPXz7L/aGvCI8aKLJGblcEfvrOM0UMTn976NOkjdUS+//3v88tf/tLQZu/evYwd+2HSXENDA4sWLWLx4sX885//1H3c5yEiIoTgUMd3aPM/h/lqT/HL1JjiP1PoPtcUY0vb3RzyPJX085tdcieVOReYsl3T+i82dixPmrGk9BtMKTDn8LzVsoJXm59ImnF+2XUsKbnElO0bzW/xcO3jSTMuHnwBV1ZcZpKxnt8eSJ5xyeCFfGXkMlO2bzbv5IfbkmNIwIXl07lj4jJTTtXq5oN8bd3jvbEiswyJsweP4zezrzAVGVnfUssNbz2OJswzZEni9LJK/rnoSlP5H1tbG7nqpSeICc10RVNFkphRWs4j511pKmqxt72NZU89RlhVk2KMLy5h+eVXm4paVHd2sezBxwhEjJ2pkxmVhfk8eeM1ZDsT5000dHu44u+P4QmFTNe8UGSJwbk5PPnla8l3J44otHl8XPP7x+nw+pNiFGZnsvyb11GUk7hQW7cvyI13P05TpycpRrbbxaPfvY7BhYnXIF8wzBf++wmONneaZsiyhNtp5/6fXM+wQfmmHvNpUDKOSNKxzNtvv529e/ca/lRVfXiSorGxkTPOOIP58+fzj3/8w3Bup9NJTk7OCT+fNbX4HqPNv4LkSk7GbQ+0fZNQtC6hda3vtZScEICNrT+jJ3w4od1h77qUnBCAt1v+THPwQEK7g95dKTkhAK80PcZh356Edod8R1JyQgBeaHyZbV07EtpV+xv5/YHUXsfzje+yqnVLQrv6QAf/tf3JpCubCODFhi08X9//dunxag16+eYHTyblhMQZgtcb9/DQofUJbbvDQb646mmESO4vRBOCdxuP8Odd7ye0DUQj3PL6M0k5IRAvGrapuYG7N72X0Dasxrj1hWeJJOGE9DF2t7Xys/feSWyraXzxyRVJOSF9jCMdXfz4tTcT2goh+NpjzyflhMSfm6Cx28P3nn3NlP3tD72clBPSx+jw+vn2Qy+Zsr/jwdeSckL6GN5AiG/9/QVTvWfuun8ltUk4IRA/MRMMR/nWb1ZYLl3/aVXSjkhxcTFjx441/Ok7ntvQ0MDixYuZMWMG999//wA0Q/p0SwiNhp6/k1rJdoFAo9n3aAKGYF/XQykyACQO9iR2YjZ1PJly7xgJma2dzya0W932Usq9Y2RkVrclvkC93vSmJcbLTYkvtC82vJdStVeIRxOeqX8rod0zRz9AQ0u5ovrD1e8mvNA+VbOZqKamzLj/0LqEpeufOrKDQCyS0isRwP37NxFWY4Z2zx3eS3c4lFJvFw3BI3u34Y9GDO1eP3yQFr8vpTL6mhA8vXc3XUH9FvIAq4/UcLS7J2XGq/sO0uTxGtptrG1gf0t7StU/VSF492AN1e1dhna761vYWtOYGkMTbK1pZE99i6FdTUsn7++pSZlxoKGNLYcaDO1aOr28ufFAStVYVU1Q39rNuh01ST/2s6CPzDPoc0KGDh3KPffcQ1tbG83NzTQ3N39UyE+8ekJrCav1pN6AQ6XF+ziaCOtadIZ30xM5lDJDoFLjfYmo5tO1aQ/X0BjcnXLvGIHKAc9qArFuXZvOSCv7vFtT7h2jobHXs4WuSLuuTU+0hw2dmywx9nkP0BBs1LXxx4K82bLBMCfESALBIV89B7xHdW1CapTn6jZaappWF+hgS2e17nhUU3msepMFVwdaQ17ebT6oO64JwYP7N1lqA9YTCfHa0f2640II7tu12VKjvGAsxnOHjKNtD2zfaumos6ppPLV3l6HNI5u3WW5c+OT2nYbjj36wzVIvFEWSeGKTcdRw+drt1hiyxPK1xoyn39thnfHudkOb51btSPmGo4/x5JvbUn78p1kfmSOycuVKDh06xFtvvcWQIUMoKys
79vN5VYtvOfHCZqlLFV46A2/ojh/xPIdkmRHhqHel7vju7tcsMwQae3v0Q8MbO1elHHH5UBIbO/VD3Gva11vufikjs7p1je746tYtxITxHXoiKci83rxOd3xVy278qr5zaoohyayo36A7vqblEJ1hv0WGxPLqzbrjH7QcpcHvscSQJYlHD+pvZe1sb+Fgd4elT10CHt67TXf8cFcnW5ubLDmGAnhkp/7C1+Txsqa61lLjQk0IHtuqv4B3B0Ks3HvIUhdaVQie3ryTmNq/Ix6MRHl5yz5rDE3w0pa9BCPRfsdjqsaKtbssM97aepAef6jfcSEEz76zw9JnrmqC9btqaOk0jlJ9FvWROSK33HILorcq4Mk/n1cFo4fhlIqryUohFNO/O/ZEavup6pqcJBR80Xrd8e5I44AweqL60bH2sPXImQR0RFp1x1tDrSlvy/RJQ6Mt3KY73hhqR5EsOoZoNAT1GfWBDpQU+uucwBAaR/360aNaf2dKjf5OZAhqfB36DJ9xCN+MNCGo8erPU+vptswQwFGv/jxHe6wzABq8Ht3rZV13j0UXOq7OQJBQtH9HubEn8ekVM/JHovQE+1/A2zz+Y1VXrSgSU2n39O8o9/iDBML9OynJSBOC5s7+HeVwJEaX13grzawa23oGZJ5Pkz7fSRsfs1Rh7Y4S4vkVqsG2SUyzzgCIiYDuWHgAGAJBRNNnRLRwyls/fdLQCGv6F4eQGrIcEQEIqPqMoBoeEIY/1v+FPM6PWAoJf8jQj6oEYhHLVVXjDP3cioFiBGL6i44RPxkFY/pRLn/U+qIH8YVPL98lMEAMAH+k//ckoPP7AWWEB5ChM9dAOCEfJyMQGrj35NOitCPyMUqRrBetEQgUWX8eu8FYMrJL+sfhnHLio3KJJCHhkPXrGDhll+WtGRkZp6x/dNCluAZkAXcr+gy34hwQRqZNv66EW3EMiLOTadM/yum2OQbk7jjLpl+DY6AYboNaIpkG/AFjpFAsrD8pkqR7TDjTREM1s9IrPJaZRL2UlBkWS7ufMJdLh6Hz+9QY/f+NuJPsLWPIsFiK/tOotCPyMcptH4XVHBGI4bJV6o7mOKoGIH8jRpZBv5p8R4VlJ0FDJd+hXwK82JW4bXoiCaDYqZ+TVOYalHKiap9kZAa5SnXHh2SUEBPWQs8KMkPd+qXlh2UWJzyNkpAhyVRl6ZfIr8wqspSoGmdIVOXoF22qyim0ND/EC7WNzNWfpyrPep0GGajM1Z+ncgAYEjA0N0+3tsuw/LwBcG+hODMTp06/msF5OZZ78gBku5zkZvTvSBfnZuG0Wb0mgtNuo1ineFpupstynxmId/wdXJDd75jLYacwdwBu0CQYUmK+QNtnRWlH5GNUafa1WM0Rscl5FLiX6o5X5VxqOX9DkTIYmnWW7vjEvHMtb5vIksK43DN1x2flLyb100V9EswqWKw7elrR3KSqlvYnDY3FJafrjp9ePA2nbO1uSUXj3EH67eoXlY4n2yBiYoohNC6rmKM7vqB0BCWu/i/C5hmCaypn6o7PKh7C0CxrC6yG4PpR03XHJxSWMqGgxFK+iwbcOG6a7vjwvHxmDx5i+UTLjZOm6o6VZGWxeGSlJYYsSVw/Xb8DcW6Gi3MnjLZ8aubqGZN0y++77DYumTXB8omWS2eO123Op8gyy06baJlxzozRZBuUlL9iyRRLW4uKLLFgStVnstx7IqUdkY9ROc7ZvdGMVL+sMqVZ1yFL+qHGAtdY8p1jU2ZIKFTlXIzNYEujwFnBEPcUC3VEFMbmLMGl6Besy3MUMj5nhqUaHxNzZ5Fr1y/NnG3PZk7BLGuMnPGUGkRE3DYXZ5XOscCQGJM9jKos/eiRQ7ZxWcXslBdXCRieWczkPP0omCLJXFs109ICXpaRy2klI/SfhyRxyxh9R8WMCpwZnF0x2tDmlonTLUV3Mu0OLh4x1tDmpslTLZ1osSsKy8YZ95e6Ybo1BsBVU4z7Ml03e7Kl0yaaEFw
907gv01XzrDFUTXDVfH2HCmDZAuuMKxcaMy5ZNDH1S3sv44ozp6Y+wadYaUfkY5QkSZTn/hup3elLyJKdQdnXJbQcl39zygyQGJl7RULLmYVXWoiKCKYVJC6Nvqj4Qks1PhYVX5jQ7ryys1LOr9DQOL/snIR2F5cvJNUbJQ3BlRX6kaM+LRs6B5uspFgqD26qWpSwxPsVw6bjVGwpOyNfGDU/4R3jsqpJ5DhcqTPGzk7YmO7iqrEUZ7hTiiZIwC3jpyXsaXN21UjKs3NSZEhcO2ESOU7jKNeCymGMKCxIiSFLEpdMGEtxlvF2wrSKwUwqL00pmiBLEmeOHUFFQZ6h3ZjBxcweWZESQ5El5oysYHSZcQfxiuI8Fk8egZwiY8KwUqZUGpeeKMrL4ty5Y1OKiiiyxPDBBcyZMCzpx34WlHZEPmaVZF5Jadb1ST4q7iCMKf4LTlvi3ImKrKWMzbsxBYZg3qD/JscxPKF1ZdZs5hXdlCQjrrPKvkWJS//OuE9VWeO4eHBqjEsH38LwzDEJ7YZnDuO2ytQYVw65jEl5iTsWV7hL+c6YZD+PuK6qWMppRcZ3YgBlGfncNfVa6O0UnIyuqJjDBYP1txr6VOTK4i/zrkWSkiNIwCUVU7iualZC2xyHi/sWX4kiy0ld0GUkzqkYzVcm6G9h9clls/PguVdgl5XkGJLEgvLhfGvGaQlt7YrCgxdfTobdnpSjIEsSM8rK+MGCRaZs/3nlpeS4nEkxFElibEkxd56d2MGVJIk/XXMxBW53Uo6CIkkML8znrssSO+oA99x4AaW52ckxZInS3GzuvtFcb6yf3XQOw4rzk2bkZ7n5zZcvNtWL6Xs3L2XU0OKkHB5FlshyO/ndty5LyVH6LCjtiHzMkiSJqoI7Kcu+pfc3iRK1FCTsjC3+G/kZZ5jmTC78BuPyb40zE3zMEgoSMvNK/4eKLP38k5M1p+h6TivuYxi/jj7G2WW3MyHP3MUJYGHxBVwy+GaAhNsbfeOXDr6FBcXnmWYsLjmd2ypvQkIyzbi6YhkXDT4/CcZ0vjv2RmRJNs24dug53DI8cVSnT6eXjOOX067HJskJ64r0LVzXDjuN28dfZLqL8NziSv4271qcii0hoy+qccWw6fx8urkLOcD04iE8vOQaMpTEi3ifI3HhsHH8/rRLTDsWEwpLWX7BNWQ7Ei/ifa9j6dAR3HvWpQkjLn0aUVDIk8uuIT8jIyHjWIffIUO5/+JlpprqAQzJy+XxG66mJCsr4WuXen+mlpfx0LXLcJs8eVOak8XjX7ya8twc04yxg4p5+NYrydY5ZXKy8jMzeOgbVzO8ON9U9FCSoLK4gIe+cTX5mYmb6gFkZzj557euZHR58bHnaSRZkhhcmMsDt19Nicm8jQynnT9/9womjSgzzSjKy+LeH17D4OLPX5Jqn5LuvvtxKpnufZ9GdQbeotFzP57wWj50SDQkFAQqsuSgOPNyynJuw21PHEHoT82B9ezvfpzmwNpjzoJAQ0JGoCFjY2j2uYzJu5Y856iUGHX+7WzpfJYjvvW9d+NS79zyMdbonEVML7ic0gzj/Xs91fj3s7rtZXb1bAT6Wsx/+DpAYlLubBYWX8DwzNQYR3w1vNa8kg2d8XLpfW3s++7/BYJpeVM4t+wsxuUkjrb0z2hgRcMqVrVuRhUasiShCYGMhCC+3TMrfzyXDlnE9HzjPAR9RgtP1KzllcatRLUYsiTHGb1XeFVozC4cyTXD5rOgJDVGja+Dhw9/wLO12wirUZRehnQcY2bhMG4cMYezBo817YQcr3pfN/fv38QTh7YRiEWxHWPEP/+Y0JhWVM6tY2Zy4bBxKTGa/V7u372Fx/ZtxxMJ98uYVFTKrROmc+mI8bpJl0ZqC/h5cPtWHt25ne5wCJvcyyB+YxLTNMYUFnHLlGksGzsBu5L8KZLOQJBHt2zj0S3b6QgEjzEgvtjFNI2qgnxumjmNKyZP0D0pYyRPMMTjG3fw6IZttHr
9/TIq8nO5cc5Urpo5WTd51Ej+UISn1u/gsTXbaOr29ssoy8vmugVTuWreZNwpHP8NRWI8+/4OHl+1jfr2nn4ZxbmZXL1oKledPtkwQVVPkWiMF97dxfKVW6lt7kKR5fgWsIh3242pGvk5GVx55lSuOHMqeVnmnKlPk5JZv9OOyCdAwWg1bf7nCMca0UQIm5yN2zGW4sxLscnGr1vTQkiSgiQZ3934og3UeF8hEG0iJoLY5UxyHSMYln0+TsXYE1e1MJIkIydgeKOt7OlZSU+kmYgWxKlkku8Ywvjcs3Db8hIwIiBJKAkYnmgXmzpX0x5pJqQGcSkZFDnLmJm/iBy7MSOmxYsO2RKcYumJeljTvo6mYBNBNYRLcVHiLOL0ovkUOPWTX+OMGAINu2x8gfRE/bzVspGjgWYCagin7KDUlc/S0tmUuoyPscY0FQ0NR4LX4Y0GebVxG4e8zfhiIVyKnVJXLucNnsbQTOM99ZimogoNh2wzXOD90TAv1e9kT3cTnmgYl2Kj1JXNRRWTGWFwVLePEdVUXIrdkBGIRXixZi87OproiYRwKgolGVlcPHwC4/L1jxzHGVovw/h1hGIxXqnez6aWBjyRMHZZpjgjk4tGjGVSkf7RaYj3hQmrMTJsxq8joqq8fvggHzTU0xMOYZcVCjIyuGDUaKaWlhk+Ns5QybAZv46oqvLWwSOsP1pHdzCETZbIz8jg7DEjmTmk3PCxmhCEojEy7MYMVdNYfbCaNYdq6QmGkCWJvAwXZ44dwZzKigFhaJpg7YFa3t1bTU8gXswv1+1i4bhK5o8eZriFIYQgGI3hstkS2m3YX8fqnYfp8cebIOa6XcwbP4wFEyoNnU4hBMFIDJc9MWPbgQbe2XyIHm8QVdPIyXQxc1wFC6eNwDYAR5c/qUo7Ip9hCaHSE3yLNu+D+MLrEcQXV1nKJN99IcXZN+F2GGepJ2ZotAbfp8bzBO2BD9CIV/pTpAzKMs9keM415DknpXQH+iFD0BDYwu7uFdT5P0AVfQwnw7NOY0LeZQzKsM6oDezhg45X2O/ZRLSXYZMcjMqeztzC86jMtM6o9h/mnda32Nq9iYjWx7AxPmciZ5QsZVzOBMvHhA/5jvJK47u8376VkBbuZShMyh3NBYMXMj1/guUy74e8TayoX8ubzdvxq6EPGXnDuKLiNE4rGo/N5LaEno5423iydgMv1m3HG+tjyEzMG8J1lXM4s2wcdjn5O+njVe3t5PFDW3imZgfdkXjVW0WSmZBfyk2jZnF+xTjTWx96qvP28NiBbTx5cAcdoUAvQ2Jsfgk3j5vORZXjEia0JlKTz8vju3fwxJ6dtAX8iF7GiPxCbp40lUtHj7NcdKzV52P5zl08sX0HLT4fgnhUYHh+HjdMncrlE8aT7bRWg6PTH+Dpbbt4YvNOGj0ehIhvrVTk5XLdzClcNmUCeTp1RsyqOxDiuc27Wb5+O3WdPccYZbk5XDV3EstmTqQgS7+Aohl5g2Fe3LiH5e9t52hb97GoVnFuFlecNonL506kOPfzd/RWT2lH5DOqTv/zNHT/nKjaQnwr5+R6IfHfuR2TGVrwK9wO4+N//anJ/za72/+XoNp8bIvoePX9LscxmslFPyHflbzTc9T/Ae+3/A5PtNGQkecYxsLS2ylzJ07WPFnVvl280Pg32sMNyMinnL7p+12Bo4yLBn+ZkdlTk2Yc8R3i4dr7aQjWJ2AUcs3QG5iap1/fQp9Rz58OPsphf10CRi63VV7O6cUzkmbU+Fu4a89T7O45iiLJpxRHk5HQEBQ4svjqyPM5b3DyR2zr/J38ZPtzbOqoQZGkU46d9jHy7Bl8feyZXD18dtKMxoCHH2x4iTUt1YaMbLuTf59wOreNnp20E9oa8PGDta/xdv1hZANGps3BVyfN4WuT5yV9iqIrFOSH76zktSMHkXq37o5XPK0cMmw2vjBlBt+aPT/pLSNvOMx/vfkWL+2LdyrujwHgsCncMHUq/+/0BUl
vGQUiUX7++js8v2MvqtA4eaXpY9gUmSunTeL7Zy1MessoHI1x98vv8vTGeGO9/hYzSYo7opdMH8/3L15sOjemT1FV5Q8vvs8T720j2tsX52RO32d87vQx/OiqJWSZzI35LCvtiHwG1dzzZxp7fmnSWkaWnIwovo9sV+IM/z5V9zzOro67+PBSl4CBwsxBv6XUvdA0Y1/PK7zb/Kve2RMx4umjZw7+MSOyzSfq7uxew9N1v0X0/pOIAHDZkG8wLX+Jacb27q387fCf0IRq+vjvdUNvYnFJ4pMKHzL28/M9fyOmxUzXvbh5+KVcPsR8wvGO7hq+s/VfhLQomsnqrLdWLuULI842zdjT3ciX1z+IPxY2XQH2xqp5fGf8uaYdhQM9bdzwziN0R4Kma2tcXTWVn88837SjUOPp4trXHqc16DPNuKRyHL8+/ULTFUobvB6ufe5JGrwe04ylw0fwl3MvwmHSUWjz+7lh+VMc6eoyVVJfAuYPG8o/Lr0El8ny9d3BELc+8gz7WtpMMWRJYmp5Gfded5luOfiT5QuF+bf7V7DjaLNpxpiyYv75xWXkmcz7CEaifPPeF9hw8OgpjpQeY3hpPvd+/QqKcqxXWv00K5n1O31q5lOgdt9jSTghABqaCHGo7VYCkT2mHtHge6XXCQFzNUg0NGJsbP4WXaHtphg1vvdZ3fyr3oXbDEMgUHmr8b9p8Ou3dj9eh33bearut2hophyEPmfl2fo/sd+z0STjIH87/EdUEUuqBsljRx9iY+cHpmyP+Or4+Z6/EU3CCQF4sOY53mxeZ8q2xt8Sd0LUiGknBOD+6jd58ugaU7b1gS6+vP5BfNFQUmXoHz6yjnsPvmvKtjng4aZVjyblhAAsP7KNe3a8Y8q2IxTgutefSMoJAXihei93fvCmqa7jPeEQN7zwdFJOCMBbNYf57tuvm2IEIlFuffpZqk06IRD/S113tI7/fPkVVC3xZxiOxfi3J55jv0knBOIRmW0NTfz7Uy8SVRNXho6qKt985CXTTkgf40BTG1974DnCOh2Hj5eqafzgoVfZeLDOlBPSx6ht7eJrf1sxoI3wPutKOyKfcEXVDuo6f5zCIwVCRKjt+E5ihuZje9udJF8WMO4obG27I+FFMKaFeafpF0nO3yeNd5r/By1BzxZVqDxd9ztSLQ3/TP0fjiW06kkIwX3V/0hq4T5eD9X8i5Cq30m3j/GHg4/0Jr4m/1r+evgJPFH9Ds19unvvs3EnJAXGnw68SFuoJ6HdXTtfxh8LpcbY/xa1vo6Edv+7/S06w4GUqoz+fd86dnc1J7T79Zb3aAl4k2YI4JH9W9nYWp/Q9s+bPuBoT3dKjOcO7GVVbXVC239t2sT+9vakGZoQrDx0mFcPHExo+/imHWxvaEqJsbb6KM/tSHzz9PzmPaw/dDTpJomqEOyoa+KJ9TsS2q7cdpB3dh5OnqEJDja289A7m5N63OdZaUfkE64O/3ILvWM0gtFdBCLGf3QN3pdQRZjUFnANf7SWjpDxH90R7yoimi8lhkDgj7VT5zeOJuz3bMIX606xUqogqPrY41lvaLXPu4e2cGvK1VjDWpgNncYRi4O+Wqr9DSmXIVeFxtstxu9Vta+F7d3Vlkqdv9BgzGgIdLGm9UDKZcgVSeKpWuMoVXvIzyt1ey0wZB49ZPzd9UTCPHN4l6XX8dBe44heKBbl8T07LDEe3LnV0Caqqjy8bXvKHY5lSeLhrcYMTQge3rg15TZREvDQhm2GNzZCCB55f2vK1YoR8OjarWgJSr4//u62lHvHaELw5JrtxFRrPbk+L0o7Ip9gCaHS5n0QLDWYU2jzPmzAEBzxPGZh/nhyaY3ncUObXV3PWOrYKyGzq+tZQ5sPOl62zFjf/rKhzTutb6bcNybOkHir5Q3DC+0rTe9aOgEjELzUtNowavNc/TpLDA3Bivp1xDR9J/np2k2WTiSpQvDM0c0EYxFdm6eObDMdNu+fobGiZieeiH6UasX
hXUTUxKF8fYbg1dr9tAb0o1QvHTqAN6L/Os0wVh+toc6jH6V6+/AROgKBlBmaEGxqaORAe7uuzbrqo9R3e1J2bwVwoLWdHY36Uaoddc0cbOlI+XMXQEOXhw8OH9W1OdjYzrbqxpSdNoAOb4DVu46k/PjPk9KOyCdYgcguomqTxVlUugIv6o76ojX4ozVY6XQrUGn2v4Um+r9Y+6NttIX3W+rYK9CoD2wkovV/IQ2qfo74d1pm1AX344129jse02Js796acv+bOEPQFGqkNdzS/7gQrGnbklQ+RX9qC3dS7W/QHV/Zss0yozvqZ2dPje74qw07LV3IAfyxMBs69LccXjy621JUByCiqaxqOqzPqN5raX6IL+Jv1h3SHX/p4H5LnVshXhzttcP6WyevHDhgmaFIEq/uP6A7/tqeAykVfTteNlnm1T36jNd3WGcosszrO/Xfqze3H7TUrRfihctWbtN/HWl9qLQj8glWTNO/80hGmgigiXC/YxE18R68GQk0opq337Gg2jUgDIBQrP87voDO71ORP+bp//eqL+UtmZPljfbPCGsRojoOXbLqifb/eWhCwxsNDgijK+JPaSwZdYb152kPWWfISHQYMFqDfsufuiLJdIb0oxGtAZ9lp02RpGM1TfpTm89vmSFJEp1B/e9Ohz9gKqHVSEIIOv36jE5/AEthMEDTNDp9+u9VpzdgKZoXZwjaPQPzN/BZV9oR+QRLJEjOTG6u/he31PNPzDMSJZkmI43+GeoAMlSd9yTVBNV+GTrP12qU4njpbZvEzywNjEMVM3jfrbaoP8Yw2P4ZEIYUr8Cqy7C4sPYpajCPET8ZGeUkDBTD+HVY/zwExs81pln/9gogavBeqZpmJUh8TEaMtD5U2hH5BEtJUN49iZmQpf6rCtrk7AFigF3n+TqVgWM4dZ5vhjJwFQ0zlP7P/7t1fp+K3Lb+58pQnEl3z9VTlq3/z1yRZFwJStCbVbZNv0dGlm1gijrlOPQZOXbrDE0Ich36dSVyndaqfh5jGMxT4LLea0QTglyXPiM/I2NAvlm5BpVWczOS6wTcn2RJIsegIFiOyzkgW0y5bn1Gdob175UE5GVa/+58HpR2RD7BcjsmIklWv8gKWU79CpLZ9hED4IzI5DrGocj9//Fm2weRoRj3aDGjXHsFLp2+OFm2PAocg0j+CPKJyrEVkGfvv0eKU3EyJGOoZUchU8lkkKus3zFZkhmXU3Ws42uqcsoOKrOG6I5PK6hCsfjnb5MUxudU6I7PKaqyXHZeRmJq/lDd8dMGVVpe+ABmFeszFpQNt8zQEMwu1X+v5g0ZanlxVYVg9mD9z3xOhf6YWcU0jdkG88weNsRylCqmacweps+YVTXEcnRHFYKZlfqMmSMrrEeQpPg8aSVW2hH5BEuRsyjMvJIPO/OmIpWS7FsNGE6GZV9h6bQJaFTmXq87Kks2JuZfZnkBn5i/TNehkiSJuYXnW5pfQmJO4fnIkv77vaT0LEvbGjIyC4uXYDdoWHfh4EWWEjBlZJaWziND0b+rWzbkNFQLSbeKJHPWoKnkOvSjRNdUzra01aRIMmcMGktphn5k8PoRMywtfIokMb9kOJXZ+o7y9WOmWmLIksSUojImFpbq2lwz3lp/KAkYkVfA7LJyXZtlE1Pr7Hu8yrKzWVRZqTt+wYSxuE1WX9VTvjuDpWNH6o6fOWGk6cqoenI77FwwVb/z9PxxwyjNsxZltSkKl8xJvs3G51FpR+QTruKsmzi1p4x52eQicjOMS34Pz7nS0uJqk7IYnHmOoc3Y3Auw8nVTJAejc4zLik/NX4Iipd7MTEJmeoFxCfZZ+XNwyalfBAWChcWLDW3mFEwhR2frxow0NM4rO93QZnbhKEpdeSkzVKFx+ZD5hjZT84dSlVWcsvupCo1rKucY2ozLL2VqweCUI0iqENw02rh3zrCcfE4fnHpURBOCW8cZ9wAqzczinKqRKTMEcOuU6YYJlrkuFxePG5syQ5Y
kbpo21TBy43bYuWLaREuMa2dMNixX77ApXD13SsoRJEWWWDZrIhkGPWcUWebahVNTTlhVZIkLZ44lx6LD9HlR2hH5hCvDMYYC9+Wk+lGV5/0QKcHi7LYPYVj2VaS6rTGu4D90t2WOMWyFTCm4JqX5AWYU3oIjQY5GhpLJGSVXpcw4vfgysmx5hjZOxcml5VekzDijZCmFziJDG5uscHPlpSnNLyGxpGQOFW7j1vWyJPP1URemzFhUPJFxucZhZ0mSuH28sYOqJxmJecUjmF2of/fdp+9OWZLSV1eRJKYXDuGMslEJbW+fdjqSlHxMT5EkJhSUcN7wMQltvzlrHjZZSYlRlZfP5WMS331/dc7seOv6JBdYRZIoy87mmsmJIze3zZ1Bdgp5HIokUZCZwQ2zpia0vX7+VAqyMpI+YitLElkuJzefnrg55LJ5kxiUl50Sw2m3cdvS5Bs3fl6VdkQ+BRpa+EuynHNJ9uMqy/02hVnmFs2JRd+jxH06yV7RR+TezPBccw7G7KIvMiLbfGO5Po3LvYipBdeZsl1YvIwZ+eabvvVpcu5ClpRea8r2jJKlLC1NfoGdkjuNKyvMMZaWzuPqivOSml9CYnLuaL420hxjSelkvjbqgqQZ43Mr+K+J5hinl47mh5Mu7H2sOclIjM4p5TczrzF1RzqnZBi/mn0RUhIMRZIYnlXAvadfZaoh3dTiwfxx4cVIkmQ6+qJIEmWZOdy/9EqcSuJI3djCYv5+3sUosmx6EVckiSK3m4cvvsLUlsjw/HzuvexS7Ekycl0uHrxyGTkGybB9KsvN5t5rL8Nps5mOjCiSRIbDzn3XLaMws/8k6+NVmOXm3i8sI8NhN+0oKHLcQfj7rZdRlpc4Ly7H7eLvX7uc7AynaYYsSdgUmT99+VKGFueZekxa6e67nxppIkxtx3fpCqwgnjOit10jAxIV+XdSnH1zkowYu9rvotb7FBKKwdHe+IV7XMF/MCL31qTCl5pQ+aDtb+zoehIJWbcAWXxMMKPwZmYU3pIUQwjB262Ps6r1KRMMjQVFl3LWoBuRk0isFELwWvPLPNfwNCDpMmRkNDQWFS/hmqE3oBjkn/SnlxpX8a8jzxp2Eu5jnFEyh6+PvBa7nNz21EsNG7ln37PxVu06DEWSUYXGGSWTuGPCNTiV5PIAXm3YyY+3PUtU67+N+vGMBSWjuGfGVbiTPHXzZsMB/nPdc4TUaELG3JJh/PW0K8gxOC3Tn95rrOar7zyHLxrR7VGtSBKqEEwrHsw/z1xGoSvxwnq8NjTW8+VXnqc7HEJG6jdfqI8xrqiYBy68nNLM5PIZdjQ188UVz9ERCCBLUr/1RfoYIwoKuG/ZZQzJ7T9RXE/7W9r50uMraPH6EjKG5OXwz+sup7IwPylGTVsXX77vWRq6PLqMvt+X5mTxt9suY/Qg44jkyWro6OHrf1tBdWsXiiyh9nNEuY9RkOXmT/92CROGGkckPw9KZv1OOyKfMgUie2n3PUyH/ynESUXKbHIJxdk3U5R1NXalJGWGN3KEWs+THPWuQBUnFhZyyPkMz7maoTnLyLDpJ98lUk+knj3dL7C350WiJ1VLdcrZjM+7hHF5F5FtT/0PuivSysbO19nY8Toh7cTCQk7ZzcyCs5hdcA4Fzv5PsJhjdPJu2ypWt72FL3ZiCW+n7GRB0SIWFp9BWcZgCwwPK5vX8nLTu3SfVAjNIds5s3Qu5w06nWGZVhg+Xm7cyDN1a2kLn1gczi4pnFM2ncuGzGNMTuonL3oiAV6o28aj1etpDHafMGaTZM4rn8Q1w+cwMa885b15byTEitqdPHhgIzW+EwvpKZLEeUPGceOomcwoGpIywx+N8NyRPTywZxMHe04sCCgjcfawUdw0djrzBg1NmRGMRnnx4D7u37mVve1tJ4xJwJLhVdw8aRoLKoalnCsRjsV49cABHtyylR3NJ1b6lYAFw4dx07RpLKocnnIl04iq8ua+Qzy8YRtb6htPGZ8
zvIIbZ03ljNFVpiJT/Smmaryz9zCPrd3GhiOnNhecNmww18+fypkTRuKwpZasq2oaa/bU8MR721i3r/YU13Di0FKuXTiNs6aOwmlPPU/ts6S0I/I5kKp5CUR2EtN6kFCwKYVkOqbo5oMIIQhFthFT69FEAFnKxmkfg8M+QpcR0wJ0h3cT7WU4lDzynBORpf7vhoUQ+CJ7CMbqULUAipxFpr2KTId+BnxMC9Ma2ktY9QASLiWHEtc4FIM6F53hQ3gjdURFALvkJttRQYHTiBGlPniQQMwLCNy2bMozRmI3yGtpC9XSEakjogVxyBnk2csodVXpLiwxLUaN/wi+mA+BhtuWxXB3JU6DkystoUaaQ/WE1CAO2UmBo5ihbn2GKlQOemvpifrQhEamzc3IrArcBrU8moOtHA00EFCDOGUHhc4CRmVVGjA09nnq6Yr4iGkq2fYMRmeXk203YnRyyNeAPxbCIdsocuYyIXe4boRJExp7e5poD/uIqDGy7S7G5paR59CPHLQEe9jracQbDcYZrmym5g/TPR4shGB3VzNtIR8hNUaO3cWYvBKKXPp5Rq1BLzu7GumJhnDICkXOLGYUVWCX+1+8hBDs7WqjJeAlGIuS43AxOq+IErd+dKI96GdbeyM9kRA2WaHI5WZWSYVhcub+jnYavR4CsSg5Ticj8wspy9LfWugKBdnS0kh3OIRNkinIyGDWoCG4bPoL5KGODup7PASiUbKdDqryCyjP1b/mekIhNjc10h0KIUkSBa4MZpWXk2GwPVTd0UVdVw/+SIQsp4PhBXlU5Ofp2vvCETbXNdAdjPcBystwMaOinCyn/rWhrqOb2o5u/KEIbqeDoYW5DCvSj7IEI1E21zbQ5Q8ihCDXncG0oWXkZOhHyho7PdS0duELhnE77QwuyKFqUKGu/edVaUckrWNSNS8e/9N0+f5FNHZqA6YMx1zys28lK+NcJB0HI5Fimp8W/0vUex7GHz21f0OOcwpDsm+kJPMcZCm1QloxLUy1dyX7ep6mM3xq/4YC5xjG5V3J8KwzsSVInNVnRNjnWcOmzhdpCu0/ZbzYOYyZBZcwPncxjhRPzsS0GDt7NvJu2+sc8Z/KKHGWsbD4XGYVLMClJBfS75MqVDZ37uC15lXs9pzKKHUWc27ZYhYVzyNTp+hZYobGxo59PNewho2d+04ZL3HmcemQBZw7aDa5jtSOQWpC44P2wyyvXc97rftPuQstdmZz1bA5XFoxk0JnagwhBB+01fLI4Y282bT/lNB+odPNdVUzuapyOqUZqdXbEUKwua2Bh/Zv5uXafaccBc5zuLhh9HSuHT2F8szktj+OZ2xva+bh3dt4/tDeU2pgZDucXDduMtePn8LQnLyUGAC7W1t5ZMc2VuzdS0Q9ces2027nqomTuH7SZKoKUq8bdKC1nce27ODZ7bsJxU6spOyy27h88gSumz6Z0SXJbbEcr+q2Tp7YsINnNu8iEImeMOawKVwydRzXzJnCuLLUI8ufd6UdkbQA8IfW0Nh+G5ro25bod0cbULHbhjOk+DEctuFJMXpCW9je+hViWg/o7prLgIZTKWNq6b/IdOhHYfpTR2gfbzbeTkjtMmDEf+9SCjir/DcUOEcnxWgL1bL86B14Yx1ISDq5EnFGhpLNlRU/pdytX4egX0a4mb8euouOSKth7gqAS87gi1W3Myp7QlKM9nAn/7PnDzSGmo/ljvQnCXDIDr41+stMy5+YFKMj7OGHO+7lkK8BWZJ1S99LSNgkhR+Ov56FJVOSYnRF/Hxr0yPs6K47ltehx1Akmf+afCkXlk9LiuGNhvj6uqdY31ZjyJCRkCT4ydTzuKYq8WmL4xWIRvjGe8/zdsNhQ4bSm2PwoxlL+MK4WUlt6YRiMW5/51VePrL/WM6FEeNbM0/j36fPTYoRUVXueOtNnt6zOyFDFYJ/mzmL/3fagqS2jVRN43/eWMUjm7fr5mIcz7hhxhR+dPbipLaNNE3wu5Vr+Od
7m4wZvWOXTZ/AnZecabn+yudRaUckLXzBlTS030Z80TZTVEpBlrIZVvqi4XbN8eoMrmN7yxd7F1RzDEVyMaPsMbIc5hbx1uBO3mj4dzQRM9VZV0JGluycM+RPFLvMLeItocM8XPP/iGmRJBgyVw/9OcMyJ5tkNPDbAz8hrAZNde+Vev/5YtV3mJBrboFtDbVzx65f4o36TTMAvjn6i8wrNLfAtod7+Mbm39ER9ppkxL+B/2/sNZxbZu44Y1fYz83r/k5TsDupgmjfG38hVw+fa8rWEwlx7eoHOOxtT6oR3HcmLuHLY04zZRuIRrhm5WPs6mxJivGNifP5zrSFpmzDaowbX36aTU0NSRXBu2XiNH4yf4kpZySqqnz5hed5t7YmqWpDV4yfwC/POtsUQ9U0vrXiFV7fd9A0QwLOGTuK311+gSmHRwjBHSveYMWWPSYJccbpo4fzp+svwaakD5kmo2TW7/Q7+xlUKLKHxvYvg2kHAUBFE17q2q5B1RJ3sg1Eq9nR+tUknJA4QxUhtjV/wVTXX1+0iTcbbzfthEC8C7AmorzZ8G380ZaE9v5YF0/U3mHaCfmQofJU3Z10Rk5NwDtZgZiPvxz6hWknJM4QaAjur/4tjcGjCe1Daoif7/m9aSekjyEQ/PHgfRzyVie0j2gxvr/973RGzDkhcUZcv963nG1dhxLaxzSV/9j0cNJOCMAv97zEmtZTt6JOeU5C8I31TyXthADcs+ttXqnfbYrxH2teSNoJAfjTrrU8eWiHKdvvrXqdjU31SVfifWDXVh7YtdWU7c9Wr0raCQF4es9u/rJxgynb36x6n9eScEIg/t16bd9Bfv3OGlP2f1+9ISknpI/x3oEafvHyO0k9Lq3klHZEPoPq8PwWQYzk20eqxNRGenyPJ7Ss7b4XISKYd0I+ZES0Thq8iRl7upcT04KmHYQ+CTSimp893U8mtN3U+SJB1ZsCQxDTInzQ/kxC23Udb9MT7TK9eB9PUYXKyubnElq+2/YBLeG2FBjxRfPp+pcT2q1u3Ua1vznlsu33V7+a0GZN2wF299SnxJCQ+MP+N0gU5F3fVsP6tpqkHYQ+3bPz7YSP3d7RxJv1h1Jm/Grr6oS9Tg52tfPcob0p10T+zaY1hGJRQ5v6nh4e27E9ZcafPliPNxw2tOnwB7hv/eYUCXD/B1vo8AcMbbyhMH9f9UFK8wtg+YYdNHR5EtqmlZrSjshnTDG1GV/wVVIvCy/o8t2HMFgIomoPzf4XDOqMJJJGvedRNKF/EYxqQQ72vJgyQ6Bx0PM8MS2ka6OKKFu7Xk7aCTmesbPnTUKqT9dGExrvtr2Rcgl9DY1t3R/giXbrPw8heLUp9Ts2DY2t3btoDbUb2q2oX5NyKXUNwa6eamr8zYZ2j9esS5khEBzytrCr59QjnMfr4cMbLTXjqw90s67VOIL08P4tlhrltYf8vFl/auL38Xpk93ZLDG8kwstHTk38Pl6P79qZ8hFkiOeWrNi319DmqW27LDSYiCdOP719l6HNC9v2Eoml3ipDkiSe2rQz5cenZay0I/IZU7fvMctzxNR6AqF3dcebfCt6Iy6pK6p10h54S3e82ruS2Ek1TJJnBKj2vqk7vt+zlqDqtcRQRYyd3fqvY69nO93RxNtQRhII1nes0h3f5z1EY8h4gU8kGZk3W97THT/kbWC/96ilZnyKJPNCw/u640f97WzsOGKZ8WSt/p1vc9DD240HLDbjk3j08Ebd8a5wkOdr9lhulPfgPv0ogT8a4cn9Oy0zHti5RXc8oqo8tmN7ylGdPj24datulErVNB7ZtM0SQwh4ZNN2VJ0IkhCCR9aZ24bSkyYET2zYbsmZSUtfaUfkM6Zg+AOS3y45WTaCEf293Z6w/sXLrCRshvO0BncgWeo6DBIKbSH9u5j64F5kywxoCOrvO1f7D1hmCARHfKceke3TPu9hZIt/yhoaezz6d8e7eo5Y7J0cv3Pd3nVYd3x7V+JcGDOMzR3
60YrtnckldfbPEGxs13+uuzqaLbeQ13qP/Oppf2c7wZi1mwFNCHa2t+g+15ruLnoSbKskkgCqu7t0t2davD5aff5+x5KR0TyeUJjajm6Lnzp4gmGOdnZbnCWt/vSxOCLhcJipU+OdDLdt2/ZxID+3UrWuxEYJJRkmrEbVbpLPPzlZgqimH42IaD4LWz99BI2IASOs+ix1HY4zhGFUJaj6LS/gAH4DRiAWsBQ+75Mvpr/P7ouFkiqBryevAcMbDaW8LXMiQ387zhu1trD2yReL6I55BogR0VTCav/Ohseig3DiXP2/XwPJ0HNoPKEBZAT7fx3e4AC+VzqMtKzpY3FEvvvd7zJ4cOrlp9MyLynFgmGnzIP+PHqVVZMlyOjPI0s2Uu0G/CEhMSP5fqqnSjF4PxRJsfoyALAZMGwJuiubZhj0qLFJCgNx0N+mU6UUwC4rlqMVEH+u+oyBueTZDJyygWIYcQaSoVcjY2AZ/c81kEdi9V7Hx8FIy5o+ckfk1Vdf5Y033uCee+75qFFpATalFOsfq4qi6FctdCjFlrdNQOBQ9KsvZigFSJZfh4TLpl/eOVPJszg/yChkGjCybbmW99glZHLsebrjufZs3aJi5hkS+Xb9s/75juyUTuScrEKHPqMgxeqopzAM5ikaIEaBU78irVEZ+WSU63DpFusqcqdWEfdkOWSFLHv/Nx1F7oF5HbIkke/qv0VA4QC9DoACd/+MPHdGyv14TlZh1sA937Q+1EfqiLS0tPClL32Jhx9+GLeJL1w4HMbj8Zzwk1ZyynZfgvUcEUG2+0Ld0dLM8wZg20SlJPN83fHh2WcOCGN41pm64+NyT0ezyNBQGZejX3xqav7clE/l9EmgMT1/vu747MLkKor2zxAsKNIvODavaLxhpMGMJODM0ukGjJFkJNnV91SGxPmD9au4zioaSq5Dv2+OGcmSxMVDJ+mOTyksY1CGNYdHkSQurRyvOz46v4jK3HxLwTZFkrho5Fjdbb3ynBwmlZRaWsQVSWJpVRVOnT43+e4M5g6rsHT6R5Ek5g6vIF/HEXHZbSweW4Uip86QJYmJ5aUMzksX1vwo9JE5IkIIbrnlFr7yla8wc+ZMU4+56667yM3NPfZTUVHxUT29z6yyM85FkVPv8wAKbtdiHLZhuhYFGafjVKy0uZbJdU4ny6Ffhr3ENZlc+3BS39eQyHOMoNilX7681DWCMtcYS9szufZSKjOn6o4XOwcxOnuSpWTSLFsOE3P1K58WOPKYVTDVEiNDcRlWV82xZ3Jm6XRLx17tso2lg/SvBW6bk0srZlpiyJLEpRX6r8Oh2LiuaoalxVUIwTWV+g6VIsvcNHaGpXwXVQhuGK3PkCSJWyfqj5tl3DhhqqHNzVOnWYroqUJw4xRjR/mGmVMtnf5RheDGmVMNba6bM0W3nLsZaUJww1xjRlqpK+m/+O9///tIkmT4s2/fPv74xz/i9Xr5wQ9+YHruH/zgB/T09Bz7qaurS/bpfe4lSQ7ysm4mdR9TJT/r1gQMmYqcm0jdSdAYknNjAobEuPyrSD0pVjAu78qESZyzCi+2kLAqMbPgYqQEC+ei4nNS3taQkDi96Ox4romBzh20OGWGjMzSktNxKMb5RZeUL0j52KuMzNmDZpFl0CkY4Iqhs1NmKJLM0kETE27xXF05PeWvlSJJLBo0kvLMPGPGyCkp34ErksSskiGMyjNu6nbZ6PE4bbaU/goVSWJcYTFTio1vKC4YPZocpzMlhixJDM3NZX6CG8olo6soznSn5BzKkkRxViZnjKoytJtbNZSKgtyUGBKQ43JyzsTk+lelZV5Jr1a33347e/fuNfypqqri7bffZt26dTidTmw2GyNHxtu0z5w5k5tvvrnfuZ1OJzk5OSf8pJW8CrK/jtM+AZLO45DIcV9Npkt/O6NPQ3JuJNc5PQWGTLH7XErc5ya0HJVzIYMzZiedKyIhU+6ey8gc/a2fPo3PWcTo7PlJR0UkZCrcE5i
er7+F1acJOdOZlX960gwZmSHuSpaUJmaMzxnN2aWLkpq/jzE4o5TLhyR+r8bkVHDt0CVJMxRkSl353FaVmFGZVczXRy9NniHJFDqy+Pa48xLaDnbn8sMpZ6fAkMixZ/CTqYkZhS43/zMn8Xf8ZMmSRKbNwS/nJX6vsh1Ofr34vKR9KlmScCo2fnPG+QkddafNxm/PPS/pU1kSYJNlfn9eYoZNlvnNZecjkdytjUT8tfzm0vOwJUislWWJX10Zt0vWFZEkiV9eeR5O+8Akhad1qj6ypndHjx49IcejsbGRc845h6effpo5c+YwZMiQhHOkm96lrpjaTn3b1YSj+zGbM5KVcSGDC/+MZPJUTFTtYVvLF/BGdptkSBRmLGRi8R9RZKc5hubnzYbv0BragbnbWIlBGdNYMvhX2GVziWVRLcyzdT/niH+LKYaERJlrNFcP+29cirlcgJgW48GaP7CjR78Q1okMmTLXEL4+8kdkGSSRHi9NaPzl0IO8126ulLWMzCBXMT8e/58UOPUTbk9m/OHAs7zYuNY0o8SVxz1Tv0pZRqGpxwgh+MP+N3jwiH6BteOlSDIFjkz+Puc2hmcVm3oMwJ/3vsvv96w2yYg7IQ+cfj3j8sxvS/5zzwZ+vvlt3Z7RJzMybQ4eWno1U4vMnzJ8bM92fvTeSjDJcCo27jvvcuYONr/1/dzePXznjdcRInH8UJYk7LLM3y66mEXDK00z3th3iP9c8TKaEAm3g2RJQpYkfnfZBZw9dqRpxnsHavj3x14gpmoJGZIU/1u/a9k5XDR1nGlGWnF9Irvv1tTUUFlZydatW5k6daqpx6QdEWvSNB8t3Xfi8T8F/faekQENWcqhIOerFGT/e8JthpOlaiEOdf2KJu9TaPSVbD+eE2coUiYVOTcxPO8bvUdzk2FE2NLxN/b3rEAVffUbjmfEL/OK5GJs7uVMK/o3wyO1/UkTKu+1PcLGjueJitCxOU9kgCLZmJp/HmeU3IrdpDP1IUNjZctzvN3yEiEtiIR0yraQhIQsKcwuOJ3Lym/CqbiSYggheLFxJc81vIZfDSAjnXIkNs6QmF84i1srrybTltxJACEEzzes4aGalfREff0y+u47FxZP4d9HX06eI/nkzefqNvGXA2/RHvYi97awP5khgEWlY/nBhIsodiV/jXjx6E7u2fU2TUFPv+3tZUlCCMHC0pHcOe28hFsy/em1o/v5xeZ3OOrr7pfR97vTBg3j53POoTIn+Ryvd44e4b/XvsORni4UST5le6uPMWtQOT8/fSljCsw7bH1aW3eUn616hwMdHYavY+qgMn62ZAkTS0qTZmypb+S/X3+H3c2tKLJ0Sl5H3+8mDCrhx+ecwfQhyZeF2N3Qws9ffJvt9c2GjFGlhfzg/MXMHTE0aUZaaUckrZOkqp30+JfT7X+UWKwBQQRJysBpH0Ne1q1kuy9ElpJb8E5WVPXQ7H+OBu/jhGKNaCKMLLlw2ysZkn0dpZkXosjWTitENT+HPa+zv2cF3mgDqgijSE5y7OWMyb2cqpyzscvWjhxGtBB7elaxpfNlOiP1REUYm+Qg1z6I6QXnMzH3TFyKVUaEbV3reK99Jc2heiJaGLvkIM9RwPzCM5lTuJhMm7VTF1EtyobObbzW9A5HAw2EtDB2yU6BI5clpQs4o2Q+uSYjLXqKaSrvt+/iuYY1HPTWE1LD2GQbBY5szh00mwsGz6PQOQCMtgM8UbOeXd31BNQINjkeAbloyHQur5jJoIw8SwxNCN5rOcwjhzayuaMOfyzOyHe4uWToJK6pmk5FprmIkZ6EELzfXMuD+zezvvko/lgERZLIc2RwSeV4rh89LSUH5GTGhqZ6Hty9lTX1tfiiEWRJItfh5MIRY7lhwhRG5RvnnZhhbG1q4uEd21hVXY0vEr8xyHG6OHfUKG6YPIVxxck7OSdrZ2Mzj27ezlsHjuDrLYaW5XRy5ugqrp8xhUmDrSTLx7WvqY0nNmznjd0H8Yb
CCAFZTgcLx1Ry3ZwpTKkoG5BCgZ9XfSIdkVSUdkQ+GgkhEv6BCREiEHyRUOg9NK0bSZKR5UIyXOfici1BSpA8aYahiQhdgdfpDq4mpnUDEnY5nzz3meRnnImUIHJijhGl0f8uTYE1hNVuAJxKLoPc8ynPPCNhcTZzDJUa3waO+NYSVHsQQsOlZDM0cwYjsxdik42TQM0xNA56t7Oz5wP8MQ+qUHHbshiRNZEpefNxJIjOmPvMBXu9+9jQsRFPzEtMi5FpczM6exTzCufiShCdMcvY7TnC6tbNdEU8REWMLJubsdnDWVI6i8wEyaymGT11vNa0jfaQh4gWI8vuYnzOEM4vn0GO3ToDYE93I88f3U5LyENQjZJjdzE2t4zLhk6lwGnsrJpl7Otu5ZkjO2j0xxlZdidj8oq5qmoKxQmOCPdd2hNxDnZ38NSBndT7eghEo2TZHYzKL+Sq0ZMpy8weEEZ1dxdP79lFbU83/miULIeDyrx8rho/kSE5uQPCqO/p4ekdu6nu7MIXiZDpcDA0L5crJk9keH7egDDSMq+0I5JWylLVNry+v+PzP4wQHuLJqH21NmxADEUuIyvrNrIyb0M2mYdxvKJqF82ef9Hqe7TXATmeEf9vu1xEafaNlObcik02vhj2z/BysOdxDnmeIqx2IqEcq0vS998OOY8RuVcwOvc6HIrxxbA/RbQg27tWsL3refyxjn4ZTjmLSXkXMq1gGW6Dwme6r0MLs7b9Dd5vf5XuaBsyyrHaJzIyGhpOOYPZhWeysPgicu3J31XHtBjvtK7mjZaVtIbbjs17IsPJwuIFnDvobIqcyd9Vq0Ll9aZ1PN+wmvpgC4oko4n4hk4fwyHbObN0NpcPWcLgjOTvqjWh8UrjFh6veZ/DvuaTGPFtMJukcM7gqVw/fCGVWSVJM4QQvFS/k4cOr2N3d1M/jPgWxXnlE7lt1HzG5CZ/5y6E4NW6ffxr3wa2tjecyJCk+I6hBOdVjOVL4+YyubAsaQbAm0cP8Y+dG/iguR5FktBEvJ7M8SdLzho6ki9Pms3M0vKUGKtra/jn1o2sqTuKIsXfH03EGVLvfy8aVsm/TZ/F3CGplWtYf7SOez/YxLtHauJbaScxVCE4bdhQvjB7BgurhqfESCt5pR2RtFJSJLqPtvZr0LR2SFjoS8ZuH0dx4WMoivkLeihazd6WG4mozaYYLlslY0sfwmkzvxfsjzbxbtPX8EXrSZREKyHjtg1i4eC/kGU3fyH0xzpYUfd9OsM1CY//xhn5XFbxKwqd+vVZTmV4uO/IXdQHDydkyMhkKFl8seoOyt3mEwT9sQC/P/hHDnjjLeeNODIyLsXJ7aP/k5HZ5hMEg2qYu/bcx+auvQmTNhVJxi7Z+a8JX2JKvvnjkmE1yk92LGdV6+5+c29OZtgkmbumXs/84rGmGREtxn9tfYEX6nb0mxdzMkMC7pl5BWeX6xcmO1mqpnHnptd59NDWfvNiTmTEF91fzbmQy6v0C6ydLE0I7tq4in/s3NhvrsfJDE0Ifj7/LG4YZ75wnhCC329Yx+83rDPFUIXgB6ct5EvTZpqOSggh+NeGzfzvqvdMM/7jtLn8+2lz05GPj0HJrN/p7rtpARCL1dDadplJJwRAIxrdR2v75WgGDfKOVzjWxJ7mq0w6IXFGKFbDnuariKodphihWCerGr+IP9qAmZM8Ao1ArIV3Gr5AMNZqjqF6ebr223SGj5qqQRJndPF07X/SE2kyxQirQf5++Kc0BI+YYmhoBFQffzv8X7SE6k0xIlqEX+//LQe9hxC9/yRiBNUQv9z/a2r9taYYMU3lZ7v+wdauePfgRK9EFRphLcJ/7fore3qOmGKoQuNH2x9jdeueXoYxRRUaES3Gd7Y8xMaOQ6YYmtD44ZbneLFuR/z/TTBUofGtjU/ydpN+5+TjJYTgxxtf47FDW3uZiRjx0yXfWf8iL9TsNsUA+MWGuBPSN0cihgB+tHYlj+/bbprxh14nxCw
D4K733+Vf2zabZty3cQv/u+q9pBh/eH89f3x/vWlGWh+P0o5IWgih0dZxI0J4Mecg9EklFquhs+vbJhiCg21fIap1Jc2IqM0cav+mKev1LT8gGGtLqjy8QCWsdrO2+f9hJkC4sulueqJNSTI0wlqAF+rvMMVY0fBPWkL1SRUpE2hEtQj3HfkFqkj83J44+iRH/NVJMgQxLcavD/yOiBZNaP9I7cvs7DmUVDM7gUATGj/d/XcCsWBC+4erV/N+276kCtOJ3p/vbn2Yrogvof1jRzbySv2upGp29Nl+e+NTNAUSO+vPVO/kicPbkq4LIgHfWfciRzyJnfVXqvdz7y5zR8hP1g/XvsHujpaEdqtra/hdrxOSrH6xZjWbGhsS2m2ub+Cud95NifGH99fz7pGalB6b1kejtCOSFqHwamKxQyTnIPRJJRh6lVjMuAquL7wFf2RHygxP6H0CkYOGVt3hg7SFNqXUo0ag0hneRVfY+M6yO9LIEd/alPrHCFQ6I7XUBbYa2nmiXWztWpMSQ0OjK9rGXo/xnaU/5md123spVZXV0OiJetjYabyghdQwLzakyhD4YkHebt1kaBfVYjxRsyalQqkCQUiN8mK98XulCY37Dr2fAiHujMSExpM1xq9DCME/9qxLqYJp3KkSPHIgcTTh7zs3pFzeXgIe2L0lod0/t25MuXeMLEncZyIqct/GLZYq19630XzkJa2PXmlHJC18/vtJvkLq8ZLx+R82tGjxPmyRodDqe8TQ4rDnKUtdgSUUDvU8aWizq/slS12BJRS2dz1vaLOh8y1SL20fz0l5v/1VQ5s17WtNRU30GRIrW94ytFnduoWQFrbAgBcaVhtGkFa37qE7GkiZIRA8fXSdYVn5Na2HaQ6m3oBTE4InqjcR0WK6Npva6jnk6Uj5U1eFYPmR7QRiEV2b3R0tbGtrSrl3jCoEKw7vpicc0rWp6e5iTd3RlHvHqELw+pFDtPj0o1QtXh8rDx5OuXeMKgRrao5S29Wd0uPTGnilHZHPuVS1lVDoTVKLVBybpfeUTf8XhpjmoTPwsmVGm+8pNNH/hVbVwtR4X7LUsVegctT3OlHN3/+40NjZ/bKlbroClWrfWgKxLl2b9e1vWOh/E9+iOezbRWdEP+fl7dZ3LDIE1f4a6gP6+SivNq2x1FBQAA3BVvZ5a3RtnqvbYKm5HEBruIdNBrkiT9VsttQdFqAnGmRV0wHd8eWHt1lq9gcQjEV55ah+PsryAzstM2KaxorDe3THn9qzy/J7BfDMPv3I5LO79PlmpUgST+3YZXmetAZGaUfkc66YWoeVu+8+CdGNEP3fxURizQj07wbNShNBYmpnv2MhtRNNpH733SdBTDdpNaz5iOg4KckxBJ5o/3vtqojhMXBSklFnWH8/vy3cPiCM1nCb7lhjsN2Ss9OnpqD+c631tyWVf6Kn+kD/3yuAam+7pe6wEF/46gL6n+sRT0fKzf76ZJNk6nz6jOqeTusMWeaoR59R09ONhSa3QPwYdG1Pt+54bVc3Vn0dARztNpdkn9ZHr7Qj8jmXGICFtU+ajiOiiYFjqDpzxUTq4flT5tL6nyuiJU6cNKuozlxhVT/snazCWv9zqUK1tC1zvIIGzzes6W8TDBQjpFpnyJJEQNV3Yo22O8xKQiIQ02f4ogPBMJ7HG7HOEMKY4Y9ELDufGgK/wXMNRKNYLTqhCXGsYmta//dKOyKfc0kWS6IfL1nqv/CYLA0cQ5H6ryZpl5IvrKYnvTLxjhSKt+lJb65ke8sYyan0X0FUkRRsSfb70VOGDgPApRhXlDUrt8F7kmFLrt9Pf9KEINOmz8i0W2cIBG6D55o9IAzIsuu/5zkO6wxJMmZkORwpJ8P2SZYkMh36DLfdPiCMbKf19yOtgVHaEfmcy6YMYyC+BrJciKTjcDhsZUgk14SuX4aUiU3pv3Ko01aIIlnrZQMgYyfD1n+BNqeciSuFKq8nS0Imx95/xU1FspFnt9YPpE9
FDv2qnqXO5KuK9qdBLv3GZkMySi3liPSp3K3/XCszSyzniAAMdeu/5yOziy3nPahCMDxLv/vwqNwi6/kbQqMyW58xMq9gQHJEqnL1q/dW5RdY/jQ0IajKN2AU5qeccNsnCagssNY/KK2BU9oR+ZxLUYrIcJ2L1VMzWZk361YrtMnZFGZebJGhUJJ1jW5vGEVyUJlzseVTM8OyL8CmE62QJJlJ+RdZPjUzMvt0Mmz6JeXnFZ5taQGXkBmdPZU8h/7iuqT0jJTnh/g+/sisEQzO0C8vfv7gBZbC9BISw9xljMrS7356WcUcSzkiElDmymd6gX412iuHz7CcI1LgzGRh6Sjd8WtGTrWcv5Flc3BuxRh9xpgplhkOxcbFI/QrxV41fuKAOAnLxk7QHb984vgBcXaunDzR4ixpDZTSjkhaZGXdirUTLZCZeb3heGn2jRYZKiXZ1xlajMi5wvKpmRG5VxjaTMy7wPKpmcn5FxvazCpYYvG0icb8onMNbU4rmoc9QcM/I2kIlpaeaWhzevE0w22VRBIILipfaFiOe0HxWAoc1joVXzFsHrJBpGBucSXl7ryUPxEZiWsrZ2GX9Z3kKYWDGZNbnDJDkSSuGTkNl03/Mx2dX8TMkvKUtzUUSWLZyAmGWzxDcnJZOGx4yhEkRZI4f+Roitz626BFmZmcO2aUJcbCquGU56bbhnxSlHZE0sLpOA27bTypRSxkMjIuxqYY94LJck4hyzkjRYZCnusMMuxVhlY5jipKM+alFBWRUChyTSPfOc6YYS9lVPailKIiEgrFzhGUZ0w2tMuy5zKjYHFKzoiMTJGjjLHZUw3tMpQMlpSkzihw5DMzf7qhnUO2c0n54qTnjzMkcuyZLC6ZaWhnkxWuH356yoxMm4sLBxu/DlmS+eKoBSnFXWQkHIqNK4cZMyRJ4isT5qfEkHqf4w2jjRkAX50yx1LE4pbxiRlfnjYr5QiSJgS3TZ2R0O62WTMs1UP54mzj71VaH6/SjkhaSJJEUeGDyHIeyTkKCnbbGAry7jZlPar4rziUkqQZLlsFI4p+a8p6bun/kGkbnJQzIqHgUoqYV/orU/ZLB91OgWNYUs6IhEKGksNFQ35uquHWpeW3MSRjRFIMGRmnksEXqn6ILCV+/VdVXMGY7NFJOSMyMnbZzu2jv4VdThxRuXbYOczMH58kQ8Im2/jZxK+SoSROKLx2+ALOLJ2UFENCQpZk7pl+M7mOxMnUVw2fweVDpyXltkm9//rjnKspyUh8933J8AncOmZWEgSOPZ8/nHYpQ7MS5zwsHTqS/5g6LylGn+5ZeD5jChJ3RZ5fMZTvn7YwJcZ/L17K1EGJuwlPGTyIn569JCXG9xafzrxhqXX6TeujUdoRSQsAm20IJcUvoChlJP5aSICEwz6V4qKnkWVzoXGHUsz4QU/isg01zXDbRzNu0JPYFP2cihMZuSwuv5ccRxWYWjYksuwVLBlyHy6bfoLciQw3y4beQ4lr1LHnaUyQyLYXc+Ww35FtN9fe3i47+eKIO6jM7OsOm4ghk2XL42sj/5tCp7nW8zbZxrdG/weTcicee55GkpHJtGXyo3HfY4jbXFt4RVL44fjbmFc42TTDbXPxi8nfYFS2fm7ICY+RZO6cfBXnDZ7aO4cxQ5FkXIqd38+4lan5w00xJEnizqkXctXwmaYZDtnGn+dcy2kl5jsV/2j6Ur48bm7vHIkZiiTzxwWXcY5BbsjJ+vb0BXxr2mkmGRKyJPGbhedz+Uj9vI2T9eVpM/lBrzNihiEBP1+8lOsnTTHNuG7aFH569hIkM4zecvDfX3w6X5ydOOKS1scrSZjpwPV/pGTaCKc1MFK1Lny++/H5H0DT2gAb8S62fQtuDJutiqzML5CVeR2SlHwOQEzz0Op9jGbvA0TVFiRsx+VdyEAMp1JBac7NlGRdhyInfxompgU57HmaQz3LCcSakFCOJU7G28SrZCiljMy9mhG5y7CbdKZOZETY1f0y27u
eozvagHwSQ0PFrRQwJf8SJudfjEtJ/sRNTIuyqWsVa9pepjV8PEMgIaOhkqnkMK/obOYXnUuWQRKsnlSh8n77Wt5ofpO6YD0K8rH+JbIkowqVTMXNGSWLOav0TPIceUkzNKGxqnUzLzau5oD3KIokI0QfQ0IVGm7Fxbll87l48CKKXcmfaBBC8FbLTp6sXcuO7tr4CRERz2fpY7gUBxeXz+TqYadR7jbneJ7MeLt5Pw8fXs+G9ppjp1CE+JDhlG1cOmwqN42Yy/Cs1E5ArWo8zP37N7Km6UhvToeEJkRvO3sNu6xwaeVEbhszm9F55pzbk7WmsZb7dm3i7brDx/JGNBFf1DUhkGWJi6vG8YUJM5lYpH86ykgbGuq5b9tmVlYfBuJ/3WofA4EEnD9yNLdNnWEqEtKftjU28cCmLby67yACjn0OsiQdqzeydNQIbp05jVkVQ1JipJW8klm/045IWv1KiBjB0BuEw++had2AjCwXkJFxHk7HPMPtBSE0ECGQMhLYqXQHV9MTWkVM7QEkbEoe+RlLyXHNRzJIIBRCQxMhZMmV0K4l+AFNgfeIqD0IBE45j0Hu+Qxyz0My2MIQQqCKIEpChqAhsJ3DvrUE1R4EGi45h6GZ06nMmmu4TSKEICZC2CRnQkZtYD87u9fjVz29C3cmI7ImMiF3FopBXRAhBFERxibZEz6XI/5qNnRuxBP1oooYblsmY7JGMbNghuFWTJwR6WUYR7sO++pY3bqF7oiXqIiRactgbPZwTi+ehtOg9ogQgogWxSYrKAm2ng57m3m9aRvtYS9hLUq2LYNxuUM4e9AUMmzGjLAWxSYp2AySSyFecfWFuu20hLyE1CjZdhfjcgdx4ZBJZNn1HfQ4I4YiyYYJrAC13i6erd5JY8BDMBYhx+5iTF4xl1ZOJNdh7KCH1RgSEg7FmFHv7eGZQ7up8/YQiEXItjsZmVfIslETKHAZ184JqbHePBhjRrPPyzN793DU040vEiHL4aAyL59l4yZQ7DbeGgur8arMTsW49k2738+zu/ZQ3dmNLxwm0+FgaH4el08cz6BsawnNaSWvtCOS1scuTW0jFHicsP8RNK2JeHklGVkZhivzZpzuK5Hl5O/Wj1dM7aTTv5wO7yNEjpWml3AoFRRm30hB5tXYFGu1AaJqD/W+F6j1PE4g9iHDpZQyNOcqKrKX4VT0azWYUUT1c8j7Onu6n6UnUtcbDZJwK4WMzbuYMbkXkWmzVkskooXY2f0uGzpepjV89FjEKcuWx7T8s5iRfw55jtTupD9kRNjUuZ5VbSupDxxFO8bIZl7h6SwsPpNii/VKolqUte1beaVpNYd8xzPcnFE8h3PKTqc8I7W79T7FNJX32nbxTN377PbUHjvimmVzcdag6VxaPo/KLHPbXUaMd1v383j1B2zprCHWy8i0OTl38CSuGjabsbmpRQT6pGoa7zYf5uFDm1jXWkNUi58gc9vsnDtkHDeMnMnkAuOk8kTShOD9phoe3LeF9xqrCatxRoZi4+yho7lp7DSmF5ebyoPSkxCC9Y11PLRzG2/XHjnBEVkyrIqbJk1l7uAKS4y0PnqlHZG0PjYJzYev5w4iwRXEF+2Tj7b2XSwcuDJvxJ3zQyQpuYqbmhakoetOuvxP9fas6e8rKyFhIz/zasrz/ws5ye0cVUTY1/Fr6rxPoh3ri3MyR0ZCYnDWhUwo/JFuvRHd1yFibGq/l93dT6OKaL+voy85tTJrMaeVfgdnkts5mtBY3bqcte0riIow8ff/RI6EjEAwNns2F5V/ncwkt3OEELze8hKvNb1ISAv2bnWdyJCR0dCYmDOFG4Z9gTxHcg6iEIKXmlbxZN0r+GIBZKRT6oX0MSbljuYbI2+gxJW8g/hiwwf84/Cr9ET9/TIUSUYVGpNzK/ne+CupcCfvvL1Uv43f7n2d9rAPuXfboz/GpLwh/GTypYzKSd6xeq1+Lz/f+gbNQW/v9k3/jPF5pfxi5oVMLEje6Xm
7/jA/+WAldb4eHUb8d6Pzirhr3jnMKEl+G+T9+lruWP0m1T1dhozK3Hz+Z9FZzB9iLo8orY9faUckrY9FmtqOp+Ma1NhBzNUIkbA55pFT8ACSyUVc1Xo43Ho9wchOTnVy+pOM2zGFqpJHUGRz35mYFmBj81fpCm/BXANAmWz7KOaU/ROHyQhMTAvzZuMPqQ9sNMWIV18dwvkVvzcdHVFFjKfr7mGvZ50pewmZXHsRN1f+nHyHucVPExoP1dzL+s41puxlZLLtOXxr9A8Z5DK3+Akh+MeR5bzW/J5pRpbNzU8n/gfDM80l0Qoh+NvhV3i8dpVphtvm4J6pX2J8rvnF7+8H3uEvB942x5AknLKNP8++iRmFw00z7tv/Ab/YvtIcAwmbLPO3BVexcNAI04zHDmzjR+teBxJ/e2UkZFniz4su4Zyho00znjuwh2+/9eqx3CEjScQTiH9z5nlcOlq/wFpa/3dKZv1On5pJKyUJLYin86YknBAAQSyyHm/XVxEmmq5pIkx1220EI7sw54QAaAQiO6hu+yKaSNzkSxMxtrR+m67wVsx3IdbwRQ+xseVrqDqN5Y6XEBqrmv/btBMC8aJknmgDr9XfTkSnCd+JDMGLDX9mr2e9qfk/ZLTzcM1PCMS8ph7zVP2jpp0QAA0Nb9TD7w78L56ouW6njx19ybQT0sfwxQL8ZNcfaAvrd9E9gVG7yrQT0scIxMJ8Z9u91AX0Ow4fr+U1H5h2QiC+7RFWY3x9w0Mc9Oh3Tj5eK2p2mHZCIJ64G9VUvrLmSXZ2Npp6zGu1+/nRutd7E5jNMVRN4+urnmdDS50pxqqj1Xz7rVfRhDBVjVcQf7++/darrD5abYqR1idXaUckrZQU8v8LNbqL5KulakTDbxEJPpfQstP3OP7wxhQYKv7wOjp9TyS0bPS9THtwDeYdnbgEKj3hXdR4HktoW+N7jxrfasw7Oh8yuiM17OhMzDji38627reTZmhodEVaWN22PDHDd4h3Wt9Iav4+hifazXMNTya0rfU38nT9aykxfLEA91c/k9C2MdjJPw6/kgJDEFQj/G7/cwlt20Nefrk7NUZYi/GzHc8ntPVEQtyxKXmGIN6X5v9teIFEAfFgLMp33k+NoQnBt957KWHhsaiqcvtbryR8Lv1yhODbb71CVB2YbtJp/d8o7YiklbSEUAn67yfZxftDyb2PN2II2rz3pTg/gES79/6EF7caz6Ok/mcgqPU8ljC6s6f7mZT70wg09vY815tToq8NHS8jW2Bs7VpJJEF0Z1XbypQZGhobOtfij/kN7V5rftcS44OO7XRGjCMvzzesS7mEvio0NnYeoCHQbmj3bN3mlBZWiC/gO7rrOOBpNmbU7CCixQxtjBiHPO1s7ag3tHuxei++aCSliq8agga/h/cajSMWb1QfoiMYTIkhgI5gkJU1h1J4dFqfFKUdkbSSVjT8NkIzFzruXxpqdBux6C5dC394PZFYNcne4X8oQTh2CH94g65Fd3gXnsgeUneoIKQ20xZ8X58RqaUpuNVSf5qw2kOtT3+roifSxn7vxmMnSlJRRAuxq1uf4Yt52dT5gSWGKlTWd+gzArEgb7eut8QQwJsta3XHw2qUFxvWW2qUJyPzQoP+FlhMU3mi5gNLDEWSebJW/7srhODBg/rj5hgSjxzabMi4f+8mSw3mFEnioX1bDG0e2Lkl5f43fYwHdmxN+fFp/d8r7YiklbTCwZew1kkXQCESfFl3tDvwEvFialZkoyegz2j2r7TUrRfipdub/PrbFdXe1Za69cYZMke8+rkGe73rLXcjBYldPfpOwo7urWgWGyMKBBs79RNpt3XvJaIZR37MMN5t26g7vr37CL5Y4rweI2lovNmyTXd8V3cDHWGfJYYqNF5t2Kk7vr+nlTp/twVXJ95z5dW6PbpbJ/W+HvZ2tVlmvF1/mFCs/8+1MxhgY1ODpf43qhBsaKqnM5g4lyqtT6bSjkhaSUtobVjt1gsSmta
hOxpTOwaAoREzYERU/TGzEqhEVP0EyZDaZdkREWgEY/oMf6zHMgMEvliX7qg35kl5y+R4eWL62yY9UWuLt5l5uiIDxdDfYuocIIYvFjpW0+RkdYSNt7jMKio0/NFwv2PtoYFZ2AXQFQ72O9YxgM5DZ7B/RlqffKUdkbSSlhCp7UufNAsYzKNfLyQ5htFzFaiWCQCaQf6GZuJ0kBkZ5YgMHEP/vVIHjKE/jyrUlHM3zDNS3/YxO4+qDQzDaK6YNnBVF6I6r2Wg3iuA2MfxOgbwfU/r41XaEUkraclyPuYayhlJQjKotKrIuQzE9o9iwLDLOQOw8Mk4DBryOZVsU8cRE8ml5BmMZQ4II0PRP+vvVtwDwnAr+uW8M20Dw8hU9IvZZdmS743Un9wGXYGz7cn3RupPdlnBoVPWPNcxMK8DIEenHP2AMpw6DGfi7spmNZBzpfXxKu2IpJW0bM65AzBLDLtjju5olnMu1rdmYmS69BkFrpm9kRcr0ihwzdQdHZQxFTEA21hl7qm6o8MzJ1lKho0TZKqyJuuOj84eZ9lJkJEZlzNRd3x8jvkCW0aMKXljdccn5A5L2Dk3kRRJZnq+fkfd8XmDE/aQScyQmFkwXHd8TG4JmQY9c8xIliSmFpRjk/tfBobn5FPgTK568MmSgFG5heTY+3cSBmVlU55lvVhleXYOg7KSbyqZ1idDaUckraTlzFgGWLsISnIJdtdS3fE894XIkrULiyLlkOe+QHe8xH0GDjn5DqwnMlyUZ12kO17unkG2rQwrESQZhTG5+q9jSMZoSpzDLDFAMKPgHN3RwRlDGJE5ylIuiobGwuIzdcdLXUVMyxtvKRdFQ+O8soW644XOHBaVTDrWNTcVqUJjWcVpuuM59gwuLJ9qkSG4tnKe7niGzc5VVdNQLJw20YTg5tGzdMftssINY6daOtEigFvHz9TtCyNLEjdPmmbpmysBt0yaZul5pvV/q7QjklbSkuUcnO4rSX3rRMaVeSuSQcdYWc6gMOs6CwyFwuwbkSX98LIs2RiWcy2p/hlIKAzJugybrL/dIEky4/OXpTR/H6Mq+0zDrRlJkphTeCGp5tRIyIzOnk2u3biU/BklZ6cceZGQGJM9LmGZ9wvKFqV8fFdCYkTmUEZkGZdgv3zIaSnnP0hAhbuIKXlVhnZXD59tKceixJXDgpJRhjbXjZh+Si+WZJTnyODscv3oEcC1o6ZYStVy2+xcUmlcgv3KcRN1ozJmZJNlrhirH2lL65OvtCOSVkrKyPoakpRJ8l8hBVkuxZV5Q0LL4uwvppgromCT8ynKvjWh5bCca3AqRSkc45VR5Ewqc29JaDkm50Ky7WUpMCQUyc7UwpsSWk7OW0SxsyKFiIWEIiksLrkmoeW0/JkMdVemFLGQkLlk8JUmGOMZnzMy5ajITcMvSWgzOa+SuYVjU9qiEcBXR16YsPPruNzBnFM2MeUcpG+POydhRKUyu5Crq1KPJnxn0hk4dXJQ+lSWmcMXxutvPSbSt6cuINNuHD3Nd2Xw9Rmpb/d+fcZc8l0Dk5eT1v+N0o5IWilJsQ0lu+BB4ls0ZhdYBUnKJqfw8d6EV2PZbYOoKnmkN6phniFLGVSVPIJdSdzIzaHkM3vQvShypmlHQUJBlhzMKv0LbnviJmsOJZPzyn+LU8lJgiEjSzbOLv9f8hyJm6zZZSc3DL+TbFu+aWdEilO4suJ7lGUY3+EDKJKNb4y8nUJnkWlHQer957bKr1KVZXyHDyBLMj8c92+Uu0uTYMT19ZHXM9kgP+SYvSRx58QbGJVdnrQz8s3Rl7CgeIIp2/+eejnTCoYmzfiPsUs5r1w/X+d43Tn9XE4fNCJpZ+QrY+dzzYjppmy/P2Mx5w0dnTTj5rHT+cJ4/a2f4/XNmfNYNsbc+3q8rhgzgW/O1N/CSuvTobQjklbKsjtnkVu0Aknua7+u93WKL76yMpTc4pdR7PqJfifL7ZjEqNLnj3Mq9Bby+O/tShmjBr1
AhsP8RS3bMYLTBj9Bhi3uVOgv5PHfO5QC5pc9Qr5rqmlGjmMwlw6995hToc+IX+6dSg4XDvkjg90zTDNy7UV8acSvGeSqNMeQ3dxU+TPG5JhbLABy7Ll8b+ydVGbGE0sTOQsO2cnXR97OzAL9pOGTlWlzc9ek25mUO9qQIfX+2GU73x37Jc4sNb8guW1O/jDjq8wrGgdgGH2QkLBLCneMv4ZlFQtMM5yKnb/NuZmzB09MyJCRsEkyP550MV8Yucg0wy4r/H3BVSyrnNLL0HcXZCRkSeKHU87iO5OXmGYossyfFl3CzWNnJGZI8RjQ7VNP587ZSxNGjvokSRJ3LzmXr06bjZSAofQyvjptNr9acq5pRlqfXEki1YYIH4OSaSOc1v+dhAgTCb5C0H8/avTUcs525+m4Mm/F7jwTSUot50OIKD3B12n33t9v2fZM51yKsm8lN+MsJMmeEkMTMdoC71LjeZSO0AenjOc5pzA853pKM5eiSKkl62pCpcG/gd3dz1AfOJVR5BzDhPwrqMw6A5uc2nFETWhU+3ewoeNl9ntP7fhb4hzG3MKLmJh3Og45tSOaQggO+vaxqvVNtnZvOiV3ZJCrjDNKzmZOwWm4DI7TJmLs91bzatNq3u/YckrOxSBXEReULeaMkjlk2lI/3bHXU8eKurW82bKV2Ek1SEpd+SwbchrnDZ5Jrl0/FyiR9vU08WTtBl6s33ZKf5gSVw7XDJ/DpRXTKXRmpcw40NPGY4c380z1doLqiXVnCp1ubhg5k6uqplGakXoS+JGeTh49sJUnDuzAHzuxu3W+M4Mbxkzl2tFTGZyZ+vX6aE83j+7ZzuO7d+CJnFhsLcfh5NoJk7l+/BSG5ualzEjro1cy63faEUlrQKXGDqPGjiKEH0nKRrFVodgqdO2F5kGLbEZoXcRrixQgO2YhyfoLSzhaTThWiyZ8yFIWTttwnPbhuvaa5icY2YDaWwFVkfNxOWehyPoX5EC0Hn+0lpjwYZMyybCVk+Wo1H/dWpCe8CaiahegYZNzyXFOx25QY8QXbaE7UktU82OTM8iylZLvNGKEaQluI6x2o6HilHMoyphomMjqiXbQFq4jpPqxy05y7UWUOIfp3kWqIkp9YDf+WDeaiOFSsijLGEOmTX8rrSfaTWOwnoAawCE7yLPnMyRjqAFDpdq3D0+sh5iI4lYyqXBXkWvXP8HUE/VS62/AFwtgl+0UOHKpyqzQZWhC44D3MF2RbiIiSqbiZnhmBUXOwn7t4+9VgIPeBryxIHbJRoEzmzHZ5cg6kQxNaOz11NIe7iakRsmyZVCVNZiyDH2GNxpiT08j3mgQm6xQ4MhkQl65brRECMGunnqagn0MJyOzSxmaqZ9Y7IuG2dXVRE8khCLJFDgzmFSgf6RYCMGu7ibq/d0E1QhZNicjsosYkVOsywjGomxvb6I7HEKWJPKdGUwpKsOh6DP2drVS4+0iEIuQaXdQlVPImDx9RigWY3trE92heEn+PJeLKSVluGxWWz+k9XEo7Yik9YmXFt1DLPAoauAZ4KQS01IGSsbV2DJvQLalXlsiHD1Aj+9BPIEnEOLE8s+S5CLHfRW5WTfjtI9LmRGIVtPofZxm71Oo4sSy2xIOSrMuZnD29WQ7k9//7pM32sjBnhUc7HmOiOY9YUzGxvDssxidu4wi14SUw9SeaBvbu15lW9fLBFXPSa9DZkzOAqblX8QQd+qMnmgXH3S8zZr2N/CeVOpdQmJi7kxOLzqHkVlWGB5Wtb7P683v0BXtPoUxNW8iZw86g8m543UdjETyRP283ryB5+vfoyV8aln8GfljuKR8AbMLx6d8hNcbDfFyw1aeqF1LXeDU8v4zCiq5Ztg8FpaMxZZizRJfNMyLdTt5+PAGDntP7SY8rWAIN46YzVnl43CkyAjGorxQs4cH9m1iX3frKeOTC8u4ecwMLhg2LmHibFqfLn1iHJGXX36Zn/3sZ+zYsQOXy8WiRYt47rnnTD8+7Yh
89iRElGjPf6EGnyCe16FX7Cs+Zsv8IrbsHyAlcUEXQqW952d0++41xcjJvJGSvP8xPE58KkNQ0/07jvb81ZAhoSBQKcm8hDFF/4OcxJaOEIJdXQ+xreNvSEi6R2f7GEMzF3PaoDuxJbndsrnjed5u+TuYYFRmzuDiIT/EqSS3FbK+422eqvsnovef/iQjo6FRlTmOL1TejtuW3DbF++0b+NvhB1CFmpAxInM43x377+TYk9umWNu+k//Z8zBRLZaQUZlZxi8m/xtFTv2oWH9a13aQ7259jKAa3/rojyIjoSEYmlnEn2fewmB34uTv47WxvZavrluONxpC0mNIEpoQDHbn8q/Trqcq2/h498na3t7Ibe88SWc4qM/ofR0lGVk8sORqxuWXJMVI65OrT4Qj8swzz/ClL32JX/ziFyxZsoRYLMauXbu46qqrTM+RdkQ+WxIiRqTrK2jht0mmOIHsuhRH3q9NOSNCaDR3/ju+4IoknplEpuscygrvNZXDIoTgQMePafY9mRQjzzWPSaX3IpvMYdnc9gf2dD+WBEGmyDWRpeV/MO2MvN/2KO+3PZIUo9hVyXXD78Yhm8v9eKf1JV5oNM+QkSlyDuKbo35m2hl5s2U1/6p+NClGobOAn034PnkOc9eWN5s38ct9j+ouqqcwJJl8exZ/nP4til15phhvN+/me1sfB0AzQVEkmWybiwfmfYWKTP0toeO1puUw/7b2cTQhTDIk3IqDxxffyqgcc47ChtY6bnrzcaJCM9VZV5EknIqNJ866nkmFxrVm0vp0KJn1+yM5NROLxfjmN7/J3XffzVe+8hVGjx7N+PHjk3JC0vrsKer5n6SdEAAt9Bwx3x9M2XZ67knSCQEQ+EOv097z36as6zz/TNIJiTO6Q+s42HGnKev93c8k5YTECRptoV2sbTH3OnZ3v5WUE/Iho5rn636BmXuYHd0bknJCIF4dtT3czL+q70EzURRsR/du7qtO7r3S0OgId/Kr/X8gpiUu87+r+wh374szzH57NaHRHfHx/R1/I6xGEtrv7Wngh9uWIzDnIEC8yqs3FuJrG+/HFw0ltD/kaePr65ejCi0JhiCgRrh1zSN0hRN3y63zdfOFd5407YT0MUJqjJvfXk5LwJv4AWl9pvSROCJbtmyhoaEBWZaZNm0aZWVlnHfeeezatcvwceFwGI/Hc8JPWp8NCbUZNfAQqZZpjPn+itD0W8gDqGonnd4/pzQ/CLp9/yKmthgzND+13X9KmdHse5pg9GgCRoRtHX9LkaFR63uLrvBBYyuhsrr1vpQIAo1q/yYag3uN7YTgxabkHIQ+aWgc8e/jgHdnQtsn6pJ1PD9kVPuPsrlre0LbB2peTYmhonE00MKq1m0Jbe899DaaMOseHMcQGk3BLl5q2JrQ9u/71xDVUmEIOkJ+lldvTmj7jz0fEIxFTTshfdKEwBMJ8eD+xIy0Plv6SByRI0eOAHDnnXdyxx138NJLL5Gfn8/ixYvp7Dw18apPd911F7m5ucd+Kir0T1uk9elSLPC4xRmiqMFnDC08geVgqYmdoMdvvHC2+F9EOynxNTnJNHqfMLQ46nvnlKTUZCShcKDnWUObI76N+GL6f4uJGTJbOl80tDns20N7uDllhozMe+2vJ2DUUO0/mnJDPhmJ15rfNrQ56m9he/ch0xGEkyUhsaLhXUOb5mA377buR7VQT/2J2rWGUarOcIBX6nenXHpeQ/DIkY2Gj/dFwzx9eEfKpedVIXj04BbCqtVmlGl9mpSUI/L9738fSZIMf/bt24emxb+oP/rRj1i2bBkzZszg/vvvR5IknnrqKd35f/CDH9DT03Psp66uztqrS+sTISGixPyPgMUOsTH/A7oXWiE0un33YakxBho9vgcQQv8i2OB5CGvN5VSavE+gaWFdi33dT1lqLidQOex5mYjq17XZ0vmiRYbGfs97BGLdujbvtb9huYHdXs9WuiKnnujo05stqy0yBPu8B2kINun
avNS41hJDIDjsa2C/Rz8S9mzdRostC6Eu0MnGziO6Ns/UbrXUmwagLeRjVZN+tO256t2WnQhPJMyrR/dZmiOtT5eS+uu6/fbb2bt3r+FPVVUVZWXxZKPx4z9sduR0OqmqquLoUf0/RqfTSU5Ozgk/aX36JdQ6EKnffffOEp9H638eVWsjpjZYZBjPo2p+AtFDWHN2QBU+ArH+FwwhNNrDu1NuLvchI0J3RH/BqA9YZ2ioNAf1GUd8e1NuYNcngaDGr8/Y49lvmQFwwHtYd2xH9yHLDAmJ3T3VuuNbO2tSjrj0SZFktnfV6o5vaa9LOXLUJ5sks7lD/xq+qbXechdcmySzqbXe0hxpfbqU1MHt4uJiiov1C9D0acaMGTidTvbv38+CBfGSyNFolJqaGoYNG5baM03r0ytt4HJ9hOhB4tTTAdoAMlSth/7OtcQsbJecMpfa//ONagGsOjp9Cqv9P19NqMSEfkQmGYU0n8GYlS2sDxU0iOwEVOsMGRl/TD8J0xsbAIYk4TOYpyeaOAk0kSTi9Uf01B2xzgDwRvW/O55IyHLURSOeK5LW50cfSQWZnJwcvvKVr/CTn/yEiooKhg0bxt133w3AlVcm7sCZ1mdMKZZD7186cyVRAySR9ErEJ1NnJFWGPIAMRe91xLuOWI6IxBn6z1eW5AHxqYwYSootA46XQGAzmMdoLBkZFR7Tq3qarIzmscsD890yZCiK6ePNepJ650nr86OPrJTd3Xffjc1m48YbbyQYDDJnzhzefvtt8vOTK7yT1qdfHzbFszwTkk7XXkUuBMuXwL65+i/cZJNzMS6QZl4OpX+GIjlRJCfqAEQsXLb+S6ZLkoRLySaoGp9CMiO3ov/3nGXLpTNyajXNZJVt1y8IlmvPoSdqLRomEOTa9beBC505NIXaLX2zVKGRZ1A8rdiZwwGaLW3PqEJQ4NCvu1KckYUiSZYiFgJBgVO/mF2RKxNFkomlmBAbl0ShM/W+Pml9+vSRdd+12+3cc889tLS04PF4WLlyJRMmpF7mOq1PrySlFMk+DWtfNwXZuRhJ7v8Cpcg5uJ0L0e/Oa0YyLsccbEr/24+yZKfYfZZlRqZ9LC6d/juSJDE8+2wkSwyJLFs5+Y5Ruhbjc8+wlKwKkKnkU+7WL48/M3+BZYZLdjMqS/+6saBoDpKlNE9wyHam5E3UHT+jZLpl91aRZOYX6TPOLptsOUcEYMkg/ffq/PIJlrdNVCE4f4g+48Jh4yw6IXGn7aLhqbddSOvTp4/MEUkrreNly7wVa6dmVGzumw0tcrNuw1q0QiMv6zZDi8E5N1hmlOfcZNhLZUzuMoTFqMvYvKsMGVPzL7C0NSMhMa3gImSDbYt5hWdiJUIlIzO/6Ezssv7W3qLi+Sn3jOljLCyej9umXyX2zNIZOOXUOjpD3AlZXDKNPINoxdJBE8m2pdYJuY+xsGQMZRl5ujaLy0ZR4kq9u68sScwuGmZY6n1u6VCGZeen7BrKSEwsKE1XV/2cKe2IpPWxSHGdA1I+qR19lUEuR3aebmiV6VqCTSkjta+1jCIXkpVxrqFVrnMWGbbKFBkSipRFSeYFhlaFrrEUOselGE2QUCQ7I3LON2Y4hzDUPcVCxEJmcv45hhZ5jkIm5s5M+eirQDCvcKmhTY49m/mFs1JmaGicVbrI0MZtc3Fu2ZyUGarQuHjwAkMbh2LjiqFzkFNcwlWhcdWweYY2iiRzw4jZKTM0IbhxxGxDG0mSuHXszJTmh3ii6i1jUn98Wp9OpR2RtD4WSZIDR/7vSN4RkQAZR/7vEvaakSSFQQV/Ib51kgxHAiQGFfxFN4n0Q4bEuOJfI2NLkhHX2OK7UUz0aJlf+mMUyZkCQzCv9A4cSuJmbucM/g+csjslZ+Tssq+TpZODcrwuL7+FTFt2Sov4xYNvoMhZmtDu+mFXkO/IS4mxbMhFDHUPSWh38/DzKMsoTKmb7hVDFjM+d3hCu1tHLKIquyRphgRcXjGLOYW
JO1XfPHIOk/IHoyR5xFZC4oIhEzhr8NiEtteOnMbc0mFJM2RJ4szykVxaqb+FldZnU2lHJK2PTYpzIfa83xN3FMx89WTAjiP/bygOc3dJGc45lBX+g3getllG3IFxu4wjLn3Kdk5kYunfkSWnSUbcmRpTdBdF7jNNMfKcVZxZ/ltsUoZJRyF+0Z9d/B0qs882xch3DOaqYb9I2hlZVHIbU/LPM2Wb5yjkqyPuwG3LSspROLv0chYVG0d1+pRrz+FH475FniM3Kcb5g5ayrPxCU7bZdje/nPIVSp35STHOK5vLl0ZcZMo20+bkTzNvYVhmUVJRi7PLJvO98RcZbsX1yaXY+fv8axmTU5oU44yyUfzvjEtMMRyKwt8XLWNq0WDTDAmYVzqMP55+KYqcXpY+b/rIuu8OhNLddz+bUsPriXp+iojto/9TKPHfSfYpOHJ+iuyYkjQjFN5Ca/cPCUd3GDIc9gmU5P2cDOecpBm+8B4Odv4UT3grEsopeR19v3PbRzKi4EcUZJyWNKMnUs0HrXfTEtxiyMi2VzCj6D+oyDLnTB2vrkgjK5v+TI1/S7/HemUUNFRy7KUsLv0CY3NSYbTzdP197PHoMWQ0NHLtBVxQdg2zChYmzeiOeLiv+lE2dW1D4tTutX2MPHsOVwy5mDNLk2d4on7+dPBZVrdtQwhxSoGwvrb2ufZMrht2FpeVLzS1eB8vbzTEr/e+zCuN29AMGNk2FzdXLeTmqtOTzpMJxCL8cudKnq3dRlSLf6eOp0hICASZNge3jpzL18YtTDpSE1Zj/HLrOzx2cBuR3mqrJzLi/59hs3PzmBl8e8rCATvGnNb/vZJZv9OOSFr/JxJCIKLbiPofjnfkFb2FsaQcFNc52DJvQLZbP2UViuygx/cgvtBrx4qeyVI2mRlnkZd1Ky7HVMsMX2Q/Td7HafO/RkzzINCwyVnku06nPOcGcpzTk16MTlZPpIYDPc9S411JWPUAApvspixjJmPyrqQ0wzqjK9LItq5X2NPzNiHViypUnLKbIe6JTC+4iOGZ0xJujyVSZ7iVdR1vsbHrPXwxD5pQccguKjNHs6DoHMblTLWUfArQGenirZb3WN32Pj1RLzERw6W4GJE5nHMGncH0/MmW6490hj282vwBrzWtpyPiIabFcClORmaVc0n5Ak4rmmxYN8SMusJ+XmjYzIq6TbSGeohoMVyKg5HZpVw1dC5LB03EoVirwNATCbKidjtPVG+mKdhDWI3hUuxUZRdxfdUsLqiYgEtJPVEXwBsJs6J6F48c2EK9r5uQGsNlszM8O58bRk/nkuETyLQPZK2htD4JSjsiaX3q1Pc1TLSYithRRPAZUGtAC4DsBmU4UsYVSDpHYpNlxGL1BAKPE4sdRmg+JDkTmzIMt/tqbHbjfXizjEismXb/kwSjB1E1L7LkxmkbQlHmFWQ4Rhs+to+TiBGKdVDrfZ6eyEGiqhebnEGGrZSh2ReR5xwzIIxgrIe9Pa/RFj5IRPVhk11k2goZk3MWpRmJ8wnMMAIxH1u6V1MfOEhQDWCT7OTY85mat4Ch7jGJvzNmXocaZF3HOg56DxJQA9gkG7n2XGYXzGZM9sAwQmqY1a2b2OU5jD8aQJEVcu3ZLCiayuS80QkdMHOMKG8172Bz52E80QCKJJPnyGRRyQTmFg0MI6LGeKNpD2tbD9MTCSJLEnkON2cMGsPC0lEJHTAzjLQ+/Uo7Iml95iTC7yP8/4LI+8TzMgTx48Ay9AbicSxAyvwiktP49ICewuH1+Hx/IRx6S4eh4nAuICvrK7hcS1Ji+MJbaPb8g67gG8f9VqMvjwRUspyzGJT9RfLdxqdS9NQV3svBrgdp8L95XFg/zohvi6jkOycyKu8GyjPPTmlR6AhXs6XjCQ563kFD7Q3ln8godo5iSsEyxuQsTSmS0hZq4N22F9jSvRpVnMiIb7OolLoqOK3wAmYWnGF4lFhPraFWXmt+jffb3yciIse2b+DDrZx
BrkGcVXoWC4sWYkuhOmlbuIvn6t/mjeZ1hLTwCQxFklGFxiBXIRcNXsx5ZadhT+GocHvYw+M1a3i+4QP8sfCxeY9nlLpyuaJiPlcMnYdLST4C0Rn289DhdSyv2YQnGuqXUezM4trK2dxQNYdMuzNpRlqfHaUdkbQ+MxJCgP9vCN9vSVzVtDe3JOt2yPxyUgusz3cvnp476XMGEjGysv+T7Oz/lxSjzfc4NZ0/os+p0ZcMaJRm30ZF3h1JLeJ13lfZ1PpjgAS1SOKM4dmXMbX4h0mVlj/iXcNrjT/rzZEwYsSzAEZlL2Fp2XdRDOqBnKwD3m08XPMrVBFL0HAuzhiXM4trh/4nDtn84rffu5/fHfgdES1iqqndhJwJfH3k18lQEp966tMBbw0/2fVXArFQQoaExLicSn48/t/IsutXLz1ZB72N/Ofm++iJBo45BkaM0dmD+c30Wylwmq8pcsTbxhfXPkxb2IuWYMmQkajMLuLeeTdSmpG+bn9elcz6nU5PTuuTrWNOCCQuJNabdOf7NfjvNY3w+f6Fp+cnxCMg5hg+7+/wen9lmtHme5Kazh8Qj0wkYsQXkxbv/Rzt+plpRr1vJRtbf4hANVEQLc6o8T7H1rafY/Z+pMa3nlcbfoImYiYY8TkPet/hjaa7ECYrbh727eKB6l8QE1ETDkKcsc+ziUdr70EV5grBHfYd5p799xDWwqY76+717OV3B35HVIuasq/2N/DDHX8kEAuaYggE+zw1/HjXnwipEVOMo/42vrrxH3RH/AmdkD7GIV8T39j0D/wxc43lGgPd3LTmftrDvoROCMSThGt8Hdy05v4Ba7SX1mdbaUckrU+sRHjdcU5Iko/13YOIbEhoF4lswdPzXykxfN7fEwq9mdAuENlHTef3UyAIWn0P0OF/IfFzidaxseWHpFJ3pNb7PDXeFSYY7bzacGcKtVIFh72r2db1TELLQMzLQzW/RHDqaRFjgmC/dyvvtCZmhNUwvz3wWzShJcXQ0DjoO8gz9YkZUS3Knbv+SlSLJVW6XUPjsK+efx5JzFCFxre33E9IjSTFUIVGrb+N/939bEJbIQRf/+BxPNGgKUfneEZTsJvvb07MSCuttCOS1idWwn8fqfd1UXofbyyf715S/zNQ8Hn/mtCq1fcgqVWUBZBp9vwjoVV1z1PEowOp7LRKHOh+MGFUZE/Py2giliIDtnU+iZYgYrGp6x0iWigpB+F4vd/+MrEEEYt1Hevwq37TkZDjJRC80/YOIdU4mrC2fTudkZ6UGW+2fIAn6je0W9e+n4ZgZ1IOQp80BG+37KQ1ZNz4cFNHLQc8LSn1qFGF4L3WQ9T42pN+bFqfL6UdkbQ+kRKxeoi8S+p9XVQIv4NQG/Ut1DZCwZcsMSKRdUSjB3UtYpqHdt+zFhgagegu/OEd+s9CC1HtedZCfxqBP3qU9tBmfYaIsbPreUv9afyxDmp9H+iOa0JjbfurKTshAEHVz66e9brjQghWtqy01CgvokVY17HO0OalxtUpl1KH+HvxZov+6wB4+ujalCq9Hq/n642jho9Vb7DEUCSJ5TWbUn58Wp8PpR2RtD6REsFnsP71lCCoH+IOBp7GSlO2uJT/3969B0dR7XkA/57uyTwzk5BJJg/IE7jJKq8QIEiweMUgAhrh4rpGl9dCSQWEVQsirItVgggLWmu0AKUWULBARUUUdSPyiBQskQglYAhciEICJDzyIJNkMjNn/wiJ5F47M5nusZPh96nKH5nT6d/v5DH9y+k+58Bu/1Cy9Wb9HnB4d7+/oxhV9TskWyvq98PJO/7v2RMGERdrpb9Xv90+hgZXtcwYAk5V75Fsv1h/BtXNVbJjHL3xv5LtZfYyVDRWyCp2GBj2V+6XbL9sv4aSujJZu+lycOy9UijZXtlYg/+7cc6n0ZBWbnB8ekm62Kl22PHdlV9kxXBxjl2/FsPplreJIwlsVIiQrsn1G+QXCQzceUmy1eksg/w/ATdczt8kW5u
cv4JB3qJTgAuNzWWSrbebf5Mdg8OF245fJdtrmstlbJDXGsONasdlyfYbTVdlnb81xnXHFcn2ysZKBWJwVDVJF0xXG5W5FVHZeFPydlm5/YYiMaqb69Ho+uNbWVfsNV49nOpJvdOBmuYG2echgYsKEdI1cTsg4zZACzfQwUgB5/WQX+zwthVb/4iL18v677vtPB3EcHJl3uSbW1e3/aM2d6Os2xm/n0c6V4e7UXax03oeKU3uJtnn93SeBpcyMTg4HBLPuzR4OavGG3bnH+drVzBGvVO5c5HAQ4UI6ZqYCfJ/PYU755EIIQTD94dI284CQZCeIy+yYEUu4GIHMTTM+3UtOhLEpNeVCBL0ihRUWkF6fQytoJf1DEornaDvoE2ZRbY6Oo9BVCaGAAFaicXNjBrlFgszSZzLqFFu2fVgBfMlgYcKEdIlMU2iAmfhHZ5Ho+kN+aMuAjSaJMlWfVASOLxbd0KaCENQH8lWszYBHE5ZERhEmLXS/QjVxsouEhhE9NDFS7ZH6GNknb8lhoAIXS/J9mhDtAIxGKL0UZLtMQab7BgAEGWwSi6Y18toVaTAtWrN0EnsJRNjCIVG5sOwAGDW6BCiVaZYJoGJChHSNRmmKnSeKdJNhinwfXpwKxeMpqckW8OMEyHIHrFwISL4ScnWaNMYBAlmWRE4XEiy/FWyPc40FCaNVXaMfqGTJdsTjP+EMG0k5IxScbgx3Jol2R5njEOcMU7WRZyDY4xtjGR7jCEC91t6y5o1wwBMjJbeHThcZ8GI8GRZM1oEMEyNGy7ZHqI1YHzM/fJiMIZpCUNkz+4hgY1+O0iXxMQoQDcGctYRgS4TTIyUPkK0wmB4VFYMne5BaDoYdRGFYISb/iojhgCTdhCM2vukYzAtEi1TZTxfwWAOSkSYfqB0FkxE/9DHZF3AgzU2xJmGSGfBGDLCH/H5/ABgFM24P2RYh8dk2jJl3WbSC3qkh6V3eMykmFGyZs1omAZjIzvux9S4B2TNaAGAR3t2HOPJxKGyYnDO8URCms9fT+4NVIiQLouZZsH3WyfuO1/fMVPwHPj+wKoLpuB5Ho+ymaffuYD7chF3I8ryrMejkizTwJjGxxgcfwmd4XHfnPtCH4HIdD4XI4Ot/+xx35zBPUbDIBp9LqoejJgM0cO+OenWdFg0Fgg+xGBgGBc5DjoPz4EMtw6ATRfmc4zxUSMQrOl4v5l0a18kmGw+jTYIYHg4JhVWXccjaalhsegXGgPRh40RBTCMjUpBrCms019L7i1UiJAui2mHgplf8u1rzcvAtIM9HqfVDkBIqPd7xtzNbMmDXj/a43GGoD5ItL4BXwqeKMs8hBkf9nicMSgG6ZGt/ejcRSPJ8gTizNK3TNpiaMIwqdcK/L4bsbcYUixZ6B+a7fFIg2jCjISlEJjQqWKEgeF+yzCMivAcQyto8ULyC9AImk4VCgwM91nuQ3aM5xgaQcQr/eZBL2o7FUMAQ4olEbOSPMcQmIA3Bs9AsEbfqWJEAEMfczReTPEcgzGG/GFPIkxr6lQxIjKG+GArVg72HIMQKkRIl8ZMM8DMrTvWerq9IQJgYOb/ADP9q9cxTKanEBL6X2j5c/AmBmC2LEVw8AKvY1hNjyLJ+t93vt67GDGW59ArZLHXMaJNozA8ah0EaMA8xGht7xOSg4Hhi73eRbiXaTAm9XoNGqb1IkbL28t9IY9gbLT3OxXHm5IxO/E/oRU8X8RbYwwIGYF/ift3CF5ekOOMcViSvAQG0eBFjJa8B4UOwoI+C6ARvFuzJdYYhdcHLoIlyOTxeZHWGANDk/HK/fMQJDFb5u9FG8KwYdizCNeZvYjR8tE/NB75Q/4NBi9nxdgMFmx7cDZiDKFex0i2RGHryBkwB0nPYCKkFePebrupgs5sI0wCG3cUg9dvBpoK7rzC0HLbRkDbSINufEvhok31KYbDcRK3b7+LxoY9d53bjd//+3dDp89CcPBc6HQP+BTD7jiDq7X/g5v23Xd
muohoWf699WLoRoh+NCLNsxBieNCnGLWOv+F89Yf47faXcPNmMIjgcLdd7DhcsBnS0TskB9Em32JUOy7j5M1d+KXmGzi5405B0Po9a4kRYxiAgWFTkBT8oNdFyN1uOipx+PpXKLq5Dw53I4R2/WBww4U441+QEf4IBoRk+BTjluMWCq4V4EDVATS4GiBChPuu75UbbiQYE5AZmYkHrA94Xei0j1GLLysO4esrhahz2iEyAW7OwcDAWMsGcfHGGEzuOQrjbOnQCJ1/nqjaUY9dl45g16UjuOWoh+ZODKBlVMPF3Yg3RWBa7AhM7jUUWi+LqbvVNjdg58Ufsf3iMVQ11rWLITAGJ3cj1tgDOUnpmJaQBr3EbBxyb+jM9ZsKEdKtcFcl0PAZuOvXlsXKmAlMTAAMj4OJEYrEcLmuo8H+MZzO83Dz2xCYCaImHkbjNIii/CmmAOB03cL1+k/R0HwOLl4HkRmh1fREuGkqdJpYRWI4XHW4dPsr1DSdQ7P7NkRBD6MmEnHBExGslZ5G27kYdpTW7kNV0zk0uW5Dw3QwBYUj2ZKJsA6m6nYqhrsRJ6sP47L9PBpc9dAIQTBremBQ6EhEGxIUidHsbkbRzSKcu30OdpcdGqZBSFAIhoUNQ4JJqRhOHLl+EqdqzuO20w6NICIkKBgZ4alINif4VEj9PafbhcKqM/jx5t9Q19wAkQkICTJidGQ/DAxVJoaLu3Ho2jkcrjyPGkcDBMYQqjViTFQy0sMTFYlBuj8qRAghhBCims5cv+kZEUIIIYSohgoRQgghhKiGChFCCCGEqIYKEUIIIYSohgoRQgghhKim85PJ/0StE3pqa2tVzoQQQggh3mq9bnszMbdLFyJ1dXUAgNhYZdZVIIQQQsifp66uDiEhIR0e06XXEXG73aioqIDZbFZ8kZza2lrExsbi0qVLAblGCfWv+wv0PgZ6/4DA7yP1r/vzVx8556irq0NMTAwEoeOnQLr0iIggCOjVq5dfY1gsloD9BQOof4Eg0PsY6P0DAr+P1L/uzx999DQS0ooeViWEEEKIaqgQIYQQQohq7tlCRKfTYfny5dDpdGqn4hfUv+4v0PsY6P0DAr+P1L/uryv0sUs/rEoIIYSQwHbPjogQQgghRH1UiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiAAoLS3FY489hvDwcFgsFowcORL79+9XOy1FffXVV0hPT4fBYECPHj2QnZ2tdkp+0dTUhEGDBoExhhMnTqidjiLKysowe/ZsJCYmwmAwoHfv3li+fDkcDofaqcnyzjvvICEhAXq9Hunp6Th27JjaKSli1apVGDp0KMxmM2w2G7Kzs3H27Fm10/Kb119/HYwxLFq0SO1UFFVeXo6nn34aVqsVBoMB/fv3x48//qh2WopwuVx4+eWX272nvPrqq17tC+MPVIgAmDRpEpxOJ77//nscP34cAwcOxKRJk3D16lW1U1PErl278Mwzz2DmzJk4efIkDh8+jKeeekrttPxi8eLFiImJUTsNRZWUlMDtdmPjxo04ffo03nzzTWzYsAFLly5VOzWf7dy5E88//zyWL1+O4uJiDBw4EOPHj0dlZaXaqcl28OBB5Obm4ujRoygoKEBzczOysrJQX1+vdmqKKyoqwsaNGzFgwAC1U1HUrVu3kJGRgaCgIHz99dc4c+YM1q1bhx49eqidmiJWr16N9evX4+2338Yvv/yC1atXY82aNcjPz1cnIX6Pq6qq4gD4oUOH2l6rra3lAHhBQYGKmSmjubmZ9+zZk2/atEntVPxu7969PCUlhZ8+fZoD4D/99JPaKfnNmjVreGJiotpp+GzYsGE8Nze37XOXy8VjYmL4qlWrVMzKPyorKzkAfvDgQbVTUVRdXR3v27cvLygo4KNGjeILFy5UOyXFLFmyhI8cOVLtNPxm4sSJfNasWe1emzJlCs/JyVEln3t+RMRqtSI5ORnvv/8+6uvr4XQ6sXHjRthsNqSlpamdnmz
FxcUoLy+HIAhITU1FdHQ0JkyYgFOnTqmdmqKuXbuGOXPm4IMPPoDRaFQ7Hb+rqalBWFiY2mn4xOFw4Pjx48jMzGx7TRAEZGZm4siRIypm5h81NTUA0G1/XlJyc3MxceLEdj/HQPHFF19gyJAhmDZtGmw2G1JTU/Hee++pnZZiRowYgX379qG0tBQAcPLkSfzwww+YMGGCKvl06U3v/gyMMXz33XfIzs6G2WyGIAiw2Wz45ptvAmIY7sKFCwCAV155BW+88QYSEhKwbt06jB49GqWlpQHx5sg5x4wZM/Dss89iyJAhKCsrUzslvzp//jzy8/Oxdu1atVPxyfXr1+FyuRAZGdnu9cjISJSUlKiUlX+43W4sWrQIGRkZ6Nevn9rpKGbHjh0oLi5GUVGR2qn4xYULF7B+/Xo8//zzWLp0KYqKivDcc89Bq9Vi+vTpaqcnW15eHmpra5GSkgJRFOFyubBy5Urk5OSokk/Ajojk5eWBMdbhR0lJCTjnyM3Nhc1mQ2FhIY4dO4bs7GxMnjwZV65cUbsbkrztn9vtBgAsW7YMU6dORVpaGjZv3gzGGD7++GOVe9Exb/uYn5+Puro6vPTSS2qn3Cne9u9u5eXlePjhhzFt2jTMmTNHpcyJt3Jzc3Hq1Cns2LFD7VQUc+nSJSxcuBDbt2+HXq9XOx2/cLvdGDx4MF577TWkpqZi7ty5mDNnDjZs2KB2aor46KOPsH37dnz44YcoLi7G1q1bsXbtWmzdulWVfAJ2ifeqqircuHGjw2OSkpJQWFiIrKws3Lp1q90WyH379sXs2bORl5fn71R94m3/Dh8+jLFjx6KwsBAjR45sa0tPT0dmZiZWrlzp71R95m0fn3jiCezZsweMsbbXXS4XRFFETk6Oan9cnnjbP61WCwCoqKjA6NGjMXz4cGzZsgWC0D3/j3A4HDAajfjkk0/azd6aPn06qqursXv3bvWSU9D8+fOxe/duHDp0CImJiWqno5jPP/8cjz/+OERRbHvN5XKBMQZBENDU1NSurTuKj4/HQw89hE2bNrW9tn79eqxYsQLl5eUqZqaM2NhY5OXlITc3t+21FStWYNu2baqMSgbsrZmIiAhERER4PM5utwPAP7ypC4LQNprQFXnbv7S0NOh0Opw9e7atEGlubkZZWRni4+P9naYs3vbxrbfewooVK9o+r6iowPjx47Fz506kp6f7M0VZvO0f0DISMmbMmLYRre5ahACAVqtFWloa9u3b11aIuN1u7Nu3D/Pnz1c3OQVwzrFgwQJ89tlnOHDgQEAVIQAwbtw4/Pzzz+1emzlzJlJSUrBkyZJuX4QAQEZGxj9MuS4tLe3y75nestvt//AeIoqietc8VR6R7UKqqqq41WrlU6ZM4SdOnOBnz57lL774Ig8KCuInTpxQOz1FLFy4kPfs2ZN/++23vKSkhM+ePZvbbDZ+8+ZNtVPzi4sXLwbUrJnLly/zPn368HHjxvHLly/zK1eutH10Vzt27OA6nY5v2bKFnzlzhs+dO5eHhobyq1evqp2abPPmzeMhISH8wIED7X5Wdrtd7dT8JtBmzRw7doxrNBq+cuVKfu7cOb59+3ZuNBr5tm3b1E5NEdOnT+c9e/bkX375Jb948SL/9NNPeXh4OF+8eLEq+dzzhQjnnBcVFfGsrCweFhbGzWYzHz58ON+7d6/aaSnG4XDwF154gdtsNm42m3lmZiY/deqU2mn5TaAVIps3b+YA/vCjO8vPz+dxcXFcq9XyYcOG8aNHj6qdkiKkflabN29WOzW/CbRChHPO9+zZw/v168d1Oh1PSUnh7777rtopKaa2tpYvXLiQx8XFcb1ez5OSkviyZct4U1OTKvkE7DMihBBCCOn6uu+NZkIIIYR0e1SIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgg
hRDX/D5jrWwVsUUF3AAAAAElFTkSuQmCC", "text/plain": [ "<Figure size 640x480 with 1 Axes>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plotCompPinPow(fuelPin)" ] }, { "cell_type": "markdown", "id": "8b58b2a1", "metadata": {}, "source": [ "And one more 60 degree CCW rotation for good measure." ] }, { "cell_type": "code", "execution_count": 22, "id": "36fa1751", "metadata": {}, "outputs": [], "source": [ "fuelBlock.rotate(math.pi / 3)" ] }, { "cell_type": "code", "execution_count": 23, "id": "ca93a8a3", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "<matplotlib.collections.PathCollection at 0x1baf95f79d0>" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnXV4FdfWxt8958Q9IQkhSpBAIEpw9yAJ7lBq1O3WXW791nvbr06BFnd3d+IQCAQJJMTdjs/s749DuEhGzkzaUjq/58l9epl15j0jZ/aatddei1BKKVRUVFRUVFRU/gKYv/oLqKioqKioqPxzUR0RFRUVFRUVlb8M1RFRUVFRUVFR+ctQHREVFRUVFRWVvwzVEVFRUVFRUVH5y1AdERUVFRUVFZW/DNURUVFRUVFRUfnLUB0RFRUVFRUVlb8M7V/9BYTgOA5FRUVwc3MDIeSv/joqKioqKioqEqCUor6+Hm3atAHDCMc87mhHpKioCMHBwX/111BRUVFRUVGRQUFBAYKCggRt7mhHxM3NDYD1QNzd3f/ib6OioqKioqIihbq6OgQHB18fx4W4ox2RpukYd3d31RFRUVFRUVH5myElrUJNVlVRUVFRUVH5y1AdERUVFRUVFZW/DNURUVFRUVFRUfnLUB0RFRUVFRUVlb+MOzpZVUXlVoovlWLb/D0ovFAMfb0BTm6OCGwfgMQHhiCgrX+LaJTlV2Dbr/tQkFsEXb0eTi6OCAj3x8i5AxHUMaBFNCqKqrB90UHkny1EY50ejs4O8A9theGz+iEsUnipm1Sqy+qwY8lh5J2+isZaHRycHeAb6IXhM/ogvGvLLIuvrWzAzuVHcfFUARrq9HBwtIdPaw8MmdwDEXFhLaJRX6PD7tUncC4zH411etg7aOHt54FB47uhc7ewFqkx1FhvwN51aTiTloeGOj3s7LXwbOWGgWNjEdWzXYtoGHQm7N2YgewUq4ZGq4Gnjwv6J0Yjpnc70VoLUjAazDiw7RSyjl9CXa0OGg0DDy8X9BkWiYR+HVpEw2Sy4PCuM0g7egH1tXowDIGbpzN6D4xAj/4dodFqFGuo/LMglFL6V38JPurq6uDh4YHa2lp11cw/nPRdJ7Hy0w1I3ZkFhmFAKQXlKAhDQAgBx3FIGBmLqc8nI25IlCyNkwdzsPLzTTixJROEITdpMAwBa+EQO7gLJv9rDHokxsrSyDlxAau+2orDG9PQNLRxt2h07dMRk55MRJ+kbrI0zm
ddwar/bsfB9WmglII0aRACRmPViOjWFhMfHY4BExJkDbKXc4qw6v92YP+6NLAWDoTcrtEuKhjj5w3GkMk9ZA2AVy+WYtX3e7BnTSosZgsIQ8CxFIQAjIYBa+EQGhGA8Q8MxPCpPaHR2K5RnF+J1T/txc5VKTAZzWAIuXYc/9MICvfFuPsGIHFaL2jtbB9ky4trsHr+AWxfmQKDzgRGYz0OEEBzTaN1sDfG3dMXo2f0gr297e+HleV1WLPwMLauTIGuwQhGw4BjOasGw4BlOfi29kDyrN5ImtETjk72NmvUVDVi7e9HsHlVqtWR0lj3C+D6f3u3ckXStJ4YN7MXnF0cbNZQuXuwZfxWHRGVOxpKKZZ8sAYL3lj2v4crD03bH/hgJqa9NN6mAXbN11vx/Qu/gWGkacx8ZTzmvj3FJo0tv+7D108vuO5w8GtYB6oJj4/AQx/OsGkQ37vqOD55dD4IgbAGYx1wE+/pjyc/nWXTW+yRrZn48KFfQDl6fSBqDsIQUI5i4IQEPPvlHNg72EnWSNuXg3fn/QKzmRW8HoQAlAK9RnTFS9/MtWmAPXX8It564GeYDGbh47h2iWP7dsTr390LZ1dHyRrnsgrw+oO/QNdgFDwOEIAAiIwPw1vfz4Wbh7NkjUtni/HaQwtQW6MT1oB1KWW7zgF47/u58PRxlaxRkFeOVx5ZiKryenCc8JBBGILgsFb44Lu5aOWvPrf/qdgyfqs5Iip3NEs/XIsFbywDANGHbNP2X15dguX/WS9ZY9232/H9878BVLrGkg/XYcFbKyVrbP/tAL568lfr4C3gIFg1rA/6tf+3Az+8tESyxoF1qfj4oZ/BsZy4xrXBZPtvh/DVv36D1PeRE7uy8d4DP8FiYQUHbwCg1zQOrEvDp08sAMcJ2zdx8uh5vHnvjzCZLKLXo+lrn9h1Gu8/PB+shZWkcTbjCl675wcY9Cbx46DWv6yj5/HW/T/DZLRI0sg7W4yX7vkBunqD6HHgmkZO5hW8dt/PMOhNkjSuXq7A83N/Qm1No7gGrI79pXMlePG+X9DYYJCkUVpUg+fu+wVVFQ2iTghgve6FVyrx/AO/oK5GJ0lD5Z+N6oio3LFk7DmFX19fKuuzv7yyGFn7T4va5Zy4gP97dqEsjaUfrcPxLRmidnnZBfjyiV9tF6DAuu92Yt/KY6KmRXll+M/DPwM2zrJQSrFj8WFsW3RQ1LaypAbvP/gTQK3fzRaNgxszsO7HvaK29dWNePv+n65Pi0mF4yhS9+Vg2X93iNoadEa8ef9PYC2sbRosxem0PCz8dIuorclkwRsPzofZZJE0eN+ocfFMEX54f6OoLctyeOORhTDozdedV2kaHK7mlePrt9aJ2lJK8fbTi9EgxZm65buVFdfgk9dWS/6Myj8X1RFRuWNZ9flGMDLm/QFAo2Ww6nPxh/nar7fKyi0ArNM0Kz/fJGq34YddkJvrSBiClV+JD3yb5++3DnhyJloJsPK/20WjItt+PwyLySI5enIrq7/fJRp92LHiOAyNJpschOtQYN0vB0QjFnvWpaO+RmeTg3BdgqPYvPgw9I1GQbsjO7JRWVZnk4PQBMdR7FqThrrqRkG71IO5KC6osslBuFHjwI5slJfUCtqdSruMvPOl8jRYipTD53H1SoXNn1X5Z6E6Iip3JCWXy3Bia4asByBgzY84vikdZfnlvDbVpbU4sPq46DQGHxzL4eSBHOTnFPLaNNbqsHPJYdkalKO4kHkFuel5vDZGvQlbFx2Qfa5AgaJLZTh5OJfXxGJmsWnBAVmDdxNVJbVI2Z3Nu53jOGz49YBsRwcAGmp1OLwlk3c7pRTrFxyQ7RgCgFFvxt71aYI26xcdBsPIF2FZDjvWpArabFhyVLajDliDZ9tWpQjabFx+QrajDljznTavFNZQUVEdEZU7ku2/7lW81JAwBNt/3ce7fdfig/9LMpCJRstg2wJ+jX2rjsNikpZTIKSxdcF+3u1HNmdAVy9tvl
9QY+EB3u2pe0+jpqJekQajYbBl0SHe7aeOXURZYbUyDYZg8++HebefP3UV+edLFV12QoBNvx/h3X71UjnOZuYrctoopdi8hH9KrrykFmlHLsh3PmGNimxafoJ3e32tDod3nxGNYglqsBTb1qRJzt1R+WeiOiIqdyRFF0sU74MAKM4rFdAoBVHwtgdY31yLL5Xxa1wqhUarUMPCCZ6Poktlims3sBYOVy/waxTnlSt6wwesEaTCi/zXo7gFQvgcR1GUxx8FK85XrkEpUJJfybu9SGCbLZQVVvNGh0quVsmbhruF2qpGGA3mZreVFtcqcqaa0OtMqK/TK96Pyt2L6oio3JHoG2xLjmsOluOgq+d/AOobDPJyEW6EAo11/CsD9A2Glhgv0FArcByNRkVTDU0IRVX0jUaQFiiGJZRboW80KnZ2AGvhMN5tIrkdUjEKrGox6FpGg+Mob76LXuAYbYXvmrTUcQCArrHlvq/K3YfqiKjckTi7OSma/wasRZac3ZwENBxBFA58hAAu7vw1H5zdnGxdyNIsrh78x+Hk4qAor6IJZzf++hhOLg6gEpffCmoI1OBwcnFokTdwJ4FCWo4tVGTLQaBeiaNzy2gwGgJ7h+aLm7VksTBn1+b35dRCxwG07PdVuftQHRGVO5Kgjm0U74NSILADf0n2oI5tFEddCMMgqENrfo0OrWExK5sf12gYBEfwn4+g9q1lJ8PeqBHaiV8jsJ2/YieB0TAI7ihwrsL9FO0fsOaIBLfnL/XfEhqEEAS29eXXaNtKsQYIEBDsw1swLyDEu0XKznv7uvEWmvNr46koUbUJF1dHuAk40ioqqiOickcy8r7Bit/yKaUYed9g3u1DZ/ZV/KDlWA6J9/NrDJjYAw7OtpfTvhGW5TDqvkG823uPiYWrDZU4+TRGzx3Iu73b4Eh4+3so0uBYDmPu6c+7vUuPcASEtlI0zcRxFGPm9OXd3i4yEO0iAxVFwiilGCug0Sa0Fbp2b6tomokAGDurN+92H1939BjQUdmqGYZg7PSevNvd3J0wYEQXZatmGILRkxNaxKFRuXtR7w6VOxLfIB/0GtsNjMxET42WQd/xPdCqjTevjUcrdwyc0lt2MimjYRA/tCsC2/O/5Tu7OWHErP6yBwyGIYhICEe7qBBeG3sHO4y+d4BsDUIIgjsGILJnO14bjYbB2PsGKBrAfQO9ED+os+D3GHf/ANn7BwB3bxf0HhktaJN8b39FuUFOLg4YmBQnrDG7j6IIktZOg2EThHsNJc3opSiiRwAkTkoQtBk7tYeyVTOUYvRkYQ0VFdURUbljmfJcsvw6IiyHyc8midpNfHqU7AGDYzlMfnasqF3yI8OsYXQZYzjHUUz91xhRuzH3D4JGy8iKJlBKMfXpRNFQf+KsvnBwtJPtjEx+bLjokuyhk7rDxd1JdjRh4rzBoo3pBibFwquVGxiN7RqEAMlz+4n2tOk9rAv8Aj1lOYeEEIya1hOu7sLTGfF92yMk3FeWBsMQDEmKhbevm6BdZGwIOnYJlK3RZ3BnBATxvwyoqACqI6JyBxPVvzMe/exeWZ99/Mv70aVPhKhdh7i2eOa7B2Vp3PfvqUgYLvz2DQAhEW3w4k8PyVpuOe25Meg3TvyN0j/YB6/9+ggAYlvuAAGSHhiEYdP5pwGa8PJ1x1uLHgUhxCZnhBBg6NSeSLqff+qnCVcPZ/x74cNgNIxNzghhCPokRmPKY0NFbR0c7fHuwodgZ6e1SYNhCOL6dcTsZxJFbbV2Grz38wNwdLKzyeFhGILOcSF44CVx55NhGPz7+7lwdXO0yVFgNARtIwLwxOvJoraEELz1xQx4ernYrBEY6oPn3pkg+TMq/1xUR0TljmbiM2Pw6Bf3WtuZi0yhaLQMQIDHv7of458cJVlj1H2D8cx3D4IwRJoGgAfen47pL42TrDFoSi+8NP8RaDSMqEbTA3/mS+Nw39tTJGv0SozBG4sehcZOusaER4bhkY9mSHZeYv
tF4N+/PwZ7e63ovH/TADxyZl/86/PZkjU6d2uLD5Y8Bgcne9HBr8mRGJAUh5e+mSu5CF67yEB8vPQxOEsYxJucrp5Du+CNH+4Xjbg0EdzOD58ueRTuXi6iDk/TuYnt3R7v/vwA7O2bXy1zK60DvfDZbw/Bx9dNgob1r3NMCD765X44Ssxd8vFzx+cLH4R/gKe443ZNI7xja3zyy/1wEViJpaLSBKEtse7vD8KWNsIqdzenj5zD6i824fA6ayVIQgg4jgPDMNZlpYSg/8SemPjMGET2Fo+ENEdu2iWs/moLDqw6fm3fBBxHwTDE2oGVo+idFI+JT41G9AD+XAchLp7Kx9pvtmPvyqNgLdbvz3EcGEJAYS1z3n1ENCY8PhLdhnSVpXHlbBHWfb8Lu5YfhcVkAaPRXD8eeq3DcNygzhj/yDD0HCEe0WmOwktlWP/TXuxYdhRGgwkaDQOOo9ZoCaxTY117tcf4eYPRZ3SsrBUepQWVWPfLfmxbehQGnQka7Q0axFqErVN8GMbdPxADk+NkaVSU1GD9goPYuuQoGusNN2tcO44OUUFIntsfg8d3k5V0WV1Rjw2/HcHmJcdQX6u7QcN6H7MWDmEdW2PcPX0xbEI3yY7OjdRWN2Lj0mPYtPQ4aqoaodEy1/NgCGPVCG7ri+RZvTByUoJkR+dGGur02LQyBRuXHUdlef3NGoSAZTkEBHlh3IxeGDUpAQ6Oza/GUflnYMv4rToiKncEJqMZhBDYiTwgK4ursXPhPhReKIGuXg9nNycEdgjAiLkD4d3aS1QDAO9yxSaqy2qx6/eDKDhXBF2dHk5uTgho64fhc/rDN8hH8LNmkwWUo7AXeQjXVTZg17LDuJJTCF2dHo4uDvAP9sGwWf3QOpR/aShg7fvCsZyoRkOtDntWHEPe6atorNPDwckevoHeGDqtFwLb8S9xbdJgWQ72DlrBAV7XYMC+NSm4cKoAjbU62DvawyfAA0Mm9UBIR/6l0wDAWliYzaw170RAw6AzYv+GdORm5aOhRg87Ry28/dwxeHw3tO0c2CIaJqMZBzdn4UxaHhrq9LCz08LL1w0DxsaiQ1SwsAbLwWyyiGqYTRYc3pGNUyl5aKjVwc5OCw9vF/QbFYVOMSGCn2VZDiaTBY4iGhYzi2N7c5B5/BLqa/XQaBl4eDqj7/Au6BIfKvhZjqMwGs2iGizLIeVQLtKOXEB9rR6EIXD3cEbvwZ0Q071ti2io/P1RHRGVOx5KKTIPnMXGn/YgZVc2zNecBAcnO/QaFYekBwejS6/2ih5WlFJkH7uAjfP34/iOkzBdK2Vt72iHhCFdkHT/QMT0i1CscTb9MjYtPIjDW09er7hpZ69F3IAIJN87AHEDIhT3zTl/qgCbfjuEA5syr1cO1dppENO7PZLm9kfCoM6Kl0jmnS3GpsVHsG9TFnQN1iqrWq0Gkd1CkTynL3oNjVRcSj7/Yhk2LzuOPRsz0FBn1dBoGUREBSN5dm/0GRop6oyKUXilEptXnsCuDRmou1aRVqNh0K5zAJKn98KAEV1EnVExSoqqsWVNGravz0DNtS65DEPQtoM/xk3tgYEju8LRUdmy7fKyOmxZl4at6zNQXdUASq0aIWGtkDylB4aOjIKTwqXhlRX12LopE5vXZ6Cyov66RmCQN5IndsPwxCi4CBShk0JNjQ5bt2Rh44YMlJXVglLr9E1AgBeSx8VjZGIU3EUSc1X+fqiOiModTequbHz74mJr/xINc9vKGI2GActyCIkIwFOfz0HXPh1t1jh5OBf/fXEJrl4ovb6/G2nSbdPWF49/PAPxA22faslJy8PXLy3H5bNFghp+QV549N+T0WtElM0aF08X4suXluFC9lVBDZ/WHpj32jjRZaXNkX+hFF+8shJnM/ObvR5NU1SerVxx/wujMXyi7csxi/Ir8eUba3AqJU9Qw93TGXOeGoax03vZrFFeUosv3l6L9KMXwWgIOPbmRxthCChH4eLmiJkPDcLEOX
1sdkKrKurx5fsbcfxQLhhCbltx1aTh5GyPqXP7Yfp9/Wx2Qutqdfjyo804vC/n2hTkLRrEWqzPwdEOE6f3xD3zBtnshDY2GPD1Z9uwb/cZAOBdOWZvr0XyxG544JHB0NrohOr1Jnzz353YuSMbHEebrQtECKDRaDBmTAweeWyorCkjlTsT1RFRuWPZsfgQvnhqIUAhWrCMMAQMQ/DSTw9hwHjpg9/+dan4z2O/glIqWi+iaVntv76cg+HTxFeONHFs5ym8/9B8sCwnXpPi2lj3+HtTMHYuf0GvW8k4nIu3H/gZFpNF8hLjB15JwuSHh0jWOJ12GW888AuMBrPkpdIznxiGOU+PkKxx/nQhXn1gPnSNRskaE+b2xbwXR0t2FC5fKMXL835Ffa1ect2LUZO64cnXkyU7CoUFlXjx4YWoqmyQfByDE6PwwtvjJUeSykpq8cLji1BaUnObI8VH7/4d8foHU2AnMbekqrIBLzy9GFfzKyXdV4QAcQlt8e+PpsBBYiSprk6PF59fiosXyyRqEER2aYOPPp4G5xYsLa/y12HL+K2umlH50zi6JRNfPLkQlOft6FYoR8FaOHz04I/IPJAjSSNjfw7+89h8cFIcBOC6s/LF07/h+I5TkjTOpF7C+w/Nh8XCSiuMRa1/3762Evs3pEvSuJB9FW/f/xPMRulOCAD88uFGbF9xXJJt/oVSvPHALzDoTTbVa1nyzS6sW3BIkm3J1Sq8+uB86GxsYrh24WEs+2GfJNuK0jq88vAC1NXqbCq+tXV1GhZ8vUuSbU11I15+dBGqKuttOo6920/h/z7dKul+b6g34OWnfkdZSa1kJwQAjh06j8/f3yBJQ6834dXnluFqgTQnBLBGXzLTLuODt9dJOr8mkwWvvbpSshNi1aDIOVOEt99cA4tFWUsElb8fqiOi8qdgMpjx2WPzIaeYBqUUnzzyi+hDkLWw+OSJBZAb4/vsqYXXE1qFvsunT/9uHYxs1SHAl88vEexA26TxxQtLYTGzssrcf/PaStRdy1sQ4r9vrIHRYJJVZfTHDzeioqRW1O7/3t8IXYNRVtG4RV/vROHlClG7n7/YhtpqnU2DdxMrfj2ICzlFonYLv9uDivI62zUosGlVKrIz80VNlyw4iKKrVTZXMqWUYve2U0g5ekHUdtWy48i7VGbzcXAcxZGDuTi476yo7Yb16cg5U2jzNec4irS0y9i+XdoLgcrdg+qIqPwpHFiXioZanSwngXIUlcU1SN0p/IA6vuMUqsvqZA2slFLUVzfiyOZMQbvMw7kovlIhrxortbao37s2VdAsNysfl3KKZFd8ZS0cdq46IWhz5XwpslPzZA3egHW2aZtI5KW0sBopB87Jro7LaAi2LBc+jprKBhzYcVq2hkbDYPPKFEGbxgYDdm3Okn2uNBoGG1cIH4fRYMaWdemyrzmjIVgvchwWC4sNa1LlazAE61cLa3Acxdo1qbJfBgjBtc/fsRkDKn8AqiOi8qew4cfdivqUMBoGG37aI6zxyz5ZZbuvazAE63/ZK2izccFBZY3GCLB+/n7BB+3G3w4pWgFDKcWGBQfBcfyD8+alRxUdB8dRbFp8VLCz8NYVJ8AoWJHEsRTbVqXAcG0lUnNsW5emqG8My3LYtTETDXV6Xptdm7NgMlkUaRzck4Oqinpem/27z0AnEikTgmMpUo5eQHFRNa/N0cPnUVOtk6/BUWSfvIrLl8p5bdLTL6NEQqSMD0qBvEvlOCshSqVy96A6Iip/OBVF1cjNuKxowOBYDml7TkNXb2h2e0OtDlmHzsl+awWsD9qzqXmoKm3+QWo2WXB8xylFjcYoBfJzS1CU1/zDnFKKA5syFTUaA4CywmpcOlPIu33fxkxFxwEAtVWNOJN+mXf73s1Zihq/AYCu0Yis45d4t+/bckrx27PZZEHK4fP8GjuyZZXnvxGO43DswDne7ft3n1ZcV4NhCA7v5Z86ObAnR1FHYADQaAgO7OXP1zqw76
ziZeQaDYN9EqaAVO4eVEdE5Q+nRuBN0FbqqprfV21lQ4tp1PJ83/oaneKBtYkanu9r1JtgNsp/+75Jo6J5DY7j0FDLHwGwhdoq/lwUKXkqUuA7VwBQU6X8uhNCBI+jmuc82oKGYQSjEVUV9YodKoYhqK3hP46qqgbl9y8hqKnhP47qmkbFTjSlFLUCGip3H3+4I1JYWIjZs2fDx8cHTk5OiIqKQmqq8By5yt0F24JZ8HxTAS2qwbOvltRgeY9D2UP8RviOg0pYOi1ZQ2BqhlUQnbppPwLnXemgB1iny4RWarSEBojIcbTQdbcI7KdFNOifdBysunLmn8Qf6ohUV1ejb9++sLOzw9atW3HmzBl89tln8PISLsWtcnfh6uHccvvydOH59xbU4Pm+bi2pwbMvJ1eHFit9zXccGg0j2sZeugZ/RUwX15apB+EqUHXT1U15RU6Oo4L7cRM4RskaLBU8DvcWuLc4SuEq0GTOvYV+h4Lnyt1R8fQPIQRuCqu5qvy9+EMdkY8//hjBwcH49ddf0aNHD7Rt2xYjRoxAu3bt/khZlTuM1qGt4OWnsCAdAQLb+cPDx7XZzV6+7ggI84XSMdwnwBN+wc33k3F0dkB4ZKCipFvA6tAEtfNrdhvDMOiS0FZR0i0AODrbo10kfx+WqJ7hipJVAWuJ+Yho/j4sMb3aKc4XIAxBZHwo7/a4FtAAgKhuYfwaPcIVD66UUkTF8R9HbLcwxRocK6wRExei+PfBshyi40J4t0fHhCie/mFZDtEx/Boqdx9/qCOyYcMGJCQkYMqUKfDz80NcXBx++uknXnuj0Yi6urqb/lT+/mi0GiTNG6J4AB/38FDeaAEhBMkPDFK0f8IQJN8vXC47+f4BipJuGQ3B6Dn9BHudJN/bX1HSrUbDYMTUnnBy4Y9IJM/pqyhZVaNhMCgpFu5ezUeoACBpZm9F0xoaDYPeQzqjlb8Hr83Yqd0VaTAagtie4QgM5W9mOGZSgqLBlTAEEV0C0b4TfxPAUcnxsvcPWKeXgkN9EBXLP4CPGBVtc5n2W/H1c0f3nvwvkkOGRMLJSVkfHw8PJ/TrL6+Dtsrfkz/UEbl06RK+++47dOjQAdu3b8ejjz6Kp556CgsXLmzW/sMPP4SHh8f1v+Bg4a6XKn8fEuf0VzTlYO9gh6EiJdiHTesFrZ38XhUMQzBihrDGwHHdBAd4MSgHjJrVR9Cm94gouHvzD/BisCyHMbP7CtrE9+sA3zaeijTGihxHZFwIQtr5yX4LZ1kOSTOFr0d4RAA6RQXJdnI5lmLcDOG+Nm2CvBHfU35UhHIU46f3FLTx8XVD34GdZEfCKAUmTOsp+Btzc3fCkBFdZUeQCCEYPzlB8Dw4Odlj1OgY2cfBMARJyfGSy9Wr3B38oY4Ix3GIj4/HBx98gLi4ODz00EOYN28evv/++2btX3nlFdTW1l7/Kygo+CO/nsqfiLe/ByY/OVL252e9mAQXkQ6drh7OmPncaNkaU54YAU9f4SkkRyd7zH1prDwBAiTd1x/+Qd6CZlo7DR54OUmeBCEYNrk7Qtr7C9oxDIN5L8s7DsIQ9B3RVXBapum7PPjCKFkaDEMQ36c9YnqGi9re/8wIyBn2GA1BZGwwevQXb6o499EhYBhis1PFaBi0i2iNfkMjRW1nPzAAWq3GZg2NhkFQqA+GjooWtZ0xpw/sHbQ2O1WMhsDP3x2jJTRVnDK1J1xcHGzXYAg8PZ0xfkI3mz6n8vfnD3VEAgICEBl58w+wc+fOyM9vvtyxg4MD3N3db/pTuXu4940JGDixO2wdNUbdOwBTn5E2oE17OhEjZwlHA5pj8MTumPOStME/+b4BmDBvkG0CBOg1PAoPvTlBkvmIqT0x8ynpjeUA68Af06c9nvpgqiT7/qOi8cBLY2zTYAgiooPxwmczJNl3HxCBx95IvvZhaRoMQ9C2Y2u89tUsSVG06IS2ePbfE0EIJA/iDEMQGOKDd76eLakhXaeuQXj5/c
kghEiOvjAaAl8/d7z31SxJXWXbtvfHWx9NBcMwkgdxjYaBp5cLPvpqNpwkJCAHBnnj3Y+nQqO1TcPN1QkffTFTMBm2CT8/d3z00TTY20t3eDQaAicne3z8yXR4CUz3qdyd/KGOSN++fXHu3M1FfHJzcxEayp9QpXL3wjAMXvxxHiY+Ntz6/wVCxIyGASEEs15KwlOfz5E8rUMIwdOfzcKMZ0cDRFwDACY/PhzPf3uv5C6shBDMe3MC7ns56XqHYDGNMXP64fUf75fchRUA5jw7Co+8PQGMRnjwawq1D52YgH//+hDsbGilPvnBgXjmgynQ2mkEz3HTcfQd0RUf/fYwHByl5wGMnd4LL306DXZ21972eWSajqNbvw745LeH4GzDFNiwpFi8+eVMa+6NBI2obmH4YtFDcLNhJUn/oZF47+v/Dfh8p6tJo2PnQHy9aB68W7lJ1ujeuz3+8+2c6yuO+K570/UIDffFN78+CL/W/Hk0txITF4ovvr3n+ioavvu3aXqlTZAXvvn5PgQFC0fybqRT5zb477f3wNvbVVjj2r/7+rnj2+/mIjy8+SRulbsbQv/Aov4pKSno06cP3nnnHUydOhUnTpzAvHnz8OOPP2LWrFmin7eljbDK34vCi6XYsmA/ti48cFu1VFdPZ4y9fxBGzR0I/xD+JEIxSvMrsWXRQWxZdOC2Al7Obo4YNacfRs8dgDZhvrI1youqsXXxEWz+7RDqbimK5ejigJHTe2HMnH4IFpkqEaKqrA7blx/DxkWHUF1+c7E1e0c7jJjSA2Nm90VYBH8ypBg1lQ3YsSoFG38/clszOzt7DYaMi8fYWb3RvkuQbI36Gh12rU/H+t+PoLSw5qZtWi2DgWNikDSjFzpGBcnOJ2qsN2D3pkysW3IMRfmVN21jNAz6D++CpGk90SUuRLaGXmfEnq2nsG75ceTfUu6cMAR9B3VC0tQeiOkWJlvDYDBj385srFtxApfOl96sQYCefTsieUp3xHeXn7tiMlpwYF8O1q1Kwbmc4tu2J/QIx7hJCeiuYGWS2czi0MFzWLs2Faezb6/0GxsXigkTuqF3nw4tsvpJ5c7BlvH7D3VEAGDTpk145ZVXcP78ebRt2xbPPvss5s2bJ+mzqiPy94JSiovZV1GcXwmDzghnV0cEtfNHaMfWvJ8x6k04l56H+upGEELg7uWCjt3aCq4qyTtbhMLLFdA3GuHk4oDAsFZo26kNr73JaEZuxmXUVTWCAlaN2FA4CISyr5wvRUFeOfSNRjg62yMg2AftOgfwDiwWM4tzmVdQd616pZunMzrGhMDRmf+tviCvHFculkHXaISjkz3823iiY5dAXg3WwiI3qwC1VQ1gWQ6u7k5oHxUMF4FweWFBJS6fL0NjgwEOjnbwC/BAp678Az3Lcjh/6ipqKhtgMbNw9XBC+y6BgjUwSopqcDG3BI0NRtg7aNHK1w2R0cG8AyTHcbhwpgjVFQ0wmyxwcXNEu85tBGtplJfV4fy5YjTUG2Bnr4VPK1d0iQrmHbwopbhwthhV5fUwGc1wcXNCeAd/ePIs/waAysoGnDtbhPp6A+zsNPDydkFUVDDvShNKKS6dL0VlWR2MBjNc3BwR2s4PPgIRkOrqRuScLUJDgwEajQZens6IigoWTM7Mu1iG8tJaGPRmuLg6IKStL3wFlsPX1ulxOqcQ9fUG69SNhzOiugbBQSBSduVyBUqLa6C/phEc4gN/gShLfYMBp88WobZOD4Yh8HB3QnRkEBwFImUFBZUoLq6BXmeCs7MDAoO80KYNf00pnd6Ek2euovbai4qHmyOiI4Pg3EI1cFT+WO4oR0QJqiPy90DfaMTedWnY8OsBXMktuW17p7hQJN83AH1HxcDeQd6qFqPBjP2bMrFh0WFcbKaHSvsugUie2w8DRsfYNG1wIyajGYe2Z2PD4qM4d/L2ROnQDv4YN7sPBo+NhaOzvIeh2WzB0T05WL/kGE5nXLlte1BYK4yb2RtDxs
bARWZRJ9bC4tiBXKxffhxZqXm3bQ8I8sK4aT0xPClWdkEwluWQevQC1q9IQeqxi7dt9/N3R/LUHhiZFAsPmcW6OI4iPeUS1q9OxfEj52/r6OrTyhXJExMwKikWXt78DoYQlFJkZl7B+rVpOHwo97Zlup6ezhg3vhtGj41FKxumWG7VOH26EOs2pGH/gbO3VZx1c3NEclI8xo6Jgb+f9CmWWzXO5pZg3aZ07N6Xc1uFVRcXBySNikHy6Fi0CfCUpQEAuRdLsX5LJnbsPQ3TLVV1nZzsMHZ4NJJHxSJEJCFbiEtXyrFuWxa27MmG8ZZmg44OWowa3BXjE2MQHio/kqnyx6M6Iip/GmdS8/D2/T+hvkYHQtBs+2+GIeA4Ct82nnjvt0cQ0oE/QtIc57Ov4s0HfkFNZQMIQ5qt49H0716tXPHu/AcFi3k1x5XzpXht3nxUltbxaxACSq3Rjne+m4vOAjUbmqMovxKvPrIAJVerr5+T2zWs/dWcnR3w1lezENNDfNXIjZQV1+DVJ35DweUKXg0QawqFvYMd3vjPNHTv28EmjcqKerz+zFJczC0BoyG8NU8IIdBqGbz87kT0H9LZJo3aGh3eeGk5crILwWgY3ponhBBoNATPvjIWwxPFV43cSEODAW+9sRqZGVeg0RDekvRNkZ2nnhmJJBvrfej1Jrz7/nocO34RGg3DW/OEYaz31iMPDcHkSd1tmtIxmiz48NPN2HfwnKgGx1HcP6cf5szobZOG2czi0293YNvubEENDUPAchQzJ/XAvHsG2DRtxLIcvv5lD9Zszby+HyGNiaNi8dQDQ9QpnTsU1RFR+VPIPJyLN+75ARzLSSr4xGgYODja4bM1T6NtZ/6plBvJSb+Ml+f8AIuZlaxhZ6fBR78/gk4SHYWLOUV4fvYPMBktkop8MQwBo2Hw/s/3I1qio1CQV45/zfkBukaTJA3CEBBC8PbXs9BDYnGnksJqPH3vT6ir1UvTINb/efWDKRgwvIskjcryejx53y+oqqyXVHStyTl9/s1kjBgbK0mjproRTz+8ACUlNTYVdnvi2USMm5Qgybah3oCnn1yE/PxKm4qVPfjQYMwQqW3ShF5vwrPPL8H5C6U2acya2QcP3DdAkq3JZMHzr63AqdOFNvUPmpgcjycf4S8QeCMWC4tX31uLE+l5zb5o8DFqWFe89FSiJA2W5fD2Z5uw/2iu5EbHBMDA3h3xzvNJiqvSqrQ8tozfqiupIourl8rwzgM/g5XohAAAx3IwGsx4bfZ3kjrylhZW440HfpHshDRpmE0WvPHAzygvqhG1r66ox2sPzofJaJZcaZTjKFiWw1uPLkTRlQpR+/o6PV556FfJTghgLYJFOQ7vPbsUec1Md92KXmfEy48tlOyEAP9rfvfx66twNvuqqL3JZMErTy1GdWWDZAehaeD6/L2NyEq7LGrPWji8/sJym50QAPjm8204fuSChO9E8dabq212QgDg5x/3Yt/eM5I03v9wg81OCAAsXnIEW7edlGT7ny+32eyEAMCaDelYsyFdku3XP+6x2QkBgK27svH7imOSbH9afAj7bHBCAGvkcN/RXPz4+0HbvpjKHYfqiKjIYuX/7YbJaLG53DnHcqitbMDm3w6L2q799QD0OpPND3KOo9A1GLFuofgDauPio6iv0dk86FGOwmS0YNV8cY1tq1JRWVZvc0l1Sq3dVJf9vF/UdtfmLBRfrba9bDu1nq/ff9wnanpg9xlcvlgmu6T6gu/3itocP3oe53KKZJW4JwT45fs9ooNyZsYVZGZckV22/acf94p+9uy5Yhw5ekG2xs/z94ue58tXKrBr7xnZnZTnLzoIo9EsaFNcWov1WzNtdkKaWLTiGBp1RkGb6ppGLFufIk8AwLINqaiuaRQ3VLljUR0RFZupr9Fhz9pU2b1KOI5i46JDgi3kDToTtq84IV+D5bBt+XEY9CZeG7PJgs3LjsseLDiWw6516Wio0/PasCyHDcuOyR
4sOJbDwZ2nUV3ZwGtDKcX6Zcdll1LnOIqUI+dRUlQtaLd++Qn5pdQ5itNZBbh8sUzQbu3KFPml1Kl1hcnZM0XCGmtSFeUVlBTXIiP9sqDN+g3p0ChoXFhd3YgjR88La2y25lLIpVFnwr6D5wRtNm7PUjTtYTJZsGPvaUGbTbuzoaRPHsdx2Lw7W/4OVP5yVEdExWZ2rToB1iLPQWiitrIBR3ec4t2+b2MGDDp+J0IKugYjDmzO4t1+ZNcZ1NXoFGlYzBbsXs8f4k47ch7lt9TlsBVKKbavSeXdnp2Zj4LLFbLfWgGAIQRbVvNrXMwtwbkzRQob/jHYtCaNd/vVgipkpl1W1GBOo2GwQeBclZfX48jh88oa5TEE69fxH0dtnR579p7hTX6VqrF2Pb+GTm/Clh0neRM6pUAIwZoN/BpmM4sNW7MUd9NdvSGd1xFnWQ5rtmTIdtQBqwO6ZkuGomuq8teiOiIqNnM65fYlobai0TI408zS0ibOpF9W3KZeo2FwRiAv4UzGZWi0Cn8ChOBM+u3LcP+nka9Yg3IU2QIapzOuyG4y1gTHUZwUOFfZmfmKW8hzLCeYJ3LmlPLeUizLIauZZdFNnM2xPZ/iVjiO4mRW820qAOD8+ZLbls/K0Th9+vZl6k3kXa6A0Wjh3S4FSinOnS+FhWcAv1pUjfoGQ7PbbKGgqBoNjc1Pz1RUNaCiij/aJ5XyqgZUVCvfj8pfg+qIqNhMfU2j4oc5KNAoMKXRUCc96ZIPjuMEp00a6wyKj4NyFPW1AsdRr/xBDkAwctPQYACj1EuANamWV6PeILkEvhBC16OhwSh76udGGhv4cxIaBLbZgk4gWtdSGmYzC5OpeWejobFl7ivAev/8VRr1PA6KHOpb6Lyr/PmojoiKzdjSy4QXYu0yy4e1C6myQYkQAjt7fg2NVgMiq2/rzQhpaLVMCygIn3M7rcam1QZ8CF0POzsNaAuoiGoonAYArOecf1vLtJcXyjGxs2u5xyqfjlbTMscBWO+fZjVa6FwB/Ndd24I1QOyURjdV/jLUK6diM16+boqnTSgFPAUqVXq2clM83UAYAg+Bkt5ereRV47wRjYaBp4/Acfi4Kp5jZxgCb18RDQX5CIDVaRNqzubprVwDgLBGC3Vd9fLm34/QNlsQqhbr5dkyGm6ujryOiJeXvGq1t2Jnp4EzT5VgL5kVcW+FIQTuPBV8vWxoOiiGp3vL7Uvlz0V1RFRsZkBSnPJpE5bDgKQ4fo0xMYoTYlkLh4GjY3i390+MUpzgxrIcBoyO4t0+YERXxY4Ix1EMTOTX6DekM2+3WalQSjFEoDJpr/4dBaMZUiAEGDKyK+/2hB7hgr1KpGkQDB3Bf66io4Ph5i6vdH4TDEMwbDj/cUREBKCVQidXwxAMHRrJu71taCsEBXopytvRaBgMGdCJN/LY2s8DEe39FUUmNQxB357tePvceLg7IT6Kvy+RFBiGID4qBB4C/ZBU7mxUR0TFZroN7ARfBf0qGIYgMqGtYLfYLt3CENzOT/aDlhCCsIjW6BQXymvTrnMbREQHy89LIIB/oBfierfnNWkT4oO4Xu0UPWg9vV3Qe1An3u0+vu7oO6iToiiVs4uDYHVVdw8nDBnZVdGyVzs7LYaN5nd2nJztkZgUqygSxjBAYlIs73Z7ey2Sk+MVXQ+Oo0gScKI1Ggbjx3VTNICzHEVyEn85eUIIJo3rpmilFMtyGC+gAQATk+IV5VGxHMWEscIak0bHK3LWOY5i0hj+66Fy56M6Iio2wzAMku8fIPtBy3EUyff2F7QhhGD8vf1kZyVQSjHunn6i33HcnD6y8xIIgHGz+4gmcY6b2Vv2g5YwBEnTe0IjMl+fPK2n7CgVwxCMnthNtFlg8uTusiNIjIZg+BjxRn5JE7rJngJiNAQDBkfCS2SKZ4yAEyGqwRD07NVOsDMtAIxOjJFdR4RhCKKighAW2krQbsSQLnCQ2USSYQjahfuhk0BnbAAY3K8TXF
0dZL0QMAxBm9aeiI8WbrXQp3s7eHu6yEq4ZgiBj5cL+iS0s/0LqtwxqI6IiiyS7x2AyIS2Nr+FE4ag/5gY9JfQd2TklB6I79vR5rdXhiFIGBiB4RL6jgwcHY0+w7rYHBVhNARdurXFmJm9RG17DozAsKRYmx/mjIZB+05tMGluP1Hb6G5hSJrSXZZGUFgrzHxwoKhtx8g2mDa3r20CsJ4r/9aeuO/RwaK2IaGtcN9Dg2RpeHu74uEnh4na+vt74LEnxO1u02AI3Nwc8fQziaK2np7O+NfT4nbNaTg52eP5Z0eL2rq4OOCV58TtboUQAnt7LV59frSoo+5gr8Ubz42FrXN/hFgjQ2++MFZUQ6th8PZzY0CIbSoE1ufJW8+OadGkV5U/H/XqqcjC3kGLt355EO27Bkp3FAiQMLATnv9itqSloBqtBq99MweR3cIkR18IIejaPRyvfj1HNIoAWKM7L34yDfF92ksexAlD0KFLEN76dg7sJawgIoTg6bfHo/cQ/jn/278XQWg7P7z7f/fA0an5ZMJbNR59YTSGjOLPiblNQ0PQJsgbH357j2ikoon7Hh2CsZO62aTh6+eBj76ZDXeJyY8z7umLqbOkNZZr0vDycsF/vpoNH4Fk2BuZMLE77r1fWmM54H9OyCefzRSNhjQxKjEajz4yxCYNJyd7/OejaQgO8pb0mUH9O+G5J0dYB3EJ9y/DEDg6aPHRO5PQrq2fJI1eCeF49V+jwDBEsoadnRbvvzYBnTvyT7/eSFzXEPz7hWRoNIyk37q16zKDfz+fhLiutnXBVrnzULvvqijCaDDhl/c3YNuyY7CYLNaplBvuKMIQUI7CycUB4x8YiFnPjJTkINyIyWjBws+2YtOSozAZzSDATXPjTR1eHZzsMHZWH8x9NtHmJcashcVv3+zC+t+OwKAzgRBy09x407NRa6fFqKndcf9zo0SnMm6F4zgs/Wk/Vi88BN21mhm3TgsRQqDRMhiWHIeHXxgFJ2cHmzQopVj122Es+/UgGuoM11u/36TBEDCEYFBiFB57YRRceVY0CGlsWJmC3385gNpqXbMaTc5p/yGd8fgLo2StiNm6MQMLftqPqsoGXg1KKfr0j8ATzyailcDKIj527zqNn3/ci7KyOl4NjqPo0bMdnvlXomQn5EYOHDyHH37ai+LiGmg05LaKq00a8XGheOapkQiS6ITcyLETF/HtT3tRcLUKGg1z2xSahiFgOYrorkF45vHhCA/ztVkjPesKvv5pD/KuVEDDMGC5WzSuHVtkRAD+9chwdGzvb7PGqbOF+PKn3ci9VNbsuWo6to7h/nhm3hBEdQq0WUPlz8GW8Vt1RFRahIZaHXavTsHm34+g9GoVTEYzHB3tERjui6S5/TFwXLykN3shdA0G7Fmfjs1LjqL4SiWMRjMcHOzQJrQVxszqjcHJcXCW+GbPh0Fnwr7NWdi09BgKL5fDYDDD3sEOrQO9MHp6TwwbFw8XN2UaRoMZB7afwsblx5F/sRxGgwl29lr4tvbA6MndMWJ8PNwULms0mSw4vCcHG5YfR96FUhj0Vg0fXzeMGt8NI8fFwdNb2coOi4XFkf3nsGFlCs6fLYZBb4LWTgNvH1eMTIrFqPHxkiMUfLAWDsePXsD61Sk4e7oQer0ZWi0DTy8XjBgVjdHj4uDnb7tzcCMcR5GScgnr1qQiO7sAep0ZGg0DD08nDB8ehbHJcQhQkJwNWJ239IwrWLc+DZlZ+dDrTdeiLE4YNiQSSWPjZDkgt2qczL6KtRvTkZp+GTq91aF2c3XE4AGdMG5MrGjeiRSN0+eKsG5zJo6lXkSjzgRCAFcXRwzs0xHjRseivcRIixBnL5Rg7dYMHDxx8XrTPBdnB/Tv0Q4TRsWhU3vh3BaVvx7VEVFRjMloxoEd2cg8dhH1tXowDIG7lzP6DIlEQr+OoqsnKKWiIVaz2YLDu3OQeuQC6mp1IITA3dMZvQZEoNeAjqKREykaFguLowfO4c
Sh86irtVYndXN3RkLvdug7uDPsRJakStFgWQ4njl7AkYO5qK3RgXIUbu6OiO8ejgGDO8NeJKFQigbHUaSmXMLB/edQV6uDheXg5uaI2LhQDBoSKbrsVYoGpRQZmVewf99ZVNfoYLGwcHV1RFTXIAwb1gVOIo6kVI1Tp69i974cVFU1wmy2wNXVEZGd22DE0K5wdRGOAEnVOHOuGDv3nkFlVQNMJgtcXR3QqUMAEod2hZuIIylFAwDOXSzF9r2nUVZZD6PRDFcXR3QI98OoIV1F62NI1bh4pRxb9mWjtLweeqMZrs4OCA9phbFDouAjEmVqerSL6VwurMSmfdkoLq+D3miGi5M9wgJ9kDSoK/wEauTYopFfUo1NB06jsKwGOoMZzo72CAnwRNKArmjjK+xIStUoKq/FxoPZyC+pQaPBBGdHOwT5eSKpfxcE+3sJflal5VEdERXZVFc0YM2iQ9iyKgWN9QYwGub6aoymsGgrf3ckz+iN5Bm94MhTDEmIuhod1iw+ik0rU6xOTjMaXj4uSJraA+Nn9pKcv3AjjQ0GrF16HBtXpqCmuvGmcHXTf7t7OmPspARMmNEL7h621yDQ60xYtyoF61eloLKioVkNVzdHjB0fj4nTesoqpmU0mrFhXTrWrU5BaWndTRpNIX1nZ3uMHhuLydN6opWMCITZzGLjpgysWZOKoqKaZjUcHe0walQ0pkzpgdYyIhAWlsPmbVlYvS4V+QXW6QOO40Dp/zTs7bUYOawrpk3ugaA2tg8cHEexbXc2Vq5LxaXL5bdpUEqh1WowfFAkpk3sgbAQH5s1KKXYeSAHKzak4tzF0ps1CAGFVWtov06YMaE72ofZHh2glGLv0Vws35SK7Nzi244DFAABBvXsiJnjuqOzzOjAwbSLWLIpFZlnr0LDEHDUqs00ZY1SoH+3dpiZlIDojvKmQI6dvIzFW1NxIju/WQ2Oo+gTE4bZY7qjW+dgWRppOQX4bWsKjp68bE06pwB3zdFjiHUJcY8uIZiVmIDeUWGyNFRsR3VEVGRx+XwpXn34V9RUNYguoSQMQdsOrfHe93MFq2XeSuGVSrz86EJUlNaJLmllGILAUB988H/3wM+GufnS4hq88sTvKL5aJUnDr7UHPvhmDgKDpYfGKyvq8cq/luJyXrno8l+GIfDydsHHX81CaFvpc/O1NTq8+vJy5J4tFq0XwTAEbu5O+OiT6eggsiTzRhoaDHj9jdU4da3hnJAOwxA4O9njww+noksX6QOTTm/C2++vw4nUvOv5PHxoNAT2dlq8//YkxMfy14C5FaPJgvc+2YQDR3IlaWg0Grz76jj06i592afZzOLjb7dh+74zt+UQ3abBEBBC8NZzYzGoT4RkDZbl8Pkvu7FuR1azOSu3alAArz6WiFGD+GvA3ArHUXy79ACWbEqVpMFRiufvG4aJw6UnQlNK8fPao/h57TFJGixH8eT0AZg1Wnr9FUopFm9Lw9fLD1zfBx9N32He+N54cFwvxe0jVMRRHREVmykqqMRT07+DrtEouR4Fo2HQJtgbXy5+BK4SqhqWl9biyVk/oLZGekM7RsOglZ8b/vv7w/CUEFGoqWrEk3N/QkV5vWQNjYbA3dMF3yyah1Z+4vdZfZ0eT877FSVF1ZJbvTMaAhcXR3w7/34ESHjb1+tMeOrxhbhyuUJyDRKGIXBwtMM3392L0DDxXACj0Yxnn1uKc+eKbdKws9Pg669mo0MHcYfHYmHxwmsrkHWqQLKGdUUEwZf/mYGukUGi9izL4fX31uJoyiXJxbesq0wIPn13KrpJcHg4juLdLzZh96GzkouINY11H7wyAf168Be9a4JSiv/8sBMbd520uX7O28+MwfB+nSXZfv37PizdnGajAvDyg8Mxbih/Qbob+XnNUfy09qjNGk/PHIiZo6Stylq8LQ1fLdtvs8a88b0xb7z0VVkq8rBl/FaX76qA4zi89fhv0NvghADWMu1F+ZX4/M01oraUUvz72W
Wos8EJadKoKKvHR6+ukmT/waurbHJCAIBlKWprGvHvF1dIGsg+eX8jigulOyEAwLEUukYD3nh+uSSNr77YZpMTAlgHS6PRjFdfWi6pPP73P+zF2bPSnZAmDbOZxcuvrOTtDHsj8xcdQuZJ6U4IYL1XOI7i5TdXo1FCd9Ylq47jaMpFmyqAUmr9e/XdNaip5e9s3MSaLRnYdVC6E9KkAQBvfrIBpeV1ovZb9p3GBhlOCAHw7n+3Ir+oStR27/FcWU4IAPznl13IvVwmanfs5GVZTggAfLVkP7JyC0Xtss4XynJCAOCndUdx9NRlWZ9V+WNQHREVpB+9iIK8cllVMzmO4sieMygprBa0O5NVgNwzRfI0WA4Zxy/hykXhh+Cl86XISrssq8Iox1KcO12Ic2eKBO2Krlbh6MFcWZVSWZbiyuUKZKRdFrSrrGzAnl2nZWlwLEVpSS2OHT0vaFdfb8CWLVmyyndzHEV1dSP27T8raKc3mLB2Q5psjcYGA3buOS1oZzazWLkuVVapc0opjEYLtuw4Jfpdlq47YbsArM4Iy3LYsCNL9LssWXdCVgVTek1ozbZMUdvfN6XIqmAKWCM8K7eni9ot3poqu4S+hiFYtk1cY8m2dGhkajAMwZJtqbI+q/LHoDoiKti49KiiPiUMIdi6KkVYY8UJRX1KNBoGm1YKa2xanaJYY6OIxub1GYr6lGg0DDasEn4Ibt2UKXv/gPVBu26NsMb2HadgsbCKNNauFdbYsy8HeoNZtgYIsHq9sCNz8Nh51NbpZUtQSrFmU7qgg3wiIw9lFfWyNTiOYt3WTJjN/Of75NlCXC6skt07huUoNu4+Bb3BxGuTe7kMZy6UgJMpwnIU2w/loK7BwGtTUFqNE9n5slsasBzFvrQLKK9u4LUpr27A/vQLgjkhQnAcxfHT+bhaViPr8yotj+qI/MOpqqjHiQO5irrpchzF5hUneAeMxnoDDuw4rajTLcty2L4+A2Zz89MBJqMFOzdlKdbYt/0UdDzTARxHsXl9uqIGXSzL4cihXFRXNfLabFSowXEUGelXUFJcw2uzYUOGooZpHEdx7lwJ8vLK+TU2ZyrqDkspUHC1Cmdy+KNUG7dmKnIMAaC8oh7pWVd4t2+4ljiqhLoGAw6nXODdvnHXKdlv+E0YjGbsOZrLr7FXuYaF5bD9cA6/xv7TijUAYPPBM/zbDvNvk4qGIdhwIFvxflRaBtUR+YdTWlitqLtmEw11et4BvLysTpGD0ITRYEZtdfPz+dVVDTAZxXMWxLBYOFSUN//229BgQGODeM6CGJSjKCup5dFnUVnJ/zZoC8UCjkhJCf82Wygq4p+SKyyqVuTsXN+PwHEUSFgZpVQjv1C5BsMQFJU2f80B4EpRlew3/Ca0GgZFpTW82wtKahRraBgGhQIaV0trZEdcmiAgKBSIVhSUVitycAGAo1AjIncQqiPyD0ev4w/l2rwvHkfE0IIafM7O3+44dDzHoW9Jjeb3xbIcLBKSWZVoAIChBRxDQPic6I0Kpn6uwTAEOiENJdNLTRpEWENomy3o9PzftVHfAk40BXQC0z86g0mx88lRKqihN5qh1PeklKKxBX9rKspQHZF/OE4yCpLxwVdeXU7RMz5ceCpvOotU5LQFvn216Lni01BYBv9GXHi+r0bDQGtjvx8+hM67o6O8FvW3aQicdydH5eeL4yjvuQIA55bQoBTOAtdWSN8WnJ34K+y62ti3qDkIET4fLk72imt0MAwR1HBysJOdcHtdgxC4OrXcM0NFGaoj8g8nIMjbWo1QIR5eLrwDta+/O7QipdSl4ORsDw+estae3i5wFHgIS8XOTsNbS8TF1RFuEuqliMEwBK15epdotRr4SahlIoU2Ar1LggJbpuS1UJfYkCCfFikcFRzIrxEW4qM4f0OKhtK8B46jCBaoH9M2SLmGheUQ0ob/OELbeCvWYDkOIQEC1zzAS/m0CUcREsB/rkIDvJVPJxMgpLWnsn2otB
iqI/IPx9PHFX0Gd1a02oRhCMZO68E76Li4OmJwYpQyDQ3BqAndeB0ae3stRibHKV41M3R0NK9DxTAEY8fHK141039QJ3h48vciSRofr2gAZxiC7j3CBR2a5OQ42ftv0ugSGYgQgTLp48bGKRowCAHahrVChECl2ORRsYryNwiA1n7uiI3ibyU/bmSM4twKLw9n9O4Wzrs9eZhyDWcnewzq2YFfY3CUYg07rQYj+nbi3Z40oKvyfBoCjOnHXyl2TN9Ixc4O5SiSB0Qp24lKi6E6IipImtFLUTIpBZA4qbuwxrQeijQ4lmLMlARBmzETExSvmhk7Wfg4xoyLU7xqJmmS8HEkjo4Bo+CXyXEU4yYIV6ccPrwr7O3lT51wHMV4EY1BAzrBRcF0AKXAxGThkt99eraHl4BTJwoBJiYJO5fxUaEI8POA3LGPYQgmjIoVnA6L7NAa7UJayR5gGYZg3LBoODjwRwXDg1shumMbRTU+Rg3oIjjF08bXA72jw2RHXjQMwZAeHeEt0DTQx8MFQxI6KNLoHR2GgFZqte47BdURUUFMj3C07dhaVjSBYQgGjIyCr0gvmIgugYiMCZZVr4TREPTo1wFBIi3MQ8N9kdCrnawHLaNh0DUuBB06BQja+Qd4YsCQzrI0NBqCdh38ER3L//YNAF5eLhiRGC0rKqLREAQGeaF7T+EeKi4uDkhOipM18GkYAl9fN/Tv11HQzsFei8kTuskawBmGwMPDCUMHRwraaTUMpk/sIUPBmifg7OSAxGHCb8YMQzBrUg+bK54C1jLydloNkkYI92khhGD2hJ6yEj0Jsa5mmZAYK2o7O7mHIkd6ykjxSNrs0Qnya3xQihmJ8aJ2MxO7KaqHMnuU8MuAyp+L6oiogBCCd76ZA1d3J5scBUZDENrOD0+/NV6S/RufToN3K1ebHB5GwyAg0Asvvj9Jkv1L701C60AvGzUIfFq54o2Ppkqyf+6VsQgJa2WTM6LRELh7OOPdT6ZJcjCeeHoEOka0tkmD0RA4Ozvgw/9Ml3T88+YNQnRUsG0aDIG9gxYffTRVUkRlzsy+6NE93KbkQoYh0Go1+M97UwUTPJuYOqE7BvWPsMlxI4SA0RB89PYkeEjI+0keEYMxQ6Nscqqs/WyAD14Zj1berqL2I/p3xtQx0vqs3KhBKfDOv8Yi0N9T1L5/t3a4b2IvmzSaeP2RRLQLFu9hlNAlBE9M7y9L48W5Q9GlnfDLAAB0CQ/Ai3OGytJ4cmp/JHQWfhlQ+XNRHREVAIBfgCc+/+0htPJ3Fx+Yrj1gO3YJwsfzH5S8YsW7lRs+n/8AWgd5iSfIXtMIa+eHT3+5X3KSqLuHEz798V6EhPtKetsnhCAw2Aef/3y/pKZ6gHWlyKffzEGHTgHXBxtBDYbA198DX3w/F74SE1EdHOzw8acz0DU6+Nr3FLZnGAJvL1d8+c0ctJGYiGpnp8EHH0xBQkLbaxrCIgxD4O7uhC+/mIW2YdK6CGs1DP79+nj062PNXRBzSBiGwNnZHl98NB0REprqNX3m9efGYsS16InY/athCBwdtPjk31MQ3UW8qR5gPTfPPzYC4xJjJGvY2Wnx0asT0SOurSQNAHhy7iDMHNddsoaGYfDec0kYKJAbcivzJvfBg5N6X9+HmAbDELzxaCIS+wtHp25k9ugEPDl9gGQNQoCX7h2KiUOld/idNCQGL94z9FpESFwDAJ6aNkCNhtyBqN13VW6ivlaHDUuPYeOy46ipbIBGy1xvc08YAtbCITDUB+Nm9kbipATYC8xJ89FYb8CmVSlYv+w4Ksvqm9VoHeiJcdN7YvSkBDjKWNJq0JuwaXUqNqw4gdLiWmg0zPXESUIIWJaDr787kqf2wJiJCXBxtT2XwWS0YMuGDKxbmYLCq1XNanj7uCJ5UgKSJ3aTteLGbGaxfWsW1qxKQf6VymY1PDydkTwuHuMmdoOnpzRn6kZYlsOOHaewek0aLl0qa1bDzc0RSWPjMGFCN/
j4iL/d3wrHUezaewZr1qfhbG7xdQ1KrQMuy3JwcbbH2FGxmDS+G/x8bf+9U0qx99A5rF6fhuycQmtUiFJwN2g4OtphzIhoTE7uhjY8K5fENA6duICVG9OQkV1wfYBr0uBYDvb2Wowa0hVTkrohRGA1jhBH0y9hxeZ0nMi6DIYhICDgKL3Wzp6DVqNB4sBITB3bDeESohTNkZJ9Bcu3puNIxiUQQkCajoNYtTQMwbA+nTB9VDwi2vrL0sg4dxXLtqXjQNpF68sFrNMvDLFef0KAIT06YkZivKRISHNkXyzG0h3p2JOSC0qt9yxHOTCEXOvDAwyIb4cZI+IRFyHN8VRRji3jt+qI/MNgWQ4mkwWOjnaCb8CshcWx/WeRcfQi6uv01jl7Lxf0GRKJqIQwwc82dYEV1WA5pB65gJTD51FfqwchgLuHM3oNjEBsj7ZgBDI2mzQcHOwE3xw5jiLjxCUcP5SLuho9KCjcPZzQvU8HdOvVTnAKg1IKg9EMB3thDUopTmZcweEDuair0YGjFG7uTohPaItefTtAo20ZjdPZV3Fw/znU1ujAshxc3RwRGxeKvv07CiZCNmnY22lFj/fsuWLs23cWNTWNsJitGtFRQejfP0JwKoZSCqPJAjutRnRa6PyFUuzZn4Oq6kaYzBa4ujiiS+c2GDygk2CyZZOGVquBVkTj0uVy7Np3BpVVjTCazHB1cUSnjq0xdGBnwdojtmjkF1Zh+77TKK9sgNFohquLA9q39cOIgZGCSbqUUhjNFmgZ8XouV0tqsG3/aZRW1MFgsB5HeEgrjBwQCXeeuj1NGM0WMNdyVIQoLq/D1oOnUVRWC73BDFdnB4QFemNU/0h4ugsnAkvVKKuqx5ZDZ3C1tAY6gxkuTvYIae2F0f0j4eMh7DybrrV1sLcTngqsrG3E5sNnkF9SjUa9Cc6Odgjy88SYfpHw83IT/KzZwoKjFA4iGirSUR0RlZuoKKvDlg0Z2Lo+HVWVDdffQoNCfDBuSncMHRmluCBYdVUDtmzKxOYNGSgvr7uu0aaNF5IndMOIxGi4ugk/OMWordVh69aT2LQxAyUlNdfefoDWrT2RnByPkYlR8BDItpdCfb0B23eewrqNGSgqrr6u4dvKHUljYjFmVDS8ZEQdbqRRZ8SO3aexdlM6CgqrwXEUBICPjyuSEmMwNjEarXyEH5xi6A0m7DqQgzWbM5CXX3Fdw8vTBWOGRyF5ZAz8ZUQdbsRoNGP34XNYtTUdFy6XX0+C9HBzwpghXTFuRAwCFdZqMJkt2HvsPFZtz8DZiyXXkyDdXR0xakAkJgyPEaydIQWLhcX+tItYuSMDpy4UX1955ersgMS+nTBxaAzCg+RFHa5rsBwOZV7Cip0ZyMwthOWahoujPYb3jMDkoTHoGOqnSIPlOBw9dRnL92Qi9Ww+zNeq5zo52GFotw6YMiQWXdpKm+7ig+MoTpzNx/J9mTh6+gpM1xonOtprMTi2PaYOikF0eICi5eeUUqSdv4rl+7Nw8PQlGK81C3Sw06B/l3BMGxiDbh2CFGtkXS7G8oNZ2HPyAgxNzo5Wgz6dQjG9fyx6dgxpkRo1/1TuSEfko48+wiuvvIKnn34aX375paTPqI6IMurr9Pj6P1twYG+ONSR6SyZ7U6Kbg4MW46f2wL0PDRZ8e28Onc6I/36xHXt2ZYPjcHvdiGvhWK2dBknjumHeI0NgZ2NxM4PBjP/7dhe2bz8JluWaXVlAiLVGR+KoGDz22FDBN+vmMJks+OHnfdi4JRMWC8ujQcAQYNjQLnj68eE2V0G1sBx+XngAqzekw2Rqvvw5wxCAAoP6R+DZJ0bATeSt91Y4jmLB8iNYtjYFBqP5+jW+VYNSin492uOFx0fC00bnjVKK39eewG9rjkOnN4EQctt1t04hUPSKb4uXHx0pKVnzVo0VWzMwf9UR1Dcar08X3IiGIWA5im5dg/HqI4kIkOFYrdt7Et+vOIyaer2gRmxEIF59cIRgoS
0+th4+g6+XH0Blre76eWlOo0t4a7z+wAi0k+H07E7NxWfL9qGsukFQIyLYF6/fOwKdw2yfajl46hI+XroXRZV11/fXnEa7Nj54ffYwxLRrY7PG8bP5+GDZbuSX1whqhPp54tXpQ9Ejwvak04xLhXh3+W5cLKkU1Aj0dsfLkwdjQBf++i8q/NxxjkhKSgqmTp0Kd3d3DB48WHVE/gQqyurwwhO/ofjaG7coBOjeqz3e+miK5PoS1dWNePGZxbhypUKSBiEE0bEheO+jqZIH8YYGA158YRnOny+RrBHRKQAffzwNrhIHcb3ehJffWIVT2QWSlk8yDEHb0Fb47OPpkiMwRpMFr7+7FinpeZI1Att44csPp0mOjlgsLN7+dCMOHD0vyZ5hCPxaueGr96YjwF94+XUTLMvhw2+3Ydt+aR1QNQyBl4cLvn5nquR8CUopPpu/G2t2ZEnWcHNxxNdvTEH7UGlJtJRSfLv8IH7flCpZw8nRHl+9NNGmXIZf1h/DD2uOSLJlGAIHOy2+eHYC4jtJz2VYvCMNXyzfL02DEGi1DD57Yhx6dw2TrLH6wEl8sGQ3QCG6jJkh1gTXjx8ag8Gx7SVrbDmRgzcWbQcFFf2NEGJtjvfuPSMxukdnyRq7sy7gxYWbwXFUdPkvufY/b0wdhkl91OJntmLL+P2Hr5ppaGjArFmz8NNPP8HLq2XKSqsI09hoxCvPLEZJkUQnBAAokHrsIj7593pJnzEYzHjtxeW4ki/NCQGsD/9TWfl47+21kgqPmUwWvP7aKslOSJNG7rlivPnmapivhXSFYFkOb7+3Dtmnr0qu4cBxFHlXKvDyG6tglNB0jeMo3v9kE1LSL9ukUVRUjedfX8nbIO9GKKX45NsdOHhMmhPSpFFeUY9n31qBunq9pM/8d8E+yU4IYK3ZUF3biKffWYGqmkZJn/lx+WHJTkiTRl2jAU+9txIlFXWSPvPbphTJTkiThk5vwtMfr0F+CX/H4RtZtTtTshMCWK+HwWTBvz5fi4tXKyR9ZtORM5KdEMCaKGq2sHjuv+tx5nKJpM/sTj+P9xfvBpXghDRpsCyHF3/YjPTzVyVpHD59GW8s2g6OijshgDXKx1GKNxZtx5EzlyVppF24ihcWbAbLcpJqkNBrOv9evgu7sqT/rlRs5w93RB5//HGMGTMGw4YNE7U1Go2oq6u76U/FdpYvOoyCK5VgWduCXZRS7N99BscO5Yrarll1AudzS8DZqMFxFMePXsDe3adFbbdszkR2doHNBZg4jiIrMx9bt4gPZjv3nMaJ1DxZGudyS7Bmfbqo7aGj57H/cK7N5c5ZjuJKQSWWrjohapuadQVb92TbXBCL5SiKS2uxYPlRUdvs3CKs2iJ+vM1pVFU34ofFB0VtL+ZXYOHa4zZrcBxFfYMB/120T9S2qKwW3y0/ZLsGpdAbTPh0wR5R24qaRnz2+16bNZoSWT/4daeobb3OgA8WidvdrmGdJnz7l+2i96TeZMbbC3fYXJTOOohTvDF/m+hvy8yyeGPRNlAZJeMoKF5fuA1mVvilg+MoXvt9GzjK2axCALy5ZAf0JuVdmFWa5w91RJYtW4b09HR8+OGHkuw//PBDeHh4XP8LDg7+I7/eXYnJZMGmtWmyqycyDMH6lSmCNizLYf2aVNl9RBiGYO1qYQ1KKdaskf7GeiuEAGvWin/HNevSZCe9UUqxdkOaaHRn9cY02UlvHEexfkumaHRnzeZ02SWvOY5i886T0Au0XgeAtVszZWuwHMWOAzmoazAI2q3ZoUxjf8oFVFQ3CGvsyZLd6JHlKE5kX8HV0hpBu/X7T8mqkgpYr8epC8U4n18uaLfp8BlJUb9mNSjFpaJKnLxYLGi3PeUcGg0mWVVlOUpRXFWPYzlXBO32ZV1EdYNe1vmiFKhu0GNf1kVBu6PnrqC4ul6eBoBGgwnb08Vf0FTk8Yc5IgUFBXj66aexePFiODpKm6t/5ZVXUFtbe/2voKDgj/p6dy
2H9uagoV74YS8Ex1FkpOahsKCS1+bEsQuorBB+2ItpnMspxvlc/tBwVlY+CgurZT/MKQUK8itx6hR/aPhcbjHOXyhV1JitvLweKWl5vNuvFFQi86TtUZ0bqa3T46BA3kdpeR2OpFxU1NBMbzBj98GzvNtr6nTYdfisIg0Ly2LbPv5IWKPOiC37TytrzEaBjXtO8W42mixYt+eUouvBMARr95zk3W5hOazanSm7BDlgzUlZvYc/okcpxbLdGbL336Sxck+moMbSPRmKGsxpGILle/k1AGDpvkybKu/eCkMIlu0X0Tgg38G1agBLDig73yr8/GGOSFpaGsrKyhAfHw+tVgutVov9+/fj66+/hlarBdtMKM3BwQHu7u43/anYxoE9ObLf9ppgGIKDe/kHpf37zipe1qbRMDi4P4dfY/9ZRZ10mzQOCGkcPNciGvsP8J+rA4dzFZ8rhiHYK+AkWPNClGkQAkFH5HDqRUUNBQGrc7jzIP/1OH7yCow8q4mkwlGKHYf5jyPj7FU0SMi5EdTgKHYc4dc4c6kElbU6RRosR7Hj2Dne7ReuVqCwvFZWpOJGjV2pubxOWVFlHc5frZD9MtCkcSg7Dwae61rdoEfGxUJFThtHKdIvFKK6ofk8J73JjEM5eYocXI4C5wrLUVhZK3sfKvz8YdVbhg4dilOnbn4zue+++9CpUye89NJL0GhsW8KpIo3KivrrVUrlwjAENdX8iYXVVQ2KW30DQE0N/8O6ploHjlM68FFBjeoaHaSl3/HDshyqBQad6prml2zaAnctx4JXo1YHDUNgsTFf50YohSQNpW3khRJWq2t1IFB6RSB8PeqUOQhN1PIMegBQ1UIaDXojWI6DppnCflX1LaNhYTnoDKZmO+q2lAalQG2jHo72t6/+aikNAKhu0MHL9fbqxbWNBkXO1I1UNegR6CNthZmKdP4wR8TNzQ1du3a96d9cXFzg4+Nz27+rtBysRdngLWU/lj9Bg69eiC1QSq8XjvqjNADAYhbWUDyyArAI5AIojVRc17AIaVyr7NYCjhvvNo5Ds4VPWlKjhc6V6HG0ECzbvCPSUscBgPc38qdotOC54tOwtKiGvJwcFWHUpnd3GR6eyiqLAtZxQKgKqoeHk6Kqhk0Iabi5OSqeNmEYRrAgmJubo6K5acBat8TdXUDD1bEl/BB4ePD3qXF1cVQU2r6uIdALx9XFQXGECoDw9XB2UJSv00Rzb/dSttmCixP/ftxaSMNOq+Eta+7uoqxK8Y3wnRN355bTcP9TNJrfl7vAtWopDRVl/KmOyL59+yQXM1ORR3R8qGIngWU5RMeF8mvEhCgeMFiWQ3QMf1XE6JhgxW9kLMshOpp/5VVMVLBgxEQKlFJERwlrKD0OQghio/jPVVxUsOKpMoYhiI/mv+ZxXYIVR480DEGCgEZMZ+UNyTQMQXeBcxXVoY3y/CaGoFsk/zXvHOYv2ntFDIYhgkXN2ge2grOj7Q0nb9IgBF3DA3h76gT7ezY71WELhADhAd5w5XEG/DxcEeCtrJ0BAAR4u8HPo/nqvW5ODgj391aYRQV4uTohxNdT4V5UmkONiNxlJCbFKUpWJQQICPRCbEIYr82wkVGSq6/y4e3jil69+VuXDxrUGS4K+9+4ujpg4CD+qot9e3eAp8IIkoODFiOGduHd3i02FAH+HooeghqGYPQI/sqOkR0DEB7aStHqBkqB5JExvNvDQ1ohqpOyQZzlKCaMjOXd3sbPA71iwhRrTBoZx7vdx9MFg7t3ULSCguUopoyI5d3u5uKI0X06K9LgOIqpw/mPw9HBDuP7RynToBTTh/Fr2Gk0mDwwWlHUkFJgxpA43pcjhiGYPjBW0b1LCDB9YBzvfUMIwcyBcYoikwwhmNYvBnZqbuMfguqI3GV4ebtgwJDO0Gjk/7LHT+0hGFVxdXXEiMQo2VMnhCEYPzFBsK+Ng4MdxoyJlT0oMQxBUlK8oMOk0TAYnxQvW0OjIRg1IhrOAq
F4hiGYmBwve1GLhiEYMrCzYD8YQggmj+0mO2LBMAR9e7SDXyvhN9PJo+MV1aeJ7xosWuZ98qg42RqEAJ3C/RERLtxHZcrwWNlJtwRASGsvxImUYJ80NEZRYq+vlyv6RIcJawxSpuHu4ogh8cIl2Cf2V1ba3MneDqN6dBK0Se7Vpdk8GKloGQbJvSIFbcYkdIKjws66E3urZd7/KFRH5C5k9v39obXT2jxFw2gYBAR6YeQY/jfjJqbN7ANHJ+HW9c2h0RC08nHF2HHxoraTJne35nHYqMEwBO7uTpgwMUHUdnxSHLy9XGx+s2QYAicne0yb3EPUdszIaAT4e9isQQiBnb0Wc6b3ErUdPrAzwoJ9ZGgAWg2D+6b3EbUd2LMDItr5264B67E8NLO/qG2vmLaI7RwkyzkkIHhMgkZMRCD6xITJetOnAJ6YMUD0t9UpzB/De0bIniZ9atoA0cE5tLUXJgyIkh1NeHJSP94clCb8vdwwa5j4b5WPR5N7w9lRuK+Up6sTHhwp/jvi44GRPeApMoXk7GCPx8eI3+N8zB4UD39P2xo3qkhHdUTuQkLCfPHOx1Oh0TKSH+gaDQMPDyd8+NUsOEuYEglo44n3P54GrVZjgwaBs4sjPv58JtwFEiObaNXKDR99PB0ODlrJGgxD4Ohoh4//Mx0+PuIPDg8PZ3zy4TQ4OdtLjiIxDIGdnQYfvTsFrVuLL+VzcXbAZ+9Pg7u7k00aWi2DD9+ciJAgH1F7Bwc7fPr2FHh7uUp2FKzNyRj8+6Vx6CASRQAArVaDT1+biNZ+HpKvB7F6IXjz6dHoGiHejZVhCD5+YRxC23jbpgHg5YdHICGKPwflf/YE7z05Fh3D/Gx2eJ69ZzAGdGsnyfbNB0cipkMbm52Rxyb3w8jewlGEJl6aNQS9uoTZ7IzcN7oHJgyMlmT71MR+GBrX3uag3rTBsZKdmIdG90JST+nN65pI6hWJh0aLO+oAMGdQPKb3F3/JuhECYFhMezyT3M/m76YiHdURuUuJ7xGOT7+95/rKFN452msDY3CoD7759UEEtJHemLBrVDC++vae63kWfA/1pn9vHeCJb3+8DyGh0tucd+zYGt98Mxc+16YNxDR8fd3wzbdz0b699DbnYaGt8P3Xc9H6WgdaMQ0vTxd888VsdIkMlKzRJsATP3x5z3WnQmg+G7CuLvn64xmIjxUfWJvwa+WGHz+bjXZt/QCA1yFpuhWcne3xxb+nok93aQMrAHh5uODHj2ahS8cASRqODnb4z6sTMLSvtIEVsOZY/PDuDHTrEiyqQQhgb6fFB88mY+xg6WUBnB3t8d1rU9E3tq2gRpOOnVaDtx5JxNQR/DkVt+Jgr8V/X5iE4T07StAg0GoYvHLvMNybJD06oNVq8MWT45DUt6uoRlNX3H9NG4jHJ0kfWDUMg48eGoNpg2MlaRACPJbcBy9OGyTZCSOE4O3ZI3HfiO4g4P994No2AuC+Ed3x9qwRNmm8PGkwHh/dB4RAMCLWdIzTB8TiP/eOUTR1pCIOoS2xXu4PwpY2wirNYzSYsX/3GaxbeQIXzt1eUr1Hn/YYN7k7uvVsJztXwmSy4OD+s1i3OhU5Zwpv2x6f0BbjJyagZ+/2svNKLBYWhw/lYu3aNJw6dXvp/+iYEEyY0A19+nSAVuaKBZblcPT4Raxdn4b0zNv7Y0R2boOJ47qhf9+OspN1WZZDSnoe1mxMx/HU20vDR7T3x8Rx3TC4XwQcHOStiuA4irSTV7BmczqOpFy8LXckPLQVJid1w9D+neAkEjbng1KKzDNXsWZrBvYfP39bXkdooDcmj47HyIGRcHaSr5GdW4TV2zOx+1jubauPAv09MGVUPEYNiISbguWsZy6WYPWuTOw4ehbmW2rbtG7ljikjYjG2fxd4uMlfQZJ7pQyr92Rh8+EzMN1SE8bXyxVTh8UiaUBXeLvLT56+WFiBVfuysPHQ6dsqmXq7OWPKkBiM7x8FXy/5UwxXSquxav
9JrD10CrpbOk97uDhiysAYTOwfhdYKVsJcrajB6kOnsOrQKTTob66C6+rkgMn9ojCpXxSCWnnK1iiuqsPqo6ew4tBJ1Opubonh7GCHib2jMKVvNML81I7xcrFl/FYdkb8pNTU65JwtQl29ARoNAy9PZ0R1DRIcIK/klaO0pBYGvQnOLg4ICWsFP3/+qYX6egOycwpRX28AIYCnhzOiuwYJDpAF+ZUoLq6BXmeCi4sDgoK90TrAk9e+odGIUzmFqKvXgwDwcHdGdJdAwQGysLAKRUU10DUa4ezigMA2XmgTyP/A0OlNOJlTiNprre7d3RwR3SkQLgJJpsUlNSi4WgWdzgRnJ3u0bu2BkGD+KRKD0YzMaxocpXB3dURUxzZwF6ibUVpWh/yrlWjUmeDoYIfW/u4IC+GPFhlNFmSeu4qaOj1YjoO7iyO6dgiApxv/4FVeWY/LBZVobDTCwcEOfq3crq2wad7pNFtYZOUWorJWB5bl4ObigMjw1vDxcOHVqKxuxKX8CjToDHCw18LX2w3tw3x5NSwsh6zzhaisbYTJwsLN2RGRYf6CA2R1nQ4Xr5SjrtEIBzsNfLxcEdHWj1eD5TicvFiE8ppGGM0WuDk7IiLYFwE+/M+R2gY9ci+Xo15ngJ1WAx8PF3Rq68/roHMcxcm8YpTV1MNgssDNyQEdg3wR2Ir/N9WgMyLncinqG60anm5OiAxvzfvGTSnFqSslKK6qg8FsgaujA9oH+CBUYIBs1JuQc6UUdY3WZ4OnqxO6hPnzOuiUUpwpKEVhVR30JjNcHO0R7u+NcH/++11vMuPM5VLUNhrAEAIPV0d0DWvNu2yZUopzReXIr6iBzmSGi4M9wny90CFA4H43W5B9uQR11xwFd2erhoNAXsv5kgrklVej0WiCs4MdQnw80SmA/140W1iculKCWp0BHEfh4eKILiH+cLLnf8ZdLKvEpfIqNBhNcLK3Q5CXO7q08W+R2kp3E6ojcpdCKcWZnCKs25COvfvP3vaG6ObqiKQxsUgaEyspd4GPs7nFWLcpA7v2nrmtiqqzsz3GJsYgeUwsgmyYxrmVC3llWLc1E9v2nIbplrc3Rwc7jBkRhfGjYhEqIT+Cj7yCCqzbloXNe07BYLxZw8Fei1GDu2BCYizahfrK1sgvqsLanSexcc8p6PQ3d6+102owsn9nTBoZK7qSQ4iislqs3ZOFdbtPov6WPilaDYPhvTth0rAYdGkfIPthWFpZj7V7T2LNnizU3tIhV8MQDO7eAZOHxSK2Y6BsjYqaBqw7kI2VezJvK4POEIKBce0wZWgsEjoFy9aoqtNh/eFsLN+bhfKamxszEgL07doW0wbHoldkqOwIYG2jAeuPnsayfRkorqq/bXuvziGYPjAW/bq2lR3Sr9cbsfHEGSzZn4mCiprbtie0D8KMgbEY1LUdbx0QMRoNJmxOy8GSg5m4VFp12/bYsADM6B+HYdHtZddF0ZvM2JpxDosPZSC3uOK27V2C/TGrXyxGRHcUdDCEMJot2HYyF0uOZiL7ault2yMCfDGrTyxGx0QIOhhCmCwsdp25gN+PZiCz4PaOxeG+3pjdOxZJMZ3h4iAvAni3oToidyEGgxnvfbgRh4+eh0bD8BbJYhgCSinm3T8Q06f2tOmBbjJZ8PEXW7F7Xw40GmIt682jwXEU98zojfvm9LNJw2Jh8cX3u7Bxx0lBjaa+JtPGJ+DRewfZNGiwLIdvF+7Hik1pgv1RmraNGxGDf80batMDnVKKH5cfxsI1xyVpJA7ojFceGWnTA51SikUbTuD7FYdABPrVNGkM6t4Bbz82Co42PmyX70jHl4v3AwSiGr2iwvDBE2PhYuN0y/oDp/Dhol2gFLxVYJs04joG4dMnk22uHrrtxFm8vWA7LCzlLbjXpNElzB9fPTkeXgLRpObYm3URr8zfApPFwrtcukmjfZtW+PaJCfCzcbXFkZwreG7+RuivTX00J9OkEernhe8enWBz/5PUi1
fx9C8bUK838vb3YQgBRynaeLnju0cmoK2f8NLrW8nOL8Fjv6xDdaOet3J/k4avuwu+e3ACItrY9lJwrrgcD89fi/L6xuv7ul3D2rTOy8UJ3907HlHBrW3SyCuvwryFa1FUU8er0XQO3Rwd8O3sZCSEKS/O93dHdUTuMgwGM557cRnO5hbbVGNh+tSeePjBQZJszWYWL72xEhknC2yqmpo8Jhb/eny4JGfEwnJ4/YN1OJp6e96CEMMHReK1Z0ZLckY4juLfX23GLoFOsrdCCNCve3u890KypBwWSik+/nEnNuzmbzffnEZC1xB89spEyTksXy/ehyVb0iRrMISga4cAfP3KZMnOyM9rj+KntUelazAE7YNa4YfXp4kuy2zi922p+GrFAckaGoYg2N8Lv7w6XbIzsnr/SXyweLfkpnkahsDf2w0LXp4OH3f+aacb2Xw8B28s3AbYoOHt5ozfXpwBfy9pORO7sy7g+fmbAFBI+alrGAJ3J0f89ux0BEus+nnk7BU88dM6cJRKag2gYQic7O2w6KlpaC8wlXIjaZeu4qEf18DCctI0CIG9nRa/PjYFXYKkRQ+zr5bg3h9XwWS2gJWgwVxLCP75wUnoFiYt0fx8aQVm/bgcepNZsgZDCL6bMx59O0hPNL8bsWX8VlOB/wZ8+J9NNjshALBsxXFs2JQpyfaz/25Hxsl8m0u3b9iciRVrUiTZ/t/8vTjaTPKkGDv3ncGCZUck2f6y7LBNTghgfVM7dOIC/m/Rfkn2izek2OSENGmkZufjk593S7JfvSvTJicEsEYass8X493vt0my33L4jE1OCGB19C5crcCr32ySdK/sTTtvkxMCWCuX5pdW4/n/rpd0zx89fQUfLrGeV6m3FstRlFbV4+mv18Es0OyviYwLhXhz0XZQGzWq6nV49L9rbksebY6cglK8tGAzKJXmhDRp1OkNePj/Vt+W2NkcF0sq8cz8DWCpNAehSUNvMuPh79egppG/63ATVytr8fgv6yU7IQDAUgqj2YJHflyDstoGUfuyugY8PH+tZCcEsP4+LCyHR39dh6tVtaL21Y16PLhgDfRmaU5IkwZLOTy5ZAMullVK+oyK6ojc8Zy/UIoDh3JlV5ucv+AAzAKdWwHgamEVtu3Mll2Zc8Hvh6E3mARtyivrsWZThuwyy4tXH0f9LbkLt1Jbr8fitSdk7Z8CWLk5HRVVwg9BncGE+atsG7yva1Bg455TuFpSI2hnMlvw/YrDsjQ4SrH7eC7O55cL2rEch2+XH5SnwVEcPXkZpy7cPld+I5RS/HeVfI2M3EIcP3P76qVb+WbtIVkaLEeRk1+G/VkXRW3/b6M0R7g5jbySKuxIOydq+/3WY+Aotfk3wnIURZV12HDijKjtL7tSYGZZm3/rLEdRWa/DyiPiDviC/WkwmM02N2LkKEWd3oglhzNFbX8/nIE6vVGyg3CjhsFsxoKD4k7+8pSTqGzQ2Vy9llLAzLL46YC0FzQV1RG541m3IV1RufbaOj0OHckVtFm/OVNRfw+9wYw9+4WjEBu3Z8kucw5Yc0u27s4WtNm8O1tZW3EKbNol/KDdcTDntsRXW2AYgvW7sgRt9p44j/pGYadLCA1DsEZE40hWHipqGhVprNqVKWiTfu4qCkprFGms2C2sceZyCc7ml8kvb08Ilu8R1sgrqULq+auyOxwTAizZmyFoU1Jdj/2nLykq2b5kf6ZglKq6QY9tGedka3CUYumhTMHfWKPBhHUppxVprDh6EiYL/2/MaLZgxfFTsq8Hy1GsTT2NRiP/y5OF5bD0eJYijS0nz6FaQgRJRXVE7mgaGgzYufs0b0KnFBiGYO36dN7tRqMZm7adVNS9lRBg9Xr+NwyLhcXaLZnKOsRSYPWmdN4HLcdRrN6SrqhDLEcp1mzLEOzIu2JrhqIGXRxHsW7XSRgFQvUrd2QoajTGchSbD5xGo44/VL9ypzLnk+Uodp3IRfUtq19uZMXuTMXN5Q6fvI
SSyjpem5X7Typu/JZ+vhB5xfxh9FUHlWlQCpy7Wo7Tl2+v43Nd48gpEAWeOgVQUFGDE+dvr7HTxNrj2TZHEG6loq4RB87cXv+miU3pOTCZ5TvqgHXF0I6T53m378g+j3qD+DSUEAazBZsycni378/NQ3m9fEcdsE43rUk/rWgf/xRUR+QO5sLFMtFpFTE4juL0mULeAfxyfiX0euFpFTEoBS7llfMOrsWltaitU/ZmQEX2U1XTiNKK25dS2kpVjQ6l5c0PfDqDCZevVipydgCgUWdCftHtyyWBa9frYrHsN7EmTGYLzhfwT89k5RYqcwxhXZ2Uk3f7cskmMnKvKnrDB6z31qmL/FNAaeeUawBAloBG6nnlGgwhyLxUxLs9/YL8iEsTGkZYIzOvyOYcsFvRMgwyLt1etLCJjMtFipzo6xp5AufqciG0CiudMoQg44qAxhXlGqAUGVf4z5XK/1AdkTuYBpGcCKlwHIXBYG52W0ODsjeLm/fV/Petb2w5Db48kQaBt3+bNXimRRpa8jh49qUzmBQ7OmIaLMdJSp6UpsF/jzaK5A1J1hC4tlISNMVgGHK9aFZzCG2zRUPoOG6t7ikHQgjqBc5HjYLpvhsR0qjTGRRHXThKBTXq9UbFThtHKWoFrke93ig7n60JCqC6Ba7rPwHVEbmD0drJKyLU7L54loza2bXcLcCrof3jNeQWdWoOvlofLavR/L7+jONoWmLYEggtRW6p/hxC50Ruy4AboZTCTsN/HHYtosF/PQBAK6BvC4LH0RK/QyJ8HHZajZJUMKsEET7ndpoW0ABg/wcfB0Q0VP6H6ojcwXh5SqtvIIaTkz3seJyaltLQaBi48nTt9RIoD24L1hLwzff78HR3bpEHBwB4eTRf5Mrd1VFRXsXNGs2fEwd7LRxk9rG5Fb6+JYQQwdLzNmnwnCsANhcL48NHQMNHQW+WJigV3k8rd1fF9xbLcfAS6FXj5+Gi2DnkOApvgXPu6y69MzMflFJ4uwpcD1dnMIodUCKs4eYMRYlaABiGgY+Qhouz4mksDSHwdWuZZ9/djuqI3MF0aO9/vSOsXDQagmFDInm3BwV6ISy0laLftUbDYFC/CN6301Y+rugS0UbRIK5hCHolhPM2UHN1cUD32DBFD1qGIYjpHAhvHudMq9VgYI8OijQIIegQ6otAnutKCMGI3p2UaQAI9PNAB4HS9Yl9OiselHw8nBHVvg3v9lG9OykeXF2dHJDQOYR3++ienZWOSXCw06JP1zDe7aO6RygO02sYBoOi+bscJ8ZHKJ5uoKAYFtOed/vIuI6Kc11YjmJkXEfe7YmxEcpWrsHqtCXGRfBrRHdsGY1ogeOI6qh4iomlFKOi+DVU/ofqiNzBMAzBhHHxih60LEuRPJa/dTkhBJPHdVOUl8CyHMYnCbdHn5wUryg5kuUoJo6JF7SZNDpO0YOW4ygmiWhMToxVpEEpxZRRcYKVaCcNV6YBAkwdGS+oMXFItCINhhBMHhYrOG0yYWC07P0D1vt/4qAowR4kSX26KJoC0jAESX0i4erE3wBxdI/OcFQQpdIwBIkJEYLRiuGxHeAm8B2kaAzsEo4Ab/4KlgMi28JXYhXZ5mAIQUL7IMFS793bBSHEx1N2BIkhBJ0D/QSrq3YNao3ObfxkPxcJgFAfT3QP5y/DHu7rje5hQYocaT83Fwzo2Fb25/9JqI7IHU7iiCjZbe0ZhqBzpzZo385P0G7o4M5wcpLXDIphCMJCfNA1Urhk8oDeHeHh5iSrmRnDEPj7uqN7XJigXa+4tvD1cZX18CCEwNPdCQN68L9RAkBs5yCEtPGSFd0hAFyc7DGsbydBu05t/dE5nL/rq5iGnVaD0f35o2AAEBrgjYTIYNlRKkKAcQOjBG38vd0wIDZcduSFUirqzHi5OWFk9wjZGixHMXlgjKCNi6M9xvXuKvtcsRzFVBENezstpvaLlj3wsRzF9AGxgjYahsGM/rGyNThKMbO/sAYhBL
P6C7+UiGnM6iesAQCz+8Yqenma1Vf4ZQAAZveOlR2lYgjBzF6xLZYndbejnqU7HHd3J7zwr1E2f44QAkcHO7z0vPhnnRzt8epzY2RpaLUavPrCGNEftZ2dBm88P8bmNyVCrD/qN58bIzoQaDQM3n52rM0DBrmm8/azY0WdPkII3n5qDLQaxvY3MgK8+eQoODqIO31vPJwIB3utzRoUwOsPjYSbhB4tr9w3HK5O9rIG2BfnDoWPhPyi52cNgYerkyyNp6cORJCfp6jdM5MHwNdTXv7Dw0m90CFIvH/KY0m9EdTKQ5bGnKHdEN02QNTugeE90C7Ax2YNAmBynyj0iuCfwmpi9oB4dAnxt12DAKPiIjA0SthRB4ApvaLQvX2wzQ4PQwgGRYZjbLfOorZJcZ0xqHO4LI0e7YIxtaewEw0AQzu3x+joCJtfnjSEICqoNe7pIxxdVfkfqiPyN2D4sC548rFhAKTlaDEMgZOTHT7+YApCQ6Q1qerftyNefCYRhBDJGg72Gnz0ziR0bC+tm2WPuLZ44/mx0DBE0o+bYayOznuvjkdUpLRuljGdg/DeC8nQahlJgx9DCJhrDkxCtLQmVZ3C/fHJSxNgb6eVpEGurVJ57dFE9E8Qf5ADQHhQK3zx4kQ4OUhzFJosnr93KEb0EX+QA0CQvye+fnEyXJ3sbRqYnpjWH+MHS5t28fd2w7fPT4aHi5NNGg8k9cTMEdIe5N7uzvjuX5Pg4+Fik8asYfGYN7aXJFt3Z0d89+QkBHi726QxoW9XPDOhvyRbF0d7fPfoBIT6edk0wCZ2i8ArU4ZI+k052mvx7bzx6NjG1yaNgZHheHfmCEkadloNvro3CdGhrSVrEAL0aB+M/8weLSmKoGEYfDpjNHq0C5bsrDOEIDqkNb6ekyS4uui6PUPwwcQRGBQhfXqFIQQRrX3x3ZxxcBSYUlS5GbX77t+IQ0fO4/sf96CwqAYaDQP2lgqg19uox4TgmSdHICTEx2aNE2l5+PaHPbhSUAmNhtxW1bXp37pGBuKZx4ejfbjwtE9zZGYX4Ksfd+Pi5fLmNa4dR+cOrfHMI8PQuYP42+StnM4txuc/7cK5i6XX99ecRvswX/zrwaGIkejo3Eju5TJ89vNunMotEtQIC/LBM/cOQo/oMJs18gor8cmvu5GeUyCoEdzaE0/NGoT+8fwJkXxcLa3BfxbuxvHsK2AYclsuT5NGQCt3PDl9AIb2sD0Br6SqHh//thuHsy6BCGj4ebni8Un9MLqP8NRSc1TWNeKjxXuwL9PaO+bWsHrTsfm4O+OR5N6YOMD2HJbaRgM+XrEHO9JyQWkzGtfaxHu6OmFeYg/MGCw+BXAr9XojPlm9D5tTz4Kl9LbVG00abk4OuG9YAu4b2t3miJPOaMZnG/Zj3YkzsLAsQG9u5keIdTWRi4M95gyKx8Mjeto8zWA0W/DllkNYefQkTNcaC96kce3/O9nbYUbfGDyR2EeSg3AjZpbFf3ccwZKjWdCbzLd1X246K/ZaDab0jMZzo/rBXmubg8ByHL7bexwLj6Sj0WgCQ3BTU8ImDTuNBhPiI/HCqIFwltj9+m7GlvFbdUT+ZlBKkZGZj3Ub0pGeeQV6vQkMQ+Dm6oghgzsjeWwcQoJtd0Bu1Th1uhBrN6YjJS0POr0JhFg1BvWPQPKYWISH8a/IkKqRk1uMtVsycCTlEhp1RhAArq6OGNCrA8aNikXHdtLagQtx7mIp1mzLwMETF9DYaC1S5OLsgL4J7TBpVKwsJ+dWLuaXY82OLOw9lov6RiMoR+HibI+eMWGYnBiHqIg2snJjbuRyUSXW7MrCzqPnUNdoAOUonJ3skdAlBFNGxCG+c5BijYLSaqzdcxLbjpxFXaMeFpaDi6M94joFYfKwWPToEqp4+XJRRS3W7j+FLUfOoLpeBwvLwcnBHjHt22Dq0Fj0jgpTPK9eVt2ANQdPYsPhM6iq18FiYeHkaI8uYf6YNjgW/aPDFddrqahtxL
oj2Vh7JBsVtY0wW1g4OdghIsgP0wfFYHBse5sH1Vupqtdh/fHTWHM0G6U19TCZWTja26FDm1aY3j8Gw2M7wF7hW3edzoD1J85g5dGTKK6uh8lsgaO9Fm39vDG9XywS4yIUJeoCQIPBiI1pOVh+5CQKq2phvKYR0soL0/pEY0xcJzg7NL8aTiqNRhM2Z57FsmNZuFJRA6PZAgc7LYK8PDCtdzSS4zrD1VF+MjBgLQu/5eQ5LD2eiUvlVTCYLXDQahHg6Yap3aMxPj4SHk4tsyz+bkB1RP5mnM8rw9a9p1FWUQej0QJXFwe0D/PF6CFdRet8UEolDUCX8iuwZV82SsrroTea4epsj7bBrTB2SFe08nIV1QAgqnOlqAqb92ejsKwWeoMZLk72CG3jjbGDuqJ1K+HrJ1WjsKwGG/efRkFpNXQGM5wd7RDs74XkgV3Rxk94qbNUjZLKOqw/kI0rJdVo1Jvg7GiHQF8PJPXvitDWXi2iUVbdgPWHs5FXVIUGvRGODnZo4+OOpD6RaBcoPp0m5bpX1jViw9HTOH+1AvV6I5wc7ODv5YakXpHoGCTuSErRqG7QY8Px08gpKLMeh70Wvh6uGNO9M7qGik/ZSdGo0xmwMeUMsvNLUa83wl6rQSt3F4zu1gkxYQGin5ei0WAwYlPaWWReLkLdNQ0fV2ckxkUgITywRTR0JjM2Z5xFWt5V1OmN0GoYeLs4Y2R0B/RsFyLq5EnRMJgt2HbqHI5fKkCNzgANw8DbxQlDI9ujf4ewFtEwWSzYfuYCDl+8glqdAQwBPJ2dMKRTOwzs0FbUyZOiYWZZ7Dp3EQcu5KFGb61O6unkiAHt22JYRDtRJ0+KhoXjsO98Hvacv4hqvR4ctWr0axuKkZ3ai0ZOpGiwHIeDeVewI/cCqvR6sBwHTydH9A4JxuhOEXf19I3qiPwNoJRi96FzWLExFTnnS6DRMOA4DpRaQ68U1lDykL4RmDG+Ozq0tX0KhFKKAycuYOnGVJw6Z50+4Kj136/P3RJgQI/2mJXcHZEyowNHMi5hyeZUpJ0puF2DWP+7X3w7zBqTgJhOtk+BAMCJ7Cv4fUsqjp+yTh9Q+r8HASHWpbe9osMwZ3QCErqIJ+01R/q5q/htayoOn7xkfcBcC703JcyyHEX3zsGYlZiAvtHyluWduliMRTtSsS/jf63nb9WI6xCIWcPjMThOWj7JreTkl2LRzlTsSj8PjlpDx7dqdA1rjVlD4zGiW0dZkZQLRRVYsCsV29LPgeU4EJAbNBiwHIdOQX6YNSgOY7p3lhVJuVxahQV707ApNQcWCwtybUqCwPrbYDmK9gE+mDUgDuN6ylvGW1BZg4X70rAu5QxMZsstGtbjCPP1wuwBcZjYs6usKEdxTT0WHEjDmpRs6ExmaAgBe4tGkLcHZveNxbRe0TZPHQBAeV0DFhxOx8qUU2gwmq5Pdd14rlp7uGJW7zjM7BkDJxlTB1WNOiw4mo7lKSdRazDeNFXY9N++ri6Y1SMGs3vFwVVGlKNWb8CiExlYkpqFKp2+WQ1vZyfMTIjBPT3iZEUgGowm/JaSgd/TslDe0NishoejI2bER+PenvHwduYvRMeH3mzG7+lZWJSWgeL6ButzkaOgwPXr7+Zgj2kxUbi/ezz8XIVfBv+OqI7IHY7FwuLT73di8+7s6/O9fGgYAhCC158ejWH9hZd93gjLcvjvwn1YuTWj2Xn/WzUoBV56ZDjGDhHPJm+CUorvlx/Cog0nJGmwHMWzcwdjykjp2eSUUizYcALfrzrcbI5EcxqPTe2He8Z2t2mAXbIjHV8s3Seq0XSc9yf1xCMT+tiksebAKXz4+24QAmGNa/fEzGFx+NeUgTYN4ltPnMWbC7cBkKYxoW9XvDJjqE1TFXuyLuClXzeDo1RQoynPILFbBP49a4RN0wiHcy7j2fkbYWZZYQ1YcwIGdQ3HR/eMtmmATb14FU/8sg4Gs0VUAwB6dQzBF3OT4OIofYA9WVCCh3
9Zg0ajSZJGXFgbfDN3HDycpQ+wZ4vLMW/BGtTo9KK1YQgBOgf44Ye5EwQri97KpfIq3L9oNcrrG0ULfTGEILyVN365ZyL83aUPsAXVtbh/8WpcrakTXTbLEIIgT3fMnzUJwV7Siz6W1DXg/qWrcamyWlRDQwj83Fwxf8ZEtGvFXzvlViobdXhg5VqcLisTXWKsIQRezk74depEdPZTNt19p2HL+K2umvmToZTio2+3Y8uebAC3J7vdCstRcCyHdz7fhP1HcyVrfPnrHqzcmmHVEHk4sRwFRyk+/G4Htu6X3rb6u2tOiFQNAPh84V6s3pkpWWPhRqsTcuM+xDT+b8Uh/LY5RbLG8l0Z+GLpPkkaTcc5f+NxfL/miGSNDYdP4/3fdokO3sD/7okluzLw2Yp9kjV2puXitV+3guWka6w7ko33Fu+SXM764Ok8PP/LJlhYTlSjaZfb08/htUXbJBe0SzlfgCd/WgejRdhBAP6XmHjgdB6e/9X6vaRw8koxHv5xDfQmaRoUwPHzBXjs53UwWaQ1DDxXXI77fliJBoOwE3KjRtaVYsz7eTX0puabVN7K5Ypq3PPTCtQ0ijshgPWanCspx72/rESDQVrDwMKaOsyavwLlDeJOCGC9t/IqqzB7/gpU66R13S6rb8CshStQKMEJadIorKnDrIXW7yWFap0es35bgTwJTghgrYxaVt+Amb+tQGFt8x25b6XeaMSspSuRU1Yuqc4JSymqdXrMXLICeVXVkjTuRlRH5E9m/fYsbN93xqZiPBTWN5l3Pt+EotIaUfsdh85izfYsWd/vg//bjksFFaJ2B9Mu4LdrToitfLZgN3IulYjapZ7Ox3crD8vS+Hb5IaTnFIjaZV8sxmeL98rSmL/pOA5lXRK1O3+1Au8u2ilLY9nuTGw/cU7UrqC8Bq/9utXmOi2UAhuOnsbaw9mitmW1DXj+l02goDaVPKcU2Jl5Hov3pYva1jYa8PTP669NvUnX4CjF4ZzL+HnncVFbndGMx35eBwvL2VSwiqMUGZeL8PUWcQfUZLHgkflrYbawNmmwlCKnqBwfbdwnbstxeHjhWujNZpvKkbMcRV55Nd5at0vUllKKx5asR53BYFMlXpajKKqpw0trtkmyf2b1ZlRIdHSua1CKioZGPL1qkyT7FzZsQ1Ftnc0adXoDHl2xQZKz/vq2XbhUVW2zhs5kxoMr1ykuXf93RXVE/kQ4jmLJ2hRZ5Y8ptf6414s4GJRSLF53QlH54zXbMkXtft+UKnsFBUMIlm8VH5QWb02VXTFTwxAs2Zomard0Z7r842AIftuaKmq3Ym+m7JLXhBAs2i6userASVBqm4NwXQPAop2pog/aNYdPwWxhZVe0XLQnTfRBu+7EaehMZlkVLSmAxfszRCMWm9NzUKszyNOgFMuPZEFnNAna7cy+gLI62wbWJjhKsS7tDGoahaMJB3Mvo6CqVlapfo5SbM8+j5LaekG7lCuFOFdaIUuDpRQHzl9GXoXwm352cSnSCopknSuWUqQVFOF0camg3aXKKhy4eFm2xtmycqQWFAraFdfVY8vZXFn3FUsprtTU4EDeZZs/ezegOiJ/Iumn8lFcViu7gRbHUWzYfhJGE/+D9syFElzMr5A9WLAcxZZ9p9Go4w/bXrpagZPnCmX3jmE5il3HzqG6TsdrU1ReiyNZl2X3Q2E5ikOZl1BSwR9SraxtxO6UXNkaHEeRfu4q8ooqeW3qdUZsOnJGtgalFGfzy3DmMn8EyWCyYM2hU/I1AOSX1SDt/FVeGzPLYsXBLEWN2cprG3HodB7vdo6jWHogQ1Hp7jq9EbuyLvBup5Ri8cEMRd10DWYLNqWfFbRZfDhTUZ8SluOwNlV4mnTx0UzFjQtXppwS1jiuTENDCJalnhS0WZqapUyDIVgqorEs/SQ0Cq6HhiH4PVX4JXB51ilFS+g1hOC3tEzZn/87ozoifyIbdpxU/OBo0Blx8Dj/g3bj7lOKNUwmC3Yf4Z
8O2Lg3W7EGx1FsO3iGd/umA6cV16wghGDTAf6H+ZYjOYoGPcD6gFp/gH9aY0fKOZivFXNSorHuIL/G3swLaDQIv6FL0VhzkH9QOnLmMqoapM3388EwBKsO82ukXryK4mrhN3RRDUKw8jD/oHTmaikullYp6qZLAKw4wj8o5ZVXISu/WJHTRimw7Bj/cZTU1uPIhSvKmjxSiuUn+DVqdAbszLmgSIOlFKvSTvHm7ujNZmw4dVaZBkex/lQO9Obm82osHIeVmdmKuumyHMX2s+evLyW+FUoplmSeVHTNWUpxMO8KiuuU/Qb+jqiOyJ9IfmGV4jbcGg2DopIafo2iltEoFNC4WlqtXIMhKCyrFdCogdLe6wRAYbmARlmNYmeH5aigRkGZtQquUo38shp+jfIaxQW6WI7iShl/CD2/vFbRGz5gdT6FNK5W1CjaP2AdXK+U82sUVPBfK6lQAAWVAtdcYJstFFXX8U6XXa2SH1m9kapGPQzm5iOsRbXSEkfFaDSZUcszgJfVN8LEKnPUAcDEsiivbz5ptUanR6PE5F8hOEpRxJO0arBYUCUxMVcICuBqbcvcP38nVEfkT0Sn8K0VsA6uOj3/fhp1yjUA4e/a0AIalFJBDZ1RXp7AjbCciIbB1CIP2gY9/zSWzqj8AQhYp3j4NVrmmjcI3Fd6k0mxIwJAMHKjM5pbRENoxYmuBQYkwDodxqvRQteDoxRGnmhaSwys1/fF8311ppY5DgBo5NnXn6LBEylpUY2WvB4tuK+/C6oj8ifi4qSsjDFg9Zidnfn34+qsrIxxE85O/PtpCQ1CCJwF6jG4ONopjlZoGBENJ3vFAx8B4CpwrlwcW6bnhJvAOXd2sFccPQIAV4H708nevkWcNleB6+HsYNciGkLlwluqB4hQvRKl5cqbYAiBA083aBcJHZylwld4zMW+ZY7j7tJo/nf4ZxzH3YzqiPyJhAXb3uL7VliWQ3Ab/jLjYUHKNSwshxABjdA2XsqPg+MQEsCvERIgvYAQH5QCIQIl2UNaeyueYmIYIqgR6u8tubYFHxqGoK3A+Qj194JF4bI/DUMQHsDfoyjM30uxk6BhCNq2FtDwEy6fLwWGELT14z9XLaFBCBDq68m7PbRVC2gACPbx4E1+DPX2lL0y7kZ83VzgwFNoro2nO7QK+/4AgJujA28FVD83V15nyxYctVr48lQn9XRyhBuPA2ELWoZBoIdb8/p2Wvi6CLfjkAIBEOIpvUDb3YLqiPyJJI+IUTzwubs6ol93/tLfycOiFGs4OthhSO8I3u1Jg5RraDUMRvbjb1c/dkCXFnnLHzugC++20b07t4BDRTF+QFfe7cO7d1TcNIzlKCb05694OzimHdwEojJSNSYJaPTpHApfD2UPWpajmNKPv+NtXHgggnw8FK1o4SjFVAGNToF+6BToCyWXnVJgep8Y3u2hrTyR0DZQcbRtRm9+DV93Vwzs2FbR/csQghk9+TU8nByR2KWj4lUz07pF8Zbfd7TTYkJMF8WrZibERPL2bdEwDKbFdVW8amZ0ZEe4O/JXvJ0VF63ommsIweD24XdluXcx/lBH5MMPP0T37t3h5uYGPz8/jB8/HufOiRdnuluJ7RIkGM0Qg2EIxo2MgZ0d/xtERLg/IsL9ZC8j0zAEY4d0hZPAlEJoG2/ERwbLnjrRMAQj+naGhyt/Dwd/bzf0iwtXVEdkYLf28BVo6Ofp5oThPSIUafSIDEGwP/81dXG0R1If+Q9ahhB0bdsaHYP5yz/b22kxqX+U7OtBCNC2tTdiwtvw2mgYBlP7xyh60Lb2ckPvTqEC34Ng5oA42fsHAE8XJwyOaidoM7NfHJT40c4OdkiM43fUAWBmn1hFESQ7jQbjukUK2szoFav4hWBSAr8TDQAze0QrXpkzLUG4bcSMbso0WI5iRjd+hwoApsdHK141M1NEY2qM8LkU1aAUs+OENe5W/lBHZP/+/Xj88cdx7Ngx7Ny5E2azGSNGjEBjo7
SSvHcbhBDMmtBD5mcBrVaDcYniN+rs8T0kl+y+VYMQgokjY8U1xnaXXUeEApgqod/MzNHdFNURmTmqm7jGiHhFNVfmjEoQtZs2JBYAkfWmz1GKuYniGpMHxEDLMLKL5d07IkHUeZ3Yuysc7LSynZF7hyaIOktJ3TvDzclBtsY9g+JFG9ONiouAj5uzLA0CYGa/WNGeNkO6tEMbT3dZDighwOSeUXAXaejWt30own29ZWkwhCApthN83YSjXHHBbRAV6C9bY2indgj29hS06+Tvi15hwbIiFhpC0DssGBH+wh2rQ7w8MaxjO1nXXEMIogP8ERco3BTUz9UVyZGdZGu09/FGv7b8jvrdzB/qiGzbtg333nsvunTpgpiYGCxYsAD5+flISxOveHm3MnpoV4wfaZvXa21iS/Dei8nwbyXe/G9I7wjMTBYfvG7SgHVAeuvp0QgNFM/P6B3bFvMm97FJo4lXHhyBjmHi3YTjIoLwzKxBsjSemzMY0R353/Cb6BTmj1fvHSZL47FJfdGra5ioXdsAb7z7QKKsmaZ7R3XHkPgOonZtfNzx8bwxgLVHok1MHRiDsb2E374BwMfdBV89lHzNYZW+fwIgqUdnTBsgft+7Ozvim4fGQ8MQmx7ohBAMiW6H+4Z2F7V1tNPi+3kTYK/V2KTBEILeHUPw2MjeorZ2Gg1+eGACHO3sbBpgGUIQG9IGL4zpL27LEPwwdzzcHB1schQ0hCAiwBdvJA8VtSWE4JvpyfB2drZZI8zHCx9OGCnJ/otJY9Da3c2mc6UhBK3d3fD5pDGS7D9OGokwby+bNbxdnPHtlGRJUeZ/jxiKzn6+Nmu4Ozrg5ynjW2TV2N+RPzVHpPba+mhv7+YHOqPRiLq6upv+7jYIIXhm3lBMHmONCIi9IWoYAq1Wg/dfHofe3cIl6zw6awDumdBTsgajYfDOM2MEc0Nu5b4JvfDw1H7X9yGqwRC89tBIjB0kPYQ5IzEe/5o9SLIGYHVCpo6QHuIfNyAKr947DAwhkjWemNwP946RHt0a2SMC7z04ChpGusaDY3viiQl9JWsMjG6HTx9KgpZhJGvMGhqPF6YOkjyV1yMiBP99ZDwctFpRjaaH6oQ+XfHWzBGSNWLatsEPj02Co710jcS4jvj4ntGSp6c6Bfph/mNT4OpoL1ljYJdwfHlfsmjEpYlwP2/89uhUeLo4iQ4wTZt7tQ/GDw9MgL1WWl5RoJcHfn9oGnzdXMU1rv3FhARg/v2TJK8g8nd3xdIHpyHQw12yRqfWvvjtvilwc5SWu+Tt7ISl905DWx8vSU4uIUB4K28svXcavJ35p3hvxM3RAYvnTEEnf9/r31MIa4dfDyybOw3+btLyNpzt7bBo+iTEtgmQrOHn6opls6YhyOOfl6TaBKFyYvgy4DgOycnJqKmpwaFDh5q1efvtt/HOO+/c9u9S2gj/HTmcchErNqYh/VT+9YchR5tazXOws9Ni1KAumJIUj9Ag/tUGQhzPuowVm9JwLPNysxoaDYMR/Tpj2thuaB8qrw11+pkCLNuahkPpF0GIdQqCo/9rNc8wBMN6RWD6qG7oFO4vS+NkbhGWbkvDvjRrVVmrBgVDiHVqhQCDEzpgRmI8ojqIR0Ka40xeCZbsSMeuE7nX981RzqoBa+2TAbHtMGNEPLp1CpalkVtQjiW70rHt+FlYOA4MYW7W4Cj6dA3DzOHx6BUpL0x7qbgSi/ekY/OxHJgtLDQMA47S6w94lqPo2SkEM4fEoX+UdOf2Rq6UVWPJvgysP34aRpOlWY34doGYNSgOQ2Lay8pZKqysxZIDGVhzNBs6kxnamzQIWI5DdFgAZg6IQ2JcR1kapbUNWHwwA6uOnkK9wdisRmSQH2b1j8OY+E68SZdCVNQ3YvGRTCw/dhK1OsP/NACAWDU6tm6FWX1jMa5bpGRH50aqG/VYciwTS49noapRf10D/8/eW8fXdVzr38/sQ2JmZmaZmSF2zAzBJmlTTuE2pb
RNuSnepmkaNDPHjh2wHTOJJYuZWTqSDp897x9H8rVlbTh7K236e/18PrnNzR7t79k0s2bNmrVg+w4tLItIX09sm5KJtROSRRs690urN2Df7ULsuZWPjoGhMRmhnu7YPjkDGyakcQaP8mnQaMKB3ELsupWPVu3AmIxAN1dsn5SBTdlpkrbNGswWHMgrxK7b+Wjo6x+T4efijG0TMrAlO403QJVLRosFhwpLsDMnDzU9vWMyvJ0csS0rA9uy0uHpKM6Y+m+SVquFu7u7qPH732aIfOUrX8GHH36IK1euICQkZMw2RqMRRuP/JW7SarUIDQ39rzREKKUwmi1QMgyUAtvTGlt6ce5iCTq6B2A0WuDspEFMhC8WzU6CizP/jMIw3EELMVra+3H2UgnaOrXQG8xwdtIgKtQHS2Ylws2V/yMwmi1gCIFKgNHercWZS3fR0tkPnd4EFycNwgI98disZHi6OfH+rclsARHB6OobxOnLd9HU3ochvQnOjmqEBnhg2YxkeHvwr3ebhjNIqgU6yB6tDqev3kV9aw+GDCY4OagQ7OuBZdOT4O819va9EY1UW+XaEjmivkE9ztwoRXVLN4b0JjhqVAj0csXyaUkI8uGfGZmtVrCsMGNAZ8DpW2WobO7EoN4EB7USAZ6uWDY5EWEC21jNViusLIVGqeAd4IcMJnx4pwylTR0Y0BmhUSnh5+GCZRMTEMWzVRewbRM3W61wUCl5GTqjGWfzylHS0AatzgiNSgEfN2c8lp2AuCB+41ksw2i24KOCCuTVtWBAb4RKoYCPqxMWZ8QjOZTfeLayLIwWKxwFGCaLFZ8UV+FObRO0OgNUSgU8nR2xODUOaWEBvH8rlmG2WnGhtAY3axrRrzdAwRB4OjliQXIMssODef+WpRQGs0WQYWVZfFZZiytV9ejXG8AQAg9HB8xPiMbkyNBxYbCU4kp1PT6rqr2XVt3D0QGzYyIxIzqc1zNDKYXeYoGDkj+eiVKK63WNOF9ZjT69rQiiu4MDZkaFY3ZMJK/RaQ/jTlMzzlVUoVevh5W1MaaGh2J+TBSv0WljmKFRKCUZwP9pfeEMka997Ws4ceIELl26hMjISNF/Z8+FfBFksVhxKbcahz7KR1Fly738Ec6OaiyaloC1C9IRw7P7QYysLIurBbU4+Eke8sqbYLbYGE4OKiyYGI9189ORECHN6zAilqW4XlKHQ+fzcbO04V6tFEeNCnOzYrBhbgaSI/k7TiFRSnGrvBEHLubjWnEdTMMMjUqJOelR2DAnAxnRQbIZudXNOHCpAJ8V1cA4bIhoVApMT4rEplnpmBjH33GKYRTVt2H/5QJ8Wlh5L+OmWqnAlPgwbJqZganx4bKTs5U0tuPA1QKcy6+4l8VRpWAwMSYUW2ZkYEZihOzOqry1E/uvFeDDgnIMDmdAVSoYZIYHYcv0DMxNipadSr66oxv7bxbiVF4ptAbbpEPJMEgNDcC2qRmYnxQDtcy8EnXdvThwuxDH8+7eG8QUDEFSoB+2Ts7AkuQ4QSNOSE19/ThwpwhH8ovRM1wllyEE8f4+2DYpA4+lxMNRJS/pWJt2AAdyi3Awvxhdg0OgsMUTRPl4YfuEDDyemiA7kVbH4CAOFhTjQH4R2gcGQYevI8LTA1uz0rE6JUn08gqXunU6HCoqxt6CQrRotRh2YCLMwwNbM9KxNjkJHjI9An0GPQ7fvYs9BQVo6O+7xwhydcOWtDRsSE6BtxP/ZEhIWqMRR8tKsLuwALX9vfe8Wv7OLtickoZNyanwc5a39XbQZMLxirvYWZyPqt7uewxfJ2dsSkrF5qR0BLrwT4a+KPrCGCKUUnz961/HsWPHcPHiRcTGCgfd3a//JkPk1GfF+MeBy+jV6oeXPR68rQqGwMpSpMYG4UfPLUJEkP0Ju87dKMNf93+Grr4hXkZihD9+9MwixIXZb/RczKvCa/suoK1n4N75xmLEhvjgR08sREoUfyT5WLpaUoff7T+Pps5+XkZkgB
d+tHU+smLH9qDx6XZFI3514FPUtffyMkJ9PfDDDfMwNdH+ZZDCulb8Yv8nqGzt4mUEerriB2vnYo7AttKxVNbcgZ8e+BilTR28DD93F3xvxSzBbaVjqbq9Gz899DEKGlrHZIwssXm7OOGlx2Zg5QTu3Cxcaujuw0+OfozbtU28DA8nB3xjwTRsmmL/NsbW/gH8+PhHuFbdAAUhD23XHGG4Omjw4uzJeHJalt1GaOfgEH566mNcrKgFGT7fgwzbsqSzWoXnpk/E8zMn2R2A2KvT46dnPsFHZVVjMghsO88cVUo8NSkL35g91W4jdMBgxM8+Oo8PSm3pFMZiADaDeltWBr4ze7rdS0Y6kxm/OH8Bx+7ehZVlHwrWHmEoGQYb01LxwzmzobFzychoseDXlz7D/uIiWMZgjHAUDIM1iUn46Zy5cLLTQDRbrfj99cvYVZh/ry7OaM7IM348Nh6vzl0AV7V9xpuVZfHn29fwTsEd6C2We894LMbiyBj8es4ieDp8sZdzvjCGyIsvvoi9e/fixIkTiI//vw7S3d0djiIs4P8GQ4RSijcPX8P7J26Kas8wBI4aFf7y/TV2xTLs+OAWXj88dmzNWAy1UoE/fmsVJiaFiWYcOJ+HP+y9II5BCBQKBn948XHMSBMfZ3DiWgle3fUxKKjgtllCAIYw+PWzS7EwO04041xOOX6440OwVByDgOBnWxdixRTxA+zF4mp8990PYGWpYL6IkU7lh+vmYeNM8QPsjYoGfP2dEzANL/eI0UvLZ+LpeeJ3TOXWNeMr7xyDwWwRvVX6ywsm42uLxO+YKmlux7PvHsGQ0SSa8eT0LHz/sVmiDYXKji48/d5h9OkNohnrs1Pws8cXiPZW1ff04ckdh9A5OCSasTwlHr9dvUR0htKWfi227zqMln6t6LwX8+Ki8Le1y6EWaSh0Dg5h+77DqO3pFfVeEQDTIsLwz7UrRcd99On1eOLQEZR2dopiMIQgMzAQ765bAxeRXp4BoxFPHz+G/DZxlY4ZQpDo44tda9fCQ+Qgrjeb8dzp47jW2CBq1xtDCKI9vbBn1XrRmVZNVitePHcSn9RVi2qvIAQhru7Yt3IDgly/mOMiYN/4/bkuPL3xxhvo7+/HnDlzEBgYeO+fAwcOfJ7Yf6v2fZgr2ggBbMseeoMZ3/r9UdS39Ij6m+MXC0UbISMMo9mCl/5yHBUNnaL+5uzNMtFGCGCbQVksVnzv9ZMoqm4R9TefFVTjF7s+EmUgALbtxFaWxQ/fOYNbZQ2iGDfK6vHy+x/CyopnsJTilT0f4VJRjShGfm0LvvPuB7BYWVEd4EiLXx8+j7O54hL6lTZ14GtvH4fRYrErMdafPriMYzeLRbWtbu/GV945Br1JvBECAP/85CZ2XckV1bappx9fevcIBg3ijRAA2HE1F29evCWqbbt2EM+8fwR9OvFGCAAcyinGnz8R9131DOnw1M7D6BwQb4QAwOnicvzqwwui8vpoDQY8vecoWrTijRAAuFBZgx+e+kgUQ2cy49mDx1An0ggBbO/v9fpGfPvkGVhFlBIwWiz40rHjKBNphAC2bzCvtRVfPXESZhHVeM1WK1784JRoI2SEUdbViWdP2L4rIVlZFt88dxrXmxpFb71nKUVNbw+eOnlEVLE9Sim+f+EsPhVphAC2xGdNA/3YduoQ+o1jVzX+b9PnaohQSsf856mnnvo8sf82tXcP4O/7Ltn9dyyl0BvN+N17nwi27dXq8Ptd5+1mUGoLnPzlO+cE2w7qjfjljo/sToZFYdsd8cq75wQ7QaPZgp/uOCcpbTulwE/fPyfYCVqsLH688yyoxNzwP9l17l5AK/dvofjR7rNgWfspBMDP9n0sWJ2VUoqf7P8IZisrKdnaq4c/Rd+QcEnyXxz9FAaTfYbOiP5w6hLa+wcF2/3q1AUMGqVVOf7bx9dQ19Ur/FvOXUKvTi8pc+bbV+7gbmuHYLu/XriGDu2g3QwKYN+dQuQ0NAu2fe
PKLTT09tmdxI9S4GRxGT6rrhNs+86tHJR3dtl9HSyl+KSyGmfLKwXb7skvQH5LqyTGlfoGHC25K9j2aOldXG1ssPu9slKK/NZW7C4sEGz7YVUFPq6tlsQo6+7C23l3BNtebKjF8YpSu/sSK6Wo7+/D6zk37PzLL6b++0Jxv0A6fr5QeKM4h1iWIre0SdArcvJysahZCBejrL4DpXXtvO0+vFEKo8kiafhmKUVDey9yK5p4232cU4EBnVEyo6NvEFeL63jbXSquQZdWJ2nwphTo1xnwaUEVb7tbFY1o6uqXNLBS2ErUn8kp421X3NCO8hbxs8nRsrAsTtzm78yr27uRU9ssK+314ZtFvMebe/txqbxWcnZcBSE4cKuQt033oA5nSyqkMxiC/bf4B6UBgxHHC+5KvlcKhmDPbX6GbUtpkXQGIdh9O5+3jdlqxZ7cfMnvFUMIduXwM1hKsSMvT9L5AVt3uiM3j3diQynFe3m5smoS7cjPE7wP7xfmSU4wxlKKXYX5gsUo3y/MlVwDx0op9t0thMEi7Hn5ouuRISJRZosVRz8tkJzmHLB1UMfOc3e0VpbFoU/yJacgH2Ec+ZS7E6SUYv+n0juOEcbB8/m8bfZfyJdXEIohOHBRgPGZPAZDCPZ9xs/YdzlfVoEuAmDPZ/wd7f6r8hiUAnsu5/G+mweuF8pisJRi//UCXjf6wVtFsnYkWSnF4dvF0Ju4O9ojucWyvg8rS3GioBRaPbeL+0Rh6b1dXVIZH5VWonOQu7TFmbvlGBTwlPEyKMXl6jo09vZztjlfVYNunbCnjEsspchpakFlZxdnm2v1DWjq10quV0kBlHd1oaCtjbNNflsrKrq7ZTGatFpca+Re7i3v7kJOa4usekFdeh0+reVecmno78OlxjpZk4EBkwmnqyok//0XRY8MEYkqKG9G/6C89TkrS3HuGvfsuLyuAx29wu5vIcbHt7jjEurbelHf1iur0K2VpbiQV8VZ7r6jdxB369tlfdRWluL63XoMGcburLU6A25VNMpisJSisLYVnRxLDmaLFZ8V18gq0EUB1LT1oKGzb+zjlOJsvvQZ/ohaewdQ1sK95HAmv0w2o3dIj/y6Vs7jpwvKZD0PABgymnCzppHz+AeF8hkmixWXK+s4j58pll+ok2UpzpdzD0of3q2Qnd6bEIKPyrk9eh+WVcpmKAjBh2XcyzNnKipkbyNXMgzOlHEPrmcqK0UH//IxPqzgZnxYVSGrWi9gu1dneIyEc7WVsgx1wDZ5+qCK38P636BHhohE9Wp143Ie7aCec3bcM04MvdHMGfvQMzA+DJalGNQZxzw2XgzAlghsLPVy/Hcp4jpXv84ge9AbUc/g2PdEb7LImn0/wBgY+zpYlqKfxwNgF2OI+9n2iohTEaNujnsFAN08fLEihPCep3M4h4ccKRiG9350DA7JfrcYQtCj47mOIfkMQgh69NzX0T2kk7yUPCJKKbr1PM9cp5NU1PN+WVmWn6HXyTYSrJSig6fAa5deJ9vYYSlFh+6/v4jsI0NEorhm//aK5dndIfeDvl9cv9c6TtdhY4w9gP5bruMR42EGO/bzELN1WjSD5/2R43IWzZDp1QFsy2X8jPF5Jmae8wjFEogV33WMV5/F9Z0DgIXKZ1Dw3w+ufCH2Msx892rcnjn3vfp3vFf/LXpkiEiUq7P99QfGkqODmjOPgauTvIyGI1IM5y75PBkA9z1xdRw/hhvH73VzGp/nwcdwFSjLbh9j7HM5a9R2V8/lZHD8XgXDwFEtL7Po/zG4n62LRl7WTzEMN5lZPwHbrJLrXgGA+zg8d1v6cO7f6imycBuf6HCKcm6Gg6wAzxHx1V7xcHCQPctnCIG7hpvhptGMyxIT371y08h/rwgAL558JW4ah3HxsHpJqIXzRdMjQ0SiUmICZae7VjAE2UncxdPiw/2hkTlgMAxBRhx3jYnIIG+4yDQUCCFICPfjTJsd5OMOb4FaM4IMAGF+HvBwGf
vD9nZ1QoiPu+yO1s/dBYFeYyffcdKoEB/sC5nZ2uHu5IBI/7FrvTAMQWZksOyO1lGtQkIwd2bdiVGhsoJVAVuK+dTQAM7jU6LDZDMYQpAVHsx5fNo4MABgQgQ3Y2pk2Li40CeEc2cInhIeKvuZWynFxDDu65gcKq1Q4/2ysCwmhXFfx6SQENmeMAvLYlIoz70KCZXtsbBSiskcNc8AYEqwfAYATA7mvueTg+TfK4YQTA0Wn7Tyi6pHhohEebg6YsGUeFmdoJWlWL8og/O4s6May2cky9vdwFJsWJjJeVyjUmLN7FRZtVAopdg0P4vzuFLBYOOcDNkd7eZ5mZwGFSEEm2dnyDo/Qwg2zU7nDbbbPCsDclYDGEKwfnoab/XTLTMyZM2UFAzB6knJcOLxSGyZniFrWUPBEDyWkQAPZ+4Z35ap6bIZ8xKj4e/OXb9j0ySZDEIwNSoMEd7cRQA3TUiTNWAwhCA1yB9JgX6cbdZnpkg+P2Az1KO8PTGBxxBZkyatsu/9CnR1wayoCM7jjyck2J1CfbQ8HR2xKDaG8/iimBh4yvQCOKlUWBGfwHl8VliE7JouKoUC6xK5szVPCgxBlIen7MnT5qQ0mWf4z+uRISJD6xbK6wSD/NwxQSAF+7p58hhebk6YmcFf42Tt7HRQGQwXRzUWTuRPwb5qeoqsJQe1SoFlkxN52zw+OUmwCjGfCCFYNZV/QFiSFQ8njmUuMaKgWDstlbfNvNRoePIM8EKyshQbp/N3TtNiwxHoIb2jtbIUm6bxp6vPDAtCtJ+X5I7WylJsmcrPSAjwRXpIgGQj10optk7O4G0T5uWB6VFhsnJKPDGZezIAAP6uLlgYHyPZ80IBPDmJ21AHAHcHB6xISpDMIIRg+4RM3vvgpFZhQ2qKZAZDCLamp/Gmq1crFNiali75eSgIwcaUVN6ihAqGwRNpGWAkvr0KQrAqPpF3+YcQgqfTsiWdf4SxJCpWdqG9L4IeGSIylBwdiFnZ0ZI/iK9vniXoiYgO8cHSaYmSI7i/vnGW4BJSsK871s1Nl2wofHXNDMFqpj7uznhiofgaKKP1/LIpgktIbk4OeGHpFMmMpxdOgJcr/xKSo1qFry+fLun8BMCmmRkI4lj6GZFKocC3H58pjUGAlROTEOXvzduOYQi+u3yWJAZDCBakxPAuy9h+C8H3lkpnTIsJw+Qo4eWElxZKu1cKQpAZGojZccIVwb85bzoYQuwelhQMQWKALxYnCRf8/NqsKVAq7B/6FAxBpLcnVqYmCbb98rRJcFDxl64fk0EIglxdsTFd2HPz7IQJcJUQx6EgBN5OjtieyW+0AcATGZnwdnS02+BhCIGrRoNns4QNgC0paQh0dZXEcFCq8JXsSYJt18YnIdLD024GAaBkFPjGhKl2/d0XVY8MERkihOAXLz6GxCh/uz+6b2yZjbkTxVUj/tHTC5GVEGK3MfL86qlYNl24cwKA72yai+mpkXZ3gtsXZ2P93AxRbb+6cjoWTRBfvG5Ea2ak4qnFE0W1fXbRRKyeZr+be+mEBLy4TFwht80zM7BtDvdS1FgiAGanRuG7q2aLar96UjK+vGiyfQwCTIoJxSvrF4hqvzgtDt9ZZt8gzhCC1NAA/HbzUlHtZ8VH4icr5tl+nx2MuAAf/HXr46Le+UmRIfj16kUgdjAUhCDc2wP/2LpKVKxXWnAA/rj2MRBCRMcIKQhBgJsr/rV1Ne9S3Iji/Xzw+voVUDCM6P5EwRB4OznhvS1r4KQW9tSFe3rgzXWroLSHQQjcHBzw/qY1vIGqIwpyc8W7a1dDo1SKHmAVhMBJpcKOdWvh4ywcT+bj5IQda9bCSaWyi+GgVOK9VWsQ5CrsDXTTOGDXqnVw02hEMxhCoGIYvPP4akR4cC/3jchJpcaux9fD29HJLoaCYfDm0pVI8La/wvoXUZ9r9V25+m+ovgsABqMZP3vjQ1y8UzVmifMREUKgYAh+8OxCLJ9lXyl1k9mCX777Mc
5eL+VlMISAEOClLXOxfkGGXQyLlcUf9p7Hkc8KBRmAzRPyxJIJdhlIVpbFX49exu5PcvkZDAGlFM89NgUvLJ9iF4NSijdOX8dbZ2+CMIQzw+gI/8kF2fjmipl2xclQSvHuJ7fx99PXADxcRn00Y8OMNPzPmrl2BzjvuZyH3x//TBRjxYRE/GzDQqjsXJ46ersYvzj6qa1UO0dvMMJYlBqLX29aIroK64jOFJTjh4fP3cvEOhZmhDErLgJ/3LwMznbuujlfVo3vHjoDw3DOHD7G5MhQ/G3Tct7dMmPpanU9vnnoAwwaTWOWar+fkR4cgDc2r4SXiIH1ft1uaMKLB0+h32AAQzBmTNIII8HfF29tWgV/V/vc84WtbXj+0Al063RgCBnz3VIQAiuliPL2xDvrVyPEw90uRnlnJ545cgxtg4OCjFB3N7y3di0ivYQH7/tV09uLp48dQaNWy8kY+e8BLi54d9VqJPjYN3g3afvx1MmjqO7tufd7uRjejk549/HVSPPn9xaOVvvQIJ764AhKuzs5GQQEFBQeGge89dgqTAzkDrb9Isie8fuRITKOKqttx+GP83HuWinMlgcjrv29XLFuUQYen50CD1fp6/+VjZ04cr4Ap6+UwGh+cI+6t7sz1s/PwIpZKfDxEFeCeizVtnTj8MUCnLxS8lB6bU9XR6ybk47Vs1Lh5yk9xqChvRdHLhfh6JWih7KlujlpsG5WOtbMTEWQt/Tn3tzdjyNXinDoSiEG9A8mW3NxUGPN9FSsn5GGUF8PyYz2vgEcvlaEg1cKHyo056RWYfXUFKyfnoZIfy/JjC7tEI7cLMb+K/noGpUcTqNSYtWkZGyclobYQB/JjJ5BHY7dLsHea/kPFbNTKxR4PCsRG6emISnEXzKjT2fAidy72H0tD8192geOKRkGy9LjsXlKBlJD/CUvRQ4YjDhZUIpd1/NQ39P3wDEFQ7A4KRZbJmcgKyxIMmPIZMKpwjLsvpWHqs4Ha0UxhGBBQjS2TMzA5Aj7vZgj0pvNOF1Sjp2381HW/mAFbQJgbmwUtk3IwDQZsStGiwVnyyqxMycPha0P1qMiAGZEhmNbdgZmR0VIzpZqslrxUWUVdubmIafl4SrdU0JD8URWBuZHR0vOlmphWXxSU42d+Xm40fRwzavsoCA8mZ6JRTExvLEnfLKyLC7W12JnYR4uN9Q/ZICm+wfgybRMPBYTB40I79dYYinF5cY67CzKw/n6mocYid6+eDotC4/HJPDGt3xR9MgQGUdZWRaFNa3o6B2EyWKBi6MG8aF+vAOkdsiAiroOaIcMUCkV8HRzQmKUP+fHzLIURXWtaO8dgMFkY8QG+/AOkIN6I8rq2jEwZIRCwcDD1RFJkQGcM25KKYob2tHao4XeZIazgxoxgT6I8OOegegMJtyta4d2yACGIfBwcURKZABnQCilFKXNHWjq7ofeaGNE+nkhOoA7XsFgsqCkrg39QwYQArg7OyAlIgBqnhl3eUsnGrr6MGQ0wVmjRpiPB+KDuGc5JrMFxfVt6BuyZRN1d3ZAclgAHHi2Rle1d6GmsxdDRhOc1CqEeXsgIdCXc2AxW633GCxL4e7sgKRQf97A1trOHlR2dGPIaIKjWoVgDzekBHMPwhYri+LGNvQO6mFlWbg5OiAp1A8uPPkp6rv7UNHRhQGDEY4qJQLd3ZAeEsDJsLIsSpra0TOog9nKws1Rg8RgP17PQVNfP8raOjFgNEKjVMLP1QVZoUGcAyTLUtxtaUfXoA4mixWuDhokBvnCgyeXRqt2ACVtHdAaDFArlfBzcUZ2SBDnN0Upxd3WDnQODMFoscDVQYN4f194u3B7JzoGB1HY1g6twQCVQgEfZydMCA7m3GlCKUV5exfaBwahN5vh5qBBjK83/Hi8E106HQpaW9FvNELJMPB2csLE4GDeAbKiowst2gHoTTZGtI8XAty4JwG9ej1y21rQZzBAyTDwcnTEpKAQ3gGyqqsbzf1a6MxmuG
g0iPLyRLA7dx/XbzQgt7UFvUYDGBB4OjpiUmAw7wBZ29OLhv4+DJnMcFGrEeHpgTAPD872gyYTbrc1oc9g+249HBwwMSAELmpuT1lDfx/qevswaDLBWa1CmLsHIj25+zi9xYxbbU3oMdiKZXo4OCDbL5g3j0mTVouavh4MGI1wVqkR4uaGGC+ePs5qwe32RvQY9LBQFh5qG8NDw/O+Dw6gqrcbWqMRTioVglxcEc+zDGOyWnG7oxHdhiGYKQt3tQMyfILg4yB9QipXjwyRcVDvoB4nrhbjwIV8tI+q90IATEuOwMa5GZiaHC55tqDVGXDyxl3sv5iH5m7tQ8cnx4dh4+wMzEyJlJyzZFBvxAe3S7H3szzUj1HfJDs6GJtmZWBuWrTkrX06owlncsuw53I+qtu6HzqeFh6AzTMysTA9RtRa+VgymC34ML8ce6/mo7T54RoqScF+2DojA4vT4+1eNhiR0WzBR8WV2HM9H0WNDxfdivX3xtZpmViWkSBqPX4smSxWfFpahd038pHb8PAMMdLHE9umZGJFegKvgcEni5XFhYoa7L6Zj5t1D9dpCfV0x/bJGViVnmT30sSIrCyLy9V12H07H1eqH54hBrq5YtvEDKzJSIaXxGRdLKW4WtuA3Tn5uFD18AzRz8UZ27IzsD49GT7O0jpcSiluNDZid14BPq6sesi17+3kiK0ZGdiYngp/F2m7EyilyGlpwa6CfJypqHjI7e7h4ICtaenYlJqKYIn9HKUUBe1t2F2UjxMVZQ/lwHBVa7A5JRVbU9IR5u4hiQEAxZ3t2F1cgGPld2EclWHVWaXCxqRUbE1OR7SndA9geU8ndpcU4FB5MQzWB0tTOCqVWBeXgm3J6Yj3kh4fUd3Xjd1l+dhfXgjdqOq1GoUCa2KSsT0xE8ne0j2A9dpe7K3Ix96KAgyYH/TIqhgGKyOTsD0hC+k+gZIZzUP92FuZj70VuegzPVi2QUEIloUnYntcNrJ9uXNJfV56ZIjI1Mc5FfjJu2dhtloF18wTw/zwt6+vgrebfR3h5eIafP/t0zBaLADlX2uOCvDC619bgwA7l0JuVjTg22+dgm64qudYjJG1zVAfd7zxlTV2L1Pk17bga+8ch1Zn5Fwzv7dG6+GKf76wWnBHx2iVNLXjxXeOo3tQx7lmfm+N1sUJb35pNRKCuXM2jKWq9i48/+4xtGu517MJsVW2dXdywBtPrkJ6mH0dSH13H57bcRSNvf3cjOH/ddao8fctK0TtGrlfLX1afGn3UdR09fKsNdvkoFLirxuWY1as8K6R+9UxMIjn9x1HaTv3ejZgu18qRoHXVi/F4kRxgdkj6tHp8cLhE8hvbuVlMIRAQQh+/dhCrBKxa+R+DRiN+Mrxk7jR0CjIIAB+tmA+NmfYl7NBZzbjG6c/wPnaWkEGpRQvz5qNZ7Oy7Bo0jBYLvvPxhzg9XKiNi6EYfue+NXkavj7Rvrgrk9WKH178GIfLSgQZVkrx5ayJ+P6UmXYtG1lZFj+/dh47S/JFMZ5IzsAr0+bZNRFkKcXv71zCG4U3BRgMrJTFhthU/HrGIqgY8ZM0Sin+t/Aa/px/BYyI61gekYDXZiyDg0L8BIpSindKb+PXuZ8KMGzXMS84Bv87cyWclOOT7ViMHhkiMnT8ajF+sfNjzkF1tBQMga+HC3b8YBN8eRIv3a9zOeV4+b0zACCq5oeCIfBwdsTO720WHTPxWXENvv32SVDKHeQ4muHsoMaulzbzLtfcr5uVDfjKv46BZalohoNKiR1f34g4nqWU+5Vf14Jn3zwMs5UVzVApFHj3y+uQJtJQKG3pwBNvHoTRYhGVs4UZDjr+1zNrMEmkoVDT2YPN/9qPIZNJNIMQ4PWtK0VtLwWApt5+bHx7H/r0BlEMAlsA9Z/WPYYlyeJ2M7UPDGLDu/vQOTAkKsnXyDD06xWLsCZdXIB2t06HDTv2o7lfa1cisVcWzcW27AxRbbUGAzbuPYCanh67GN+bNQ
MvTBbelgnYjJAthw6iuKPDrgR1X500Gd+ZLm6LuNFqwRPHj+BOa7NdjKfSMvHTWXNFGSNmqxVfOnMclxrq7Krxsj4hGb+ft1gUw8qy+PonH+DD2grRDAJgaWQc/r7wcVEGD6UU37v8IQ5VFosk2BhzQqLw9sI1omJYKKX4+a1P8X5ZjmgGA4KJ/iHYuXADNCKNkdfyP8PrxdfEMwhBilcA9i3c8m8zRuwZvx9t371Pt8oa8MtdnwAQZ4QAtqRLnX2D+NrfjsEsompqYW0rfvT+WZsXRCTEylL0Denx4t+PQm80C7avaO7Ed9/9QLSBMMIYMpjw5dePQKsTrsxa19GLb7xz0m6GwWzBC28eRbeIirwtvVp85Z3joo2QEYbJYsWX3z6G1r4BwfZdA0N4/t2jMJjFGSGAzbCzshRf3XEC9V19gu379QY8+/4R0UbICINlKb657xQq2roE2w8ZTXhm5xH06cQZIYDtHaeU4rtHPkRh08NLUaNlsljw7J6j6BwUZ4TcYwD40amPx1wmGi0Ly+L5g8ftNkIA4BcfXcDFqlrh30QpXjxxym4jBAD+cOkKTpeVi2J888xpu40QAHj91k0cKhE3WP7g049wu6XJbsb7hXl4vzBPVNufX7lgtxECAIfKSvB6zk1Rbf9w+wrO2GGEALb36kxtBX5/67Ko9n8vuGGXETLCuNhUg1eufyKq/ftlOXYZIQDAguJWexP+59qHotofqi60ywgBbP1JcU8bvnnlpF1/9+/SI0PkPr1+/Kqkv7OyFJVNXfg0t1Kw7RsfXLNVP5XAqO/oxZnbpYJt3zx707YVUwKjrW8Ax26UCLZ99/xtmCwWuztAK0vRO6jHwWsFgm13XsqFzmSym8FSiiGjCbsvC3e0e68XoE9nkMQwWix47/IdwbaH7hShfWDI7gy5I1VI37wk3JmfLCxFQ2+/3QMrhe1a/v7ZdcG2H96tRGVnt+RMv3+9KNx5XqyqRWFru+SU6q9dvCJYIv5GQyNuNDRKZvzh0mXB96WwvQ2f1tRITtX/hytXBGudVPZ043h5qeRKtH++cRUGC//EplHbjz3FBZIZ/3vnJgZMRt42XfohvFVwWyIBeLvgDrr0Q7xttCYj/jdf+B0fSxTAnrJ8NA3087YzWMz4U544o+hhBsXxmruo6uOfdJhZK36Xd0ESg6UUnzRVoqDr4di0/7QeGSLDqmjsRFFtm+SOgyEE+y/k87Zp7OzDjbIGzrwWQiIE2Hshj7ej7egfxPnCKsmDBaXAvs/yeH+jVmfA6ZxSyQyWUuy/WnAvp8RY0pnMOHqrWDLDylIcvln00Pbj+2WyWHHgZoHkZ25lKU7k3oVWz+1BsrIs9tzIFxwc+RjnSirRNcjd0VJKsetmnuRU6iyluFxZh6Ze/o521+08WWnOcxpbUNnB39HuvJMnK815eWcXClr4vTs7c6UzAKCpX4vr9Q28bXblF8hidOl0+LSmmrfN7qJ8WYwBkwmnKyt42+wrKZQV5GiyWnCs/C5vmwNlxaK9w2PJSlkcLOP3dBytKoFpVOCrPSKEYG85/+Tpg7oyDJhNvG34pCAEu8vzedt82lSFboOwN5mPsasiV/Lff156ZIgM69ClAnnF5ShFYU0rKpo6Odscvlwoi0EpUNPWg4LaVs42R6/Z53ocS629A7heXs95/MTtu7IrU/YO6nGhmLujPZNXBh2PESFGQ0YTzhZwd7Sf3q1Cn4hlKD6ZLVaczOX2Ul2pqkebdpDzuBhRChzJ4fZS5TQ0o6arV/KsFbB1tAdyijiPl7Z1oKilXV4xPkKwL6eQ83hdTy+u10v3VAC2GKE9udwDRtvAAD6trpHHIAS78vI5j/fq9ThZXia7UN7OfG7GkMmEQ3eLZTN28CzPmKxW7CmRbqiP6L1C7smTlWWxszgP9vtv/08UwM6SPFg5+iRKKd4vsW+5ZLRYSrG7NB8mnsnT+6U5kmvTALa6RwerCjHEY8zsKL8jq3iolVKcrL
uLPqNeuPG/UY8MkWHdKW+SVVxuRIXV3G6vnCr5DIYQFPAw8mrsC1obSwqGQX4ND6O2Wdb5AVsCq/xabkZ+XYvs8u4KhiCvjuc66lokJ1EaESFAXj0Po14+g6UUOfUPJ2oaUU5Dy7iUqb9dx8NobJFdJdRKKW7yXEduk3yXsZWluNnAzShole71vMegFLfHSJw1ouKODtmGOju85ZdLFT1d0Fukz/BHGEUd7Zy/ta6vF/1G/mUVIVEAtX290HIsz7QNDaJdJ89QFzqP1mRErVaeoQ4A/SYD6rS9Yx4zs1YU97TLMqgAQGcxo4JneSanU37/bvutwjFh/049MkSGNaCT98EBtoFPy3Me7ZC82TdgS33OF0zaNw4MQgCtnvs6+nUGWa5UwLYmOjrb6f3S6o2yjTaWpRjgWTYZMBglL5ncY1BbMCqXtAb57xUA9PI88wGDcVxyBPQJXIfUfDkPnIePYTTKmu2NaIDnnmtlDqwjGjRyz1oHxolhslpt2/vH0Hhdh+1cYz8TLuNBGmPsc40ng8to6jfJ7xOFziUUBzMeDKPVAjMrvCFCjMbzvo+HHhkiw5KaMOx+UQreOh9KiQnDHoTwM1TjcB1C55Ga+Ox+ERDe86gUjORqwPcYhJ+hVDDiq6TxiC8r5ni8V0KM8XgewgxGttFmOw//dcid7QH891w1DsaUIGOcnjkATm+aXC/b/eJ6JuPK4DjXeD0Pfsb4fB8AoOY4l/LfwSDjea/G7/eOhx4ZIsPycZefCpellLeMvK+7s+zB1cqyAgwX2bNKlqXw5mH4uDnLXjah4L9X3q7OYGR+eIQQePGk9OZL9y1WCobw3ytnJ9mDK0MI/Fy5309vZydYqbylAIYQ+Apch5x4BMBm8/m6cF+HmKqrYsR3HqkZWEfLy5GH4TQ+1+Gu0XB6oXycxuc61AoFXFRj55XwcRwfBkMIPBzGzq7rzXMf7RXXuTw1DuPiaQO474mrSj1ug7uPw9jXoWAYuKmkZVsWy/hP6ZEhMqylkxJkGwkqpQKz0riTTy2ZmCB7SYMQgnnpMdyMrDjZAx+lFAszuDNhLs6Ik71sYmUpFmdwJ9Fakh7HGXwmnsFiSTo3Y3Hq+FzHktR4bkaK/OfBUoqlPIyFiTEgMl07LKVYnprAeXxuXPS4zPQf52HMiAyXXcyLIQQrkxM5j08MCYaHiFL2gowkbkaaf4DkdPAjUhCClYncjDgvb0R6eMp66gpCsDw2nnNZL8TNDam+/rIGcQUhWBARDQeO0g6eDo6YGhQqK8ZJQQimBYXBk8PYcVCqsCA0WhaDIQRpPgEIdhk7MRchBI9HJMhiEABRbl6I9eAuXrkyMll2PFiAkyvSvKWnlf889MgQGdbjU5NkLZ0oGIJlkxPh6sTdyS2ZEA9HniJoYhjzM2N5vTfz0mLg4Sy9uq+CIZiWGI4QHw/ONtPjIxDgIb2jZQhBRkQQb7XYzIggRPl5Se5oCQFiA3yQHs79wSUG+SE1NEByR0sABHu6YWpMGGebcG8PTIsOk9V5eDs7Yl5CNOdxfzcXLEiQ19G6aNRYmsxt7Hg4OuDxlARZnjC1UoGVqdyDq7NajfXp8jpaAmB9egrncY1SiS0Z6bIGV0opNqdzp3pXMAyeSM+Qvbtha1o653FCCJ5Kz5R8/hHG9tQM3jZPpWXKMqStlOJJAcaTKVmyvG1WSvFkCv+9eDJJHoOlFE8nZfO22Z4gjwEATydm88Z7bYuTxyAgeDI+e1zivcZTX6xf8x+Um7MDlk1OlNzRWlmKDXO4Ow4AcFSrsGZ6KhgZjI2z+BkqpQIbZ6ZJ7gStLMWmWRm8bRiGYMvMTMkeJJZSbJnJzyCEYNuMTMkx6JQCW2dkCAZxbpsmr6PdOi1T8Hlum5IhufNgCMHmyRmCsSZbJ8ljbMhOFSwWuGVCumQPkoIQrEpLgqtAIb8tWemSr0NBCJYmxsFbYIlnU3qqpPOPMGZHRfJWpgWADSkpkg0qBSGYGByMWG
/+mkyrE5KgUSolGesKQpDo44t0/wDedsti4uGu0UhiMIQg3M0d00K4DXUAWBAeDV9HZ0l9FkMI/JycMT+c21AHgGlB4Qh39ZDEIADc1Ro8FsltqANAhk8gEj39JD13ApvnZlUUfxmEOA9fTPQLkfxuKRmC9dH8Y8h/Qo8Mkfv09TUz4O/pKskYeXbpJCSECRdae+GxKQj39ZTE2DwnA9mxIYLtnpo/EfHBvnYzCICVk5MxM0m4tsmWmRnIiAiym8EQgoXpsVjEs2QyojWTUjAtLszuzoMhBDPiI7BqgnBtk8fS4rEgOdpuhoIQZEcGY/MU4SJoc+KjsDIj0e7OXEEIkgL98Mx0/pkYAEyKCMGWien2MxiCKB9PvDhrsmDb1KAAPD9top0E23UEubvh23OF66dEe3vhpdni6qyMZvi4OOPl+bME2wa5ueHH8+ZIYrg5OODnC+YLtvV2csIvFyywm8EQAie1Gr9ZuEiwratag9cWLLHbWGcIgUahxB8XLhU01B2USvxlwWN278oisAW7/nXRMsG/VTIM/rZgma32kZ0MBgR/nb9MMLCWIQR/nbMcSmJ/pg8Cgr/MWc65vHSvHSH404xlUDMKSflE/jj9MbiqhWNAfjvlMTgp1ZKMql9PXgrvL1h8CPDIEHlAni6O+Oe318LXw8WuAXbjnHS8uHKaqLaujhq88fU1CPZ2t4uxYkoSvrN2tqi2ThoVXv/KakQFeNv1si7IiMVPNs0X1emolUr877MrkRDsJ5pBAExPCMevtywR5RVSKhj8efvjyIwIEu19IQTIjgrGn7YvE7VjhWEIfrfxMUyLCRPddTCEIDnEH/+7fQXUAp2T7TcR/GLlQsxP5J+1jWbE+HvjzSdWwVEtvJxHCMGPls7B42ncSx+jpSAEYZ4eeGf7WrgIeCpG9O1507E5W3wFWgUhCHBzxXvb1sLTSdyS4ZenTsRzUybYxfB2dsLOzWvhJzI244msTHxrurhvdoTh5uCAnRvWCXpDRrQ+OQU/mm37ZsW8WwpC4KJWY+eaNYjyFFd4cllsPH41d6HoQVxBCByUSryzYjUSfcQVnpwbEYU/zl9yrwqxkBhCoFYo8a+lK5HhLy4WYWpQGF5fuAIKhhHVn9gKTzL4+8LHMTWI3+Myoky/ILy1cDXUCoUoBhnm/Gn2Y5gXKu7bTfTyw3sL1sNBqRTltRh5br+auhiPRXDHT92vKDdv7Ji/ES5KtWgGAPw4ez7WRdtXPfrfpUfVd8dQ74AOv9134V7tmNGue4YhYFkKL1dHPL98KtbPTrN7xqDVGfC7gxdwLqd8zAq5I2Xi3Z0d8OziSdg2z77S4AAwZDDhD8c+w6lbd221Z0Y96RGGq6MGT87LxrMLJ9m9bGQwWfCnU5dw9GbxvaJ/92MYYsu14aRRYdusLHx50RS7t7SaLBb89cOrOHC9EEaz5SEGGf7/HVRKbJqWjm8umc67xXksWawsXv/kOnZfy4POZAYhDxYlHLkrKqUC6yem4qWlMwWXMkaLZSn++dlNvHc1B4NG0737f78Isc0QV2Uk4X+Wzoazxr5KmZRSvHstB/+6fAv9BuOYDIYQMIRgWUo8frR0Dtwc7QvepJRiz50CvH7pBnp0ek4GACxOjMVPl8yFl4QdMYcKivHnS9fQOTg0Zsl2hhBQSrEgLhqvLJoHf1f745ZO3i3Fa5euoGVggJcxKzICv1i4QLQRcr/OVlbiN5cvobG/f0zGyH+bHhaGX8ybj0iRRsj9ulBXg19evoiavl5exsSgYLw6ZwHivbnjs7h0rakBP798AeU9XbyMTP9AvDp7PlJ8/e1m5LQ145Wrn6K4q4ODYStrn+Ljj59Pn4fsgGC7GUVdbfjJtY+R19nKy4j39MErU+ZjelC43Yzy3k78+MZHuN3RxMuIcvPCTybOw9wQ8ZOUEdVqe/CTW+dwta2O93mEuXjg5ax5WBLGv7Q03rJn/H5kiPCos28QRy8X4cS1EvRodTBbrH
ByUCMxzA8b52ZgVnqU7BwOPQM6HLtWjOPXitHRNwizxQpHjQpxwb7YODsD8zNi7B5UR6tvSI8TN0pw5FoR2voGYDJb4aBWITrQG5tmpmNRZhw0dg6qo6XVG3DqdikOXitES68WJrMFDmolwn09sWl6BpZmxYua2fNpyGDCqdxSHLheiMbuPhjNFmhUSoT5eGDj1DQsz0yEs4O8Etc6kxlnCsqw/0YB6jp7YRhmBHu6YePkNKzIEo5zEJLBbMHZ4nLsvVmA6s4e6E1maJRKBLi7YMPENKzOTIYHT9CzGJksFnxUWoU9N/NR3tEFvckMtVIBP1cXrM9KwdrMFNnbl81WKz4tr8aeOwUobm2H3mSGSqGAr4sz1mQkY0NmCvwkGAf3y8Ky+Ky6Frvu5CO/pQ06kwkqhQJeTo5Yk5qMTZmpCHRzlcVgKcXl2jrszM1DTnMLhkwmKBkGXk6OWJmUhM3paQj1cJfFoJTiakMDdhXk40ZjI4bMZigIgbuDA1YmJGJLWpokA2Q041ZLM3YW5uFKQz0GzTZj112jwbLYeGxLzUCsF3/ciRhGbnsrdhXl40J9DQZNtsRubhoHLI2OxbaUdCT5CC9RC6mwsw27ivPwcX31vWRhrmoNFoZHY3tKJtJ8+WNbxOhudwd2l+XhTG0FtMNJxFxUaswNjcYTiZnI8guSnSiwsq8Lu8vz8EFdGfqNBrCgcFGpMSMwAk8mZGOSf4hsRq22B3sqcnGi7i76TXpYKYWzUo0p/mF4Mn4CpgWEj0vCQ3v1yBDh0IDeiFO376KooQ1anRFqlQI+rs5YmhWPzEjhl45SKthmyGDC6dxS5Na12BhKBbxcHLE4PQ6TYkLHhaE3mXGmoBx3aprQrzdAyTDwdHHEwpRYTIsJF/RqiGEYzRacK67A9eoG9OuNYBgCTydHzE+Mxsy4CMGoazEMk8WKj0urcLmqDv06AwgBPJwcMTc+CnPjogQ9J2IYZqsV5ytqcLGyFn16W30FD0cHzIyOwIKEGN4kXmIZ1uHB8pOKGvTqdGAp4O6owfTIcCxJiIVGYPlGDMM2WNbjXEUlevV6WFgW7g4OmBIWimUJcYLbXsUwKKW43tiIMxUV6NbpYLZa4e7ggOzgYKxMSICzmt/IE8u43dKMUxVl6NQNwWy1wk3jgMyAQKxKSIKbht/IE8vIb2/F8fJSdAwNwWi1wE2jQZpfANYmJMNdYPuuGAYAFHe240hFCdqGBqC3WOCm1iDZxw/r4lME82OIZZT1dOJQRTFahrTQm81wVWsQ7+WLDXEp8HPiN/JGunYhTlVfFw5VFaFpsB9DZjNcVGrEevhgQ2wqAp35+12xjFptDw5VF6F+oBdDZhNcVBpEuXlifUwaQl08xoXRNNiHQzUFqNX2YNBigrNSjTAXT6yPTkOEq9e4MFp1WhyuzUeVtguDZiOclGqEOHtgbWQ6Ytz4vUxiGZ2GQRypy0OFtgMDZiMclSoEObpjdXg64t2FvUxi3q0e4xCON+SjtL8VA2YDHBQq+Du6YWVoOpI8ggQZYvTIEBml+s5e7LiQg5O378JssYIZdlkRAAzDwMqyiPL3wrZZmVg1OUVSNszmnn68fzEHx2+XwGC23Fu+uZ8R5u2BrTMzsX5KqiQvR1v/AHZczsXh20XQGc33XG/3M4I83bB1WgY2T0mX5OXoGhzCjqu5OHi7CAMGIxQMubdTQjHM8HdzwdYpGdgyJQNOErwcvTo9dlzPxb7bhejXG0YxbP/u7eyErZPS8cSUTNHxC/drwGDEjlt52HsnH91D+jEZnk6O2JydhicnZ8HDzuUJwFZ4bNedfOy+k4+OwaEHGcPPxs1Bg02ZaXh6Upbgbo6xZDBbsDuvALty89CsHYCCYcCytooWIwwXtRob0lLw7MRsScsTJqsV+woL8X5uLhr6+6Ecfs73MxxVKqxPTsaXJkxAsIRv0cKyOFBShPfzc1Hd2wPlsGt6hMFSCrVCgTWJyXguawIiPOz3Dr
CU4khpCd4pyEF5dxcUhAE7zBhZYlEpFFgZl4DnMyciRoJ3gFKKE5WleKcoB0Wd7Q9cB0MIQG3/+3hMAp7PmIhEb3FxGKMZZ2or8HbxHeR2tDx0HSN6LCIOL6RNkuwd+KSxCm8V38LN9sbhZ2BLNHiPQYEFYTF4IWUSsv2Eg+TH0mctNXir5BauDC8fUAqwoGBAQIjtmc0JisLzyZMxNcD+JRAAuNFej3+V3sBnLdXDz/lBhpVSzAiIwJcSpmBWUJQkRm5XI/5Vdh3nWyrvrQWzoCAgw+MJi0m+YXg2fgrmBwkH4o+lot4WvFNxDedaSu+tPdsYADP8nmV6heCp2ClYHJQoyctR3t+Gd6uu4mxzyb2xYzQj2SMQT0RNxbKQVFmelEeGyH26UdGAb75zEiaLhXfr4UicwcykSPzhiWVwsiPfR15tM1585zj0JrMgAwAmxoTiL08+DldH8QNsSVM7nn/3KAYM/DVYyPD/SQsNxD+eXGlXTpHK9i586f2j6BnUCW6hZAhBnL8P3nxyNXx5sn6OVl13L57ZcRRt2gHBbbMMIYjw9sQ7T6xBoLt493tznxbP7DmKht4+UYwgd1e8u3Utwr08RDM6Bgfx7L5jqOzqFmSMBFO+v3ktYnzFD349Oj2+dOQYilrbBXdGKAiBu6MD3lu/Bsn+4l3jWoMBL5w8ea+IGx9HQQic1Wq8s3o1soLEz5qGTCZ89cwpXG6oE8XQKJX41/JVmBYqLggRAIwWC7710Rmcram89y3zMZSMAm8sfRxzI8QPTCarFT/47ByOVtwdMy5mNIMQgv9dsBxLo8QPTFaWxU+vfYLdZQWiGBTAa7OWYG0sd+6U0WIpxW/vXMS/Sm6NGVswmsFSilenLMK2BPF5Syil+EvhFfy18KoohpVS/DBrLp5LmiR68KOU4u2ym/hN3nnRjG+mzsQ3UmbYNcDuqcrBz3I/vDdQc2nkeT0XPxXfS5tn10aB4/UFeDn3JAiIKMbmyGz8JGMpFHZknz7XUoLv3zkCCvAzQMCCYkVoOn6e8TjUjLRl+0eGyLBya5rxpX8cHjNQk0sMIZgYE4p/vLBKVPxHSVM7nvz7AZitrOh8FApCkBIWgHe+vE6U16KqvQubX98Pg9kinsEQxPj7YNeXN4gKeGzo7sOGf+7FkNEkOleEgiEI8/LAvhc2iQp4bOsfwNo396JPr7eL4e/qgsMvbBEV8Ng9pMPat/eiY3BQPIPYlp2Ofmkr/N2EPQr9egPWv78PjX39onNeKAiBq0aDI89sERVvMGQyYcOeA6jq6raL4aBS4sj2zYgRyEEBAAazGVsPH0ZRW5tohm1HhAIHN21Csp+wwWO2WvHUiSO42dwk+t1lCIGCEOxduwHZgcLBiFaWxQtnTuBCfa1oBoHNRb5zxVpMDxWeibOU4lufnsapqjLR22VHhqF/LVmFhRHc2ZBHRCnFy1c+wv7yQru35P5t7nKsjBa3Y+pXt8/jrZLbdhKA30xbjM1xGaLa/qXgCv5SeMVuxo+z5+FLSZNEtX279CZ+nfep3Yxvps7EN1Nnimq7vzoXP845Yzfj2bgpeDlD3PbtU41F+O7tY3adnwBYH5GFX2QKb48GgPOtZfjGrf0A+I300YylwSn4XfYaSeU27Bm//5/dvjugN+Jrbx0Hy1K70qqzlOJWVQP+ee6GYFuD2YIX3z5mlxEC2FyFRQ1t+PPpy4JtzVYrvvzecRgt4o0QwJaYrLKtC78+eUGwLctSfGXXcbuMkBFGQ3cffnzsY8G2lFJ8bf8pu4yQEUb7wCC+c/hDUe2/feQ0OgbEGyGA7Xn06vT42qFTogq7/eCDc3YZISOMAaMRLxw8LorxykfnUWmHETLCMJgtePbQcVFl6H9z6RIK7TBCANv3YbZa8czRo5yVYe/Xn29cw42mRrveXZZSWCnFsyePiapk+8/c2zhfV2MXg8K2K+r50yfQrdcJtt9ZnIeTdhghIwwA+OrHp9AyqBVsf7iyBP
skGCEEwEsXz6Cmv0ew7Yd15ZKMEAD40bWPUNLdLtjus5YaSUYIAPwq5zzudDQJtrvT2SjJCAGAvxZdxqWWGsF2d3vb8NMccf3OaL1TcQNnm0oF29UOdOMHd07YfX4K4GBdLo41FAi2bdX34zt3Dt37O3sYZ5qLsa9W2vtij/4thsjrr7+OiIgIODg4YPLkybh169bnzjx1+y6GDCZJWTMpBfZdzofBxN/Rns0vR8+gXhKDpRSHbxRhQM/f0Z6/W43WvgFJGS1ZSnEqrxTdg/wd7bXqetR29UpiWCnFp3er0Nzbz9sur7EVxS3t0hgsxfWaBlR1dPO2K2vvxM36JkmZOa2UorClDUUt/B1tQ28fzlfWSGZUdfXgel0jb7uOwUGcLC2T9F5ZKUWzVosL1fwdbb/BgIPFxZIZXTodzlRU8LbTmc3YWZgnKTsuO2y4HS/n78xNViveyc+RxKCg0FssOHS3WPC3vJkvrTOmsMXH7L1byN+OUvyz8KakDKYUtmvZeTdPsO2bxTellzQgBDtKcwTbvTW85CNFDCF4p1T4Xr9bdsuuZYn7pSAEb5fdFGy3o/KW5BgJhhC8XSY8md1TcxtUYv5oAuCdimuCE5uDtXdgGY4xkqL3qq6BlVlUU0ifuyFy4MABvPTSS3jllVeQm5uL9PR0LF68GB0dHZ8bk1KKPZfzZZ1j0GDCufxy3jZ7r+RDThFak8WKUzn8He3ea/ky62IAR2/zd7R7b+TLqiFCCMHB20X8jFsFshgKhmDfbf7OfF9OoWzGnjv5vG325xXKLgK2O4efcbCQ/3mJYewUYBy9exdmq1UygyEEO/L4B75TFWXQmc2SGQCwIz+Xt6P9qKYKvQa95PNTUOwozOMtsPhZYy1ahwYkM1hKsbskHyae+32nvRlVfT2SBwsrpThQXgSd2cTZpqS7HfldrZJLGlgpi2M1Jeg3Gjjb1Gl7caWtTnKafiulONdYgXYd9/1u1w3go8YKyRWnrZTiSlst6gd6Odv0GfU42VAimcFSivyeZpT2cU9shiwmHK7Lk3yvKICqgS7k9nBPbExWCw7U3ZFVxqJV34+rHdWS/16MPndD5E9/+hOee+45PP3000hKSsI///lPODk54d133/3cmPm1LWjs6pP8UQO2jvbgNe6Br7ylE6XNHZBZvBUHrnO71hq6+3CntlnWS8RSiv03uBmdA0P4rKJWVhVallIcuF3IOWAMGIw4W1Ihi2FlKY7mFcNkGbszN1osOFZQIpvxQXE5Bo1jd+YspTiQVyy7QNf5yhp0D3F7qfbmF8ouNHa9oRFN/dxeqj0Fwi5dPrGUoqi9HeVdXZxt9hYVyKoOSwHU9PUir62Vs82+kgLZJd7bhgZxtamBm3G3UHbF0z6jAZ/Wc3fm+8rlM3QWM07XcnupDlYWSvYijMjCsjheU8LNqJZ/HQBwuJrbGD9SW2RfLvgxpCAEB6u5v4GTDcWwsNINdRuDwcEabmP9XPNd6K3yDHUFYXCwNpfz+MX2CvSbpRvqNgbBoTphT5gcfa6GiMlkQk5ODhbcV3OBYRgsWLAA169ff6i90WiEVqt94B8paujqk/qT74mlFI2d3Odp7JbPoACau7kHi6Ye/uUOsWrrH+Sc8bX0ae2KoeGSVm/EEMcA3qYdEBWzICS92YJe3dgfVdegDkYOI8UeWVgW7QODYx7TGoyiYhaExFKK5v6x322z1YqOwSHZDABo7ON5t/r7ZRnqI2ro6+M8Vt8nbzJwj9HPzajp7ZVltAG2MY2X0dcju6qqghA0aD9fhpJh0DDAzajV9kqe4d/PqOdh1A/0yp6cMSBoGOT2VtQP9Eqq5XK/KAUvo2GwV7bRZqUs6gUYynFg1A1yxwY1DvXINgytlKJuiH9ZXK4+V0Okq6sLVqsV/v4PJmHx9/dHW1vbQ+1/85vfwN3d/d4/oaGhkrj64RTdcqU3cVurOqM8S3ZERosVLMeXO14MgPtauIwHKRriYOh47qPdDI
7fqzON53X8BxkylzLu1yDHfbewLMzjYBgC3NcBAHrL+FzLIA9DNw4MhhAM8ixpDI3DMyGEYIiHwccXzQB4GQNm+UY0pfyMQbNJcszDiFhKee+HziIt9u8BBigGee7HkGV8vnWtmXsZS2c1yXXsAAAG+BgWE8RVCOLX0Di8O3z6Qu2aefnll9Hf33/vn8ZG/qA+LjmqVeMyy3fkySViT54RPmlUSs5MqOPFIACcOLJi2lvLhE8uHOeSkvSMS1y/d1yvg+teCWQWtYvBda8EMqTaxeD4vUqGgUogM65oBk8mVKFsr6IZPBVJnVXynwlLKW/VU677aI8opby/dVwY4L8fbiIquwqJEH6Gi0ot21vBEAIXHobUyrMPMEDgquJ5r5Tj8627q7jTGjgp1OPiMXTjYyjVsg1DAHDhuVfjoc/VEPHx8YFCoUB7+4MBO+3t7QgIeDgboEajgZub2wP/SFGEH386XzFiCEEkz3kifOXVhQBsBkK4jwfn8XBv7mP2KMjTjdPYCfF0l/1RA4CXsyOnwRHo5gqVhGy1o+WsVsGLI0Gbj7PTuAx8aoWCM5eIq4MGHgLpwcWIIQTB7mPnElEpFAh0lVc7BRh+tzw9OI9HeHqOy2yMrz5KtKfXuLxbfIyYcWBQAJE8mVxjPb3Hxb0d6c7dn8R5+MhmWFgWkW48z8Pde1xiRKLcuK8j2s1LtjeapRRRbtx5cKLcvOQvxxEgkuc6Il29ZS9jKQiDSFfu64h09YZlHBjRrtyp5SNdfMZhWZFBlKv9GYLt0edqiKjVamRnZ+PTT/9vvzfLsvj0008xderUz42bFh6AKH8vWR0tSyk2TE/nPB4T4IPUsAB5O1oAbJzGzQj2cseUmDBZHRQhwOap3AxvFyfMS4yWtduEIQQbJ3FXIHZx0GB5aoK8HS2EYF12KmeSObVSiXUZybJ3zaxMTeT0fDCEYFNWmrxdMwzB4oRYeDlxZ7zdmimTQQhmRoYjiKcY3NZ07ndCjBhCkBUYiGgv7s58a2q6rAGDAIjz9kGaH3d9ja0p8hkhrm6YEsy9DLwlKV12Z+7t6IS5YZGcxzcnpMlmuKjUeCySO4vrptg02YOrWqHEiqgkzuPrY9LGxUhYG8WdKXZNZOq4GDsbojM4j68IS4aKkVds1EpZbIzizka7KChRtufFSllsjMzmPD7TPxZeannFLa2UxcaICbLOIaTPfWnmpZdewltvvYUdO3agtLQUX/nKVzA0NISnn376c2MSQrB5ZoYsh5SbowYL0/mzIW6eniHro3NQKbE8K4G3zdapGbI6KAVhsDo7mbfNlsnpsnabAMD6Can8jEnyGFZKsWlCGm+bTdlpsnfNbBZgbMxMFZWQjI+xNYvfCFifmiLLiLZSim2ZGbxtViUmChb84xNLKbZn8qf8XhYbD1cZSw4UwJPpmby5HOZHRsNHoMCckJ5Iy+Q1/KYFhyHU1V3yM2EIwfbkDN5MzRm+gUjw9JHcISsIweaENDgoub2CcZ6+mOAXLNnIVRCCNdHJvEs8oS4emB0UJXnypCAMHgtLgI8jd9kIX0cXLAlNkJVHZHZQNIKduTMcu6odsDoiTTKDAcEEn1DEunN7EhyVKmyIyJKecwUE8W7+SPXkLregYhTYFDlR8nIZARDi5InJPtxG9HjoczdENm7ciNdeew0//elPkZGRgfz8fJw9e/ahANbx1vLsRHg4O0j+6LbNzoJaoGrqorRY+Lk5S5qFEwJsmpYOJ4HYhlkJkQj1cpfEYAjB6gnJgvVmJkeFIj7ARzJjSUocAgRqwaQGByArNEgSQ0EIZsdGItKHfzksxtcbM6PDJX3YCkIwISwYyYH872WwuxuWJMRKeq8UhCDR3xcTw/jTlns7O2FNarJkRoSnB2ZHRfC2c9VosDU9XVL3pCAEAS4uWBzDb6hrlEo8lZElicEQAi9HR6yI4zfUlQyD5zKlzdYYQuCiVmNdAr+hzhCCL2dOkjSxYWBLib85kd/AJY
TgK+mTIcVfMVKw7IlE4VowX06dImvy9FQi9+x7RM8nTZI8eWIpi2eTJgq2+1LCZMlJtqyU4rnEyYLtnowV/h1cYkHxXIKw139L1AQwRJqZwILiufhpgknX1odnQ61QSk6W92zsdFnF78To3xKs+rWvfQ319fUwGo24efMmJk8WfgnkytlBjX88vxpKBWNXh84QgtnJUXhuoXC9A41KiTeeWwO1Umk3Y2J0KL6xdLpgW6WCwZvPrIaTWm3XIM4QgpQQf/zg8TmCbQkheH3bSrg7OtjFUBCCGD9v/HyVuJoKf924HL4u9hluCoYgxNMdv1+7RFT711Y/hmAPd7uMEQUh8HN1wd/WLRfV/lfLFiHGx8tuhqeTI95cv1LUR/3K/LlICfCzm+GiUeOddauhEBGM+r2ZMzEpJMSud3ekKN17a9ZAI2CoA8DXJ03F7PBIu78PFaPAeyvXigoQ/lLmBCyLibOro2WIrWrqO8tXw9NRuDDklsQ0bEiwz1M1UoDyX4tXwd9ZuIbRqpgkPJsiPNA/xADw93nLEebmIdh+QWgMvpE+zS7GiF6bsQzxnsKxAtMCI/By1lxJjFcnL0aGj3BBxXSfIPxiorg+YbR+kDEPU/0jBNvFufvhd5Mel8T4WtIMUVV4w1y88OeJawHYnxrlyZjJeDyU3xMNAH6ObvjbpI0gxL79MwTAmrBMrA+3752Uoi/UrpnxVkpYAN76ylo4aVSCg99IR7kgPRavPblMVEcOAHGBPnjvxfVwc9SIYNj+d0ZCBP736ZVQKcW5xsN9PLHryxvg5ewk2KGPHJ0QGYJ/PbsGDiKK6gFAkIcbdj+3Ef5urqIYBEBKSADef3ad6B0rvq7O2PPsRoR6eohmxPr5YPczG+AuoqgeAHg4OmDPk+sR4+st6qNjCEG4lyf2PbUR3iKK6gG2HS87t65HcoDfvd8pxAh0c8W+7RsRwBO3cb8cVEq8v34NskNs3hMxDB9nZ+zfspE3SPV+qRUKvLVqFWaG24q+Cb3xCkLg7uCA/Rs3Is6HO0DufikZBv9Y9jgWRsXc+51CDBe1GnvWrEcqT2zI/WIIwZ8WPobVCUmiGQ5KJXauWIuJQeLK2xNC8OtZi7A1KV00Q61Q4p0lqzErNEIUAwB+PHkuXkideO8cQgwlw+D1+SuwJEJ8hd9vZ8zAtzKmi2YwhOCPM5ZhdTS/5+h+PZ80CT8cNkaEljcUwx6BX05ejG1xwl6dEW2NzcIvJi4GEckAgJcz54nyhoxoVXgq/jBpxXAhRiGG7V5+M3kWvpk8WzRjYXAC/jJ5HRSEEc34Utw0/CB1kWjGdL8YvD55M9SMUpAx4p/ZGDEBr6Qv/9y9IcD/49V3R9Taq8WeS3k4cr0YQ0YTlAwDltLhgCcCK8siNSwAW2ZlYGlmAucOEz519A9i79V8HLxeiAG9cRTDFhuQEOSLLTMy8Xh2IpQSdpF0D+qw91o+9t8oQJ/O8H8MACC264j288K26ZlYlZ0MtUhD53716QzYdzMfe28WoHtQd48B2DpfC8siwtsDW6dmYl12iqjqwaM1YDBi/+1C7L6Zj/aBwTEZIR5u2DY5ExsnpMJRwvZfncmM/TmF2H07H8392jEZAW4u2DYxA5uz03i3oXLJaLHgQF4Rdt3JR31v35gMXxdnbMtOx5asdNHG1P0yWa04UlSC93PyUN3dAyXDgFLbhrwRhpejI7ZkpmF7Vga8neyPl7CwLI7dvYv3c3NR1tU1JsNdo8GW9HQ8kZEBPxfh2f1osZTiZHkp3i/IQ2F723Bn+CDDRa3G5pQ0PJmeiSBX+793SilOV1Xg/cJc5LS23Ou0KWydq4WycFKqsCEpBU+nZyHM3UMS4+O6arxblIMbLY0PMoa/QY1CiXXxyXgmLRtRHtJ28F1orMG7xTm43Fw3bPTYyr/bytmzUDEKrIlJwrOpExDnKc4oHK2rLXV49+4dnG+qvmdYsdQ22LGUgmEIVkQm4pmkiUjxlraUfq
u9Ee+U3sbHjZUAsT0H6/B1jPSPj4Ul4NmkiaI8IWMpv6sZ75bdxoeNpaB0+DmMMEABCiwMicPTCRMxyS9MEqOktw3vVdzEBw0lw797+B4RAoCCpRRzAmPxdNwkTPOXFk9R0d+BHVU3cLKxCGbWCgVhYB3FmO4XjSdiJmF2QKwkRu1AF3bV3MCJhnwYWQsUhLl3PYAtMHWSTwS2R03B3IB4WUaIPeP3/y8MkRHpTWacy6tAUX0rBgxGqBQK+Lg5Y2lWPBKC+UuaW1kWJosVDiol78MxWSz4qLASebUt0OoNUCkU8HJxwpL0OKSEPbxleTTDaLHCUZBhxfm7VbhV0wSt3gglQ+Dp7IRFqbHICAvk/VuxDIuVxcXyGlyvbkC/zgCGsS0tLEiKwYSIYN6/ZYerwAoxrCyLy1V1uFRZh369AQQEHk4OmBcfhSmRYbwGIUspDBYLHASWxVhKca2mARcra9CrNwCUwsPJEbOiIzAjOpzX80WprSiaEINSilsNTfikohq9OlsRRHdHB0yLCMPc2Cgox4mR29yCsxVV6NHpYGUp3Bw0mBIWioWx0byBkDaGGRqFUvB6C9vacLqiAt06HczDBsjE4GAsjo3lXYqhlMJgtUDNKAS9iSUd7ThVWY6uoSGYrFa4ajTICgzCstg43mDLEYaKUfDeUwAo7+7CifJSdOiGYLCY4aZxQJqfP1bEJfLmarGHUd3Xg2MVd9E2NAC9xQI3tQZJPn5YFZvIm5eEUgqj1QIFwwjuzKjX9uJIZQlaBgegs5jhptYg3ssXa2KS4K7hN2yNVgvIcIwKn5oG+3G0uhiNA/3QWUxwVWkQ4+GNNdEp8HLgN2yNVgsIIVALXEebbgBHqotQP9CHIYsJLio1It28sDYqFb48gamArV4KYNuxw6dO/SCO1hahdqAHg2YjnJUahLl6YG1kGgKc+D2RZtZW3Vyj4J/09Bh1OFZXiGptFwbMRjgr1Qhx9sDqiFQEO3uMC6PfpMeJhkJUajuhNRvgpFAh0Mkdq8LSEObCb9haWCsslIWG4e97B80GfNBUhLL+NgyYDXBQqODn6IoVIemI5NkObI8eGSLjpIaePhy4XYhjeXfvpRZnCEFioC+2Tc7AkpR40UsfXGru0+JgbhEO5RXfqz/CEII4Px9sm5iOZSkJshOCtWkHcTC/CAfzitA5OHRv9hbl7YltEzKwIiWRM8GWWHUNDeFgQTH2FRShTTtwjxHm4Y5tWelYk5IEN5k5OHp0ehwqLsa+wsJ7KcoJgBB3d2xNT8e6lGRR6/186jcYcOTuXewuzL+XopwACHR1xebUNGxMSYWPBK/D/RowGXGs7C52Feajpq/3nlfLz9kFm5NTsSk5Df4SvA73S2c24URlGXYU56GypwvWYYaPoxM2JqZhc1IagiV4He6XwWLGqZpy7CjORWlPx70ARS8HR6yPS8HWxAxRcQt8Mlot+LCuHDtKc1HY1XaP4aF2wJqYZGxLyEQUT34OMTKzVnzUUIn3y+4gr7PlXm4HV5UGq6OSsS0+E3Ee8vIoWFgW55ursKP8Dm53Nt7LbOuiVGN5RBK2x2YhyUteAL+VZXGprRo7K+/gekc9zMO1UpyUKiwNScS2mGykeUvzOoyIpRTXO2qxq/o2rrRXwzTMcFSosCAoHlujJyDTK0TWTJpSittdddhbewuftVXCyNoMEQ2jxOyAWGyJnISJPhGyGQW9jThQdxPn20thGK75omaUmOYbg00RkzHZJwqMzLwrJX1NONRwA5+0lUBvtWVrVREFJnpHY2P4FEz1jZWd26VC24KjjTfwcVsBdFZbBlQlUSDDMwLrwqZhuk8ClDK3IkvRI0NEptq1g/jx8Y9wpap+2BX64C1iht1yLho1vjx7Mp6Znm33R9E9pMNPPvgE58ur77n57pfNGWfLSvrs1Al4cdZku3dR9OsNeOXDT3G2rBIAOBkOSiWemJSJb82eJjgLHK1Bowk///g8Tt4tA+VgALZEXV
sy0/D9OTPt3jaqN5vx6oWLOFJSAis7djlrAkDBMNiQkoIfzZkNBzuTmxktFvz2yiXsKyq6V5V2NIcZDvZalZCIn82dZ3emVQvL4rXrV/B+Qd69Wd5YDAB4LCYOv5q7AG4Cs97RYinFX+9cw1v5d6CzmO8949EMSikWRsTgN3MWwdvO7a+UUrxRcAv/yL+BQbMthfTo7I0jrvfZoZH43cwlooI1RzPeu5uDv+ZfQ7/JcO+bG82wUoppgWH4/YylCHHh3o7JpX0V+fhD3mfoMep5GRP9QvC7aY/xJvPi0rHaYvwm9zw6DUNj9icj/y3DOxC/nbIM8RKMnrONZXg17yO06Qc4GAyslEWShz9+PXEZUr0C7WZcaK3Eq/ln0aTr42XEuvni1axlyPK2v0TH9c4a/CL/A9QP9dw731iMCBdv/DR9Gab4RtnNyOupx6uFJ1E92MHLCHb0xA9SlmGWf7zdjNL+ZrxadAzlA61jM0BgBYW/gzu+k/gY5gdw503hUvVAG35dcgR3tY1jMhjYlqa81a74WtxjWBIkPv5mPPTIEJGhms4ePPneIfTq9KJzUqzOSMIvVy0SHVvS2NuP7TsOoWNgUPQ2t8WJsfjjmqW8Lvj71aYdxBO7D6Gxr18UgwCYFR2B19c9LrhteUTdQzps338YVd09orYEEgCTwkLw1tpVor08WoMBTx4+guKODlEMhhCk+vtjx7q1cBUZ9zFkMuHZE8dxu7lJ1BZNhhDEeXtj99r18BLpgTFaLHjh9AlcaqgTxVAQgnB3D+xdvUG0d8RsteLrH3+As7WVotorCEGgiyv2rdiIUDdxg7iVZfG9S2dxtJK7Autoho+jM/Yu24hokfESlFL85MbH2F2WL5rhrnbAniUbkejFv8R6P+O3uRfxZslN0QxnpRo7F260K5bhb0VX8OfCy6IZGoUS787ZgMn+4mMZ3i2/iV/lfyKqLQMCFcPgnzPWY1ZgtGjG/ppcvJJ3GsDDhu1YDIYQ/HXyWiwM5t9+fb9ONRbi5dxjoBQPGbajRWALIP5N1mo8Hsq/Nfp+fdp6F9/PPQCWUow9pXmQAQA/SV2JteHit4hf66zEd3J3w8JaBRkjeinhMWyNFN5FOaK8nhq8lPc+TFazaMbzMQvxdNR80Qy5smf8/n9614y96hwYwjPvH0HvkHgjBACO5d/FHz66JKptr06Pp3cfQcegeCMEAD4qrcTPz5wXlUxr0GjEM/uOiDZCAFvncqmmDt8/eU7UgK83m/Hs4WOoFmmEjDBuNzbjmydPc1YDvl9GiwXPHz+BEpFGCGDzCBS3t+PLJ07AZBWuxmthWXz19Ae409IsOk8ESykqu7vx7PFjMIgousZSim9/dAaXG+tFM6yUor6/D0+ePMJb9G1ElFK8/NlHOCfSCBlhtA4OYPsHh9BnEFcq/NUbF0QbISOMLv0Qtp05iE6duKrCf8y9ItoIGWH0mwzYdu4gmgfFVez+Z8lN0UbICGPQYsITHx9ArZa72un92lWRI9oIGWEYLBY8c/Egyvs6Rf3N0dpC0UYIYMs9YWKteOHKIRT2tIj6m3PNpXgl7zQohI2QEYaVsvjmzSO43dUginG5vRIv5xwDS6mo2igj3teXc47hSnuVKEZOdx2+l3sAVsqKGrxHrvcXRSfwSau4d76krwkv5e6yxYPYkXnmT2Vn8EFznqi21QNteCnvfRjtMEIA4F9VH+NIw8NV778IemSI3Kc/fXwZXUNDkpLxvH8tF4VND1cUHq2/f3YDLX1auzOAUgCH8opxo064EOCb126jprvX7uugFDhTWoHzFTWCbd+/k4e77Z12M1hKcaG6FqdKywXbHigqwp3mZrsZVkpxo7EJh4qKBdueKCvFpfo6u5M8WSlFYXs7duTnC7b9qLoKH1ZXSmJU9nTjzdzbgm2vNNXjcHmJ3Um3rJSiUduPv94R7qBy21vwfkmunQQbo0M3iD/cFjbWy3s78fdC+ztLK6XoM+rxy1vnBd
s2DvTh97kX7WawlGLIYsJPb34k2LZTP4if3xFvINxjwBbI+vLNDwXbak0G/PiOcLvRogAslMX3bp4SnNjoLWa8fOeUJAZLKb53+7jge29mrXg555ik4mwUFD/IOXovHoZLLGXxo/wjw4aOfSIAflpw7F6MB+dvoRQ/KzrCuXwspF8VH8eAWXhC8Ju7R2CymiXdrz+Xn0K3cUDCr/t89cgQGVavTo8PCsslpwhXMAT7bhXwthkymXA4v1hy1kEFQ7Dndj5vG5PFgv25hZKzJyoIwe47/Awry2JXbr5kBkMIdubwW/+UUuzIFTdDGEsEwI68PMGOdkd+nuTsuxQUO/PzBb07OwrzJKdxZinFnqICQe/OjmLpDCulOFBWBJ1AKfpdd/MkB9ZZKcXxqlL0G7lLlgPArjJ513GuoRIdukHedrsrpD9zK6W40lqH+oFe3nb7qwoklwKwUoq8rmaU9nbwtjtSVwjTcCCnvWIpRZW2C7ndzbztTjeVYNBilDSwsqBo0fXjajv/xObT1jL0mHSSGBRAj0mH861lvO2ud1ajVd8n0dgBhixGnGvhn9gU9DWgZrDDLi/F/TKzFkGvSOVAC0r6GyUzKKU41Sw8sfl365EhMqxjuSWyCkJZWYrTRWX3dteMpVNFZTCYpXUcI4xPy2vQruXuaM+VVaHfYJTOoBTX6hpQ18Pd0V6sqUXHoDg3+1hiKUVhaztK2rk72ptNTagb3rUiRRRAdU8P7jRzd7SF7W2iY0+41Do4gEv1dZzHq3u6caO5UVa9oF6DHh/VcLufWwa1+LSuWhZDZzbjVBW3l6rHoMOp6jJZ34iZteJwBXdnPmAy4nCldEN9RPsrCjmPGawW7KvIl8VgCMHeinzO4xaWxa6KHMmDBWCbEOyp5PY+UUqxo0LegKIgBLsr7/Azqm7KqnukIAS7q/l/556am5JroQC2mJQ9Nbd42+yruyFrdwoDgr21/J66g/XyGACwv/46rwF7tPGmLAYLisMN12ER8CD9u/XIEBnW6aJyyA3bNVtZfFZRy3n8TEm57NLrlFJ8Us49KH1YWiG7LDpDCM6VcccafFhWKbtkuYIhOFvOwyivsHsHz2gpGQZnKiq4GZWVsjsOBSE4U8lzHdXy7xVDCE5XchsJ52qqYH+C6AdFAJysLOU8/kl9teyS5RTAqWrumevlljoYrNINdcBm5J6ouct5/GZbA7Rm6YY6YDPWT9RyxwwUdLeg0yDdUB9hnKzjvo7y/g40Dkk31EcYHzaWchriTbo+lPd3yGZcbKu6tz12tHqNQ8jpbpBltLGguNNdj17j2PdcbzXhSkelLCOaBUW5tg3NurEnaCxl8WmbvMksBdCk60HVYDtnm4/b8mVXUO42DeCutknWOcZbjwyRYXXJmOGPiCEEPYM6zuOdA0OyPmrAtkV1JN/IWOoYHJJdhpshBN1D3J4dqXE094uAoEfHfR3dep2ogFY+sZSih8dD1a3TQVz4HbeslApeh9wUySylvIGe3XqdpGKC94sC6NTzX4dcgwoAOvX81zEeyaS7DTzXwXPMHvXyBPeOF2PAbOT8BsaLYaYshjgMsx7j+DAobPE7Y6mbw3iQoh7T2L+336SXtCQzJoPj9w5ZjLDQ8fEycBlUFtaKIYs8I3pEfSb+5ct/tx4ZIsOSUz5+RITY3LJc4jtmjyxWHgbPMbsYn/N1UFD+65AY8PUAg1Le3yp3ZjEiE4+bc7yeuZknRsRC2XEZwC08DCtlYV/JrLHFF1RoGScG77s7Ts/cwmOIjxeD71zjyTBzMcbp3bUxxn7u4/UNAuBcbhjf5/GfY8jvEe9jjOOzHQ89MkSG5eZof62R0bKylPc8HjKzfgK22bEbT80STyd52UtH5O7Afx1yJ8cEhDfTqrvGYRyWfxi48VyHu8ZBtreCIQSeDtzPVSgNt1h58SQdc1NrZHvBAMCD5zrc1JpxGTQ8eO6Hm1ozLp2tG096db5j9shFxZ3Mzk01Pgw1o4
CGI625u3p83isAcFONfa7xZLirxn633Dj+uxRxnYvr+saT4aocP4YrB0PNKKEi45MhlYvxn9IjQ2RY06LDZbu3AWBiBHc1z6mRobLjN1hKMSmcmzE5XD7DwrKYGMbNmBQaIndFAxaWxaTQYF6G3OUfC8tiUgjPdYSEyJ4ZsJRiYjD3dUwOls9gQDA5mPs6pgSFyr5XDCGYHsKdRGtKYJj8ZUVCMD04gvP4JP8Q2f4QBSGYEcTNyPINlv19KAiDaQHhnMfTvAMFa68IMwhvUrN4dz84K+WVZWAIQYZ3MGcsVriLF7zU8soZEAAxrj5w5TDO/B1dEeRof0bc0QpydIe/49j1ZFyVDohy8ZX9bnmqnRHm7D3mMSWjQIp7qKygWwBwVmgQ48qd6j/TS37aeTWjRIIbd5/1n9AjQ2RYmyamyVqeYQjBhPBgRPuO/aICwMbsNMlb+mwMIDHAF2nB3MXz1mekyPoUCIAwD3dMjeBOz7w6JVFSZd/75evsjLkx3OmZl8XFwcXOFOqj5abR4LE47vLoC6Ki7U5vPlqOSiVWJyZxHp8eGo4QNzdZz4RhCDYkcaeAzvQPRLyXj+yOdstwifuxFOflgwn+8gZxK6XYlpjBeTzU1QOzgiNlecKslOKJBO5U1n6OLlgaFi+TweLJhGzO425qB6yOTJEVCG2lFE/GcTMclSpsjMqQdR0spXgydiLncRWjwObobFnPnAJ4ImYyp+eRIQy2RE2StSRHQLA1ajLnAE0IwZbIqbIMaQYEG8Mn8RYp3BQxVfZOqVWhE+Co4O731oVOBSvDM6kgDJYGZj3yiHxRFePnjexw6R0tSym2TcngbRPs4YbZsdI7WpYC2yfx1wvwdnbC0sQ4WR3UE5MyeZcsXDUarElJksxgCMH27AzeXTEOKhU2paVKZigIwZb0NN6KsUqGwfb0dMnPXEEI1iUn8xpMDCF4Ki1L0vlHGI/HJvAuzRBC8HRqluQuUEEI5odHI9CFv0Lpk8lZsnLHTA0KE0zz/lRilmTvDgOCVG9/pPjwV7l+MiFbMoMAiHTzwmR//joq2+KyZC1l+Tu6YE4Qfwr2LTHS7xVgKxy4OIS/jsrGiCxZ3k9HhQqPh/HXUVkTnimrv1ISBqvDM3jbLAtOg4NA1VshrQnjT/M+3z9Z1lKTlVKsC5vE22aabwJ8NPzfKT+DxZrQKZL//vPSI0PkPr20YLqk2AcFIUgN9se8BOHaDd+YMxUKxn4HnoIhiPPzxrJk4QJML86cDLVSYfe1KAhBmKcH1qQlC7Z9fvJEOKnVdg/iCkLg5+KMLRnC9SGeyc6Gu4ODJIaHoyOezBQu8rQtPQO+zs52d4QMIXBWq/FclnANig3JKQh1c5fE0CiU+OrEyYJtV8UlItbT224Ggc0g+9bEaYJtl0TGItXHXxKDAcF3J8wQbDs7JAqT/UMkD0w/mDBHsM1EvxDMDY6SbID+MHuuYGxRilcAlocnSnbV/zBrHhQC29cjXb2xMSpDsi/he2nzOGNQRhTg5IanYoXfPy59M2mO4BKSh9oJL8TPksx4Pn4mPASWkJyUGnw1XnqdlW1R0+DvyF8vRa1Q4uvxiyWdnwBYHTIBYc4+vO0UhMHX45ZJZBAs8E9DnJu8Csyfhx4ZIvcpKzwYv1uzxFZQSeTfKAhBsKc73ty2WlRBuuRAf/xl3TIwDBHdESoYAj8XF7y9dQ0cVMIF6WJ8vPGP9SugZBjxDELg6eSId7esgYtGeEkk1MMdb69bCZWCET1oKAiBq0aDHRvXwoMn4HZE/i4ueG/tGjgolXYxHFUqvL92DfxEFIvzcnTEztVr4axW28VQKxR4d+VqhLgLr2+7qjXYtWodPBwc7WIoGQZvLV+FaE/hYnEOShV2Ll8HPyfxRhVDCBSEwT8WrUCyj3CxOBWjwHtL1iLEVbxRNVKc7C9zlyHbX3hdmiEE/5q/BtHu4o2qke/1tzOWYHoQd+zGvfaE4O+zViHZy99uY+SViQ
uwMDRWVNvXpi5Htm+I3cbI99JnY0WE8GQAAH6evQQzA6LsNka+kjgNm6LFVWP9Xup8LA5OsJuxPXoinhZpxLwYPxsrQ7mXBrm0KiwDL8bPFvd7IqdhU7h9RhUBsCAgCd9KXCSq/ZrQiXg6StzvuZ8xxScW/5O8QlT7RYEZ+HKMfQYPAUG6RwR+nLLerr/7d+lR9d0x9FlFLb598DQMJlsSnrFukIIhsLIU2eHB+PvmFfCwc7fK9doGfP3gKQwYTWOWar+fkRrkj39uWgkfF2e7GHlNLfjywRPo1Y9dRh34vxLkcb7eeHvTagS42ef2K25rx3OHj6NzSCfIiPD0wLsbViPMw8MuRkVXF545egytAwOCjGA3N7y7ZjVivLljdcZSXV8vnj52DPX9Y5c4B3CP7efsjHdXrkaSn7hKryNqHtDimZNHUdHTLcjwcnDE24+vRmaAfeXaO3RDeOb0URR3tXMyRt43N7UG/1q6ClOC7CvX3mvQ47mPjuFOe7Mgw1mlwt/nr8DcUPvKtWtNRrx44TiutNTzMgBAo1DiL7OWY0kEdzzQWNKZTfjG5ZP4pKmKkzHCUTIK/H7aUqyOsq9cu9Fqwfeun8ap+ru8jJGKtT+fuAhbYu0r125mrfjJnQ9xqLZAkEEI8IP0+Xgm3r4B2UpZ/LrgI+yqvs3PIASUUnwzaQ6+kjDDrl1pLGXxl7vn8U7lFTA8DMXwN/Js7Ax8K2meXcGblFK8VfUZ/lF+HgTcW2IVhIGVstgcMQXfS15qV7wPpRR7667hL2W2OkBCjJUh2Xg5eSVv/MlYOt50E6+VnuAtFDjCWBiQjh8lr4NG5vKUPbJn/H5kiHBo0GDEycIy7L6Rh9quB7PpMYRgUVIstkxOx4TwYMlbQHUmMz4oLsPOm3mo7Ox+iDEvLgrbJmZgSmSoZIbBbMGZ0nLsup2PkrYHU6oTALNjIrFtQgZmRIVLdlUbLRacq6jCzpw85Lc8XPhvengYtmdnYG50pKC7mUtmqxUfV1VjZ14ebo+Rtn1ySAieyMzA/OhoUZ6psWRhWVyorcGO/Dxca3y4uGBmQCCezMjE4pgY3tgTPllZFpca6rCjMA+X6use6j5S/fzxZFomlsXGwUEprdNgKcXVpnrsLM7DJ3XVDzESvHzwdFo2Ho+JhxPPNlQ+UUpxs60JO0tycbbu4YJ+0e5eeColC6tj+GNohBi5HS3YWZaL07XlD+VqCHf1wFNJ2VgTnSxrm3RBVyt2lefgRG3pQ3lOgp3d8FRCNtZFp/Fu0xbS3Z527K7MxdGaIhhHMfwdXfBk/ASsj06Dj4N9k437VdHfib1VOThcWwj9qEym3honbI+dgA1RGZy7S8SodqAb+2pycKguD0OWB2sTeagdsSUqGxsjsxDoJH0nTONQDw7W5eBg7R0MjErg5arUYEPkBGyIyEaos7CnkEut+j4cqb+Dg/W30D+q0JyTQo01YROwPnwiIlz4l0r41GnQ4ljjHRxquIEe04NJyhwYFVaGZmNt6CRE8+ySEVKPcRCnmm/jcOM1dI0qZqdmlFgalIU1IVP+I8sxjwyRcRSlFKVtnejQDsJoscDVQYM4fx9e70Tn0BAKWtugNRqhZBj4ODthYnAw5wBJKUV5RxfatYPQm81w1WgQ4+cNf1fupYVunQ75bW3oNxigYAi8HW0MvgGyqrMbzf1a6M1muGg0iPbxQiCPB6TfYEBOawv6jDaPipejIyYFBfMOkDXdPWjs12LIZIKrRoNILw/e5Qut0Yictmb0GvQgIPB0dMSkwGDeAbKutxcN/f0YNJrgolEj3MMD4TxeliGzCbfbmtE7nI3SQ+OICQHBcOXJKdHY34/a3l4MmkxwVqsQ6uaOKC/ujk9vMeNWWxN6jDpQassfMsEvmHeAbB7Qorq3B4MmE5yUKgS7uSHWi9uTY7BacLu9ET0GPSyUhYfGAZk+QfBy4F4fbxscQGVvNwZMRjgqVQh0cbXtsOEwOk1WK+50Nq
JLPwQzZeGudkC6dxB8Hbnf9w7dICp6u6A1GuGgVMLf2QVJXn6cDAvL4k5nIzoNgzCxVrirHZDqFcg7QHYbdCjr6US/yQCNQgE/RxekePtzMqyURW5XEzoMAzBaLXBTOyDJIwBBPANkn1GPkp52aE0GqBgFfB1dkOodwGmgs5SioKcJbXotDFYzXFUOSHD3R4izJydDazKguKcN/cMMbwcnpHkFchrolFIU9TWhVd8Pg9UMF6Vte2e4C/d7Mmg2ori3Df0mPRSEgafGxuCacVNKUdrfgmZ9L/RWE5yVGkS6+CLKhdvjp10uh8YAAK5NSURBVLeYUdTbgj6THgwh8FA7Is0rmHPbMqUUlQOtaNJ1Q2c1wUmhQbizD6JduQOLjVYzinqb0WeyGQoeakekegbzzuprBtvQoOuA3mKEo0KDYCdvxLgEcb4nZtaCor5m9Jt0YCmFu9oRyR7BvDtX6ofa0aBrh85igINCjQAHL8S5ck8WzawVd/ub0GfSwUpZuKkckegeDGcld//Tou9A/VArhqx6aBg1/DReiHMN533fS/ub0GsahIVa4ap0RIJbCFzGMYeKvXpkiPwHRCnFraYm7M4vwNmKh2eIno6O2Jqehk3paQh0lTYjoZQir7UVuwrycbqi4qH8FO4aDTanpWFLapqo2AUuFba3YVdRPk6Ul8I8iuGiVmNTciq2pqQjwoO7wxXS3a4O7C4uwJHyEhhH1RdxUqqwITEV21LSEeNp3xLL/ars7cLuu/k4WF4EveVBhoNCibVxydienIkEL1/JjJr+Huwuy8eBikIMjqpeq2YUWB2dhO2JmUgV2MnBp8aBPuypyMfeynxoTQ/OEJUMgxURidgWn4lMH+4OV0gtQ1rsq8zDnso89I5Kx60gBEvDErA9PhsTfUMkMzr0gzhQnY/dVTnoGlWLhQHBgpBYbI+dgKl+3B2ukLoNQzhcl4/dVbfRbnhwhkgAzA6IxbaYCZjhHy3ZA9hn0uNYfR52V99Ci77/oePT/aKwNWoSZgXESt7CO2A24FRTPvbU3ECjrueh4xO8I7Alcgrm+MdDKTFnyZDFiA+bC7Cv/jpqBzsfOp7uEYZNEVMwLyAJKkaaB9BgNeGj1gIcrL+GqsGHvaWJbsHYED4N8/xTJS8bGK1mXOgowNHGqygbeLiGSoxLINaEzsAC/ww48BgYfDKzFlzpLMTx5su4q6176HiYkz9WBc/EfP9sOElMbmZhrbjZU4QPWj5Dcf/D9cSCHHyxPGg25vlPgrPyi7X9diw9MkT+zRowGvG1k6dwpb7hXlzHWBrp+H4ydw6eyLJvHVhvNuNbH57Bx9XVvGu0I+un35sxAy9MmGhXh260WvD9T87hZEWZIMNKKb4+cQq+PXmaXQyz1YqfXv4U++4WimI8lz4BL0+bbdegYWVZ/OrGRbxbnCOKsTUxHT+fvsCuInuUUryWewV/L7guirEmOgm/m7EUajuWjSil+EfxDbyWd0lwzdxKKZaExeEvM5bbvaTzXtlt/PLOpwAB5/bckbXm2UFR+PvMVXCxM3vowep8/PjOWVBKBdfMJ/mG4p8z18FdbV9n+0FDMf7nzklYWZaHYbtXaZ5B+NeMTfDS2LcU8mlLGb5z+whMrJVnXX447srND29N2wY/O5dCrnVU4aU7+6G32gzbMePHQGAFRbizN/455QkEO9k3KcjpqcVLd/ZgwGLgjFFjQMCCItDRA69PfBIRLvYZ7Hf7G/FSzvvoM+tAQMa8XyMMH40r/pz9NGJd7YuJqh5owXfz30G3SXvvXKM1wnZXOeP3Gc8i0c2+mKhGXQdeLvgn2o29PIzhmCilI15NeRapHsI7KO9Xm6ELPyt+A836DjBgwOLhrd/3YqIYDV5OehZZnol2Mf7demSI/Bs1YDRi0/4DqOjqtivHwrenT8PXporbz603m7H18GEUtrfZxXh+wgT8YKa4bXEmqxVPnzyC601NdhWI2pqSjlfnzBdljFhYFi+cPY
7zdTV2pSZYFZeIP81/TJQxwlKKb50/jZPV3JVkR4sAWBgRgzcWrBQVw0IpxctXz2EfT7n5sRjTg8Lx/qJ1ooPSfnXnPN66K77UO0MIsnyCsHvhRtHGyF8LL+MvhVfsYiR4+OHgom1wFhlf8nbZTfwm/1PRDAUhiHD1wqEFT4g2RvbX5OCnuWc4B9WxGIGO7jg472n4OAjvrgKAkw2F+EHOMUA0g4G3xhkH5nwJAQJbP0f0aetdfOfOAQDiaosoCAM3lQN2z3hedMzE9c5KfOPOLl6j8EEGgaNCjfemPi86niG/txbfuPMuLKxVFIMBgUahwhsTn0eCu7isn2XaRnwj958wWc2iGUpGgT9lPo80j0hRjLqhVnwr92/QW01jGgdjMQgh+GXqc5jglSCK0aLvxPfy/4hBi14Ugwynf/tB4rOY6mP/TqN/l+wZvx9t35UhSim+dvKU3UYIAPz56jWcLBU3WH7n7Fm7jRAA+NedO9hbKG6w/NGFj3G9qdHuKpV7igvwdl6OqLa/unrRbiMEAI5XlOKvt6+JavvnO1ftMkIA26DycV0Vfn3zoqj2bxbdsssIGWFcbanHj699LKr9rvI8u4wQwGaE5Xa24DtXz4hqf7SmyC4jZIRR1teBr14+JipL8LmmcruMEMCW2KluoAcvXD4s6p2/0laNV3Jt1yz23bJSilZ9P567sp+3EN+Icroa8HLucVC7GCy6jUP40tVdMIwKHh1Ld/ta8P2cQ6AQZyCMMLRmA56/sQODZoNg+5qBDryUsxesSCPExqDQW834yq330cdR4fZ+teh68J2cHaKNEMBmdBmtZnwz5110GrSC7buM/fhe/tuijZARhoW14vv576BF//By12j1mwbxPwX/FG2EjDBYSvGz4ndRP9Qu2F5n0eMnRX8XbYQAtoKhFBS/L3sPVYMPB9X/N+qRISJDd5qbcaW+QXK2yd9fuiL4tyUdHThb9XDMiVj98dpVmHiqqgJAbV8vDpeWSE6g+Jdb16Az83e0bYMD2FGcJ5nxRu4t9Bv4O9pegx5vFNyUdH4K4L3iXHTo+MtjD5lN+Ev+VcmMAxWFqNf28rYzWi14Le+SJAYLitP1ZSjt7eBtZ2VZ/D7vojQGpfispQa5XQ/vXrpflFL8Pv+8pIRbVkpxu7MRV9pqBdv+sfi8BIKNUdLXik9bygXb/q30gkQGi+qBLnzYVCLY9p8VF8C9qMTPaNH14mRTvmDbd6s/g4VyLyvxMXqMgzjScEuw7Z66yzCw4g2EEbGgGDDrcahBeNJxuPEqBsx6SQwja8aBhs8E237Qeh29pgHRBsKIKCjMrBX7Gz4RbPtJ+010GnskMGzf4f76s3b93RdVjwwRGdqVly8rNXHrwAAu1dbxMwrkMXr1enxc9XDg0/3aU1Qgi6Ezm3Gqooy3zd679nkQRsvMWnG4nL8zP1ReBKvMAnP7y/h/5/Hquw8FvtojhhDsKS/gbXO2vgL9JuHZLZcUhGB3eR5vmwst1WjX8xtd/AwGu8pzedvc7GhA3WCvrNTzuyrv8LYp6mlBSV+bZAYDgl1V/J6nmoEu3OqqkzwZICDYVc1vILfp+/FZe7mslO17am7weql6TUM411okOfU8C4oD9Td5/37IYsQHzTmyGMcab8LEcn9jRqsZJ5tvSK7rYqUszrTchs7C/Y1ZWStONl+x22AbEQsWFzvy0G/i/sYopTjV8pnkd5cFi1s9Regy8k9s/hv0yBCRqK6hIZytqJTVcSgIwa487gFDazDgeGmpLAZDCHbkczMMFjP2lxTKYhAA7xdwD0pmqxW7i/Nll6p/vzCXs6NlKcX7JbmyCluxlGJnSR5vtdz37ubIKi5npRR7y/Jh4DFm3i/LkV1c7nB1MQZG7bC5XzvLc2QXfjtdX4puA7erflelXAbFhZYqtAw9vDNlRHtr7sgqLseC4nZXA6q0D+8aGdH+2juyroOCorS/DUW93B6kw/V3ZBV+owAadT241c3tQT
rRmCv7G+wyDuBKB7cH6WxLHkys8DIUnwYsBpxvK+I8/llHIQYtes7jYmRkzfi4jbtfvNlzFz0m4SUiPlkpi3Nt3B6kwr4KtBm6ZDEIgLOt4patv8h6ZIhIVEFbm+zS61ZKcbu5hfP43c5OwWUVIbHDW365BvDKnh4MCSyrCIkCKOvugpFjcG0c6EePQV7HQQXO06UfQsvgwJjH7FGXXoeWwbE7oCGzCZV93bKMHQAYMJtQ0z/2GjVLKfK7WmQPGEarBWW93IPrnY5G2e+vhbIo6m7lPH6rs0E2gwLI6+YewG911ssqLjeivO6Ht32O6HZXnezrYECQ2829np/TXSercitg81Ll9zRwHi/orZc8wx+RkjDI663nPF7YVy/LoAJs11HQx8Por5NlfAK251HUX8d5vLi/Fgoir8I4BUVxP7dheFdbI/s6WFCUaPk93v8NemSISJTWwD3btEc6k4lz0NEax4dhpZQzhkNrlL4EMFr9HOcar+v4jzN4PAx2MziWXgbNJtmGjhDDyrLQW6UvL4lhALbEWp83QzsODIYQaHkYcpbJ7mcM8ASTjs7uKUUE4GWICTQVIz6GlLiN0aKUYpDnfgya9aICpfnEgkJr5r4fNo+L/C9Rax7iPDZk1UN8VTNuDfAw/lv0yBCRKKlpxEeLrxKvSjF+j4fr99pb34CXwXGu8bpXfAx7coAIMjh+77gyuO7VuDLGPhdDxBdcFGZwP9vxul9c2ToB2wxdrijlv47xeCYUAvdK5ux7RPzX8d/BICAiGPLfX74kbeP3PD5/hpJISzb3RdIjQ0SifJz4y06LlaejI2cODh8n6XUn7pezSsWZSGu8rkPJMHDTjJ3kysdxfBgEtmq5Y8nL0WkcuiabvDlSpntoHGTFCtwvX4574qBQwkGgNLtY+XCkZSfDKbk/TwYAeNuZLIzzPDz1V3xF5gDhEwUVYLjKfreslOVNnubn4Gp3ld7RYimFl5r/OhQyGRT8DC+1i+zlBhDAU839XD3VrpD7GSoIAy8BhtwMWwwYeKu582d4qF3BylxWJCDw1kjPov1F0SNDRKKyg4Pg7SSvM1cQgpWJ3Elvkv38ECIzkZuCEKxI4M7AF+nhiTgvb1ndk4IQPBYdx5kMzN/ZBVn+gbJm4QpCMCc8ijOJlptag5khEbIMBYYQTAwIgS+HAahiFFgcHiePAYIkLz+EuXqMeZwQghWRibIYBECYiweSPLlrhayKTJZtVPk6OCPLhzv51MqIZNmeF1eVBtP8IziPrwhLkW0kOCiUmB0Qw3l8eWiqbCe9kjCYHxjPeXxpcKr8JQ1QLAhM4jy+KDAVVpkMK6VYFJjKeXxBQJrsmJ2RarFcmuefPi6Mef4ZnMdn+2bYvaV2tFiwmO3HzZjmzX1MrCgoZvpmyz7Pf1qPDBGJUikU2JaRIXt3w5YM7g+OIQRPZGTK3qWxLZ2bQQjBU+lZsronK6XYnpbB2+aptGxZAZhWSvFkKn9a/CeTs2QFFbKU4qmULH5GYqY8BiieSsrizUT7RLy86wCApxKzeRlbYuVdBwOC7fHZvMsvm6IzZS2zKwjB5uhMaHg8RGsiMmTNwBWEwZrwdN6U9Y+HpMJBRvl0BWHwWEgKr0dkYWAyXGUUKFMQBrP94xHo5MHZZoZfPHw10ivvMiCY4BXJm+o9yysKIU7SJzYEBPGuQbzZVRPcQhHrGiwrKDbE0QcZHlGcx8Oc/ZHmHi3LS+WtdsMkb27D0M/BCxO9ksHIGIZdlE6Y/gXOripWjwwRGdqYlir5NVUQghnhYbxVYwFgXXKS5BgLBSHICAhEoi9/jYiV8YlwVqkkXQtDCGK9vJEdyF9meklULDwdHCV1HgwhCHZ1w6zQCN52c0IjEegszcVNQODt4IhFEdwzYwCYHBCKKHcvSQYoAeCiUmNFFH+NiBTvAKR5B0jyWBAAaoUCa6NTeNtFu3tjmn+4ZK8IIQSbYvg7wEAnN8wPjpXMYCnF5hh+49NL44
TlYSmSGVbKYnM0/4zSWaXB2vAMeYyoibxt1AolNoRPlDzwWSmLzZGTedsoCION4VMkM1hQbIzgL0tBCMHGsGmSzg/YZvgbwoX/fl3oDMk7gAiAtaEzBMtSrAqZJdlLRUCwMnimoJG8PGi2ZM8LA4KlgdOhYqQbyV8UPTJEZMjfxQU/mz/P7r9jCIGrRoNXFy4QbOvh4IjfLlwoieGgVOL3ixYJtnVSqfDHhUvtZowElf1p4VLBj1qtUOCvC5bZvbZLYLuWvy5YJjj4KxgGf5u/HAxjn7lDABAC/G3+44LBdoQQ/G32cqh4goz59JfZy+Aoog7Ma9Mfg0ahtHvQoAB+P+0xuKuFZ9e/nrIULiqNJKPq1UmL4esoHJ/xSvYieGqcJA3iL2fMR5iLcDG3/0ldAH8HV0mMryfNQry7cP2UryfORYiTpyTvy9MxU5HhFSLY7kuxsxDt6mc3gwBYFzYBU3yEC61tiZyGJPdgu+8VAcHiwDTM8+ee4Y9oVegkZHlF2f3uMiCY4ZuAJUHCBUEXBWRhmk+SJEamZwxWBgvX+Zruk4K5fpl2T54YMEhwC8OaEOE6Xxke8VgcMM3uvoQBgzDnIKwPXWznX34x9cgQkaktGen47swZotsrCIGbRoMd69ciTMAbMqJViUl4Zc5c24ApkuGkUuH9NWsR4+0tirE4Oha/nb8IDBH32SkIgYNSgXcfX40UP3GFsGaFReCvC5ZBQYioF48hNkPnzSUrMSFQXCGsiQEheGPBSigZRtQAy4BAQRj87/zHMT04XBQj1ScA7y5YazMURDAICBgQvDbzMSwMixXFiPPwxY75G+CoVIkaNEbejVcnLcTKSOHBAgDCXT2xa/4muKo0dg1M/5M5F5tjM0S1DXRyw845m+GhdrSL8bXk6XgmfpKott4Oznh/1jZbMKYdjKdiJ+NrieKKQrqrHfHOjO0IdnK3i7E+PAvfTRE3kXBWavDPKU8gwtnbLuNwSVAqfpi6TFThSQeFCn+b+ARiXQPsGsRn+cXj52lrRDFUjBK/z9yOZI9Q0YM4AUG2VxR+mb5ZlCGmIAx+lrIVmZ4xdjGS3MPwq7QnoRSxu4chDL6bsAVTeJZXHvobEES5BOGXqc9BoxAuCkkIwVdiNtgV58GAIMTJD79IeRGOCvuqYH9R9blV362rq8Orr76K8+fPo62tDUFBQdi2bRt+9KMfQa0WV7Xzv6H67og+KCvH7y9dRrNWO2ZZeAUhYCnFjIhw/GLBfNFGyP36uLoKv750CfV9fZwMK6WYGhqKX8ybj2gvcdU479dn9XX45eULqOrt4WVkBwbhF7PnI8mXOyCSSzeaG/GzK+dR1t3Jy0j3C8AvZs5Hur99pcEBIK+jBT+98gmKutp5GUnevvjZtAWYFCg8Yx2tku52/OT6x8jpaLlXwv5Bhu2/xXp445XJ8zEzOMJuRlVfF3588yPcaG/kvY4IV0/8eMI8LAjlX1oaS/UDvfjJrXO43FrLwbBdR4izO36QNRfLwu0vP94ypMUrOWdxoaUKZPhbGOs6Ahxd8d20OVgdyR0QyaUuwyB+lvchPmkuBwg4GT4OzvhG0hxsiuKPBxpLfSY9fllwBmeb745ZvXakTLyn2glfjp+J7dGTRQ3e92vAbMDvS87gdJMt4/HoJYgRhqvKAc9Ez8DTMTPA2OlF0VtM+FPphzjZnAvLcNG/+ykEBBQUzkoNtkZMw3Oxc+321BitZvyj4iyONd2CmbU8tMgxUinZQaHG+rCpeCFmoSgD4X5ZWCverjmHo41XYWBND1VfHrnzKkaJFcFT8OWYZVDzbKkdS1bKYk/dRzjSdBE6q/HevXnwWgiURIHFAZPwfMwKuw0ElrI40vQJjjR9giGL/t4zHs1gCIM5vhPwfPRaOCnHZ+fb5yV7xu/PzRA5e/YsDhw4gM2bNyMmJgbFxcV47rnnsH37drz22muizvHfZIgAto7vSl09duXl41ZTE4ZMJigZBh6OjliZmI
AtGemCMSFCopTiemMjdhXk41pDA4bMZigIgbuDA5bHx2NbWjqiJBggoxl3WpuxszAflxrqMGgygSEE7hoNlsbEYVtqBuK9fWQz8jvasKsoD+fra+6lI3fTOGBJVCy2JqcjxVecp4VPxV3t2FmSh4/qKu8xXNUazA+LxhPJmUj3s9/IGa2ynk7sKsvDmbpyaI3G4UFCjdkhUXgiMRMT/ILtHoxGq6q/G7vL8/BBXSn6jAawoHBWqjE9MBxPJmRjin+obEbdQA/2VuTjeG0x+kwGWFgrXFQaTPILxRPx2ZgRGCl7F0zTYB/2V+fjWF0Ruo06WFgWTkoVsn1C8ETcBMwKiOLcfSVWbXotDtTk4mhdAbqNQzCzVjgp1Uj1DML2mImYGxgnO8dJp2EQR+pzcbguD52GQZhZCxyVaiS6B2Br1CQsCEqQnVOjxziE4425ONqQg3a9FibWAgeFGrFuftgcMRkLA5OhlrnVW2vW42RTLo423Earvm+YoUKEsy82hE/G4qA0WYG6ADBkMeDDljwcabiBVn0vjKwZGoUKoU4+WBs6BYsC0+GklDez11mM+KQ9D8ebrqFR1wUTa4aaUSHQ0QurgqdicWA2nJXSg4EBwGg14UJHHk42X0GjrgNG1gQ1o4KfxhPLgqZiUcAkuKrkpSows2Zc7crHBy2XUD/UAsMww1vtgcUB07AgYArcVfK3rP879IUwRMbSH/7wB7zxxhuoqakR1f6/zRAZLUqpqMGhpKsDRypK0Dqohd5igZtGg0RvX6yPT+HN02APo7ynE4cqi9E8qIXeYoarWo1YDx9sjE+DvxP/iz3yighxqvu7caiyCI2DfRgym+GiUiPa3RsbYtMQ7ML//MQyGgb6cLCqELUDPRgym+CsUiPS1QsbY9IQyrEl1l5G81A/DtYUoFbbjUGzCU5KFcJcPLEuKg1RbvxLXWIZbXotDtcWoErbiUGzEY5KNYKd3LE2Ih2x7vzBxSMcIUaXYRBH6/NR3t+BAbMBjkoVAhzdsDosHQkeAePC6DUO4URjPu72t2LQbICDQgVfB1c8HpKOFE/h5TQxDK1Zj1NN+Sjua8KA2QCNQgkfjSuWBqUh3VPYABPDGLQYcLYlH4W9DRiw6KFiFPBSu2BRYBoyPSPHhaG3mvBxWx4KemsxYNFBQRTwVLtgrl8asr2iBb0aYhhGqwkXOwqQ11eJAfMQGKKAh8oZM3xTMdErYVwYtsGyALm9ZRgw62wxb0pnTPZOwSTvZMHU6GIYFtaKmz0FyO0tuZc51FXljCzPZEz2Shf0nIhhWKkVeb1FyOktwIBlEJRSuCidkeaRhEleWYJBoGIYLGVR1F+E3N5cDFgGwFIWzkpnJLolYpLXJKgZ/tUBsf37F1FfWEPkxz/+Mc6ePYs7d8auqGk0GmG8L1W3VqtFaGjof60hwidKKU5Vl+Odwjso6GyDctj9TYF7s04GBMui4/F8+kQk+9i/BEIpxbn6SrxVdBt3OpqhIAzYYVfv/TPbJeGxeCFtMjJ8pXkHLjRV41/Ft3C9rWF4CQoPMCilWBAag+dTJmOiv/1LIABwpbUO/yq5icuttWAIAaW2KH4GBGTYDT8rKBIvJE3GtMAISYxbHQ34V+kNXGipeoBhc4nathBP8w/HlxKmYE6QcGDgWMrrbsI75TfwcfP/FQ77PwaBlbKY4BOKZ+KmYGEwd94JPpX0tuKdyms411x6zzCyMWzr3lbKIs0zGE/FTsHS4CRJHV2lth3vVV3Fh81FsFLbuUczEt0DsS1qCpaHpNm9dAAAdYOd2FFzFR8058PCWkGG3dX3M2Jc/LAlcipWhmZJCiRt1vVgd+1lfNCcCxNrHpMR7uyDjeHTsCpkot1LBwDQbujD/vpL+KDlFvRW0wNu95FlryBHL6wLnYFVIVPsXjoAgG6jFocbL+J0yw0MWQ1gCHMvWdYIw1fjgdUhM7AyeAYcRMQvjFa/aQDHmy/iTOtVDFp0DyxHjvy7p9oNy4
NmYkXQbDhJ8EAMWnT4oOUCPmy9BK1lEAowsA7vKhn5dzelC5YGzsLyoLlwUdrvgdBbDTjb9ik+aruIPnM/GDD3dq6M/LuL0hkL/GZhaeACuKns3/JstBpxvuM8Pun4BD2mnjEZjgpHzPadjcX+i+Gh9rCb8UXXF9IQqaqqQnZ2Nl577TU899xzY7b52c9+hp///OcP/ff/1wwRs9WKH13+GAfLi8GMsV5+vxSEgIDgT/Mew4oY7uRno2VlWbx68wLeu5sjikEB/HbGYmyMSxPNoJTiD7mX8I+iG2PGFoxmsJTilf+PvfMOk6M69vbbPTluTtpVzllCCSUkQOQMJgdjbGycMdgYcMC+DhhwxnwYB2wTDJiMySJJAiGUc87Srjbv7OTUfb4/ZlcW0nZPz/QCQszvPvtco1PTb/eEPtV16lRNm8e1I40nZgkhuG/9+/x69UJDDEUIbpk4h6+OPj6nCfbBzUv5xao3upyB7IxvjJ7Jd8aekBPjiZ0r+fGKV5C6HA4tdU9U1w6dym3jT8lpOeS/+9Zx6/LnAQwxLh4wkTsmnJXTUsWbBzbxveVPoiJ0Gd1r6WfUjuXnE87PaRlhccs2blr+GCmhZGFkcgLmVo3gzokX48phgl3ZvoubVjxEQk1lZQBMKRvCXROvxJPDMsLGzr3cvOrvRJWEIcbYogH8asK1+HMI8e8I1/P91X+hMxXJuhVUQmKIt5Y7x19Pid34BLsv2sQP195HezJoiNHXXcXPxn6NckexYUZTvJWfbLiX5nhb1m2zMhKVzjJ+MvqbVDmNLxO3Jzu4c9PvqY81Zt3+KyNTYi/itpE3Uusy/pAWTAX57dbfsje61xDDa/Vy8/Cb6efuZ5jxadBH6ojceuut3HXXXbo2mzZtYsSI/02a9fX1zJkzh7lz5/K3v/1N83WfhYiIEIKb336FZ7dtNLxDvfsmdf8p53L6oGGGGD9+/w0e2qTd5lpLvz3hTC4aql+Dolt3rVjA/euW5Mz42fGncPUIY4mC961bzD2rF+bM+P7EuXx1TPYtegD/2rqMn66YnzPj66NncPO4uYZsn9q1mtuWv5jT8SXg6iFT+NFEY1v0Xt2/kRuXPpUz44L+E/jFcecYcqoWNm3lmx/8myNTKPUYEqfUjOSeyRcbiowsa9vFDR/882D0zohkJKZXDOEPk680FLVYH9jHV5b+BUVVDdeKkJEYV9KfP025zlDUYnvoADcs+xNJNZ0TY6ivlvsm32AoarEv2szXl/+emJI0XI9ClmT6uiq4d9K3DeVNNMfb+faqewinY4ZLkluQqXCW8LuJ38Vvy17qvz3ZyffW3E3AgKPTLRmZYrufX4+/hRJ79jLnoVSYH67/Ja2J9pwYHqubX4z9ARWO7DsQY0qMn2/8OY3xxpwYDouDH4/6MdXO7Eumnxbl4ojkHMu8+eab2bRpk+7foEH/q1jX0NDAiSeeyIwZM/jLX/6ie2yHw4Hf7//Q37GmRzet4ZkcnBD4Xxb4N998kX3BQFb753ZszMsJAfjeolfY2tGa1W7+3m15OSEAP14yn7Wt2u3ju7X4wO68nBCAu1a9w5Im7Zbo3VrdWs//5eGEANy3YTFv1Wdvwb2ls5kfrHgp5+ML4KHty3hx74astnvD7Xx32TM51yMQwDN7VvPk7uzfl+Z4kJuWPZGTE5JhCF4/sJGHd2b/vnQmo9y47BFEDk4IZJaFFrds42/bF2S1jaWTfGfFv3JyQroZazr2cP/W17PaJtU0313995yckG7GtlA9f9jyQlZbRajcvvavxHNwQiCTt7Av2sxvt/wnq60Qgp9u+AvhlHEnBEBBpTnewW82P2TI/teb/5aTEwKZEuqBZJB7Nms/3B6q/7fjwZyckG5GJB3l11vuM9Tx95+7/pmTE9LNSCgJfrf1d6Z7z3xalbMjUlFRwYgRI3T/urfn1tfXM3fuXCZNmsQ//vEP5F7sLPpplCoED6xellchLNH1+kc2rtG3E4I/r1
1qokCMxL82rsxq9cD6D/LeQSFLMg9u7DlP6FD9dePSvKtZWiSZv25cmtXuwS3L8spfgEwuz183Z59cH96+LO9y1DISf9vyfla7x3Ytz9lB6JYE/H3r4qw32qf3rCCl5t+t5F/bF2ftEfL8/lVElVReFS0F8O/dS0gqaV27Vw+spjMVzZMheHrvB0TTCV27Bc3raE0E82KoCF45sILOpH5796Vtm2iItR3MociVsaB5DS3xgK7d+s7t7I405FX9U0Vleccm6qPNunY7wnvZFNqZN2NTaCc7wvoPHQ2xRlYH1ufN2Bvdz+bQNl279mQ7yzqW5c1oTjSzrnNdzq89FvSReQbdTki/fv349a9/TUtLC42NjTQ2Nn5UyKNei+v3si/UmfeNXBGCxzatJZ7WvtGubjnA5o6WvNs1KULlqW3rD2517UlbO1pY3lyfd+8YRai8uGszbfGops2+cIB3Gnbm3Q9FESpv7d9OfaRT06YlFuaVfZvybqClCsEHzXvZ3qkdQQql4jy7e23+DAQbAo2sa2/QtIkrKf6za2Xe75UA9kTaWdq6R9MmpSo8vmuZqcZsLYkQi5q0b+aqUHls95K8S3dDZofNG43aESQhBI/vWWyqf1NcTfFqw2pdm6f2vWeqT4kiVF46oO+sP7f/3bydaMg4oC8d0HekX2xYZKoXiozMywfe1bV55cBC04xXDyzStXmjaYFpxuuN7+jaLGjJHo3Lxnij6Q1Tx/i06iNzRObPn8/27dt58803qauro6am5uDfZ1WPbVpruuNpMJng9d3aywGPbzHPSChpXty5WXP8iW3mGaoQPLtjveb4kzvWma5ZIUkST27XfsJ4bvd6062+LZLEf3ZqR6le3LuBVFfBqPwZMv/ZtVpzfH7DZiLpZC8wtCNh7zVvpz3LE3o2yZLEk7uXaY6vaNvNgVjAHAOJJ/doMzYF69kVbjbV5FECnt73geb43kgzGzr3mnLaBILn9mtHwlriAZZ3bDEVylcRvFC/WHM8lIqwuHWNqS60KiqvNb6PInr+DSSUJAtb8osiHMpY0LKUhNLzb0ARCm83v2easbR9JeF0z78BIQRvN79tyolWUVkfXE97sj3vY3xa9ZE5Itdee21mnbeHv8+qdgTaTXdVtUgye3XyRHZ2mmdYZZk9IW3G7mCHaYYsS+wNaUcr9gQ7TLdel4C94YDm+O5wh2lnRxWCveEOzfE94Q6sJp5aIfN0vDusfXPaG27vJUabNiPSZuoJHzLv1e6INmNf1PwNWEWwN6rNqO8FhshynPqYNj8XNcY6NO+XB+K9w+hMhTUn8OZEuylnqlsxJUEo1XP0sz0ZICX0l9KMKCXStCd7vp+EUhHiatw0Q0WlNdHz555Uk4TSIdMMgNZE9hy9Y02f7aSNj1mRlLmnVgBZgrDOcUIp/bVrIxLoM/TGDDOE0D1OJJ3Me+mnW6oQuu95NJU0faMV6L/n0XSyF27lmSUePUZvlDwKZ7kOs05b93E0x7pqbJhVLAujNxRXUh85Q0WQVHuepGOK+d95t6Iax4r10nVkjtXz9zfei9cR12AkesEJyc7oveuIKbFeO9anRQVH5GOU12CPHT0JAT6b9nH8dvNNkCT0Gb7eYEgSXh2GN8dGbD1JliQ8OgyPzW564su8V9rvh0dnLBf5bNrbLN1We684O16dc3Vb7aYdQ0C3BofbYu+VJ3C3Vfszd+dRyKsn6W2t7S2GjKS5TdhlMVeu/FB5NI7Vmw3V3BoMZy9eh0uj90qvMj6O67Ac3T1kPgoVHJGPUcNKykxPrmmhMrBYuzX60OJy04yUqjKwSLtfzeCiUtMMRVUZ6Ne+jkH+UtNTkug6jibDV2Z6cpUliYE+7foCg3ylpE1uybNIEkN82kWbBvrKe4EhM8SnXVp+gLfctJNgkWQG6VzHAK+5/kWQmbwHerWvo7/HPENCop/Ocfq6s5foz86AWneZZm2XWld53juxDlWp3Y9do5dMpaM0r2q1h8
tjceHVqCVSai/CnqWUuhHZZRsltp7LPfisXty9MLlbJJlyjVoidtlOkS17LZNskpCodOReRfvTroIj8jHq8pHjTedWFDuczOuv3WH1suHjTDPcVhtnD9QuL37JUPMMqyxzweDRmuMXDx5Lb3giFw/W7uJ6/sAxppcbFCG4dPB4zfEz6kaZbhqmCMElgyZqjs+rGY5fJ2JijKFyqU432pmVQ6hw5F7q+nDGJQOmaI5PLOlPnbvU1PSqIri4/1TN8WH+Pgzz1ZiKhAkEn+s3TXO8zl3OhOKBpqNtF9TN0Bwrc/iZVjbS1E4QCYnzamdqjvtsbmaXT8RicrfJ6TUzNB0ah8XOiZXTTO9oOanyeBwakShZkjmpcrZpxvGlU/DolJQ/qfIkU86hjMz4ovHHZLn3bCo4Ih+jptXUMbCoJO+vqixJXDlqPHaLduXIseXVjCmryvsmaJEkLhk2FrfOksbgojKOr+6X9yRukWTOHzSaYof2U0qNx8/JdYNN1BGROLXfMKrc2pNnqcPN2f1H5f3UZ5EkZlUNYIBPO+ritTm4aMC4vK9DRmJ8aR9GFmt3IrZbrFw68Li8Pw8JGOwrZ2JpX00biyRz2cAppibXGlcRMyq0e/RIksQVA4xVw9VSsd3NiVUjdW0u7T/DVHTHbbFzao1+K4QL+840xbBKFs6o0W+FcF7tTFM7QSTgzBpthwrgrD6z86pT0i0VlTNqtJ0dgNOrTzC9o+X06tm6NvOqzDNOrZ6ra3NC+QmmHBEVlZOqTsr79Z9mFRyRj1GSJPHVCVPzLjplky1cOUr76btbXxs/La+bYKbRl8Q1I7Wfvrv1lTFT817WEAhD/WauHzUtb4YqBNeP1H767tZ1w6fmvZNLEYLrR2afOK8eMgUpz1uUiuD64dpPxt26bNBkrJIl72J5Xxo2M2uJ9wv7TcJhsebtjFw7ZGbWuhdn103AZ3Pmzbh64AxsWUq8n1IzjlK7Ny+GBFzSf3rW8usnVIym2lmSl5MrIXFu7TR8Nv3lhMmlw+nnrsrrSV9GYl71JEod+tWrR/oHMszXL2/G9LJx1Lj0l8MGeGoZWzQsT4bMuKLh9Pfod3quclYyuWRC3ozBngEM9Q7StSu2F3N82fF5/dJlZGqcNYz2a0eJj2UVHJGPWRcPH8NVBpyJQyWRcWL+fOq59PFmL3t/1sARfGWsdnhaiyGAP8w9m8HF2XsqnFg3mJsmzsqJ0a1fzTid0WXaT/jdmlrVlx9OPjkvxh1T5jGpMnun3zGl1fxi6hl5MW4eN4fZNfo3J4DB/nLumXpeXg7oV0bM4LS67M0Oa93F/H7aRQA53wavGDSZ8/tlb3ZY7vRy79QrkKTc3CoJOLduPJcPyP6d9Ntc3DvlaiySnJOjICFxUtVIvjBY/8kYwGmx8cfJ12KTc3OqZCSmlg3hy0PmZbW1yhZ+M/GLOGRbzowxRf35+rCzs9tKMneOux6vzZWTwyMjM9jbh28NuyirrSRJ/HDU9RTbvTlN4jIyfVyVfGf4lYbsvzv8i5Q7SnJmlDtKuHn4dYbsvzr4WqqdlTkz/DYfNw3/mqFeTNf0v4Z+7twcNxkZt9XNTcNuMlWg7tOsz+ZVf4KSJImfzjyZa8dk1uOzhewtkoRNtvDAqedxYr/sk163bp0yh6+PP94wwyLJ3Dv3HM4aaLzD7zfHzeB7x51gmCFLEnfPPINLhhrv8PvFkVP48eSTkSDrzdYiZW75P5kyj2tHTDbMuHTwBH455QxkJEPXAXDL+BP52qjskYpund1vNL+Zdj4WSTbM+PrIWdw85kTDjJNqhnPv8ZdglS0G3qsM4/NDpvGD8acb7iI8rWIQ9027EocBRvcEfGG/4/jphPMMM8aX9OPP067FabEZZpzWZwy/mniJ4Rv5MH8f/jz1S3htzqyfR7fTNbtyBPccd5WhpnoA/T2V/L/JX6PY7snqjHQzJpUO4TcTjTXVA6h2lfKHid+g1O43wMj8jSrqzz0Tvm
p4V0yZo4h7xn+HKmepIadKAgZ5a7lr/LfwaOxkOVx+m5dfjr2JWlelISdXQqLWXcUvx96E3+Y1xHBb3fx49Hfp76kzzKhwlPPT0d+n1GDehsPi4HvDv8dgr/YS5OGMYnsxt4+4nXKH+UTqT6ty7r77cSqX7n2fRr2xewcPrlvB4oa9B2+GqgCLLKGoKnaLlYuGjea6sccxpCR7lKInLdy/i79vWM6C/bu6cggkVCG62tmrWGWZ8weP4otjJjOyNL9s7fcP7OXBjct4Y9/2rqflruuQMixZkjh74Ai+OGoKY8vz6y65onk/f9u0jNf2bQUyE5DSdR3dy1Bn9BvOdSOnMKlCP0yrpbVtB3hwy1Je3rvp4HkrQiB33bNUIZhXO4zrRkxhWmX/vBibAk38c+sH/HffBtKq2vU5fJhxQvVgrh02jVlVxh3PQ7U92MK/tn/A83vXkFIVLJKMKkSXIyBQhGBG5UCuGTyNuTXZuzn3pD3hNh7ZuYTn9q0ioaQOY2QSUyeV9eeqgcdzcs1Iw07IoaqPdvDv3e/z7N4VRJUk1h4Y44r7csXA4zmtZmxejOZ4J0/seZ9n9y0lnI5jleTM5wHQ9RsZ4a/l0v7TOb3PhLyWWtoTIZ7a9x7P7V9CMB3FIskHlwOlLsZgbzWf6zuTM2omG3Z0DlVnMszz9e/xfP17BFLhHhl93ZVcUDebM2qmGXZ0DlU4HeXlhnf5b8Mi2pOdPTJqnOWcU3sCp1fP0Ewe1VMsHee1pnd5qeEdWpMdB79XQNfvUaXcXsJZfeZyWvWsvLYxJ9UkbzYt4rXGt2hKtPR4HSW2Ik6tPolTquboJqhqKaWmWNS6iPlN82mMNx6MkAgEsiSjCAWf1cfJlSdzctXJeK3GnKlPk3KZvwuOyFGgXZ0dPLt1Aw3hELF0Gr/dwciyCs4fOgq/Q/+pJZ5OYZHlrOvie0MBntm2gf3hILF0Eq/NwfCSci4cqp80CpmS7xKSbpIsQEMkyNPb17MvHCCSSuKzORhUVMpFQ8ZS5tT/MSeVNEgS9izX0RwN89TOdewJdRBOJfHa7AzwlXLR4DFUuvR/zN3N0OwW/ZtwazzCs7vWsSPYRjiVwGNz0M9bzIUDx1Lj1v8eptQ0qhA4suyU6UhEeW7POrYHWwinkrisNmrdRVwwYBx1nmLd16ZVBUWoWRnBZJwX9q1lS2cz4VQcp8VGjdvPuf3GMcCr79h2M+yyVXeCj6QTvLR/LZs6DxBKxXFYbFQ5fZxdN55BOtuBARRVISUUHLJNlxFLJ3m1YR0bOusJpmLYZSsVTh9n9BnHML++Y6sIhZSanZFQUrzZuJ41gT2EUzGsspUyh5dTqscxskjfsVWESkpN4ZDtuoyUmmZB83pWd+wklI5hlSwU2z2cWDWO0f5+uq9VhUrSACOtKixuXc+qwHZCqYzTU2TzMKtiHGOLBmZlpNQU9iwMRagsb9/Ayo7NhFJRJEnCb/VwfNlYxhUPNcBIYpcdWe1WBzaxomMD4VSmrLrX5mFSyWgmFI/UjXwJIUiqCWyyPavdhuBmVnSsIZSOIITAa3Uzrng0E4vHZn1tSiSwStkZ28LbWNGxglA6hCpUPFYPI30jmVA8AWseDuGnRQVH5BiWoqq83bCdh7Yu54PmfQf7mHisds7qN5Krhk1iTGl+UYduqUKwqHEHD29fzuKmXSS7GC6LjdPqRnDVkMmML+2T1xNot4QQLGnZxSM7lrGwacfBCpJOi5WTa4ZzxaApTCrra5qxom0Pj+1axjuNW4h3MRyyldlVQ7h84FSmlevfnI0w1gX28/juD3izcePBipt22cLx5UO4bMA0plcMNr32u6mznif3LOGNxnUHK17aJAuTygZxSf/pzKgYZrrmw/bQAZ7Z9z5vNK4m0lXx0ipZGFvcn8/1ncmsipF5Pa0fqj2RJp6vX8z8xhWE05kKkhZJZqS/HxfUzWJ2xVhsJm/O9bFmXm54j/lNywh19QaxSDKDvXWc2+
cEZldMMF27oinexmuN7/JG0/t0psJAZq1/gKcPZ9XMYXbFpLwiAoeqLdHBm02LeLP5XQKp4EFGraua02rmMrt8qulCWoFkgAUti3i7eQEdqUyrAgmJamcVJ1eexMzy6bjziAgcqlAqyHttC1jU8hbtyTYEAgmJckcFcyrmMb1sNh6TEYFIOsyy9gW81/o6bcnmg4wSWznTy+dxfNmJeK3m5pC4EmFFxwKWtL1Ka+LAwb4yflsp00pPYUrpPPw27dpInzUVHJFjVC/s3sAvV71Jcyx8MKR/qCySjCJUxpZW86tpZzGyJHtC6OGaX7+Fn616jYZoUJcxoqiSX0w+i/FluS+DLGzczs/WvMLeSMfB4/XEGOwr5/8mnsXk8tyXQZa27uL/1rzErnCrBiNzbf08pfxo3FnMqDS2pnuo1nbs4//WPs+2UJPuddS4irl19JnMrdbfVtqTtgQb+Pm6Z9gcbNC9jgqHnxtHnpl1W2lP2h1u4s6NT7G+c2+PDJnM8lep3ctXh57JmX2y73g6XPXRVu7Z/ARrAjuRJfmIZm3dDL/NzXUDz+A8nRoaWmqJd/D7rY+xMrAFGfmI7ZoSEgKBx+Liiv6ncUHt3Jyd0I5kkPu2/5tl7esPnnNPDJfFwYV1p/C5ulNzdkJDqTB/3fkoS9tXZ5Y5NRgO2c6ZNSdzSd9zcmZE01Ee2vMoH7QtRXT9X0+ySTZOrjqRi+suyvnpPaHEeWLfwyxpew+B2iNDQkKWLMwqn8vn6i7HJufmvKXUJM/XP8KS9rdQhaLJkJCZUnoCF9R+HkeOzpsi0rx64FHeb3uV9MG+OEd+JiAxvngm59dej9Niznk7FlRwRI5B3b9hMfeseceQrSxJOGQrf51zMTOqBxhmPLRtGT9b9RqQvZaYLElYJZn7ZnyOE/sMNcx4evdqfrjyvzq3vkMYZBJcfz3lQs6oG2WY8Ur9er6/4hmEEFm3MWduHxI/m3ge5/ebYJjxTtNmvrv8cRShGmII4PYxZ3PpAP26DYdqaet2blrxcGa5x+Cem28NP52rB51gmLE2sJubVz5IXE0Z7uT6hUEn86XBpxpmbAnu43urHyCqJI5wcrT0ub4n8LUh5xp2FPZEDnDr2j8RTEUN14s4vXo63xxqPMH1QKyFH6z7Ax3JoGHGCRWTuXHY1VgkY5Gk1kQ7P93wW1oT7YYZk0vG8Z1hXzbsKASSndy1+R4a402GGBISo/wjuXHYN7EbdBQi6TC/3/or9sf2GupIKyExyDOEbw79Hk6DVVDjSpQHdvyKPdFthhl9XP356uAfGo7AJNUED+3+FTvC6w0yZCocfbh+0B34PuPRkVzm78KumU+BHt++yrATApmllbiS5ksL/sOmjiZDr/nv3vX836rXEBgraKoKQUpV+Nrip1jVtt8Q482GLfxg5Qsaz0Y9MBCkhcrNy57h/eZdhhjvN+/glhVPG3IQIHOtKoIfrnqOdxq3GGKsbt/LzcsfIy0UwwyAX65/kVcb1hlibOls4KYVD5HMwQkB+OOWV3lh33JDtrvDTRknREnm1E7+Hzvf5D973zVkeyDWxvdWP0AkHTfshAA8tW8hj+5505BtayLAbWvvy8kJAXi18X3+uftFQ7adqRA/Wv/HnJwQgIUty/nrjqcM1aqJpKP8fOMfcnJCAFZ0rOPPOx4yxEgoCX6z9XeGnRDIJFhuDG7i/u0PGPqepNQk923/DfWxfQZ/6RnGzsgOHtjxRxQD3XgVkebBXb9lT3R7TowDsb38beddpNTsDf1UofD43t8bdkIyDJXWRAMP7voFyV5stnesq+CIHOVqi0e4Y/lrOb9OIEiqCt9f8lJW21Aqwe3LXsq5/oQgk7NyywcvZL0JJpQ0t654PkdCF0cIvr/8uawTWVpVuHXls+Qb47t95XMHE1r1zuUHq59GFUZvTf+TBPxkzXNE0/qdOoUQ/HTd06TUnkPN2fSrDc8TSPbcdv1Q3b3pmYwTkgfj3i0v0hLvue
36ofrj1meJphN5Mf6+8xX2R1uy2v1t5/N0piJ5Vc58ct+bbA9nd6Qf3fMibYnOvBivNC5iY3BHVrtn979CU7wlZ4ZAsKh1KasD6w2cy2vsi+7Pi7EysJpl7dmd3AUtb7IrsiMPhsqm0Hreb8vu5C5rX8i28HpEjgwVlT3R7bzXOj+r7brOJWwMLsv5N6ii0hjfw8KW/+b0us+yCo7IUa4nd64lreZfXXR9RyPr2g/o2j2/Zx1xJZVXwS0Vwa5wO0tb9uravVK/kWAqnjejKR5iYeN2XbsFTVtpTYTzmrwF0JmKMf/AJl27pW072R9tz2tiFWRaq79cv1bXbmPnfraFDuRdIjwtVF6sX6FrsyvcxJrAblNlyF+oX6o73hhrZ0nbprxLhMvIvFD/vq5NIBliUcvqvMt3W5B5qUF/4oukY7zdvDR/hiTz8oGFujZJJckbzYvyZsjIvNr4jq5NWk3zZtNbef0+ILO0Mb9JP0qlCpW3m+ebYrzd/Lrug40QggUtr+RdTj3juL2aNbqzuPVlpDynSIFgSdurKELJ6/WfNRUckaNYiqry0Nblef+oIXMTfHTrSs1xIQQPbVuW9/G7GY9s139SemT7UlN9SiySxCM79M/z0Z1LTXUFlpF4dOcHujaP7frA1O4UCXh01/u6N9on9ywxxRAIntj9vu6N9tn95hgqgmf2vU9a1b7R/rdhianPXEXlpYYlxBXtMPprjUtM/T4UVN5sWkY4rR1Beqd5KUk1lT9DqCxuXU1HMqhp837bCmJK/qF8FZXVgQ00x1s1bVYH1hBMh/JmCATbwtvZH63XtNkc2kBbMnsUS49RH9vH7qh2BGlPdDuNcePLPj2pPdnCtvAGzfHG+F72RLfkHHE5VOF0J5uCxpZJP+sqOCJHsdZ3NNIYzf/GAZmb4It7N2qO7wy1sTPUZuInnWG8Xr+ZtNrzj7YpFmRdoMHU07ciBO827yCc6nlZI5iK8UHrLlNdgVUEazr20xLv+T1PqWkWNG3OKdfhcAlgZ7iFvZH2nseFYH7jOlMMgMZ4gK1B7UjY/MbVphmBVIR1gT2a4282rTT1mQNElQSrOrQjYe80rzA1IQGkRJrl7dqRsIUt+tElI1JRWdquHQlb3LbCVMM0yEQTPmhfpTm+tH25qQ60kIm86C3PrOxY2gsMCyvataNtawJLkDG3jVzGwprAEs3xdYH3TV+HhMy6Tv2IXkEZFRyRo1ht8ezr/EYUTadIaOQ+tCUivcJQhCCY6vmJrrcYAB0auQ8did55rwDaNc63MxkzPbEeZCTDPf57XEkdrKlintHzdahCJZSK9QqjI9XzdQB0avBzZiS1nfFAypyjDpkJXO843TU8zMgiyQfrjfTISHaadqhkSSak93mk8stxOVSSJBHSiaqEUrkl8/YsQSit/Z6H0ubfKxWFsA4jogTJvWvThyVQCXXVZilIXwVH5CiW2SfWQ6UVrVA0/j0/Rs9hei12foyej5XuzfdK41gfy+fRq4yeP4/Mzqjecaj0lmbMT0hdDJ33pDfeLwn972jvrPNLKDrvVW/lEqR1jvNpYQiE7nEyS47mv79pHYdf7aX3ysgOoIIKjshRLb/NWFOqbLJIMm5rz5UkfXZzlRkPVZG95/3//l5l9Hwsv633GFrHytaWPSeGxnW4rXbTIfqDDI3ztUhy1jb2RqX3nrhNVv08yNBpnOY12FRNTyoiC8NjniFUvDoVSn0GG7fpSQih2xelt/qZ6DE81ty69PYkCRm3znvusrjzTiI1ynBazH/mAB6T1Vw/Kyo4IkexRpdW48zSFyWbZEliSkWdZmGoof4KfCYdHhmJ0SXVODTOtc5dQrnD3E1QAgZ4Symx93wTLHd46esuMT2FVzp99HEX9zjmttoZ7q82lYAJUGRzMdDTcx8WWZIZX9LfNMNlsTPMX6M5flzJICwmf/5WycIof19tRulQ02XnJSTGFA/UHJ9YMtz0dQCMKdaurDu+eLjpyVUgGF
00RHN8tH+46c9cRWWkX7u44Aj/cNNOriIURviGa44P9Y0wHQlTURjm1e4CPsQ7ChVzEQuByhCvdpHEQZ7RphkgMdBjvBDjZ1kFR+Qoltfm4KJB40ztBFGF4PPDp2iOOyxWLh90nDkGgmuHTtUct8oyVw6ebPpGe83gaZoOlSRJXDnIeNXSniQjccXAqbqT5+UDjjeVJyIjcXH/KbqN9y7tP90UwyLJnFs3CbdV28G8qO+MvLfVdjNOrZ5AkV37yfH82pmmlk4sksys8jFUOIo0bc6qmWXqOmRkJhQPo9al3Xn69OpZpiZXCYmh3v4M8mo7bSdVzcz7+N3q46xmpE/b2ZldPtNwhVctldpLGVs0RnN8SsnxOGRzDzZeq48JxZM1x8cWTcVtMfdgY5edHFei/Z4P842nyJZfx/NuWSQLk0pONHWMz4oKjshRrquGHmdqJ0i508PJtfol2C8ffNzBVtv5yGdzcGZffc//4gHHmWouZ5etnNdPv4/Kef3GZ+1CrCdZkriw/0Rdm9Nrx+I2sawhEFzUT9sxBJhbNYpiW/69KhShclE/fadsatlQqpzFphgX9p2uazOmaAD93VV5u5+KUDm/Tn+CHuStZYSvf95P+ioq5/bRL4lf46pgQvGIvKMiAsHZfebq2pTai5lSOsFU5OXMmpN0f2Meq4fpZcfnzZCQOKXqZN2S+A6Lk5nlc/NmyMicUHGybrl6q2xlZvkpeS/PyMgcX3Yidh2HSZYszCg7I+/vlYzMxOITcPfSctixroIjcpRreHElFwwYk3c04dYJJ2GV9T/mvt4Srhw8Ke8J4+axJ2ouy3Srwunli0P1Jy49fWPkCXizLCH5bS6+OnxO3ozrhs6kLMsSksti55sjTsnr+BJw2YDjNZd+umWVLXxzxBl5MiTOrj2OgV7tJ3zILAF9Y9hZeTFkJOZUjmFkkfYTPmSiVF8deg757D6QkZhcMoyJJdpP+N36wsBz8/ruysiM8g9kaln28PmV/c9GknKflmRkBnnqmFk+Iavt5+rOxiJZcp78ZGT6OKuYXZE9InhOnzOxy7nnIcnIlNpLmVORvY/RvKozcFlcOTsjMjJeq4+5FfOy2s4uPx2v1Z8zQ0LGaXEztyL7d39q2SkU2crzYEjYZDtzKy/I6XWfZRUckU+BfjntTKZW9cv55nHj2NlcOGisIdsfTjyNuTVDcr7Rfmn48Vw1RDuMeqi+M/okzqwbnSMBLh1wHNcPMxa6vn7obC7qf1zOjLNqx/LNEcbCqJcPmMZVA3PrDisBc6pG8N1RpxuyP7duEl8aclKODInJZYO4fcz5huxPqhrH14eemRNDRmJUUV/uGHOZIftpZSP59rALc2YM8vbhp2M/byiKNq54CDcNv7Kry6pRhkytq5yfjL7e0HLFMN8Avjv82i6GMYqMTLmjmB+P/ho2uedk8UPVz1PLd4ffgCzlxiiy+fjBqG/jtGRfEqlyVvGdYd/CKlkNP9zIyHisbm4ZcbNuomq3Su1lfGPo97DKNsOTuIyMw+Lg28O+j9+mvRTXLZ+tiBsG345dduTEsMk2vjL4Nkrs5VntXRYPXxr0Y5wWj2GGhIxFsvL5AbdT7tDO0Srowyp03/2UKKGkue2Dl3hu94YeW7V3S0ZCkiR+NGke1wwz5iB0K62q/HTlqzy2c+XB9vJaDIDvjjuR64dPz2nJRREq96x7g39sX6LPkCSEEHx9xAl8Y+ScnBhCCP60+W0e2LoQSZI0l526+V8YMoObRs3LqZW6EIIHdyziT5vfAI5s1X4445L+U/n+6DOx5rh09MTuxfx280sg9BiZ78NZtRP5wZgLsOXYrv3F+mXcs+lZFKHdjrCbcWLlWH405lIcluwT66F6q2kVv9r4OGmNVu2HMqaVjuCOMdfg0slx6UlL2tbxq00PkdBpaNbNGF80lB+Ovk53J0tPWt2xibs2/52oEkdC6vFaZGRUVIb5BvDDUV+hyObLibEpuI1fb7mfcD
qaldHfXcetI79Bqb04J8bO8C5+t/WPBNPBrIwaZzU3D/8OFY7sk/ehqo/t495t9xBIdWRllNkr+NbQ71HlzG3ybo438MDOX9GebEZC7rESave/F9lK+fKgW+nj6pcToz3ZzD92/YKWRP3B8z2Skbk+r7WIawfcRp07eyTvWFcu83fBEfmUaVNHM49uW8HTu9YdUaSswunlmmGTuGTweCpc+a9Nbg+28u/tK3hy12piyodLW5c63Fw5eBKXDppItTv/z2R3uI0ndq3giV0riaQ/PHEU2ZxcPmgylww8jtosyxh6qo928J/dK/jP7uVHFFvzWh1cPGASlwyYTD9Pad6MplgnT+1dzn/2LD2i0ZzbYueCfpO4uP8UBnp73iVjRK2JEM/vW8aTe5bQdlghNIds45y647io3zSG+KrzZnQkw7xUv5yn9i2mJfHhZnY2ycLpNcdxQd/pDPfX5s0IpqK8dmAZz+xfRGP8w4WerJKFk6omcH7dLEb4+uadTxRJx3izaRnP1y+kIf7hUuMyMrMrJnB2n1mM9g/KmxFTEixoXsaLDe+wL9b4oTEJiePLxnNmzQmMLRqaNyOhJFnctoxXDrzNnuiHm/JJSBxXMpbTqucytmhETg70oUqqKZa1L2d+05vsihzZ3XqMfzSnVJ3MuOKxeTPSaprVgeW83fw6OyLbjhgf7hvJ3IpTGVc8Me9EWkUobOhcwaLWV9kePrKK9EDPcGaXn8bYoqm6uSd6UoXCltAqFre+wrbwmiPG61xDmFl+JmOKjscm987W+E+7Co7IZ0ChVIL17QfoTMaxSjKlDjfjyvpo5oMIIVgfqOdALEBMSeGxOhjiq2SAV/spJ5JKsq6jgc5kHFmSMozSPpoJoUIINgcbqI92EFWSeK0OBngqGOTTzleIKynWttfTmYwjSVBsdzGupFZ3V8n20AH2RVuJphO4rQ76ussZ4tN+kkoqadYF6gkkowiRYYwpqcWp81S/K9zI3mgz0XQCl8VOrbucId4+mhNLSlVYH9hPIBlFFYIiu4tRRbW4rdo3pX3RJvZEGokqcZwWB1WOUobpTMJpVWFjZz0dyQiKUPDbXIzw1+LVqaHSEGthT6SBiBLDIdupcJQw3DdAk6EIlc3B/XQkw6TUDGOYr1a3XkhzvI3dkX1ElRg22UapvZjhvkGak5cqVLaF6mlPhkiqabxWJ0N8tRTZtHfgtCfa2RXZQ1SJYpVtlNiKGeYboskQQrAjvJ/2ZJCEmsJrdTHQ04diu3Z0IpAMsDu6k2g6ilWy4LcVM8Q7VHPyEkKwO1JPW7KThJrEY3XRz11DqV17aSGU6mRPdDvRdASLZMFrK2KwZ4TuBLk3Wk9bop24msRjcVHrqqHMUaJpH0mH2BPdSiwdRpZkPFY/Az0jdSfIhlgDLYk2Emocl8VFjbOacp0ISEwJsy+6mVg6hCRJuC1++nlG6yaANsUP0JpoIa7GcMouKp3VVDi07w0JJUp9dAMxJVPJ1WXxUesejcOiHcVqTTTRmmgkrsZwyE7KHVVU6CyRpNQ4DdF1xJVOBCpOi58a1xgcOrtyOpIttCTqiStRHLKTEnsllc46TfvPqgqOSEEHFU7FeXH/Gh7dtYQ9kbYjxieV9ufygdM4sXpk3jtOoukErzas4fE977Mj3HzE+Jiivlw2YDonV43WdTD0FFdSvNG4hif3LmZrqOGI8eG+PlzcbyYnV4/TdTD0lFBSLGhey7P732NT8MhuwgM91VzUdxYnV0/EZWA9viel1DSLW9fxfP1CNgSPfAqtc1Vyfu0JnFQ1GY81v4JgilBY2rae/zYsYF3nkU+h1c5yzukzh5Oqpua8NPE/hsrqjg282vgOqwNHPoWW20s4vWYuJ1bOwJ9nsS5VqGzo3MT8prdYHVh7RGi/2FbMKVUnMqdyFkUG8gp6khCCLaHNvNP8BqsDK48Iu/usfuZWnsTs8rkU27Un/2yMXZGtLGp9ndUdS45guC1eZpXPY3r5yZ
QayF3QYuyLbef91tdYHXj3iMqkTtnNtLJ5HF92KmWOqrwYAAdiO1nW/jJrAu+giA9HS+2yi+NK5jG59HTKHflPzC3x3azueIl1gddJiw9HS62Sg7HFpzCh5CwqnAPyZnQk9rI+8AIbA6+QEh9ud2CRbAz3n8rYkvOocBaWWPJVwREpCIAPWndy47J/E+1a+ujpg5aRUBH0dZfy5+OvoW+OyxRrOvZw44qHCaZiSFkYVc4i7ptybdYdHYdrc3A/N638Jx3JsOZac/e/l9q9/O646xjm75MTY1e4ke+t/iutic6D56vF8Fvd3Dnhi4wu6p8Toz7Wwg/W/pkD8TYdRuY9dFuc3DH6i0wo0d96fbia4+3csf7/sT/WpLme3c2xy3ZuHXkdk0tzSyDuSHbyy41/Ynd0fxaGhFWy8K1hX+D4stwSiEOpEL/dei/bwzuzMmRJ5ksDr2VWRW67sqLpKPfv+CNbQpuyMiQkLu9/DXMqcqsLkVDi/Gv3H9kQXJWFIQOC82qvZG7FmTkt6aTUJE/s/RNrO9/XZXSPnVp9KSdXXpQTI62meLHh/7E68BYyFs1iX935GDPLL+TkqqtzWtJRhcKbjQ+wquO/SFgQWRgTS87h5OqvIOewpCOEyvstf2Nl++OaOSUZRoY/suh05lbfhEUyV1jys6iCI1IQC5q2cOOyxxBCGCqOZZFkPFYHj8y6Xne55lAtbdvBN5f9E9UwQ8Ih23nw+C8z1G8sn2FdYA/fXP5XUqpiiCFLMjbJwn2Tr2d0sbGktG2her614j4SStpQ4Sq5a/K7Z+L1hraXAuyNNvGdVb8nmk4YYkhdScc/Hf0lQ9tLARrjrXx39W8JpSKGGQC3jPgCsyr066d0qz0Z4Pa1d9GRNNbcrNux+tqQazix0pijEEyF+OmGX9KaaMupkNg1/a/glGpjO42i6Qh3b/4FjfEDOTEurL2E02uMbXtOKHHu3fYz9sd259RO/tSq8zmrz6WGbNNqir/u/Bm7I5tz6h80s+wMzq39giFnRBFpHtvzC7aHV5FLj5cJxSdzXu03DTFUofDC/l+xNfSu4eMDDPPN4ry625AMODxCCN5svIfNna/mQJDo75nKWXU/z8nhKSi3+buwffcY1NZgIzcvfwJVqIYrdCpCJZJO8JUl/yJooDPrnkgrN614BMWgE5JhCOJKkq8v+wftCe0uod06EOvgppX/MOyEQCacn1LTfGflP2iKB7LatydCfG/VX0goKcMTkopAFSq3r3mQ/dHWrPahVJTb1t5v2AmBTBEsIVR+tvFBdoWPXIo6XDElwY/W3WfYCTnIQPDrLf9ia2h3VvuUmuLnG+817IRkGBn9efvDbOjcmtVeEQq/2fKHnJ0QgIf2/JvVgbXZz0kI7t9xb85OCMAz9f9hefsHhhgP7f4T+2O7cnJCAF5veo4lbe8Ysn1y3/05OyEA77W9wnutrxiyfaXhrzk7IQCrA2+yqOVJQ7aLmv+VsxMCsDX0Lgub/2nIdkXbozk6IQCCPZGlLGy6N+dzK8i4Co7IMagHtr6DIpSci4QrQqUx1smze1dmtf3njgUk1XTON0AVQUcywlN7s9/MH9/zLjElmXO5cxVBRInzxJ73sto+u/89gqloXoykmuaJPe9ktX3lwPu0JXJvwS7IbKl+bO/8rLZvNy2lMd6aVylyIQSP7ck+Kb3ftpJ90Yb8GMDje1/Iare6Yy07I7vzYkhI/Gfv02QL8m4JbWJLaFPeZduf2f9kVwdYbe2N7mB9cEXeXY5fbHg8ayfbpvg+VgUW5c14vfEJUmpC16Yj2cTyjlfJt9vtwpYniStRXZtIOsDStqfzOj7AsrZniKQDujYJJcyytkfyJAjWB/5LMNWY3bSgvFRwRI4xNceDvHlgU95l4QXw711LdG+0wVSMVxrW5N1HREXwn70fkNJpix5LJ/lv/bL8GULw/P6lxBXtmhIpNc3z9Yvz7uuiCJXXGpcT0okgKULlhYb8JwsVlU
Wta+hIhjRthBD8t2FB3pVxVVSWd2ykKX5kMvOheuXA23mXvBYINod2sC+qH915velNU6XU98Xq2dHDVtRD9VbzfFOl1FuTLWwObdK1WdRijhFKd7K+U/+B4P3W100x4mqUtYElujYr2l831ek2LZKsDbyta7Mu8Br5OjqQaWC3LvC6rs2W4HwUoX0vyCYJiQ2Bl/J+fUH6Kjgix5ie2bvC9DEOxDp5v2WH5vh/968kneVpLZs6khEWNGnfzOc3riam40QYUVRJ8Eajdqh+Uct6gin9p7VsSqlpXm9crjm+on0zLYmAKYYQglcPaE8YG4M72B9rMnErzyQyvnpAO4K0K7KP7eE9eTtU3YzXGhdqjjfGm9gY3GyqwZyMzBuN2hNfR7KdNYFVphlvN72hOR5Jh1gZeM80Y1HLa5rjCSXGsva3TTfje7f1Zc3xtJpiefurOS8tHa4P2l7UjFKpQmFl+wumvlcCwcr2F1A17klCCNa0P5P38TMMlfUdLxyxU6ig3lHBETnGtLJtj6nOrZBJXF3dfuT21W6t6dhDPv1DDpVVklkT2KM5vjawx3QLeYskszawW3N8fWA3VpMMkFivw9gQ3GX6OgSC9Z3ajuHG4E5TT8aQiYr0tNW3W5uDO0x+4hnGRp08kW2h7SYJGcbm0BbN8V2RnaYmvW7GtrA2Y190V9ZlFSOMXRFtRmN8Hymhv6ySTQJBfWyn5rm2Jw8QV7PncmVTW7KBuBrpcSycbiOcbjfN0DtOQg3Tmao3zUioITqT5o9T0JH6WByRRCLBhAkTkCSJ1atXfxzIz6wCyeyJptkkwRGVSD/ESEVN38wFENJhhNIxUy3kIZO4GtZhhNMxU12HIXMz14uqRNIxzDptAMG0PkM20dm4W2EdRjQdzbu6pmGGEst76efDx9FjmIuAdSuuaP/OYr3ESIs0abXnJ/C40vPEno+0jvXpY/TsNCU1/j0fJXrxWAX9Tx+LI3LLLbfQp09udR0Kyk/5FiXL5Tj2XmBIWRjWPLqQHsmQdHu7WCVL3iW4D5VebxeLlG/f5MMYOlsHrZLFpFvYfRz91uu9sdFf7/OwSBbTDm7mODrX0UtbMPXKkeu9j7lKa8uo3IsMrferN7erajHyLeueC6M336vePFZB/9NH7oi88sorvP766/z617/+qFEFAZVOn+GumlpShaDUoV1uu8zh64XlBiixazNK7V7TT/mSJFFi167qqTdmVBZJplSnbHiJ3Zd1h0U2yUiUOrQrhxb3AkNCotSuvde/yOYzlY/QrRKdCqhFtt6pFaR3HL81vwqsh8tr1f7MfXlWeT1cLotHMwrl66XrsEpWHHLP5fu91uJeYUhIuCw9v19uS+8wMsfq+T1xWvymEm4/xLDmV123IH19pI5IU1MT119/PQ8//DBud/ZS0olEgmAw+KG/gnLT6bVjTeeICASn1mhX2zy1ZqzpZRNFqJxaM05zfF71+F5hzKvWZsyt6h3GiVXjNcdnV0ww/XmoCObqFBybXjbB1PEh85nPqdTu1jypZJzpaIIEzKqYqjk+tmg0DpMNwyQkZpQdrzk+1Dccj0XbATbGkDm+bIbmeD/3YIps5iYsGZnJJTM1x6ucfU23mZeRGVc8UzMqWGyvpMY52FRkUkZmuG+qZp8bl9VPP/d4U46ChEw/93hc1p4dUKvsYIB3umlGpXM4Plv+5fEL0tZH5ogIIbj22mu54YYbmDzZWDv6O++8k6KiooN/ffv2/ahO75jVSdUjKLbn1z8EwILEjIoh1OmUep9ePpQqZ/5PZDIS44v7McSn/aMeV9yfAZ7KvG+BEhKDvdWMKdKurjrUV8tIfz9TEaQaZymTSrXLsNe6KphYPMxUMmmxzcv08jGa42WOIqaXjTPFcFuczCrXdnZ8Ng+zyqeaYlhlGydUTNMcd1qczKmYbYohITGncpbmuE22MafyJFMMgcoJOqXeZUlmdvlppiZwFZVZ5adojkuSxKzyM/M+fjdjRv
lpujbTys42tVymojK1TL8S7XGl55jamSNQmVR6rq7NuJLzTTPGlVyQ9+sL0lfOv8Zbb70VSZJ0/zZv3sy9995LKBTitttuM3zs2267jc7OzoN/+/bty/X0PvOyyVYuHTA178lVQXD5QO3JAjI32sv6T8/7RqsiuGyAfrlvSZK4pN/MvG+BAsEl/WZkzQG5qO+svCMWEnBh31lZkzjPrz0h72UNCYlz+szKupZ+dp85eTNkZE6rnonDoh+NOL3GHGNuxfF4rNpdfAFOrpprijGtbHLWJZ7Z5SfmPbnKyIwtGk+ZTmdagOllJ+ad3CsjM8gzgmqXfuO440pOwCblF0GSkKlx9qevS79FweiiWThlD/kkXEvIlNirGejRjkoCDPEdj8daktf9RELCYy1lsE//nlXnnojf1ifPqIiEQ/YyxDc3j9cWZEQ5fyo333wzmzZt0v0bNGgQb731Fu+//z4OhwOr1cqQIZkv/OTJk/n85z/f47EdDgd+v/9DfwXlri8MnsVwf3XOeRwScH7ficyuHJbV9rL+0xlf0i/nPA4ZiXnVY5hXrf2E361zaicztWxozk6VjMT08uGc2WdSVtuTqiYwu2JMHgyZccWDOK9OO0TfrWllo5lXNSXnG62MzBBvHZ/rm71/ypiiIZxZMzun43cz6tyVXNZP/8kYYLC3P+fXZrfriVHhKOXyfvpPrQB9XDVcXJf7k6eMTJHNzxX9LslqW+Yo49K+V+bFcFs9XNHvmqy2XpufS/p+KWeGhIzD4uTyfl/Oauu0uLm03zfyYEjYZBuX9cveB8Ym27mw7035uSGShYvqbs7KkCUL59TeSr7Ozjm138+aWCtJMqf2+UGXXW4cCTilz+1YTS4bFqStj6zp3d69ez+U49HQ0MBpp53GU089xbRp06iry94mutD0Ln+1JcJ8+f1/sSPUbPiJ/5Sa0fzquM8Z3nkTTMX4+rJ/sLmzwRBDAmaUD+Oe467AYbEZYkTSCb678h+sCRgrpiUhMbFkIPdM/Dxuq8MQI6Gk+NHaf7KsfYuhd0pGYri/L3dPuB6fTf8Jv1spNc2dGx/ivbbsvVC6Gf09Nfxq3NcoNphUqwiVP2x9hLeblxlkyNS4yvnF2G9S5ig29BpVqPx95+O83rTIMKPMUcIdo2+kymmsmaIQgif2Pc1LB4z1Bel2Qm4b+V1qXMaaKQK82PA8LzQYK3TV7YR8Z9gt9HUba6YI8HbzyzxX/7BhhsPi5KuDb6O/x3j7+SVt83l2/1+7vrv632AJGZts5wsDb2Ww13jX5TWBd3hu/x+6foPZGRbJyqX9bmOoz3jX5a3B93hh/68QKFl/65l+yBbOrbuVYX7tXJrDtSe8lJfrf4wq0gaWajKUeTW3MrxonmFGQRkdld13d+/ezcCBA1m1ahUTJkww9JqCI2JOkXSCe9a/wgv7V6MIccSPu7sVvc/q5PODZ/KlobNzDifHlRR/2PwKz+1fQUpNAx++TXUz3BY7lw+YwZeHnKS7hbMnJdU0f972Ks/s+4BkV12FQxndHV6dso0L+07nhqGn6m6p7UlpVeGfu17n6X2LiClJJKQPvV/dz1A22crZfY7nK0POMuxMdUsVmb4xT+17m6gSP4KR4UhYJJlTqqbwlSEX4LIYc6a6JYTg2fo3eXLffMLp6MH3/3CGLEmcUDGJLw/+HF5rbjlFQghebVzAU/teIpgOazIk4Piy47hu0KUU2bR3mWjpneZFPL3/OQKpzh7b23dHmI4rmcDnB1xJib04Z8YHbe/zbP2TtCfbemR0/9sY/ziu7P/5rEsyPWlNYCnP1z9KW7JZlzHMO4aL+15HpTP3JNRNwZW82PAvWhINuoyBnpFcUPslql3Gnalu7Qyv5dUDf6U5sVeXUecazpl9vkIf1+CcGfXRjbzReD9N8e1IWBB8uNiajAUVhSrnEOZVf5Vat7HO1IeqOb6VBY1/oCm+qUdG97+V2gdyQtXXqfMYd6YK+p8KjkhBH1
IgGeW5fSt5es8KGmOdJNU0ToudIb4KLhswjVP7jM55Uj1coVSMF+tX8dTepRyIBUiqKZwWG/085VzS73hO6zMOV5YchGyKpBO8dmAVz+xbQn20jYSawiHbqHWXcWHf4zmtZiIeg1EQLcWUBG82rub5+sXsi7SQUJPYZRvVzlLOq5vOqTWT8GbJc8imhJJkYctqXmh4l72RRhJqEptso8JRzJk1Mzi1eip+m7mdHSk1xeLWNbzYsJDd0QbiSgK7bKPUXsSp1dM5pWo6xTrbjo0orSosa1/Dq43vsDO8l7iawCZZKbb7ObFyBvOqZlFiN7fNVBEKawLrmN/4FtvDO0moCSySBb/Nx+zymZxYeQJlDu3EaiNShcqG4DrebnqD7eFtJNQ4smTBZ/VyfNlMTqg4kXJHhSmGEIKtofUsan2dbaGNJNQYsiTjtniZVDKTmeXz8nJADmfsimxicesrbA2vJaHEkJBxWTyML57B9PJTqXKa2wAghGB/bAtL215mW2g5CTWGBDgtXkb6pzOl9AyqXQNNMQAOxLayqv1FtoeWkOiqyuqQPQzxHc/E0rOpcWVfPs6m1vgO1gVeYEdoYVehMoFddtPfezxjS86j2jmqV+oMfVZ1VDoi+ajgiHw0EkJk/YEllRQLWlazqmMroXQMGQm/zcPM8rFMKRuZNf/ECCOlplncuoYVHZsIpaJIgN/mYWrZGKaVjcmaoGmEkVYVlnWsZWXHeoKpTFVEn9XDxJJRTC2dkDVyYoShCpXVgXWs6FhNKBVCReC1ehhTNIqppZOwy/pOnlHGxuB6VnQsJ5wOowgFj9XDcN9IppRMxZElcmKEIYRga3gjqzqWEEoFUUQat9XDIM9wppTOxGFx9gpjV2QzqwLvEUoHSKtp3BYP/TxDmVRyAi6LfnTGKGN/bCtrAgsJpdpJixRO2UOtewgTS07EZdFf6jLCAGiMbWdd55uEUq2k1QQOi4cq52DGFZ+CO0udD6OM1vhONna+TijdTFqNY5c9lDkGMrr4dDxWfQes+9aejdOR2M3W4MuEU42k1Bh2i5ti+wCG+8/CY6vsFUYwuZcdwZcIpfaTUqPYZA9+ez+G+M/Ga9MvdmmUEUk1sCf0HOHUHlJKBKvswWOrY4D/PLw2/QiQUUZBxlVwRArKWx3JEE/te4eXGhYTUeIfCsFaJBlFqJQ7ijivdjbn1c7KeekAIJiK8Nz+t3n5wLuE0tEeGcU2H2f3mc15tXNw5xGBiKRjvHTgbV5tXEBnKvQhRvf/9lm9nFY9m7NrTsKXRwQirsR5rfEt5je9TUcq0CPDbXFzcuUczqiZl1fBrqSa5O3mN3mr+Q3aDls+6P7fTtnJ7Io5nFZ1OsX23OtXpNU077W+yTvNr9KabD4Y/j6UYZcdTC+bw0mVZ1KaR3RAEQoftL3Fu60v05xo+BBDQkagYpVsTC6dw9yKcyh3GM/16JYqVFZ3vM3i1v/SlNjTxVAB0cUQWCQL44vmMLPiPCrziA4IIdgQfIdlbc/RGN92BKP7/48sOoHjyz5HpTP36IAQgu2hhaxsf5rG+MaupYIPM0BiiG82k0ovoco1PGcGwJ7we6zreIzG2JoeGACCfp5ZjCu9nCrX2LwYDZElbOh4lMbYsg8x6EoPF6j0cc9gdMmVVLuzJ5f3pJbYcrYG/kVT9L2u5U4AtYshIVCodE1jaPE1VLmzJ5cX1DsqOCIF5aVd4QPcuvbPBJLhrFsoJSQGeWr45bivUOow/tnUx5r54dr/R2uiI2uCq4RErauSn4/9GhVO4xNsc7yNn228l8Z4S1aGjES5o5Qfj/omNS79p79D1ZEMcNfm37M/1pA1sa47mfLWEd+hzm281UEoFeKP23/H7sguQwyP1cN3hn2Xfu7+hhnRdIS/7vwtO8LdibranP8lU97CQK927ZTDlVDiPLTnt2wJrclqKyNjle18YcD3GOrLvrOqWyk1yVP7fsfG4JIec28OZ8iSlcv73cIwv/HJTx
EpXm74A+s738rKkMi0KDiv7hZG+LXrmhwuVSi80/Qn1gX+e9BB02OA4JSa7zKy6FTDDCFUlrbez7qOxw0xBCozK29iZPH5OTAEa9v/xtr2vxtkKBxX/k1GFV9hOCohhGBb58Osb/tdj7kePTFGltzAiJIvFyIfH4Nymb8L3XcLAqAh1srNq/9kyAmBTJ2OXdFGvrv6PsIpY432WhMd3LL6D7QmAoZ22QgEDbEWblnzezqTIUOMzmSIH63/LY3xVkMMFUFrooMfrv8tbYmAIUYkHeHnm+6hPnbA0E4eFZXOVJCfbbqb5niLIUZcifObrXexJ7LbMCOSjnD35jtpiDUYYiTVJPdvv5sd4a2GdkOoqMSVGPdu+yX7orsNMRSR5h+772ZraJ0hexWVlJrgb7t+yW6d7rMfeo1QeGLvPWwKfgCQ9f1SUUmLFI/s+SU7wsZ2MQmh8mL9b1nf+bYhhkBBReHZ/XeyLbTEIEPwduMfWRd4sesY+r/DzO4SldcP3M2W4FuGGMBBJ8QoAwTvNf+GzYEXDDPWtv+dte1/z4EBK1vvZVPgMcOM7Z2PsL7tdx86RjbGpo4/s7njL4YZBX08KjgiBaEKlR+u/SvRdDynYlKqUKmPtfDrLY9ntRVC8PMNfyeYiuTGQKU10cndm/9lyP63W/9Oe7IzZ0YwFeaeLX/BSIDwgR3/oDnemjMjmo7x6633GmI8uuch6mMNOTOSapI/bPutoTb0z+3/N3uiO3KqOCkQKCLN/dvvJqUms9q/1vgfdoQ35sxQhcrfd91F3EAn20Utz7IltDzHImUZx+vfe+4kku7Mar2i40U2BheQzVk7kgHP7r+TYCq7A7qp83XWd76UIyOj1xvuoiO5P6vdrtA7B52QXPVe829oi2/LatcQWcLa9r/lxVjR+keaY9kjZ22x1axr+21ejE0df6Ypujiv1xb00ajgiBTEio6t7Is1o+RR0VJF8F7rWhpj7bp2m4K72Bbem1fVTBWV1YGt7I0c0LXbHdnP+uDWvBnbwrvZHt6ja9cUb2ZFYE3ejPrYATYEN+vaBZIBPmhfkldJahWVtmQrawOrde2i6Qjvt72dV4VRFZVQupNVHR/o2iWUOO+2vpYXQyCIKRFWdOjXK0mrKRa3/jfn43czUmqCFR1v6tsJlSWtT+XFAIEqFFZ1vJyFIVje/gT5FPXKUARrO57Pare24zETPVckNgSyvw8bOh7NmyFhYVMgu6O0rfORrqWp/BjbAsZquxT08ajgiBTEC/WLTPXekJF46cD7ujYvNSzCYoYhybx04D1dm9cazV6HzCuNC3Rt3mxeaJoxv0k/jL6odUHeJci7GW82v6Fr80H7QkNREy1JSCxoeV3XZnVgMUk1njcD4N3WV3UjSJuCS4kq+TfHFAg+aHsZVee92BleQSjdaoKhsrLjZRSR0rRpiK2nI7mXfKIh3Yz1gVdIqdrLpG3xbbTEc4tOfZihsD34OglFe5k0mNxHY2yZKca+8AKiae0IUizdTEPk7azLMXqM5tgSwqlCC5GjRQVH5DOu9kSQD9o25fWE3y0VwUsNizUnjEg6xqLWVXlFXA4yhMr8xiUHi6YdrqSa4p2WJSavQ+Xd1uXElJ4nT1WovN280DRjRccaOlPak+eClvwiFYcyNoc20ZrQvpm/2/KmKYZAsDe6k4aY9s38/bb5phq/AbQkGtgT1V4OWN7+mokn/IyCqTZ2hrVzWFYHXjXNiCshtoW0I0gbAi/n/YTfrbSIsy24UHN8S/BF0wy1yxnR0o5eYGSOox1B2hvKLwJ2qCQs7A4+Z/o4BfWOCo7IZ1yN8XZTE1K3QukoUSXR41hrIoAi8p+8u5VQk3R21QI5XIFk8GDVVTNShKKZtBpVYkQVY4m5ehIIWhNtPY6l1TSBVM/8XNWi44i0JZt7hdGaaNIZa+yV71ZbUo/RkPfT96FqTzZq8xP7TTMkZAI6jI7kvryf8LslY6Ezpb182Znc3yuMUKpecz
yUMv9egUQ4pZ3vEk7tM+3gCgQRHUZBH68KjshnXHEN5yEfaUUSYr3K6PlYcbX3GHGN69D6995kJD6G61CEYmpZ5lAldN6TlMiezGqMoe38mV36gYyTkNBZ0kjqjBlnSLrH6Q0GSCRV7eTeVFeFUjMSCAMMc86nQCWlw0irsV5wcFXSas8PNQV9/Co4Ip9xOfMoSKYlt0blzXyKnuXO0K/6mYtcGgXUepVh6ZnhkHvvvdJiWCQLFim3XjxacmowAOy9dC26jF74TAQqDlnvOnLrxdMzQ2D/iBnZjmOXjTVP1JOEpMuwyR7yTbj9H0PGpsOwyi7TERGQsWWpsFvQx6eCI/IZV42rrBd+1FBk82o6HBWOYqxZyrUbkcvioMjW882j2ObD0Qttuq2SlVKN/iguiwuv1VwPGMgkk1Y4ynrmy1ZK7eZ6p3Sr0qldoK3CUdUrjAqd/iiVjj698t0qd+gx+prO38gwtAvNVTj6mWYIVEodtZrjpY7+vZC/kabErt3VvNjer1dyRIrs2uXS/fb+vbBsouK3axfl89kGmI6ISIDXZrzwX0EfrQqOyGdcJXYfM8vHZO0doycZiXP6zNCsVui2uphbOcncrhlkTquertm51ybbOLlyhukdLXMqpmpGPmRJ5uTKOaYZU0qPw6fTjXZuxUmmbuYyMmP8Yyi19+zsAMyuMNfWXEJmoGco1U7tCXx62SmmJgwJiWpnX/rqdHGdUnqa6ZyEYlslAzzaVVwnlJxhmuG2FDPYO0VzfGzxWabzN+yymyG+2Zrjw4vOMc2wSDYG+7S/O0P8Z/dKPs0g35ma4/185/RKjsgA3/mmjlFQ76ngiBTEubWzTSWTCuDMmum6Nmf1mW1u1wwqZ9bol8o+tXq26R0tp1efoGtzYuUJphmnVM3VtZlVfoKpG62KyomV+o7GlNJZ2KT8Oy4LVE6o0C8rPr54Ok6d5YjsDMGs8tN1y3EP90/BYy3OmwESx5ediazjiA/wjKfIln8ESULmuNKzdJfDqpwjKHMMJN9lDQmZMcVnYdVZDitxDKTKOdZUjY+h/tOx6yxpeG196OOebqrGRz/vSbh0Gvo5rWXUek42xahyz8SdpdleQR+fCo5IQUwoHsIgT5+8oiIyEnMqJlDhLNa1G+brz0j/wLyiCTIyk0tGUevW7wXT113DhOJReTNG+YcwyKvfpbPCUca00kl5M/q7+zLCp9/C3G/zM6N8Vl7OiIxMlaOKsUXjdO1cFjezKublzSi2lTK+WPsJH8Am25ldof1kqycJGY/Fx8RifefTIlmYVX5engwJh+xiYsmJ+naSzPTyi/NmWCQbE4pPz8KQmFx6GfklekpIksy44nOyWo4rvdJUxGJU8UXZbUquNFHjQ2VUyWVZ7YYUX22qVsmw4mvyem1BH40KjkhBSJLEz8Z+Ca/VndMEKyPT31PNd4ZfYsj+9lFfpNTuz2mJRkam2lnGd0cYu3HcOPQLVDrLcr6OEnsR3x12vSH76wddS62rJmeGz+bl5mHfMNRw64p+V9HfPSBnhtPi4ttDb9Z9wu/WuX0uY7B3eE7OiIyMTbbztSHfxyZnj6jMq7qIEb6JOTEkZCyShesH3Y7DQDLqjPJzGV00I0eGhITM1QN+gNuavWnjhOIzGFd0KrlFLCRA4qK+P8Jn014m69aIopOZUHJhDsfvZgjO6PNDiuzZn/D7e2cysfTzOTIyOqH6Nkodg7La1bgnc1z5N/JiTKv4HuXO0VntSp1jmFB+W16MMaU3UuHSd6IL+nhVcEQKAqDSWcLvJ36LCkdRV4NufUnAcH9ffj3h67itxnYulNr93DPhRqpd5YYmDQmJAZ4a7prwbXw2Y7sKfDYPPx9zE33dNYYZNa5Kfjn2Zors2nkbh8plcfKDkTcz0NOva0LT50hIlDlK+fGo71PmMJaIapft3DTsuwzp6nKbnSHjtxVx64jbdZNUD5VVtnLD4O8x0j/OEENGxm318u1hP6LGpZ0Uea
gskoXPD7iJMUVTDDOcFhc3DP4xde7skx5kcnc+V3cj44vnGGbYZAefH3gH/T2jDDEkSeKMPt9kYvEZXQz9W6eMBatk43N972CQ9zhDDIATKm9gUuklhhgSFmQsnNnnxwzxGe/we1zZFzmu7LqDx8jGkJCZU/0DhvpPM8wYVXwlx5V/0zADJKZV3MKwYuOO2KCii7ucEckgA8aUfYehhWjIUSdJGOnA9QkplzbCBfWOgqkIL9S/xwv179KRCmGR5IMVUyVJQhEqda4KzqudzZk1x2O35J5nEEnHePnAu/y3fiFtyc4eGVWOMs6pPYEzambitOS+GyauJHi9cREvN75DS6L9IEMAchejzF7CmTVzOLVqNm6NLbt6Sqop3m5eyOtNb9EYb8YiWboYAhkZBYViWxGnVJ3IKVVz8eSx4yatpnmvbRFvNL3OgfgB5K7W7wKBLMkoQsFn9TG34iROqjwZny3334kiFJa2LWJBy2vUx/Z+iCEho6LgtniYWX4ycypPpchWkjNDFSqrAu/xbssr7Ivt6JHhlF1MK5vH7PLTKbaX58wQQrC+czFL2l5kb3Rzjwyb5GBS6Tyml59Nqb06L8a28BKWtT3P3ujaQ5yF/zGskp2xxacwpfQ8yhzGHLbDtTu8lFXtz7A3uryLISFQkZFRUbFIVkb45zGx9CLKHAPyYtRHl7O+40n2Rd7vct4yDAm5i2VhkO9kxpRcQrlTfzlRS02x1WzqeJx9kYVd7uGhDIGERD/vSYwqucxQJKQntcfXsT3wKPWRNw4es5vR3Se5j2cuQ4qupNxl3CksyJxymb8LjkhBPUpRFd5v28DKjq2E0lEsyPhtHmZWjGVc0WDd5QVVqKTUFHbZrmunCJUV7ZtY0bGRUCqKRCaiMa1sLOOLh+ouL2QYSWyyPavd2s7NrGhfTygdQSDwWT1MLBnNhOJRunkxQgiSaiIrQwjBptBWVnSsIpQKoyLwWj2M8Y9kYsk4LDpbl4UQpEQCq5SdsT28nZWB5YRSQVRU3BYPI3wjmFB8HFZZOxHyfwwbcpZz2RPdyaqOJYTSQRSRxm3xMNg7nPHFU3WXYoQQpEUCSxYGQH10F6sCiwmlAygihdPiob97GBOKp2PT2YL9P4YVOUsdlKb4HtYEFhJOdZASSZwWD7WuIYwrno1d1o7gCSFQRALZAKMtsZ/1nW8SSrWRFgkcspcq5yBGF52Iw6IdwRNCoIoEkmRBzpIwHEg2sKlzPqF0M2k1hkP2UuYYyIiieTgt+hE8RU0gSXJWRijVyPbgq4RSB0ipMeyyh2JHf4b6T8dpKdZ9raomQJKQJf2HhWi6mR3Blwml6kmrUWyyG7+tH4P8Z+Ky6i9bqSJT5E+W9OvSxNNt7A39l1BqD2k1glV247H1pb/vHFxWY1HCgnpPBUekoI9dnalOFrYs5J2Wd2hPZjrxSkhUOio5qfIkZpXPwm01V7QpnA6xuHUBi1rfpD3ZevDpp8xezuyKeUwvOwGv1djyipai6TDL2hfybuvrtCWbDm4/LbaVMaN8HseXnoTP1nOdEaOKK1FWdyxgSdsrtCbqDzL81lIml53ClNJT8NvM1RJJqnE2BN5hefuLtCT2Hkzs81hLmFB8KhNLTqPIbu7mnFYTbAkuYHXHc7TEdx5kuCx+RhWdxriSsym2a9cAMSJFTbIzvID1Hc/QEt9yMAnSIfsYVnQ6o4rPo9je1xRDFWn2hheyKfAkLfH1Bxk22csg32kML7qAEgO5EdkYB6KL2B54gpb4KgSZnklWyUNf76kMKbqYYsdwUwwhFFpi77I7+BhtsQ8QZFoeWCQ31Z5T6O+/jGLHWJMMlY74+zQEH6Yj9i4qmeq5suSi3D2PPr4r8TsmGsqD0mYIQoklNIceIhB/C9HliEiSg2LnSVT6rsHnON4Uo6CPXgVHpKCPTTElxiN7HmFJ25KuIHjPXyerZOWkypO4uO5i3af3npRUEzy57xGWtC9EFWqPDAkJWZKZXj
aHz9VdhT3H4mZpNcULDY/yftsbpIVCT7sXuvNBJpXM4qK66wwlUh4qRSjMb3yU91tfJq1R/rw7nDymaAbn1d2Ay5Lbco4QKotaHmNJ67OkRJzuZMbDGQLBMN80zuzzTTzW3BwrIQTL2p5gWdvjXeW+tRgqAzxTOKXmJrwGkjUPZ6wPPM2K1n+SUEMHj9cTo4/7OOZWfx+fLfellq2dz7Oy9QESakCDYUGgUOkcz4yq23SLeWlpT+hl1rb9nrjSpssodYxhcsWPKHIMyZlxIDKfTW2/Iq40ARY4bNdKN8NvH8HY8p9S5Mh9GaQt+jbb239GPL2/R0b3v7ltQxlW9jOKnLkvgwTj77G7/Yck0rt0GQ7rQAaU/gK/c0bOjII+HhUckYI+FgVTQe7ecjcHYgcM1daQkBjuG86NQ2/EYbDsezQd4d7td7E3ustQcSwJif7uQXxz6Pdx6YTHD1VCifOXnXexK7LZIEOmxlnHV4f8EK+BHRcAKTXJo7vvYlt4NUa2aErIlDlq+OKgnxqOjigizbP77mZLaLEh+0yCazlXDbiTYruxOhmqUHi94TdsCr5hmOG2lvC5fndT6jAWuRBC8G7T79nY+ZxBhgWHxcvZdb+jzKld/Oxwxsq2/8f6jkcNM6yyi1Nqf0+F01iCK8DG9r+yoePPBhkysuRgds0fqcghl2FX50Nsar/boLWMLFmZVHkvFe6ZhhkNocfZ1nZH139l+/7KSMiMqvgD5Z5TDDPaIs+xs+2mruNnY2RyWgaV/ZYyz/mGGQV9fMpl/i7smikoLyWUBL/b+jvDTghk0sa2hLZw/477UQ0UUEupKf6847fsi+42XKEz055+Fw/s+B1pNZ3VXhEK/9z9O3ZFtuTAUGmM7+evO+8iqWZv7KYKlSf3/YHtBp2QbkZ74gD/3PV/uk3fDtoLwcsN97Il9L6h43czgqlW/r3nh8TSIUOvWdD0gGEnpJsRTXfw9N7vE0l3GHrNsta/G3ZCMgyFhBLixf03EUppd+o9VOs7HjHshHQz0mqU+fU3EkzuM/Sa7Z3/MeyEZBgqioiz6MC36ExsN/Sa/aHnc3BCAFRUkWJF8zcJJNYbekVL5LUuJ8SIg5BhCBQ2tnybQHyZIUZn7J0uJ0Q1yBCAys62m+iMLTDEKOjoVcERKSgvzW+az57onpyrjAoEazrXsKRtSVbb91rfZntkS84MFZWt4U0sbnsnq+2KjnfZHFqTc3EkFZV90Z0sank1q+2m4FI2dL6fc7lzFZXm+H4Wtjyb1XZXZDVrA2+Sa0EsgUog2cSilsey2h6IbWJ1x3M5Hb+bEUl38F7zg1lt2xM7WdX+cF6MhBLi/eb7stqGUg2sbDPuIBzKSKsxPmj+TVbbeLqV1a2/zpkBmUTWFS0/z2qZUoKsb/tpnow0a1t+QLaAuKLG2NKaT70OgUBlc8v3EFkeOlSRYmfbzeRXzE2ws+0mVJHK47UFHS0qOCIF5SxVqLzZ/GbefUQkJN5o1n+qFkLwdsvrmOnk+Xbz61lvtItaXs2ruihkbrXvtr6WNbrzfutLWWtCaDNUlra9RlrVv9Eub3sx7x44ApXVgddJqnFduzXtL+RdVlugsDn4FnFFP/KyIfCcKcbu8CIi6TZdu62dz5r4zBUaYksJJvfr2u0MPZf370Og0pZYRyCxTdduf/h5ExOwSji1g0Bita5Vc+QlFBEmPydBJaE00BF/T9cqEHudtNqWJ0OQVtsIxObn8dqCjhYVHJGCctbazrUEUoG8Xy8Q7IrsYk9kj6bNtvAmWhKN5Hdzyqgp0cCOyBbN8b3RHeyPGcs90VIg1cbm0BrN8Zb4fnZFNuQccTlUUSXEpuBSzfFgqoVt4aWmeuCk1DgbOxdqjsfSnWwJvoOZpmmKSLOxU3vCSCoRtnS+aooBsDnwovY5qAm2dD5v6vOQkNkafF5zXBVpdnT+B0wxLOwIPqk5LoRgd9D40pIWY0/wcV1GffBfmHkYAAv1wUd0LZ
pC/8TcVGTpOkZBn1YVHJGCctay9mWmOtBCprrl8o7lmuMrO5Z2FaQyw7CwskN7Al8T+KAXGDJrAtrLTOs7l+QdDemWhMy6gPZT5Zbg+6amim7KBh1HZGd4CapJBwEEWzrf1hzdH12G0rVVM3+CyvaQdrStMbaKlBo2zdgVfF1zvD2xgbiiH5XJzlDYF35NczyU2kosvR8zjrpA4UDkNc2lk3h6P5HUFlMMUGiPvYOiEW1LKe2EE8sw47SBQjixlJTSbuIYBX2SKjgiBeWszlSnqadvyFRQDekkSIbSQVNPrZCZMMLpoOZ4ZszcpjEVlVBKmxFJd5qudyBQCaUD+gzTP2Whm0waTQfyXjI5VHqMWLoDc0/f3ccJaI7FFWMJs9kUVzs1xxK9NCGm1DBC9Oz8JXuJIUiTFhENfm9N7IK0GuhxJLMk0ztK99r5FvRxq+CIFJSzFI2bYy7KVLDUPo4qFFNLJt3KxugNKUJ7d46KYtbXMcbohQk8G6M3ykfpRVXMR1wMMHrpM9dyELKN5Sqta+lNhtD43LX+PR9p5bL0JkMUElY/tSo4IgXlLK/Vm3eyX7ckJNw6dT7cVo/p5Z8MQ7sgWKZYmPnr8Fi9OgztsVzk0akY65Q9pqNHAG6L9l5/h+w1HQUDcMra74dD9tIbXptD1v7M7VnKohuVTdb+7trk3mHI2LBolE63yb1XV8mqcb5W2VwF4UNl0ziWpRcZvXmsgj5eFRyRgnLWcN9w09EKBYXhPu2S1kO9I0xPfCoqQ7wjNMeHeEeZfgoXCAZ5R2qOD/SYZ0hIDPSM0Rzv7xlr2hGRkOnvGac5XucZh1knQUKmn0e7UFe1a7yp42cYFmrdkzXHK51jeiFnx0KNDqPEMQqZ3JtBHs6ocE3SHPfZh2KRzLVMAJlixzjNnjouW39ssrlWAyDhtg3BouHs2C3V2C21Jhlgt9Rit+ReWbego0MFR6SgnDWjbAa2LI20sqnIVsT4Yu2J57iS43HKuXfEPVQui5vjSqZqjo8ummS4MqqWbJKdySWzNccHecdRYjPX00VCZlLpyZrjfVzDqXAMwFx0R3Bc6Rmao+WOAfRxjTY1iQtUxpWcrTnut9fQ1z3VJENhdMkFmuMuaxn9vHNN5bsIFEYUfU5z3G7x0d93lmnGkKJLNcctsou+vs+ZzNtRGeC/SnNUlmz08V2OuWlCUOu/RjNPSpJkKn2fx9x3V6LKdy2STtPIgo5uFT65gnKW2+pmZvnMvJdOJCTmVc7T7Uprl+3MKj8xb4aMzOzyk3W7uVokC7PKT8t74pORmVY2F6dF22GSJZnp5WeR741WRmZc8Sw8Og6TJElMKTuHfCMWEjJDfdPw28p17SaUnpd35EVCpq97fNYy72NKLjIR3ZEodwynIkvL+pHFnzOxRVjCb+tLlWuCrtXgIjMMcFkqqMlSgr2f/xJTDJtcRJVnnq5NjU/bGTIiWXJR5TlH16bCczESufWfOlQSVso92o5hQUe/Co5IQXnpzJozcVgcOeeKyMgU24qZWzk3q+1JlWfgtnhydhRkZDxWLydWnprVdmb5KfhtxTk7PBIyDouLEyu0n/C7Nan0ZErslXkwJKyyjbmV2W+yY4rmUu7ol4dTJSFLFmZXXJHVcohvFpXOoXk5bhISMyq+kNWur2cqNa7xeTJgWsVXstpVOsdT656epwMqmFT+jaw7oUocI+nrOZV8HdBxZTci6TjqAF7bAPp6P5c3Y3jJdzRzULrlsFZT5782r+MDDCj+NhadnB0Aq6WEmqKv582oKfo6VktJ3q8v6JNXwREpKC9VOCr4ztDvYJWshidYGRmXxcX3hn8Pr06CZ7eK7SV8Y+gt2GV7Tgy77OAbQ75PkS37zclr9XPD4NtxWFw5MaySlS8P+j6ljuzLLk6Lm+sG3oHL4jPMyDRAs3DVgNupcGZfQ7fJDi7v/394raWGJ1gpQ+GivrdR7cre5t4iWbmg78/x26pzmMQzHYtPr72VPu
7szeIkSea02l9QbO+fEwPghOpbqPNo51X8jyExp+ZnlDqG5eyMTK34Dv282ktxh2pK5U8od04gV0dhTOnX6ec73ZDt6PIfUO6akTNjcNGX6Oc3FkUYVPI9yt2n5czo47uaOn925xOgj//blLkvyun4AGWez9HH/+2cX1fQ0aWCI1JQ3hrqG8rtI2/HZ8skomlFR7on3wpHBT8e9WNqXDWGGf3cA/ne8J8cdCq0Jo5uRrG9lO8N/wl93f0NM6qddXxn2C8otVd86FiHq5vttfr59tD/Y4BHfwngUJU6qvna0Lspd9TqXkf3e+iyeLl+8M8Z7B1rmOG3lXPdoN9R5Ryoy+ieUOyymysG/JyhPu08msPltpZw+YA/UuMa2XUkraf2DMMmOTmv788Y7p9jmOGw+Div35/o455ogCFhkeyc0udnjCg60zDDJrs5re4+6jwzsjAyozI2ZlX9mJHFFxtmWGQHJ9TcR1/vqYYYElYmVfyAkSXXGWbIko3JVX+iznu+AYYMyIwsvYXhpTcaZkiShVEVv6ePrzufJBtDYkDxjQwp/aHhGjqSJDGw7B6q/V8l87nqMSyARLX/qwwsvdt0nZ6CPnlJIlszjk9QubQRLuiTU0pNsbxjOW82vcmOyI4jxkf5RzGvch7ji8cj55lQpog0awIreLv59R7Ltg/1jmBuxamMKz4Oi8YugOwMhY3BVSxqeZVt4SM7kw5wD2V2xemMK5qKVc4vWVcVCttCq1nS9gpbQyuPGK91DWZ6+ZmMKZqBTXbkxRBCZVdkDcvbX2RbaCmH545UOgYwpexcRhWdgF125skQ1EfXsbrjBbaH3j0ir6PE3peJpecz0n8ydp1t2tkYTfENbOh4lh2ht4/Ih/Db+jCm5CKG+U/DYWJbbmt8I5sDz7Ar9DoqH65r4bFWM7L4cwz2n4XTkv/20EBiCzuCT7E79CKq+HDHZpelgiFFlzLAdx5Oa/67VELJ7ewNPsH+8LMo4sOVTO1yKf39l9PXdxFOa/7J09HULhpCj9EYehLlsEJoVrmYPr4rqPFditNq/GHjcMXTe2kJPUpL+DEU8eFigRbJT4X3cip8V+K09subUdBHr1zm74IjUlCv6kDsAK3JVuJKHJfFRbWzmnKHdhJkXImwL7op04ZeytSy6OcZrTtBNscbaU02E1diOC0uKhxVVDiqNO2Taoz66HpiSuam5rT4qXWNwqFTY6Qt0UxL4gBxNYZDdlJmr6TS2UfTPqXGORBbS1zpRAiBw+Kj2jUGp84EGUi20JKoJ6FEsclOSuwVVDq1kznTaoKm2FriSgcqCg7ZT6VrNE5LseZrgqlWWhP7SCgRbLIDv62CCkd/zadIRaRoja0mrrSjijR2i48yx2ic1jJNRiTdTmtiNwkljFVy4LWVU+EYpMlQRZr2+CoSSiuqSGKT/RQ7RutOkLF0gLbEDpJqCItkx20to9wxTJMhhEJnYhUJpRlVJLDKPnz2UTit2p9hQgnSnthKUgkhSzZc1lLKHCM0d2MIoRJOrCKpHEAVcSyyH7dtJE6b9meYUkJ0JDaRVDMMh6WEUscozXwQIQTR5GpS6f2oIoZF9uG0jcBhG6jJSKsROhMbSKmdSFixW4opcoxB1tjpJoQgkVpLKr0XIaLIsg+7dSh221BNhqLGCCXXkVYCIMnY5GJ8jnHIGjknQgjSqQ2kld0INYIke7FaB2OzaW+vV0WcSGLNwaqsVrkYj2M8spSf81zQx6uCI1LQUa/G2C5WtL/CmsBbpA97QrRJDiaWnsrk0jMod9TlzWhN7GFNx4usC7xG+rAeJlbJzuiiU5hQcg4VTu2bejYFkvvYEHiBjZ0vk1KjHxqTJRvDfKcwtuS8rDs59BRKNbA58DxbOp8neVifFBkrA30nM7L4Aiqco/MOU0dTjewIPsv2zqdJHla+XMJCnfckhhZdTLlzQt6MeLqFvaFn2B18nMQRvVhkqt0nMrDoMsqcU/NmJJU2GkJPsz/0CE
ml6bBRiTLXHOp8V1LqmpX3ds+0EqAl8iSNwX+RVOqPGC9yzqbKdw3FrhOzJpxqSVGDtEeepjX0IMn07iPGPY7plPu+QJHrFKQ8I4CqGiYYfYZA+O+k0kd2+nXaJ1PsvQ6v60ykLEmt2owosdhzRMIPkk5vPGLcZhuPx/tFXK6zkQoOxjGlo8YReemll/i///s/1q5di9PpZM6cOTz33HOGX19wRI49KSLNKw1/ZmXH68hYNIt9ScgIVI4vO59TqnOrEaAKhQXNf2VF+7MHj9Mzw4JAYXzxWZxc/XXkHCYNIQRLWx9kRfsjhhjD/KdwYvX3sORQf0UIwdqOR1jR+hckpKyM/t45zKn+MdYcl3S2Bh5nVevvkCAro9o1nRk1d2LLshPicO0NPsPa1p91FcLTZ5Q6JzGl6g/YdSq99qTG8Itsar21awlHawuwBVDw28cxruoB7JbclkLao6+zvfXbCJFAe7t0huGyDWdE5b+wW7WjdT0pFFvA7tavoIpux7YnTobhsA5iUOUj2K3626IPVyyxhIbWa1FFELo++SMlAypWSx215f/GbhuSEyOZXE1721WoavvBY2kxZLmKsvJHsdmyJzQX9OnQUeGIPP3001x//fX88pe/5KSTTiKdTrN+/XouueQSw8coOCLHllSh8J+9v2RraDm51LwYWzSX8+tuNOSMCKHyUsPdbA5qd3k9UhJDvNM5t+6HhpwRIQQLmn7Dxs6XcmLUuY/jrLpfGc5hWdryJ9Z3aLdpP5IgU+Eczel1vzfsjKxv/ysb2v+SE6PIPoST6v6qW+b8UO0I/IuN7b/JgWHBY+vHzD4PG3ZG6kOPs6XtDrQn1cNlwWmtYXLNE9gt+vVTutUafpYdbTd3/Zcxhs1SzujqZ3EYzJnojL7C7tYbuv7LSD0VCxa5iKFVz+OwDTDEiMTfoaH1mq7jG2PIkpu6yhdw2LSrIR+qROID2lovA9JgqNaJBUlyUFb+DHa7doXfgj49ymX+/kh2zaTTab797W9zzz33cMMNNzBs2DBGjRqVkxNS0LGn1w88yNbQMnItvLWu8x0WNBubkN9reThHJwRAsD38Pgua/mbIenX74zk6IRnG/uhKFjb93pD1psCzOTkhGYJKS3wDixp/Ych+d/DlnJyQbkZncjuLG2/DyDPMgcibOTkhGYZCJLWXZU3f0mxRf6jaYu+ype0nB19tTAqJ9AHWNH1ZsyHboQrFl7Gj7btdxzfOSCmtbG6+BlWNZ7WOJtexp/XrXcc3WtRNQVE72dl8BYqq3c26W4nUVg60XYdxJyTDUEWU+pbLUAx0/U2n99LedjWQwpgTkmEIkaCt7XIUpdHgawo6VvSROCIrV66kvr4eWZaZOHEiNTU1nHHGGaxff+ROhEOVSCQIBoMf+ivo2FAw1cay9hfzfv17rU8TU8K6NrF0kKVt/8mTIFjZ8RzhlH5b8pQaY1nbv/JmbOp8ic7kkXkFh0pRk6xozc1B+B9BZVf4LdoT23XtVKGwpu3evBmN0cW0xdfq2wnBprbfkU/BLYFCe3wlLbH3s9ru7PhtzsfvZoSSG2iNvpnVdl8gPwYoxFPbaYtm/+43df6ua2ks1yC1QlLZT3vkqayWHcF7u7rU5lq9VkFRW+iMPJLVMhy+HyFieTGEGiQSfjDH1xX0addH4ojs3LkTgJ/85Cf88Ic/5MUXX6SkpIS5c+fS3q7tUd95550UFRUd/OvbN7d1z4KOXq1sfw0z/SQUkWZNh/6Esb7zNdMN5tYFXtUd3xqcf0Tiay6SkNkQ+K+uze7wOyQNPN1qMyxsDjyna3Mg+h5xpdUUY1un/sTXFl9OJL2X/EvPW9gd1I8KBRPrCCU35M0Amf1B/ck1ltpBKLGE3CfWbkk0Bv+ha5FMNxCMvYHxCMKRag09qBulUpQ2QrHnTTBUAuEHEUL79aoaJhp9wgRDIRJ5uCsHp6DPinJyRG699VYkSdL927x5M6qa+cH+4Ac/4KKLLmLSpE
n84x//QJIknnzySc3j33bbbXR2dh7827dvn7mrK+iokCLSLG9/2WSHWMHSthc1b7RCqKxsfwEzHWIFKqs6/ouqc6Nd2/EMZhwqgcrGzhdJq0lNm42Bp003ftsWfJmkEtG02Rb4j+mmbPvC84krHZo2u4OPm2Y0RRcSTR/QtKkPPma68VsgsYxI8sj6N91qDj2KfoGtbBJEUxsJJ9ZoWrSFH8Vs08JkejfhxGJNi87o4+TvTGWkqM1E4m9ojseiT4NJJ0KITmKxXJc+C/o0K6d9XzfffDPXXnutrs2gQYM4cCBz4xg16n8Z0A6Hg0GDBrF3717N1zocDhyO/Io4FXT0KpBsIqqYX2YLpDLH8ViPLCwVSXcQSjebZkSVDoKpZortRyYXptQYHck9phlJNUIguZdy55G7EIRQaYlvxIxDBaCIJO3J7VS7eu5w3BpfY6phGmQchY74Rmo8PTdna4stN80AQSC+Fre352TPjsQHvcCAzsRKPPbBPY4F40swE6nISCaUWIHX0fPnEUksxayTABaiiWX4nD1/HvHEsl5gWIknluJ1ndbjaDK5jMzzrZn3y0oyuRS3+0ITxyjo06ScHJGKigoqKiqy2k2aNAmHw8GWLVuYNWsWAKlUit27d9O/v/HS2wUdG4rrPJnnfqxwj45IQu1dRk9KZMlRyUUJtedjZWqR9M5GtqTS8/KOKpQjKm/mzdBZQkqL3vlMkqq2E5s2sYT1P8mkdRmdmmNGJSGj6DK0I0vGpc9Q1OyJpkZ0eLXTQ6WqnZh32lREL7znBX16lH/vZR35/X5uuOEG7rjjDvr27Uv//v255557ALj4YuO9Ggo6NpRvyfVcjpVLDZCjgSFrMnrvvdKqpCl19Rwx/3Sszchweuf90mf0xvslkPQYOdR+0SboH0fCPEMC3cJj+RYlO5KT7b0yuoVamwC9c64FfTr0kTgiAPfccw9Wq5Wrr76aWCzGtGnTeOuttygpKbRr/qyppwhGvnJr1JVw65Q5z5lh7flYDotft3hZTgyNtuUWyYFFcqD0QrKeS6NYlyRJOGQ/ia7S2Wbk1CkI5rCUEk3r7xAyIodFu7y8w1JBStXf6ZRdQreWiN1SScJE0m1GaWyy9nXYLFXEUxsx4xwKFKw6DKtcRXchtPwlsOi8V7Jc0cVIa9pkl4Ss85kXdOzpI+u+a7PZ+PWvf01TUxPBYJD58+czevTojwpX0FEsn62MOtdwUwmYEjJDvZOxW1w9jjssHgZ4Jplm1LrG4LFqOQlWBnlnm3zSlyhzDMZv67nniSRJDPLNM83w2WopdWhXwuzvO8N0xMJpKaPMqd0duNZ7NmZvMVbZR7lTuztwlfcczCV5giw5KXOdoDle5jkPs8tlElZK3Kdojpd4zsN8hEpQ5NbuQOx1n4v5ZRMFn+tczVGX+1zMOSEAaVyu800eo6BPkz4yR6Sggg7V1LJzTEUSBCpTys7StZlYcq5pxnGl5+najCk532RypGBcyYW6vVRGFl9oOgFzVPFFuozBRWYZMkOKLtZdSurvv8jE8TOOYX/f57DoVImt8V5oyqGSsFDjvQCr7NW0Kfechyz17AAbk4VS99nYdJ7yi9xnYpHMVI+24HfNw26t1bTwOOdhkXMrN384w2WfoVvq3W6fgcUygPydQxmbbWyhuupnTAVHpKCPRSP903FZfEh53KAkZIpsFQz2TtS1G+idgtdanidDwmUpYohvhq5dH9d4im1984y8SNhkN0N8J+lalTuHU+4YkTfDItkY4j9D18pvH0Cla0rek7iExCC/vtPmslZT7Z6bN0Mg6O/XzymzW0qp8pxNvttrBQq1vst1bSyylwrvxXkzQKHKf42uhSw5KPNdTf63ZIVy77W6FpJkodh7nSlGse+6LAwJr/dLeR4fQMXj+aKJ1xf0aVTBESnoY5FFtnFh3XfzeKWEhMQFdTdn7TUjSxbOrr0tz4lP4uza27Im1kqSxLw+P+iKBOTu8MyruR2bnL
3L6Ozq27FIjjwYgllVt+Gw+LJaTqm8HZvsycvhmVTxfVzW7D1axpTfit1SnNdnMqr0Zjy27N2Xh5TegsNSST6OwsDib+K1Z++fUld0E05rv7wY1f4v4XPoO9EAlf5v4LQNy4MhUeq5Eq9zdlbLYt+XcNjG58Xwus7H49R3cAHcnquw22fkwZBxOE/B5TYXSSvo06eCI1LQx6bBvolc2Pe7yMiGohYSMhbJyiX9bqefx1hXzjr3mK7mdVZDE6yUoXB27W3092SfLAAqncM5s/aXWCW7wUk840ydVH0LA70913g4XCWOQZxa92tsksswA2B65U0M9mvnIhwqr62OOX3+hE325uQojCv7JoOLLjBk67JWc3zNX7DJRTkxhhZ/hUFFVxuytVvKmFj9TxyW7kRJY+rrv5YBRV83ZGu1FDGi6iEc1rqcGBWey+hXfJshW4vsZVDlIzisg3NiFLvPpa7057pLcd2SJRe15Q/jsI0il9u/x3kKVaW/M8SQJDulZQ9is0/MgSFhd8ykpOR+pF7cnVbQp0MfWffd3lCh++6xqd2R9bzW8BeaEruRsRxRll1GRkWlj2soZ9TcQK17aM6MA7HNvNH4J5ri2w62lz9U3f9W4RjEydVfo86tnXSppdb4dhY2/Z7G+AZdRom9P7Mqv0Ffz+ScGYHEbhY3/4bG2Cpdht9Wx9SKb9DPOytnRji1n+XNv6Ip9kGPu4K6GR5rDePLv0Vf77ycGbF0I+taf0FTdCEZp6lnhtNSxYjSb9HXd07OjKTSypa2n9IS7a78eXi+UGbHiN1SzsDib1HruzRnRloJsLv9DtqiL9Fzc7rMtmirXEpt0Teo8l1raPI+VIoapL7jJ3REnqXn5nQZhkUqosL/VSr9XzXUmfpQqWqUls6fEoo8gaC76d+hU0GGIUteir1fptT/nZwdBCHiBDt/SSTyMNBdSfhQRmabryS58Xi+gM9/S69slS7o6FAu83fBESnoE5EQgvrYVpa3vczW8DISShQJcFi8jPQfz6TSM6hx9VzpMhc1xbaxquO/bA8vJqFkioU5ZA+DvdOYUHouNS5jbc311JbYyfrA8+wILSCphBEI7LKbvp6pjC0+n2rXmJwno8MVSO5hc+BZdobeJKEEAYFVdtHHPZlRxRdR7ZpomhFK7mNH8Bn2hF4hqQRRSWOTPVQ4JzK0+BKqXFNznvAOVzRVz57QU+wP/ZeE0oEgjVVyU+KcwMCiy6l0zTT9RJxIN1EfeoID4WdIKm0IUlgkN37HWOp8V1Pmnmu6XktSaaEl/AQtoSdIKi0IksiSG499FFW+ayhxn6Zb/8SI0kob7ZH/0Bb+NynlAEIkkSUXTtsIyn3XUuQ+E1kyV4laUQMEI/+hM/IIaWU/QiSQJBd26xCKvNfic5tN1AVVDRGLPkUk8hCKshch4kiSC4tlAB7vtbhcFyDLHlOMgo4+FRyRgj516v4aZptMw6l6dgT/Syi1j7QaxSq78dn6Mth/Ll6NLbG5MqLpA+wNPks4tYeUGsEqu/DY+tLPex5eu35lYKOMeLqJhtDThFM7SKthLJILl62WPt7z8dqzR4CEEFkZSaWF5vBTRJObSashLLILu6WGSu9FeOwje4WRVtppizxJNLkeRQ0iS05slipKPRfh0ShnnisjM1k+RSy5ClUNIkkOrJZK/O4LcNonZ329EYaqhohEnyaZXIaqdiJJdmRLBW7XeTjs03uFIdQoidizpJIfZCqHSlZkuRy762xs9plZnTxDDBFHib2EmlyMUAOABUkuxeI8Ddkxp5cYSYi/hki8C2oAJBnkEiTHSeCYi5TFyTPCKOjTr4IjUtAxp8boUjZ1PMKBg8sHgq56lUhICFRq3Mczsvhqqt25L4EAtMZWsL3zXzRFF3UdEzJhcamLqVDunMqQ4muocue+BAIQiK9mT+eDNH+o9fyHGcWOSfQvupZKT+5LIADhxHrqg3+hLfoK/wuFf5jhtY+nj/+LlLnPymtSiC
W30BS8n47oC13LRd3LLRLdBa1ctjFU+q6j1HNhXpGUZGo7baE/E4o81bV80N3D5H8Mu3UEJb4vUuS5LK9ISjq9h2D4fqLRJ7o6vh7JsFoH4/Ncj8dzRV5LB4rSQDz8APHoYyCi/K+o2P8YsqUfTs91OD1XI+UR5RBKM+nI30hHHwcR6pGBXIPV83msnmuQ8ohyCLUdEfkHRB8H0cmHi6N1/W+5Asl9FbivRtLZEl3Qsa+CI1LQMSMhBBs6/sna9j/3mCNxqLrHx5d9jVHF1+Q0we7ofIT1bb85OFFrMzI5FMOKr2dEyddyYuwP/odNbT/tcnL0anhk1uf7+a9hWOn3c5rEWyLPs621e3dSdkal51IGl/0s61PsoQpEX2NX69e6ckn0GJkcgBL3efQv+3VOywiR2DvUt12HEClDDI/zNPqU3Ycsuw0z4on3aW27BiFiBhjgcMymvPTvyDlMsKnkKkJtVyNEyBDDapuCr+xBZLnYMENNbSTR/nlQO7IwMhzJOhpH6T+QdCqkHi6R3oFo/wKoLQYYMlgHIZU8iGSpNswo6NhSLvN3YddMQUe1Nnb8i7XtfwbIWoCre3xN2/9jU+Bhw4ydnY+xvu3XZCpXZGNkEge3Bv7K5o77DDPqQ8+wqe0OQDVQSCzD2Bt8iC3tvzLMaI28zLbW75CZKIwxmiP/YXvb7Rh9HumMvcXO1q8gSBtgZI7ZEX2B3a03IoSxYnPR+GL2t16dWQIwyIjE51Pf9mWEMFbVM5FcSUvrZQgRNcgQJBLv0dJ2VVfkJLvSqY0E2y5BiKBhRjq1gmDbFQg1ZoihpneSaLvMoBOS4Yj0JhLtVyAMNgwUSj2i7QqDTgiACuldiPYrEb3SzK+gY10FR6Sgo1aN0eWsab8/r9eubruPptjKrHbt8XWsa7s7L8bWwN9ojC7MahdKbmVj64/yYuwLPkxj+KWsdvHUni4nJPe6Iy2Rp2gOP5HVMpluZGfrDXRPmrkwArGXaA79PaulonRQ33otPe9I0ZNKNP42bcE/ZLdUo7S2XkWmFHkuDIVkchmdwezOoRAJgm1Xg0jmzFBS64kE7zDAUEi2X9e13JNLlVwFkd5BsvN2AwyB6LgBDDlTH2agNCAC38vhNQV9VlVwRAo6arU58KiJyp8WNnf8O6vdzs5HTPSnkdke+FdWq33BR/Oq9trN2N35YFarxvCjXdGafFZaJeqDf8kaFWmLPNYVpchvNbc59BeE0J/MOiNPoIoI+fVdEXSE/oaaJWIRjT2NKjryZKiEI/9CVSO6VsnYKwi1ifx6u6gkok+iZokmqIl3EMqevBlq/GWEckDfLLUM0lvyZCiQXIhI78rjtQV9llRwRAo6KhVONdAQXZx3PxSBQn30XSKpRk2beLqNhsgbJnquqLTFVxBK7tS0SKkhDoSfM8UIJTfSmVivaaGocZpCj5N/QzNBPL2bYOIDbQuRoiX0EGYas6WUJjpjb+kwVDrCD2KmwZwqOglHtSNIQghC4b9hplGeEDGisad1beKRf2Du9pomEdWPUqUjD5F/2fmuY0Qf1x0XkUdMMiyI6GMmXl/QZ0EFR6Sgo1I7gy+a6qQLmT0iO0Mvao7vD7/YtfvGDMPCntBzmuNN4ZdRRUpz3CijPvSU5nh77DUUETbFAEuXM9OzOmPvkFbbTDNaw49qjsYS75NW9ptkyATC2vlBqdQa0umtmOumKxEOa0fClPQO0qkVmOumK4hHHtIeVQ6gJhdhrpuuSjr6iDZDDUDidZMMBWJPGs7dKeizqYIjUtBRqVBqv0kXAUAinKrXHI2k9pl2dgQq0ZT25BlN7zXVHTbDUIimdmuOx1N7kDBXoAsUYmntyE4ivQfztwuFhA4jmd5t8vgAKkkdRjq9pxcYgrSifRylV64DVGW/5nKZmt6LOWeq+0DtCBHveUxpwJwz1SURAbXT/HEKOmZVcEQKOiqVFlHM3gQFCik1qss4vJx5PpSUqh2NULLkEhhVWmeHgy
KimFlqOHgcnevI5G2Yv13ovR+qiPYKI3McrbHe+TwyW361xrT5uUkFNJyEXmMAWp97L71XvX6sgo45FRyRgo5K2aT8usIeKgkLNp26EtZeYICETaeuhKWXSlfbZO19+BbJTW88HVt0rkOWPPTG07E+w90rjMy55j6WiyRJ+3ulN5abLIBGp+beLIkua3Rq7qX3KsMoFDcrSFsFR6Sgo1I+e79eOIrAZ9M+jtfe30QSaUYSMh6bdtl3j21gV80NMwwLHtsgzXGXbZBpBlhw27RLyzttgzDvJFhw6jDstiEmjw8gY9dhWK3m+xeBjNWq/XlYdMaMS0K29NMsmCdbBtAbUTDkSu1KrpZaML3kB0g+kIrMH6egY1YFR6Sgo1KD/WebfsYXwCD/2Zrjdd6zeiV/o7//As3xKs8ZyJLGU20OjFrfxZrjpe5TsOhETIxJocp7ueao3zkHm6XKNKPce5XmqMs+FZulP+YmWJUS7zWao3b7GGy2MZi79al4PddqjlqsA7Haj8fsjhan5/OaY5KlEtlxokmGjNWt/XlIchE4zzDJsIA7v/L7BX12VHBECjoq5bZWUeuZZaqOSF/PHNzWCk0bh6WEWu9pphgVzuPx6kRdrLKHPt4LTDg8Mn7HOHyOEdoWkoNq7+XkP2FIuGxD8DkmaVtIFsq912DmlmGz9MHvPEGHIVHi+2LexwewyKV4Xafr2ng9X8RMdEeSPLhd5+naOD3XYm63iQ2HW9v5BLC6rzHJAKv7Mt1xyX2FSYaK5LrUxOsL+iyo4IgUdNRqZPEVpuqIjCi+Iqvd4KKr8t7CK1AYUqz99N2tfv6ryDzl5/OkrzKgKPvkXOW7qmvnTD4MQa3/K1n75pR7L+uK7uR326jy35C1b47fczGy5M+bUeL7StbGdG73echyBfk5bhJez3VZe9rYnachW+ryZMg43Fciy/rLGbJjNpJlSN4Mi+sCJIu2ow6A7TiwjsubgWMekrU3llkLOpZVcEQKOmpV6ZrIceU35vXaSeU3U+Eal9Wu2DGSCeX5lV8fWfINKt0zstp57IMYU/Er8kkoHVB0PVWeU7PaOa21DK/4U9d/5eKMSFR5r6bCc2FWS5ulgsEVf+86fi63DolS90VUeLWXGrplkYuoq3iYzMSXC0PG6zqDUt/Xs1tKLirK/t3lsOTGcDhOoMifvWy5JNnwlz7S1eU2l0ncgtU2CU/RDw0wZOyl/wDJnzNDso7E5v8/AwwJqeQ+kEtzZmAZgFRkvFdSQZ9dFRyRgo5qjSi+nOPKvwOQdXmje3xS+c0ML77EMKO//wLGl/8IkA0zRpV+m6HFxpcRqr1nMabiHiQsBpZpMuMDi7/KkJLvGGaUuk9meMX9XZERY4wa3xcYVPpjw12Efc6ZDKn4J5JkN8wo81xG/7J7DDNcjsn0rXiiaxdNNkbmFuZznUtN2f8z3KnYbh9DRfkzSIYm8cwxnc5TKS99MGvEpVsW2xD85c8gGZrEMwybYyb+soe1E0gPf5W1Dkf5kyBXkv12nonKSbaJOMoeRTLYqViyVCGVPt6VvGqMgXUEUumjSFo7cgoq6BBJwmjbzU9AubQRLujYVktsLZsDj7E/8k7Xv0gI1K7tt5mvcF/viQwvupwK19i8GB2JDewIPEJD5HUEAukwhkBQ7Z7D4KKrKHdNzosRSmxmT/BfNIZfQpBGwoJAQULuugqVctds+hVdQ5lrZl6MaHIbB0L/oDn8DIJUF0Plf5EShSLnLGp811LqPikvRjy1i5bQg7RF/oMq4mQm2g8zvI5pVPiuo9h1umEn5FCl0vvoCP2NQOTfCBEhs4OjmyEBaZz2SZT4vojPdV5ejLRygHD474QjD3d1yT2SYbONw+f9Im7XRXklXapKC/HIP4hHHkKIQI8Mi3UETs91ONwXG3Z0DpVQ20lHHiYdfRjUth4ZkmUwVs/nsbgvMezofJgRhOi/EdFHQG0+hAEZByUNln5I7qvBfSmSySTtgj7dym
X+LjgiBX2qFEu3sjP0EuHUflJqBJvswWfry0DfWbisZb3CSCjt7Av9l1BqN2k1jFX24LbW0s93Li6r2Z0jGSWVDg6EXyCS3E5ahLFIbpzWPvTxno/LVtsrjLQapCX8HNHUZtJqCFly4rD2ocJzPi7bwF5hKGqY9shzxFLrSatBZMmJzVJNqecCXDrbaHORqkYJRp8jnlyDqgaQJCdWSyU+9wU47aN6hSFEnGjsvyQSy1BFEAkbFkslbte52O3je4mRJBl/hVRiCULtBMmGLJdhd52F1XZcXo7UkYwUavwNlOTiTDVTyQJyKRbnaci2Kb3EUCCxAJFc1FUxVQa5GMlxMtiP7xVGQZ9+FRyRggoqqKCCCiroE1Mu83chR6SgggoqqKCCCvrEVHBECiqooIIKKqigT0wFR6SgggoqqKCCCvrEVHBECiqooIIKKqigT0wFR6SgggoqqKCCCvrE1AutFT86dW/oCQaDn/CZFFRQQQUVVFBBRtU9bxvZmHtUOyKhUAiAvn37fsJnUlBBBRVUUEEF5apQKERRkX7fpKO6joiqqjQ0NODz+Xq9SE4wGKRv377s27fvmKxRUri+T7+O9Ws81q8Pjv1rLFzfp18f1TUKIQiFQvTp0wdZ1s8COaojIrIsU1dX95Ey/H7/MfsFg8L1HQs61q/xWL8+OPavsXB9n359FNeYLRLSrUKyakEFFVRQQQUV9Imp4IgUVFBBBRVUUEGfmD6zjojD4eCOO+7A4ci9C+WnQYXr+/TrWL/GY/364Ni/xsL1ffp1NFzjUZ2sWlBBBRVUUEEFHdv6zEZECiqooIIKKqigT14FR6SgggoqqKCCCvrEVHBECiqooIIKKqigT0wFR6SgggoqqKCCCvrEVHBEgK1bt3LeeedRXl6O3+9n1qxZvP3225/0afWqXnrpJaZNm4bL5aKkpITzzz//kz6lj0SJRIIJEyYgSRKrV6/+pE+nV7R7926++MUvMnDgQFwuF4MHD+aOO+4gmUx+0qdmSvfddx8DBgzA6XQybdo0li5d+kmfUq/ozjvvZMqUKfh8PiorKzn//PPZsmXLJ31aH5l+9atfIUkSN9544yd9Kr2q+vp6rrrqKsrKynC5XIwdO5bly5d/0qfVK1IUhR/96Ecfuqf87Gc/M9QX5qNQwREBzj77bNLpNG+99RYrVqxg/PjxnH322TQ2Nn7Sp9Yrevrpp7n66qv5whe+wJo1a3jvvfe44oorPunT+kh0yy230KdPn0/6NHpVmzdvRlVVHnjgATZs2MDvfvc7/vznP3P77bd/0qeWt5544gluuukm7rjjDlauXMn48eM57bTTaG5u/qRPzbQWLFjA17/+dZYsWcL8+fNJpVKceuqpRCKRT/rUel3Lli3jgQceYNy4cZ/0qfSqOjo6mDlzJjabjVdeeYWNGzfym9/8hpKSkk/61HpFd911F/fffz9/+tOf2LRpE3fddRd3330399577ydzQuIzrpaWFgGIhQsXHvy3YDAoADF//vxP8Mx6R6lUStTW1oq//e1vn/SpfOR6+eWXxYgRI8SGDRsEIFatWvVJn9JHprvvvlsMHDjwkz6NvDV16lTx9a9//eB/K4oi+vTpI+68885P8Kw+GjU3NwtALFiw4JM+lV5VKBQSQ4cOFfPnzxdz5swR3/72tz/pU+o1ff/73xezZs36pE/jI9NZZ50lrrvuug/924UXXiiuvPLKT+R8PvMRkbKyMoYPH85DDz1EJBIhnU7zwAMPUFlZyaRJkz7p0zOtlStXUl9fjyzLTJz4/9u7v5Cm+jAO4N9zVpsMRmF5JCQtKdiFN2uSkl1YLksisMJuvFghk2LWoqJW3nSxdWVdtAujgim5WH+ozIiCBtKKwpEYjZgrVMg/hf3TYOBie7p4YbzSW+3Vs/fn9j4fOBf+tovvw8Hj1992NhNWrFiBuro6hMNh0dFU9eHDB9hsNly5cgV6vV50nIybmppCfn6+6BhzEo/H8eLFC1
gsltSaLMuwWCx49uyZwGSZMTU1BQBZe75+xW63Y/v27bPOY664e/cuysvL0dDQAEVRYDKZcOnSJdGxVLNhwwYEAgFEo1EAwMuXL/HkyRPU1dUJybOgv/TuvyBJEh49eoT6+noYDAbIsgxFUfDgwYOc2IYbGhoCAJw+fRrnzp3DqlWrcPbsWVRXVyMajebExZGIsHfvXuzfvx/l5eUYGRkRHSmj3r59C4/Hg7a2NtFR5uTjx49IJBIoLCyctV5YWIhIJCIoVWYkk0kcPnwYVVVVKCsrEx1HNX6/H/39/QiFQqKjZMTQ0BDa29tx5MgRnDp1CqFQCIcOHYJWq4XVahUdb96cTiemp6dhNBqh0WiQSCTgdrvR2NgoJE/O7og4nU5IkvTbIxKJgIhgt9uhKAqCwSD6+vpQX1+PHTt2YGJiQvQYv5TufMlkEgDQ2tqK3bt3w2w2w+v1QpIk3LhxQ/AUv5fujB6PB9++fcPJkydFR/5X0p3v78bGxrBt2zY0NDTAZrMJSs7SZbfbEQ6H4ff7RUdRzbt37+BwOODz+ZCXlyc6TkYkk0msW7cOZ86cgclkQnNzM2w2Gy5cuCA6miquX78On8+Hq1evor+/H52dnWhra0NnZ6eQPDn7Ee+Tk5P49OnTb59TWlqKYDCI2tpafPnyZdZXIK9duxZNTU1wOp2Zjjon6c739OlTbN68GcFgEBs3bkw9VlFRAYvFArfbnemoc5bujHv27EFPTw8kSUqtJxIJaDQaNDY2Cvvl+pN059NqtQCA8fFxVFdXo7KyEh0dHZDl7Pw/Ih6PQ6/X4+bNm7Pu3rJarfj69Su6u7vFhVNRS0sLuru78fjxY6xevVp0HNXcuXMHO3fuhEajSa0lEglIkgRZljEzMzPrsWxUUlKCLVu24PLly6m19vZ2uFwujI2NCUymjpUrV8LpdMJut6fWXC4Xurq6hOxK5uxLMwUFBSgoKPjj82KxGAD8dFGXZTm1m7AQpTuf2WyGTqfD4OBgqoh8//4dIyMjKCkpyXTMeUl3xvPnz8PlcqV+Hh8fx9atW3Ht2jVUVFRkMuK8pDsf8NdOyKZNm1I7WtlaQgBAq9XCbDYjEAikikgymUQgEEBLS4vYcCogIhw8eBC3b99Gb29vTpUQAKipqcGrV69mre3btw9GoxEnTpzI+hICAFVVVT/dch2NRhf8NTNdsVjsp2uIRqMR9zdPyFtkF5DJyUlatmwZ7dq1iwYGBmhwcJCOHTtGixcvpoGBAdHxVOFwOKioqIgePnxIkUiEmpqaSFEU+vz5s+hoGTE8PJxTd82Mjo7SmjVrqKamhkZHR2liYiJ1ZCu/3086nY46Ojro9evX1NzcTEuXLqX379+LjjZvBw4coCVLllBvb++scxWLxURHy5hcu2umr6+PFi1aRG63m968eUM+n4/0ej11dXWJjqYKq9VKRUVFdO/ePRoeHqZbt27R8uXL6fjx40Ly/O+LCBFRKBSi2tpays/PJ4PBQJWVlXT//n3RsVQTj8fp6NGjpCgKGQwGslgsFA6HRcfKmFwrIl6vlwD845HNPB4PFRcXk1arpfXr19Pz589FR1LFr86V1+sVHS1jcq2IEBH19PRQWVkZ6XQ6MhqNdPHiRdGRVDM9PU0Oh4OKi4spLy+PSktLqbW1lWZmZoTkydn3iDDGGGNs4cveF5oZY4wxlvW4iDDGGGNMGC4ijDHGGBOGiwhjjDHGhOEiwhhjjDFhuIgwxhhjTBguIowxxhgThosIY4wxxoThIsIYY4wxYbiIMMYYY0wYLiKMMcYYE4aLCGOMMcaE+QGSVvAy2mt4ewAAAABJRU5ErkJggg==", "text/plain": [ "<Figure size 640x480 with 1 Axes>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plotCompPinPow(fuelPin)" ] }, { 
"cell_type": "markdown", "id": "ab592f78", "metadata": {}, "source": [ "## Multi-pin example\n", "\n", "For the last example, we'll define a semi-covoluted but demonstrative block that has two fuel pin types existing on the same lattice grid. The use of yaml anchors `&`, aliases `*`, and merge keys `<<`. This helps use similar fuel and clad definitions (e.g., material, dimension) but overwrite things like `latticeIDs` and `flags` that we want to be specific to each fuel pin type." ] }, { "cell_type": "code", "execution_count": 24, "id": "686d9504", "metadata": {}, "outputs": [], "source": [ "BP_STR = \"\"\"\n", "blocks:\n", " fuel: &fuel_block\n", " grid name: fuel grid\n", " fuel 1: &fuel_def\n", " shape: Circle\n", " # Use void material because we don't need nuclides, just components with flags\n", " material: Void\n", " od: 0.68\n", " Tinput: 25\n", " Thot: 600\n", " latticeIDs: [1]\n", " flags: primary fuel\n", " clad 1: &clad_def\n", " shape: Circle\n", " material: Void\n", " id: 0.7\n", " od: 0.71\n", " Tinput: 600\n", " Thot: 450\n", " latticeIDs: [1]\n", " fuel 2:\n", " <<: *fuel_def\n", " latticeIDs: [2]\n", " flags: secondary fuel\n", " clad 2:\n", " <<: *clad_def\n", " latticeIDs: [2]\n", " duct:\n", " shape: Hexagon\n", " material: Void\n", " Tinput: 25\n", " Thot: 450\n", " ip: 15.3\n", " op: 16\n", "grids:\n", " fuel grid:\n", " geom: hex_corners_up\n", " symmetry: full\n", " # Kind of a convoluted map but helps test a lot of edge conditions\n", " lattice map: |\n", " - - - 1 1 1 1\n", " - - 1 1 1 1 1\n", " - 1 1 2 2 1 1\n", " 1 1 2 1 2 1 1\n", " 1 1 2 2 1 1\n", " 1 1 1 1 1\n", " 1 2 1 1\n", "# Stuff that isn't germane to this example, but necessary to make the blueprints build correctly\n", "assemblies:\n", " fuel:\n", " specifier: F\n", " blocks: [*fuel_block]\n", " height: [10]\n", " axial mesh points: [1]\n", " xs types: [A]\n", "nuclide flags:\n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": 25, "id": "1749933b", "metadata": {}, 
"outputs": [], "source": [ "from armi.reactor.blueprints import Blueprints\n", "from armi.settings import Settings" ] }, { "cell_type": "code", "execution_count": 26, "id": "f2aa9d37", "metadata": {}, "outputs": [], "source": [ "def buildMultiPinBlock() -> HexBlock:\n", " cs = Settings()\n", " bp = Blueprints.load(BP_STR)\n", " bp._prepConstruction(cs)\n", " block = bp.blockDesigns[\"fuel\"].construct(cs, bp, 0, 2, 10, \"A\", {})\n", " block.assignPinIndices()\n", " setPinPow(block)\n", " return block" ] }, { "cell_type": "code", "execution_count": 27, "id": "e6c09f49", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[info] Will expand HE, NA, AL, SI, V, CR, MN, FE, CO, NI, ZR, NB, MO, W elementals to have natural isotopics\n", "[info] Constructing assembly `fuel`\n", "[info] Block design <fuel block-bol-000 at ExCore XS: A ENV GP: A> is too complicated to verify dimensions. Make sure they are correct!\n", "=========== Verifying Assembly Configurations ===========\n", "[info] Block design <fuel block-bol-000 at ExCore XS: A ENV GP: A> is too complicated to verify dimensions. Make sure they are correct!\n" ] } ], "source": [ "multiPinBlock = buildMultiPinBlock()" ] }, { "cell_type": "markdown", "id": "701e0de0", "metadata": {}, "source": [ "Plotting our block-level pin power shows a similar profile to before." 
] }, { "cell_type": "code", "execution_count": 28, "id": "46ab1a77", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "<matplotlib.collections.PathCollection at 0x1bafa69ca50>" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAV7tJREFUeJzt3Xl8VOWhPvDnPWeSSSbLZCEkhAQIO8gWQFZRQEQRUVQQlyoutdWrtta2t9rear1dbK3Xen/WW+0irqiIAm5sKrIIyBrZQdZAAiQhySSZycxkznl/f4SkLpBkZs7knMw8Xz+5F8jkzJPTSd7nvOc9Z4SUUoKIiIjIBIrZAYiIiCh2sYgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmsZkdoCW6rqO0tBQpKSkQQpgdh4iIiNpASona2lrk5uZCUVqe87B0ESktLUV+fr7ZMYiIiCgEx48fR15eXouPsXQRSUlJAdD4jaSmppqchoiIiNqipqYG+fn5zeN4SyxdRJpOx6SmprKIEBERdTBtWVbBxapERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMY+nFqkTfdtxdiXeLt6O4rhLugA9JNju6JWfg+m7DkZeUbnY8S3E1uLCuYh2Oe47Do3lgV+zIiM/A+E7j0c3Rzex4ltKg16G49iNUer+EX6+BKuJhVzOQl3w5OiWM4H2MvkZKP+BdDulbB+jVgFAAJR3CPhmwT4QQHFYoOEJKKc0OcT41NTVwOp1wuVy8aibGbSg7hHkH12N9+SEoQkBKQIeEAgEhAF1KjO/cG3f0HocxWT3Njmuqo+6jWHZqGTZXboZE44+3hIQ4+58OHT2TemJq9lSMyhgV04Osu+EEDlS/guLa96BJPwQEJHQAAgIKJDQkx/VAb+dN6JF6LRQRZ3Zk00i9EtI9D/C8CUgXABWAdvazZ/+sZEE4vgc4boVQks0LS6YLZvxmESFLk1Li7wfW4v/t+xSqENBaeLk2ff7BAZfi+30uiskBdsOZDfjn4X8CAHTo531c44ArcXGni3Fbj9ugCrW9IlpGRf1WrD/5I2jSB9k8oJ5L4+uoc+JojM55CnFKUvsEtBAZOARZeQeglwMt7isAUABbT4j0FyHUnPaIRxYUzPjNNSJkaf/4qrGEAGixhHz988/s/QT/Ovh5xLNZzebKzfj74b9DP/tfS5pmStZWrMVLR1+ChY9HIqLSuxPrSu9FQHpbKSEAIAFIlNdvOltc/O0R0TKkVgJ55uY2lhAA0IHAEcjKWyD1qkjHoyjAIkKWtbH8MP5376chfe1f9nyMzRVHjQ1kYWXeMrxw+IWgv05CYl3FOqypWBOBVNYU0Oux/uQD0KEBrRS2r5PQcca7HXvO/DVy4SxGSglZdQ8ga9C2EtJEA7RSyOqfRyoaRREWEbKslw9ugBri6RVVKHjp4HqDE1nXp2WfhjWrsfTk0piZFTle9xH8ugvBlJB/kzhc8zYCusfoWNbUsBkI7EdwJaSJBvjXQAaOGJ2KogyLCFlSiacKa8u+avV0zPloUsfq0wdQ6qk2NpgF+XU/VpevbvV0TEtO+05jf+1+A1NZk5QSB6vno2ndRyg06UVx7UfGhbIw6X4NjQtRQ6VCet4wKg5FKRYRsqRFxUVQwlxsKoTA4uIiYwJZ2NaqrfDq3rC2oUDBZ+
WfGRPIwqp9e1DbcBhAOLM/Akdq3jYqkmVJvRrwrUBosyFNNKD+bUgZMCgVRSMWEbKkYnclwj1TIAAc90T/YrkybxnUsI5aG6+wOeU9ZVAi63IHThiwFQl3gxHbsTitFKGdvvoW6QZ0V/jboajFIkKW5An4oYd11Np4bxFPwGdQIuvy6b5wzjQ0q9fqw9+IxRm1tiMgw5uB6hCk25rboqjDIkKWlGSLhxLm6KoIBQ6b3aBE1mVXjPkeE9VEQ7ZjZTbFYcx2RPTvKwgD75fCm5tRC1hEyJJ6JHcK+yhfSokeSZnGBLKwnIQcaDKc8/iNa0S6JnY1KJF1Jcf1MGArAslxMXCLfLUrDHkXEJECCGf426GoxSJClnRtt2HhrSf8+nai3PD04XCo4R3p69AxMWuiMYEsLM3eD874fgjvV59ET+cNRkWyLKE4gYRpCPeqGThuhIjBO/dS27GIkCXlJDpxSU7fsO4jcmmX/uicGP1vDRCnxGFi1kQoIf44Cwh0SeiC3sm9DU5mTb2cNyGcRZg24UBe8uXGBbIw4bgZ4V01o0MkzjEqDkUpFhGyrDt6j4Me4qUzutQxt/dYgxNZ16TOk0J+vxgJiSu7XBkz782Tn3w57GomREi//gR6Om+ETYmBNSIAEDccsA1BaLMiCmCfAmGLgdNYFBYWEbKsEZnd8Z+DQjvyfGTwNBRmxM4vwE72TviP3v/R/A67wZicNRnjM8dHKJn1qEoCxnd5DoqIR3C/AhV0ThyNgRn3RCqa5QghINKfA5QMBFdGVEDtAeH8Y6SiURRhESFLu63XWDw86AoINJ5uaYkqGq+z+eXgabil5+h2yWclw9KG4f7e90MVaqunaZo+PzV7Km7pfkvMzIY0SbP3w4TcfyBOSYZodYBt3FddHBdjTM7TUERc5ANaiFCzITLePLt4tbUhQzR+2PpDZLwOoaS0Q0Lq6IS08BtMBPM2whTdtlcW45WDG/DxqX2AbDxS06WEIkTje6QI4LIuA3Bbr7EYlpFvdlxTldSXYOXplVhfsR4NsgGqUKFLvXmmRIeOgakDMTV7KoamDTU5rbnqA2U45HoDR2oWokGvg4ANEv/eVxIa0uIHolfajeiWfGVML7qUeg3gmQ/peQ3Qy9B4RU3TWhsFQABQu0E4bgUccyBEgnlhyXTBjN8sItShlHtrsbi4CMfdlagL+JBss6NbciauyR+KrAQefX2dJ+DB+jPrcaL+BDwBD+yqHRnxGRibORY5CTlmx7MUTfehxL0SZ7xFaNBroSAOdlsn5CVPRbp9oNnxLEVKDfCthvSvPXvHVAVQ0iDslwLxY2Judo3OjUWEiIiITBPM+M01IkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQRLSJPPPEELrzwQqSkpKBz586YOXMm9u/fH8mnJCIiog4kokVk9erVuO+++7Bx40asXLkSDQ0NmDp1KtxudySfloiIiDoIIaWU7fVk5eXl6Ny5M1avXo2LL7641cfX1NTA6XTC5XIhNTW1HRISERFRuIIZv23tlAkA4HK5AAAZGRnn/LzP54PP52v+e01NTbvkIiIiInO022JVXdfx4IMPYvz48Rg0aNA5H/PEE0/A6XQ2f+Tn57dXPCIiIjJBu52auffee7F06VKsW7cOeXl553zMuWZE8vPzeWqGiIioA7HcqZn7778fH3zwAdasWXPeEgIAdrsddru9PSIRERGRBUS0iEgp8cADD2DRokX47LPPUFBQEMmnIyIiog4mokXkvvvuw/z587FkyRKkpKTg1KlTAACn04nExMRIPjURERF1ABFdIy
KEOOe/z5s3D7fffnurX8/Ld4mIiDoey6wRacdblBAREVEHxPeaISIiItO06w3NqOPwBQL4aN8BrD9WDFe9D4oikJ6YiMv69MIlPXtAVdhhm2i6js9KD2FlyQFUej2QkHDGJ+KinAJM69YfdpU/Zk2klNhdcxiry7aiyl+DBhlAss2B/ik9MDn7QiTZuHbs6yq8h7HHtQK1gTIEdC/ilSRk2gtwQdoVSLKd+8aQsSoQOA635w00BA5D6rUQSjJsanckJd2EOBsvlLCydr3Fe7C4RqT9VbjdeHHzNrxRtBO1Ph9UIaCdfYmoigJN15GTkoxbhw/DrcOHwREfZ3Ji87gb/Hj5wBa8emALTtfXQRUKNKkDQPN+S41LwM19CnFn/1HolJBkcmLzaFLD8pMbsKRkNU7Un4YqFOhShwSgQIEOHfFKHC7NHoXr8iYjNzHL7MimkVLiYO0abKt8B6e8eyCgQkIHICGgAJAABHqnTMCIjBuQndjP5MTm8vrWo7b2/+D1fQo07x/97J8FAA12+8VISb4HiQmTzIwaU4IZv1lEqNmB8grc/ta7OOPxNJeP81GEQL+sTnhx9rXISo69Abasvg5zV72Br6or0Dicnp8qBDolJOGVyTehjzP2Bth6zYcn9ryIrVV7IYAW95YqFMSJODx6wd0Ymt63vSJahi41fHb6r9hZ/T4ElLMF5NwEVAASl3X5GQY4p7ZfSIuQUqK27nm4av4bgApAa+HRjZ9PTfkZUlMeOu+FFGScYMZvzq8TAOBYVTVumr+gTSUEAHQpcaC8Are88TZqvN52SGgdLn89bvz4VRx0tV5CAECTEhVeN+asfBXH66ojH9BCArqG/971d2yv2geg5RICAJrU4dP9eHTX37DHdTjyAS1ESolVp/4fdlZ/0Pj3FkpI4+c1SOhYcfJJ7K/5tD0iWkpd3QtnSwjQcgn59+drap9CTe3TEc1FwWMRIehS4u6Fi1Hn87ephDTRpMSxqmo8vHRlBNNZz883fIDjddVB76vaBh/uWr0gpq4me+3Yh9jpOtimwtZEQkKXOh7f/QI8gfoIprOWva4V2OX6EK3Xte9aUfonVPlPGB/Kony+TaiueTykr62pfQr13lUGJ6JwsIgQ1h05hsOVVUENrE00KbHywEGcOPvOytHuWG0VPin5KuR9ddBVgfWnjxofzIK8mg/vl6yFDGFg1SFRF6jHp2VbIpDMeqSU2FL5FhrXNITw9ZDYUbXE2FAWVlv3dzSebgmFitq6F4yMQ2FiESG8uq0IahjnTBUh8GbRTgMTWdcbB7dDCWNfqULg1QNbDUxkXavLtsGr+1p/4HkIAO+VrI6JGaTS+l2o8hcjlNkQoPE0zq7qpWjQo38GSdNOod67FK2fjjnvFuDzrUYgcNTAVBQOFpEYV17nxmeHjoR0hN9EkxJvFO2I+gFDlxJvHNwe9r76+MRXqPC6DUxmTUtProMI8QgfaBySS+rLsK/2qGGZrGp39UdnF5+GLiC9+KpmjUGJrMvtWWDAVlTUuecbsB0yAotIjDvhqgnxGOybXF4f6vx+A7ZkXTV+L2obQj/Cb6JDosQd/aeySusrQjot820n6ysMSGNtVf7jkCEf4TdSoMLVcNKgRNbVOJMR7tAlEdCOGZCGjMAiEuM8DcaVB7e/wbBtWZE7YOC+MnC/W5VPN+Z7rNei/6osvyGnVAT8useA7VibLt1AK1cUtWErkHqtEXHIACwiMc4RF2/YtpLjjduWFSUbua/i7IZty6oSVGP2l0NNMGQ7VhavOCy1HStTRBLCH7oUCIX3prIKFpEY1y3NGdbiyyYZjkQkRfldVlPiEpAWH/6gqAiBvCSnAYmsLS8xO6w1Ik26OjobkMbaMuzdw14joiOA9Pg8gxJZl83WC+HPiAjE2XoaEYcMwCIS4zKTHJjSpxdUJbyrZm4eNiTq71aoCIGb+wwP86oZBdPy+yMjIfqPXK/MvSisNSICAt0dXdAnuZuBqaxpcN
r0sNeIxCsO9E6ZYFAi60py3IBQL3P+Nx1JjpuMiEMGYBEh3Dp8KDQ9vEWFc4YONiiNtd3UuzCsq4M0qeN7fYYbmMi6JmQVhnVaRUJiRteLo77gAkB2Qn9k2gsQ6gAroGBQ2nTYlOg/5aeqWUhMvArh3EckwT4ZNlu+kbEoDCwihDHd8tE/q1NI9xJRhMCV/fuiS2pKBJJZT9ckJ6Z16x/SrIgqBAamZ2NU5+g/wgeAeCUO13SdGNLXKhBIjUvCxM4jjQ1lUUIIjMy4EaHdR0RACAVD0mYYHcuyUpJ/iNBPz2hISbnXyDgUJhYRghACf591DZyJCUGVEVUI9OmUid9dPiWC6aznj6Ono3dqcMVNFQLpdgf+ccnsmDjCb3JT98sxMn1gUGtFFAjYFBv+e9C9SFSj/wi/SX/npRiWfl2QX9X4NoLTcv8LzvjcSMSyJHt8IdLTngjpa52pv0aCfbzBiSgcLCIEAMhNTcWCW+YgJyWl1aN9cfZjcJccvHbTLCTbo/tqmW9LjrNj/qW3YFBGTvO+aIkCgVxHKhZcdiu6OGJrpb4qVPxy4J0YmzkEAFotJAoUOGwJ+MOQ+9EnJTZmjr7u4s73YETGDQAaT7e0RECFAhVX5j6K3ikXtUc8S0lOmot05x/R+BPY2mmaxs87Ux9FSjJnQ6xGSAvfDjOYtxEmY1TXe/HatiK8tu1LVHg8sCkK9LMvEUUIBHQdBelpuHVEIeYMHQS7zWZyYvP4tADePLgdLx/YgqO1VbAJpfnN3RQIBKSOrIRk3NZ3BL7Xdzic8YkmJzaPLnV8VrYV75euxoHaYqhCgZSN60AUIaBJHQ41AVd0GYercy9BVkK62ZFNdbRuE7ZXvotiz5azhURAQocCBTp0qMKG/qlTUJhxPTLtPcyOayqffxtq6/6O+voP0HhqS0HjaRvl7N8lEhOuQEry3bDbx5gZNaYEM36ziNA5BXQdnx48jM+PFsPl9UIRAhmJibisb2+Myu8aU6cXWiOlxBdlxVhx4gCqfB7oUiItPhHjc3pgctc+sCmcePy6Q3XHsbpsG6r9tWiQASTZEtE/pQcmZBXCbtC9R6JFtb8Ue10rURsoQ0Cvh11JRqa9AP2dU5Cgxsa6rLbStHK4PQsQCByGLmuhiGTYbN3hcNwAm9rF7Hgxh0WEiIiITBPM+M1DNSIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyTUSLyJo1azBjxgzk5uZCCIHFixdH8umIiIiog4loEXG73Rg6dCiee+65SD4NERERdVC2SG582rRpmDZtWiSfgoiIiDqwiBaRYPl8Pvh8vua/19TUmJim/Rw4VoYPPt+DsspaeP0BJDvi0Sc/CzMmDEJGqsPseJZyvLIa72zfjWNnquH2+5EUH4/umWmYNXwQ8tKdZsezlHKPG29/tQt7K8tR6/MiMS4OXZJSMKvPIAzM7Gx2PEupC3ixrLQIO6qKURuoR5yiIiM+GVO7DEFhegGEEGZHtIyA3oAdri9woHYHPIE6CCGQZEvBwNQRGJA6HKpQzY5oGVJqqPauwRnPCgS0SkgZgE1NhzNhLDId06EqCWZHtAQhpZTt8kRCYNGiRZg5c+Z5H/Ob3/wGjz/++Hf+3eVyITU1NYLp2p+UEiu/2I/5y7dhz5FTUBUBXUpICShCQAJQFIEpF/bFrdNGom/32B441h86hhc/34rPDx1r3j+6lFCEgDj754t6d8dd40diTM
9uZsc11c6K03hhxyZ8dGQ/pAQgGvePAKAIBZrUMSyrC74/aCSu6tkvpgfZEk8lXjuyFh+UbINfb4CAgI5v7qvuSZ0wp/s4zMy7EDYldgfZukAN1pR/iA1nVqJec0OBAh06AECBCh0aUmxpGN/pclzUaRoS1ESTE5tH0+txqvZVnKx9GX7tJBqP+TUAEoAKQIMqUpCdciNyU+5CvC36fr/X1NTA6XS2afy2VBE514xIfn5+1BWRQEDDEy9/gvfX7oIiGgvI+aiKAITA43dfgalj+rdjSmuQUuKFNZvwzKfroSoCmt7CvhICmpR4aMp43H3RhTE5wC4+uAcPrf4IgIAm9fM+rul1d1O/Ifjd+MtgU2LvArptlUfw0NZX4NMbWtxXTa+iCzN740+FtyDJZm+fgBZS5i3BC4d/h5qGakicf18BgIBAZ3tX/KDXr+CMy2inhNbh1yqwt+xOuP270Vg8WqIiTknHwOyXkRQ/oD3itZtgioilfvvY7XakpqZ+4yPaSCnxu3kr8MG6XQDQYgkBAE2X0DUd//X8R1i15av2iGgpL6zdjGc+XQ8ALZYQANDO7sunP/4c/1y3JeLZrObDw/vx488+hCZliwMr8O/X3Zv7d+CRdcvRTscjlrGr+jge2PIivJq/1X0lz35sOXMIP9n6Mvx6oF0yWkWlvxzPHXwMtW0oIQAgIVHuK8VzBx+DO1DbDgmtI6DXYvfpW+D270XrJQQANDToVdh16kbUNxyOdDzLslQRiQXvrtqBjz7fi2B+70sAQgD/9fyHKCl3RSyb1Ww8XIxnPvk8pK/9n4/XYdPREwYnsq5jNVX48WcfINg5IAlgwYFdeHP/jkjEsqT6gB8/2foyNF2H3qbBopEOiS+rjuFvB1ZEMJ21SCkx78iTqNfczadh2kKHjmp/Bd4o/msE01nPoTO/Qn3DITSehmkrDZr0YG/ZXZAymK+LHhEtInV1dSgqKkJRUREA4MiRIygqKkJxcXEkn9aydF3i1Y82Bz1YAICUjTMC76760vBcVvXi+q1QQzy9oioC8z7fanAi63p1b1HjGqMQvlYAeGHH5piZFVl2sgiuBk9QJaSJhMQ7xV/AE/C1/uAocNi9Fye9xUGVkCY6dOyrLUK5rzQCyazHFziJM54PgRD2FaDBGziGqvrVRsfqECJaRLZs2YLCwkIUFhYCAB566CEUFhbi0UcfjeTTWtaWvcUoragJabAAGovMolU74fNH/9TwiSoX1n51tPl0S7A0XeKzA4dRWh39V155Aw14Y9+OkPeVBHCkpgobTx43NpgFSSnx5rH1IR0MNPHqDVhWWmRUJEv7vGI5lDCGCQUKNlR8bGAi6zpd9yYQ1itLxanaV4yK06FEtIhMnDgRUsrvfLz00kuRfFrLWrx6Z+Pi0zDU1fuwettBgxJZ17vbd0MJc7GpEALvbt9tUCLrWnb0K9Q1+MPahioE3oiB0zN7a0pwpK4s5IMBoHGoeef4F0ZFsixPoA47XZtCmg1pokPHF5WfQIvyUw5SSpyqfR2hzYY00VDtXQtf4KRRsToMrhFpR0dPVra64LI1qiJwoiz614kUV7rCGiyAxnU1x6uif18dq6mGTYT3o6xJiUPVlQYlsq4ST/jfozRoO1ZX5S9v0+LU1vh0LzyBOgMSWZcuvQjoRrwmJHyB6J+Z/DYWkXbk8TaEvQ0hBDze8I5+OwKP39/qFUWt0XUJjz/8fW517gY/jLhSua4h+tc9eDRjfna8WvS/rny618Bt1Ru2LSvSpNuS2+ooWETaUVJCfNjbkBJISgx/O1aXZI8P+9SMogg44uMMSmRdSXHxQV2FdT4p8dF/fwyHaszPToJB27Eyu4E3JEtQo/sO0apItuS2OgoWkXbUs2tm2GtENF1Ht5x0gxJZV0Fm+N+jlMZsx+p6OjMQaOVeGK1RhUCftEyDEllX96ROYW9DQKCbAduxuvS4TlAQ/p1kExQHHGp0D66qkoA4NcuALQkk2LobsJ
2OhUWkHV07cXDYa0RSkxJwcWEvgxJZ13WFFxiynWsN2o6VTe3RG6lhzmZoUuLm/kMNSmRdfVNz0TelC5Qwrm6QkJjVbbSBqazJYUvG0LQxYV01I6BgTOYUKGGuYeoIcpK/h/CGVBXpiZOj8nbvrYn+V4eFFPbLC2s2Q1EErps0BHG26H+/ixxnCib2LQh5BklVBKb074Xs1Og+EgMAu2rDLf2HhnzPFQGgd1oGRmZ3NTaYRc3pPi6ke4g0cajxmNpliIGJrGtcp6lhXTUjoWNs5hQDE1lXdvKcMLegISflVkOydDQsIu1ICIHbrrwwxK8F4lQV10+K/qPWJneOHwE9xBkkXZe4fdwIgxNZ1/cGDINNCe04XwK4d8jomHlvnsu6DEFGfHJIsyICwA3dx8bEGhEA6OHoh/zEXiHNiggoGJR6ITLt2RFIZj3xts7ISroGoQ2rKhLjeiMt4SKjY3UILCLtbMaEC3D9pOCOpgQaz0v/8f6rkJ2ZEplgFjSyex4evuKSkL72l9MmYni3XIMTWVdeihP/N/lqAMHdUkkAuG3AMFzfJ/pPYTVJUOPw/0bejjjFFlQZUSAwKrM3ftA7No7wgcaDp9sLfoZkW2pQZUSBgix7DuZ0+48IprOenhm/RVLcACCotTUqbEoqBnR+ESIGTmGdS2x+1yYSQuBnt07GnMsa7zartHLqQVUEbDYVT/7oaowf2rM9IlrK3LHD8csrLoEAWj1NoyoCAsCvpk3ErWMK2yWflUzp3hsvTJkJm6K0epqm6fN3DhqB34y9NGZmQ5r0Tc3F86O+j+S4hFb3lThbViZ07o8/D/8ebEr0nxr9OmdcBu7r/Vukx2c174uWCeQmdse9vX6DxCi/WubbVMWBC7JfQ4p9GJoOIVumIF7tjEE5C5Bgy4t8QIsS0sJvMBHM2wh3RGu3H8IbK7Zhy97jzYOsLiUURYGu64izqZg+/gLcNHU4euTG3ttpf9224lK8vGEbVu5tvKusEGf3lRDNl65OHdgHc8cWojA/dmZCzuVAVQVe3LUV7x7cDb+mQRUKdCnPlg0JTUpclNsddw4agUu7Rf/C55aUeV1469gGLDq+CXUBL2xCgSZl4xGaENCkjv6pXTGn+1hckTsMaowesQJAvebG+ooV+LxiOWoCVVCgoundjQQEdGjIjM/GRZ2uwJjMKYhTYuP01bno0ofTdQtwsuZleAOHIWBrvjmcgAKJAOKUTOSk3IqclFsRp0bf1X3BjN8sIhZQfKoKS9fvwenKOnj9DUhOtKNPfhamjRuAZEf039shGGW1dVhctAfFlS7U+XxIttvRPSMNM4cNRFZKktnxLMXl82LRwT3YV1mOWr8PibY4dElOwXW9L0CBM/p+8YXDpzXgk1O78GX1MdQ11MOm2JBpT8ZlOUMwwBkbi3jbSpc69tZsx/7aL1Gv1UFAwGFLwQWpI9E7+YKYm11riZQStb7NOONZjoBeBSkDsClpcCaMRbpjChQRvfc5YhEhIiIi0wQzfsfuPCMRERGZjkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi09jMDtCe3LVefPxBEfbtPIG6mnrE2+OQnpmMidMG44Jh3SCEMDuiZfj8AazasB+bdxxDbZ0XiqLAmZqIiy/sjTGFBVBVdtgmmq5j7d4jWLXrMKrcHugScDrsGNO3O6YO6QN7XEz9mLVISonNpSV4/8A+lH
vcaNA0pNoTUJjTBTP7D0Sq3W52REvZ7zqNRce+xMl6F+oDDUiOs6Nvamdc36MQWQnJZsezlHJfBVaXf46T9adRr9UjQU1AdkIWLsm6CDkJnc2OZylerQqHaz5Ale8AGvQ6qMIOh60zClKuREZC/3bPI6SUst2ftY1qamrgdDrhcrmQmpoa8nZKjp3Bwlc+x8fvFyHQEIAQArouIQSgKAo0TUe3nlmYefNYXD6zEKpNNfC76Fgqq9148/0tWLJyB+o8PqiKgKY3vkRUtXFfZWUmY9YVhbh+WiESE+JNTmwej8+P+euKMH9dEcpr3N/cV2f/nJJoxw1jh+DWi4cjM8VhcmLzBHQdb+3eiZeKtuFQVSVsQoEmdUgAqhDQpUS8quK6ARfg7uEj0SMt3ezIppFSYnnJXrx0cCOKKk9AFQr0s/tKwdmDJQFcnjsAd/Ydh8HpuabmNduemv34oHQZvnTtggIF8ux/4ux/OnQMSh2A6V2mYkjaILPjmqrK9xX2Vr2GY3UrIaEDEMDZ/y+gQEJDhr0/+qXdhB7Jl4d1cB7M+B31RWT7F4fw+IPz0eDXoGn6eR8nBCAlMGpCX/zyyRuQkBh7A+zh4nI8+NuFqHJ5oOstvyyEEOjdvRP+51ezkJme1E4JraO8pg4//PsiHDp1BnorP0KqEMhIceAfP7wevXIy2ymhdbj9ftz30ftYW3wUANDS3lKFgN1mw9+vmolx+d3aJZ+VaFLHb4uW4s0jW6GcLWjno4rGQfcPw6/GzO5D2zGlNUgp8dGpFZhfvBAKFOg4/+/3ps9f13UGrus6IyZnv4vrPsHnpx4FICGhtfBIBYCOgpQrMarzL6GKuJCeL5jxO6rn13dtP4Zf3/cqfN5AiyUEaCwhALDl86/wmwfnI9DQ0v9Q0efEqWr8x6/falMJARp/CRwursD9j72JWre3HRJah8vjxR3PvY3Dp1svIQCgSYnKOg/mPrcAJ8642iGhdTRoGn7wwWKsO34MEi2XEKBxX3kDAdyx5B1sPVnSHhEtQ0qJx7d/hLeObAWAVl9bmtShS4mHty7BB8d3tUdES1l6aiXmFy8EgBZLyNc//27J+3i35P2IZ7OaE+41WHfqV5DQWikhAM7uqyO1S7Hx9H9Dypb3rRGitoi4a7147IHXoOsSwUz66LrEl5uO4LUXVkUwnbXousR/PvEuPPW+NpWQJpouceJUNZ74v+URTGc9v35zOU5UuppPw7SFpkvUeX24/1+Lg3o9dnR/2bgeG08cb1Nha6JLCU1K3PXeItT6fBFMZy2Lir/EgqPbWi1r3yYAPLxlMY7UnolELEvaX3sQrxe/HdLXvlvyPnZUx05xczecxrpTvzz7t2BeXRLH6lbggGthJGJ9Q7sUkeeeew49evRAQkICRo8ejU2bNkX8OT/+oAged3ADaxMpJd578wv4vA0RSGY9m3ccxbGSyqAG1ia6LrH6i69wsiw2jvSPV1Tjs92HQ9pXmi5x6HQlvvjqeASSWY+noQGv7Nge9MAKNJaRWp8Pi/fvNTyXFUkp8a8D6xHKCYPGmSaJ+Yc3Gx3LspaeXAklxOFLgYKPTq00OJF1Hax5F1JqCK6E/Nve6tciPisS8SLy1ltv4aGHHsJjjz2Gbdu2YejQobj88stRVlYWseeUUmLJ/I1hbcNT58OaFbHRmt9Zuh2qEvo5U0URWLJyh4GJrOvtDTughLGvVEXgjc+LjAtkYe8f2AdPQ3hl/uWibTExg7TtzHEcqq0IcahoPKW18Oh2eAJ+Q3NZUZW/Gluqtrd6OuZ8dOjY6dqD097IjUFWoUk/vnK9e3Zhamg8gdM46fnCwFTfFfEi8vTTT+Puu+/GHXfcgYEDB+L555+Hw+HAiy++GLHn3FNUjNLjlQjn95dQBD58O/qPMM5UubF+W2hH+E10XWLxii+jfsDQdYmFG3eFta80XeKz3YdxptZjYDJrmr/zy5CO8JtIAIerq7D91EmjIlnW20e3QRXh/T
qu1xqwrGSPQYmsa015aDNHX6dAwWfl6wzJY2Ul7nXw6zVhbUNAxcGaRQYlOreIFhG/34+tW7diypQp/35CRcGUKVOwYcOG7zze5/OhpqbmGx+hKD1eGXLmJlKXhmzH6k6Wu8IqbE1q3V546qP7aKzW60OtN/w1C7qUOFkV3i+HjuBYdXXIR/hfV+yqNmAr1na49gy0MKe/bULBCXeVQYms67SvDCLMKiIhcdpbblAi66prKIFAeLejkNBQ4y82KNG5RbSIVFRUQNM0ZGdnf+Pfs7OzcerUqe88/oknnoDT6Wz+yM/PD+l56z1+GHF1ljfKB1YAqDdwHUy0FxGPz7jvz23gtqyqPmDMa6vOH/37yh0wZlGuOwZOzXg1H/QwK66ERL0W/Vf7BXQPEPb8ERCQ7vDDtMBSV8088sgjcLlczR/Hj4e2qC/REW/IUX4s3EskMSG0a8TPxeGI7rtiOuzGvR6SY+BGcIlxxry2kuOj+3UFAMlxCYZsJ8kW/a+rBNUe9oyIgIBDTTQokXXZFAcQxvqQJnFKZO8VFdEi0qlTJ6iqitOnT3/j30+fPo2cnJzvPN5utyM1NfUbH6HI69EppK/7OkURyDdgO1bXNdsJxYDpo7TURDgMLDVWlJJgh9MR/oChCIHcdKcBiaytV3qGIa+tgvTov8tq75ROYa8RCUgdPZKj/4Z5XRJyIMOcEREQ6JKY3foDO7jUuG5hLVQFGteIpMYVGJTo3CJaROLj4zFixAh88sknzf+m6zo++eQTjB07NmLP239wHvILssI6PaPrElfdMMq4UBaV7kzChFG9w75q5trLh0X93QoVReCGsUPCGlxVReCyIX2Qnhz9R2O3DB4a1P1Dvk0A6JvZCUM6R/+AcUPB8LDXiCTZ4jG16wCDElnXxVnjwp4R0aHjkqyLDEpkXblJ42FXwyvyEhr6OK8zKNG5RfzUzEMPPYR//OMfePnll7F3717ce++9cLvduOOOOyL2nEIIXHPT6LBOzySnJmL8lIHGhbKw668oDOtKEEjg6ilDjAtkYbPGDA7r6iBNl5gzPjZuxz29Tz+kxId+qkACmDu0MOoLLgAMSe+KvqmdQx5eVSFwQ8FwJKjRPSsJAM64VIzKGBHWfUSGOgchyx79s0eKsKFP6vUQIQ/1Asm2rshOHGlorm+LeBGZM2cOnnrqKTz66KMYNmwYioqKsGzZsu8sYDXa5OlDkZrmCPmeD9feMgbx8bHxrqnDB+WjV/eskGZFFEVg8rh+6JyZEoFk1pObkYrLhvYJaVZEVQT652ZhZM+uEUhmPXabDbcPGx7S4KoIgYzERFzdt/3fCdQMQgjc3W98SCccBABFKLi554VGx7KsK7tcFvLpGR06pne53OBE1tXbOROKiENoi1YlBqR/L+IHA+2yWPX+++/HsWPH4PP58MUXX2D06NERf05Hkh2//eutUG1qUGVEKAKjL+mHG79/SQTTWYsQAk8+fC1SkhOCKiOKIlCQl4n/vGdqBNNZz3/fMBW9sjOC2leqIpCWlIhn77omJo7wmzwwaiwu6V4QVHFThECcomLeNdcjKYwZlY5mRv5gzO0d3O/Gpr369IXXIz8p+tfSNOmVXIDbe9wc0tfelD8LFzhjo+ACgMOWhYu7PHn2dFYwv3sEeqbMQO/UayMVrZmlrpoxWr9BXfHHF+YiwREPRW35WxVnB5UJlw7Er568AWorj482OVmpeP73NyMrM6VNg4YAMKB3Dp59fA6SYuDqoq9LSojHv+6djQF5ndv0o60IgZy0FLxy/xzkpMXGzFETm6Lg/6bPwGU9ewNAq68tVQgkx8fj9etmY3AMrA35tl8Mnoq7+jSun1Nb3VcKVKHgL6Nn4bKusTOwNpmSPRF39Ljl7BvYt/z7uunzN3ebheldYuvACQC6OMbg4i5PQRXxrd5XpOk0Tp/U6zCq88PtcuAkpIVvhxnM2wi3pOxkNRbP34hl726Fx+2DalOg6xJCNK6e1jQd/Q
bl4ZqbRmPitMFQlNgqIV9XU1uPd5YV4d1l21Hp8kBVleY1EUI07qv8LumYdWUhZlw6BPYYOX11Lr6GABZu3In564pQXFENm6I0L85UhEBA19EpNQk3jR+KOeOGGnLFTUelS4n39u/FS19ux47Tp85eIdI4ud60r5Lj43HToCGYO7QQuSmh/7xHgzWnvsIrBzfh87JDZ8ubgC4lVCGgSR1xioqr84dgbp/R6JPa2ey4pjpYdxjLTn6MLyq3QkJCQEBCQoFofh+ekenDcEXOFPRP7Wt2XFPV+I9hf/VbOFz7ATTph4ACCb25fEhoyE4cgX7OOeiadHFYJSSY8TsmikgTb70fa5bvwr5dJ+Cu9SIuzob0TsmYeMVg9OrfxYDE0SOg6fh8yyFs2XEMNXVeKIqAMzURl4zqg2ED82Lq9EJrpJTYcugEPt11CFXueuhSwulIwJg+3XDJwJ6wxdjsWmt2l53G+1/tR4XbDb+mIcVux/AuuZjepy8SbNG/2DIYxXWVWFy8Ayc9LtRrDUiJS0BfZ2dcnT8Yzvjov/IqGK6GGqwpX49T3tOo1+qRoCYg256FCVnjkBEfO6et2qJBr8PR2uWo8h2AX6+DTdiRaOuMgpRpSI3vbshzsIgQERGRaYIZv3moRkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0NrMDEHBoTwk+fmczykur4fP6kZyaiIIBuZg6axTSOqWYHc9SSk9W46PlO1BSWgVPvR+OxHh0zU3H9CuGoEtOmtnxLOVMjRvvbdiNr05UoLbeh0R7HLLTUzBjzED0zcsyO56l1Pl8WLJrH7afKIXL60O8qqJTkgPTB/bDhd26QghhdkTL8GkBLCvejzWlR1Dt80IRAun2REzJ743JXXvDpvD4tokmdWyp3IfPK3bC5a+DJnWkxjkwLL0PLskaBrsab3ZESxBSSml2iPOpqamB0+mEy+VCamqq2XEMJaXE6g+KsOhfq3Hgy2KoNgW6JiGlhKIISACKIjBh+jDM+sEk9BrY1ezIptqy7SjeemcTNm890rh/ZOM+FEJACEDXJUaNLMCNs0Zj+LDuZsc11d7i03hl5RZ8vO0r6BIQAHQpIQSgCAFNlxjUIwe3XDocU0f0jelBtriqGi9+sRXv7tgDXyAAIUTjvgKgKAo0XUdBRjrmXliI2cMGIU5VzY5smjNeD/61dzNeP7AdLr8XqlCgSR0Amv/cOTEJt/Ubgdv7j0BynN3kxObxan68V7IOi0vWotxX/Y19pUCBDh1JagKuzB2L6/MuQabdaXJi4wUzfrOImCDQoOHZ/3obKxZsglAEpH7+/wlUVQEE8LOnb8HEGYXtmNIapJR47c0N+NfLa6EoAnoL+6rp83ffcQluvmF0TA6wSzftw6MvLwMAaC3tq7MD7rXjB+GRmy6FTY29o9jNxSfwgwWL4W0IQGvh12DTq2hcQTc8e90MJNtj7yj2oOsMbv34TZTV17W4rwBAgUAvZyZenTIHOY7Ym9Gt8tfiVzv+joN1JZBobV8pcMYl4YmhP0Sv5Og62Axm/I693z4mk1LimYffwsq3NzX+vYXBAgA0TYem6fjTj1/F58t2tEdES3n9rY3418trAaDFEvL1z/9j3mq88fYXEc9mNSu3HsCv5i2FpssWSwjQOEMCAIvX78LvXv8YFj4eiYgvS07ijjfeRX0rJQQA5NmPDUeP4w
cLFsMfCLRLRqs4UefCDctfa1MJAQAdEodrzuCG5a+jylffDgmtwx3w4udFz+FQXWmrJQQAdOhwNbjx0+1/xQlPWTsktCYWkXb20fwN+OTdLQjq9/7Z6fU//uhVnDp+JlLRLGdb0TH886U1IX3t319cjaIdxQYnsq7j5dX41bylCHYOSErgvQ27sejzXRHJZUUefwPuXrAYAV1vLmRtoUuJbSdK8fTq9RFMZy1SSnx/1UK4/N42lZAmmpQocbvwk3XvRzCd9TyzfwGOe8qgQ2/z1+jQUa/58aud/2g+fRNrWETaka7rePuFTxH0aIHGAUPXdXz4euz8EnzrnU1QlNBOr6iqggXvbDY4kXUtXLMDUrblGOy7BIBXVm6JmVmR93bvRXW9N6gS0kSXEvO3fQm33x+BZNbzRdlx7KsuD6qENNGkxGelh3G4pjICyayn3FuN1eVF0EP4KdSho7S+Alsq90YgmfWxiLSjL9cfxOnjlQhptACgaxJL52+A39dgbDALOnnKhS82H271dMz5aJqODZsO4nRZjcHJrMfrD+DddTtbPR1zPhJAcVk1tn51wthgFiSlxCubt4dyLNDM2xDA+7v2GZbJyl7ZtxWqCH2YUIXA6we2G5jIuj48uSGs15UCBYtPrDMsT0fCItKOlr65EUqYiwLdtV6sXxH90+jLVu4IeTakiRACS1dE/7qaVUUH4faGd4SuKgLvrt1pUCLr2nXqNA5WVIZ6LACgcQZp/rYvjYpkWdW+eiw7fiCs0wWalHjzqy8R0KP7lIOUEh+Urg9pNqSJDh1bqvah3FttXLAOgkWkHZ04dBq6Ft4PpKoqOHmswqBE1lVSWh32NgQaZ1ai3fHy6rCvetF0iWNlVQYlsq7iqvBfDxJAcXX0v65OuF0hnb76NnfAj+ooX7Tq0xvgaqgzZFunvLGzDrAJi0g7qnf7wt6GUIQh27G6+np/yKdlmmi6hMcT/efyPT5jvse6+hjYV35jTmt6G6L/yhlPg3GngN2B6H5t1WvG/U72GLitjoJFpB05khPC3oaU0pDtWF1iYnzYp2ZUVcDhiP57Pjjs8SGvO/q65MQY2FfxcYZsJzHOmO1YWVKcca+HaL+5mUM17vtLUqP/9/u3sYi0o259csJeI6IFdHQtiP7bc3fLywh7G1ICeV3TDUhjbd2z08M+B68qAj27ZBqUyLoKMsN/PQgAPTLSwt6O1eUlOWELY6Fqk9Q4O9Lio3twtavxyIgP/6abAgJdEqP/5/DbWETa0bSbxoS9RiQlzYExUy4wKJF1XTF1cHD3WjkHKYFpUwcbE8jCJg3thZTE8I7INF3i+gnRv68GZnfGgOwshDPZJgHcMmKoYZmsymlPwPTu/aGGcYdiVQjc1HcY1Bh4/5kZueOhhHHdjAIFozMHRuXt3lsT/a8OCxk8ulfjbEaIr1VFEbjy5nGIi4/+9yrsnJWKcaN7hXEfEYEJ4/qgU2b032I6Ps6G6ycMDnlfCQEU5GRgaM9cg5NZ020jCxHO8qOk+DhMH9jPuEAWdmu/4SHdQ6SJLiVu6RMbb00xrcvosL5eh46ru15kUJqOJWJF5Pe//z3GjRsHh8OBtLS0SD1NhyKEwOx7Jod0Pl8IwBZnw/RbxhofzKLmzBoVxn1EJG64/kKDE1nXrIuHwqaEdjwmJXD71JEx89480wf2Q6ckR0hH+gLA90YOi4k1IgAwIqsrhmZ2CWlfKUJgan4fdEtJMz6YBWXanZicPSKkWREFCro5sjEivW8EkllfxIqI3+/H7Nmzce+990bqKTqkqbNHYfot44L6GiEa/8+v/jYXWbnRv+ahyZBB+bjvh5ND+tof3TsFgwbmGZzIunIzU/Gnu6cD4uzrJQg3XDIUV40ZGJlgFpQQZ8M/b7wWcaoKJYidpQiBcQXd8KMJsXMwIITA3ydeh8yEpKDKiCoEClIy8Odx0yOYznp+1HcWeibnBlVGVKEg2ZaI3w/+ARQD1uR0RBH7rh9//H
H85Cc/weDB0X/eORhCCNz7+HW45vYJAABFbfkFq6oKbHE2PPr8HRg1KXYGiyazr70Q999zKYDG0y0tafr8j+6dguuuGRHxbFZzyZBeeOoHM2BTFKitnKZp+vwtlw7Hz2+YGDOzIU0GZnfGq9+bjRR7fKsDbFNZmdynJ/5v1tWIU9X2iGgZ2Y4UvHP595CX5Gy1uImzHwPSO+OtqTcjNcoXqX5bomrHk0P/AwNSezTvi5YoEMiIT8VfCh9ATmL4C/Q7KiEj/AYTL730Eh588EFUV1e3+lifzwef79/XUNfU1CA/P79NbyPcEW38eDcWz1uDL9d/1Xw1jdQlFFVA13TExdsw5foLMfPOi5HfK9vktObatecEFry7BevWHwDQWOh0XUJRRPN7pFx8UT/MnjkSFwyMrrfTDtbhk2fw+qfb8OHGvWgIaFAVBbqUzTMlmi4xun833Dy5EBMG9zQ3rMlO1dbh1c3b8eb2naj1+WBr2lcAIAQ0XcegnM649cJCXH1B/5hYdHk+Lr8Xr+3fjlf2b8Xp+jrYhNJ8J1EFAgGpo1tyGm7vPwI39xmGBFtsnL46F78ewLKTG7HoxFqcqC+DKpTm31NCCGhSR1pcMq7uehGu6XoRUuOSTE5svJqaGjidzjaN35YqIr/5zW/w+OOPf+ffo7WINCk5Uo5PFm1Bxclq+Oob4EhJQM8BuZg8cwSSUhPNjmcpZ87UYdnHu1BSWgWPxweHw468rum4fMogZGYkmx3PUmo9Xny4aR++KilHXb0fCfE25KSnYProAejWOXZO8bWFLxDA0r0HsO1EKWq8PsSpKrKSHbhyQD8M6hLbBwHfpuk6VpUcwpqTR1Dt80IRAmn2BEzN64OxOd1jbnatJVJK7HIdxrqKHXA1uKFJHSk2BwrT+2Bs5iDYlOidXYtYEXn44Yfxpz/9qcXH7N27F/3792/+O2dEiIiIYkswRSSo60B/+tOf4vbbb2/xMT17hj7Va7fbYbdH9x34iIiI6N+CKiJZWVnIyor+u3oSERFR+4jYnbGKi4tRWVmJ4uJiaJqGoqIiAEDv3r2RnMxz+URERBTBIvLoo4/i5Zdfbv57YWHj3fVWrVqFiRMnRuppiYiIqAOJ+FUz4QhmsQsRERFZQzDjd+xeFE9ERESmYxEhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZnGZnYAsia/twFrlmxB0eq9qK12Q1EUpGYkY9z0YRg5ZTBUlR22iabp2LThINavPQBXtQdSl0hJTcDwC3vi4kkDEG/nj1kTKSWKDpRg5Yb9OONyw9+gITXJjkG9u2Da+IFIdtjNjmgpB05VYNH23ThZXYt6fwNSEuzok90J1424AFkpSWbHs5TjtS68vW8nDruqUOf3IykuHt1T03BD/0Ho4Uw3Ox61QEgppdkhzqempgZOpxMulwupqalmx4kJVWU1ePf/VuCjl9bAXVMPRVWgazoAQLUp0AI6OuWm4+q7J+Hq709GQlLsDhz1Hj8WL9yMJQs340xFHVRVgda0r87+OTklAVfNHI7r5oxGekbsDhwBTcd7n+3Em8u349jJSqiKgK5LSACKIiB1ibg4FdMnXIBbrhyB/OzYHTiklFi++yu8/Pk2fHn8ZOO+khJSAooQzY+bekEf3DlhBAZ1zTExrfk2lBbj70Wb8dnxI1CEgASgSwlFCAgAmpS4qGt33D10JC7JLzA7bswIZvxmEaFmR/eU4JfX/wXVFbXN5eN8hCJQMDAPv3v7x8jIdr
ZTQus4U1GLR37yBo4eKYfUW/4RUhSB9Iwk/Ol/b0H3gqx2SmgdHq8fjzz7ATbuOAoBoKW9pSoC8XE2PPWTazDygm7tFdEyNF3H7z5Yhbc27YAiGgvI+aiKgJTA76+bimsKB7ZjSmuQUuIfO7bgDxtXQxUCWkv76uznHxwxDj8eMRbia4WOIiOY8Zvz6wQAKD1Shp9d9WSbSggASF3i6N4S/OfVT6HO5WmHhNZRW1OPn973KoqPtl5CAEDXJaqq3PjJva/gZGlVOyS0jkBAw8+eXoJNO48BaLmEAICmS3j9Afz4z+9ix4HSyAe0ECkl/vu9T7Bg0w4AaLGEAI37SpcSj7yzHB9+ua89IlrKv3ZuxR82rgaAFkvI1z//zNb1+N+tGyKejYLDIkLQdR2P3fgs6mu9bSohzV+n6Sg9XIanH3gpcuEs6M+/fx8nS6qgaW2fTNQ1CY/bi1//7C1YeBLScC+8sx7b9h1vdVD9OikldF3iof9ZhLp6XwTTWcvi7Xvw9pZdrZa1bxMAHnlnOY5WxE7J3XKqBL/b8FlIX/vM1vVYffyIsYEoLCwihG2r9uD4V6ea1zcEQ9d0rP9wO04VV0QgmfWUnqjEhrUHoLdhJuTbNE3i2NEKbN961PhgFlTvbcDbK4sQSu/SpUSdx4dln+81PpgFSSnxr7VbEMoJAwlAQmL+F0UGp7Kuf+7YAjXE0yuqEPjnji0GJ6JwsIgQ3v/nKihhXAWjKAqWvrzGwETW9eGS7VCU0M8vq6qC9xbGxi/BlRv3od7XENY2FqzYHhMzSNuOleJweWXQsyFNNF3inS274fGHt787gtPuOqw4erDV0zHno0mJtSeO4Zir2thgFDIWkRhXedqFTSt2BnVK5tt0TceH81ZH/YCh6xIfLtkW0mxIE03TsX7dAVRVug1MZk3vfroD4awJlACOnazCroMnDctkVQu37oQaRsEFgPqGBizfdcCgRNa18MDusLehCoG39u00IA0ZgUUkxp0urjCkQNRVe+Cp9RqQyLrq6rxw14W/ZkHqEmWnXAYksrbjp6tDOi3zbSfKqsPfiMUdKa+CFkbBBQCbouBEZfS/ro65qsIeuCSAYzXVBqQhI7CIxLh6t3GLAevroruIeD1+w7bl8UT/IkyfQacJPPXRf7rB7TPmteX2G/catSp3QwNCn79tpEuJuobo/xnsKFhEYlyigTckc6QkGrYtK0p0xBu2LUcM3AguIT7OkO0kJRq3360qOcGY7zEpPvr3VVJcHJSQlvX+myIEUuKj/2ewo2ARiXFdemRBhHluGgCcnVKQmBzdP9hJyQlISQ2/bCmKQE6XtPADWVz33AxDbhzVrUv032W1d+fMsNeIBHQd3TtF/77qmZYBPeRlvY0EgJ687btlsIjEuLSsVIy7chhUWzhXzQhcdcclUX+3QkURuGrm8LCvmpkwsT+caQ4Dk1nT9ZcODWv9kRBAr7xMDCjINjCVNc0aOTjsNSJJ9nhMvaCPQYms6/q+F4Q5H9J4auaG/oMNyUPhYxEhzPj+JGiB0M+6SgBX3DbBuEAWNv2awrCvmplx/UgDE1nXpaP6hnVaRUpg9mWFUV9wAWBIXg76ZncK+SojVQjMHjkYCXHR/waLWY4kTOvZN6z7iEzML0BeSuy9NYVVsYgQhk7oj4IL8kJ6R11FEbh45khkdc2IQDLrye6ShosnDwhpVkRVBXr1ycaQYbHxHir2eBtuvHx4SEeviiKQlpKIy8f1NzyXFQkh8P2LLwzpKiOBxv110+ghhueyqu8PGRnU3Xq/TpMSdw+90OBEFA4WEYIQAo+/cT+S05OCurGZoiro3j8XP/7LbRFMZz0/feQqdOvRKagyoqoCqU4HfvvnOTFxhN/kzpljMHZoj2+8a2xrFCEQp6p45ufXwWHQIs6O4Kqh/XHbuMKgvqZpr/7PnOnIz0gzPJNVDevcBb+dMCWkr31k9MUY1zU2DgY6ChYRAgB0zsvE00t/gU656a0PsK
Lx/H3fwh7403s/gyMloX1CWoQjyY6n/nor+vTvAnF2X7REKAJZ2U785fm5yOocW+8ibVMVPPGjGbhkRC8AaLWQqIpAUmI8nntkVkysDfm2/7ziEtx50QgAaPXUg6oIqIqCp2+cjikDe7dHPEv53sBh+O1FUyDQhn119vO/HHMJfsDZEMsR0sK3wwzmbYTJGLVVbrz3z1V4/5+rUF1eA9WmQuo6IASEALSAjq69snHNDybjilsnID7BmEs0OyK/L4CP3tuOxW9vRsmJSqiq0rw4UwgBTdORkZmMq68fiauvG2HIFTcdla5LLN+wFwtWbMeew6ehKgoACV02nlbQNB1JifGYOWkw5kwdjuzMFLMjm2rNgSN4df12rD94rLG8icYFlopQoOs64lQVM4YNwG3jCtEnu5PZcU21/fRJvLhzCz46fAASgAIBTUqoAs33G7msR2/cNXgERnXJMzNqTAlm/GYRoXPSAho2LvsS21fvRW2VG4qqwJmRjHFXFWLwuL4xdXqhNVJK7Nh+DJ+vOYCaag90KZGSmojhIwswZnyfsK5Iikb7j5Zh5cZ9OOPywN+gISXJjsG9u+DS0X0Nu/dItCg+U40lRXtwsroW9Q0NSE2wo092J8wYNgDOxNiaiWxNuceNdw7sxhFXFer8PiTFxaN7ahqu73cBcpJiu9iagUWEiIiITBPM+M1DNSIiIjINiwgRERGZhkWEiIiITBOxInL06FHcddddKCgoQGJiInr16oXHHnsM/hh4d0giIiJqm4jdD3jfvn3QdR0vvPACevfujV27duHuu++G2+3GU089FamnJSIiog6kXa+a+fOf/4y//e1vOHz4cJsez6tmiIiIOp5gxu92fYckl8uFjIzzvyeJz+eDz+dr/ntNTU17xCIiIiKTtNti1YMHD+LZZ5/FD3/4w/M+5oknnoDT6Wz+yM/Pb694REREZIKgi8jDDz8MIUSLH/v27fvG15SUlOCKK67A7Nmzcffdd59324888ghcLlfzx/Hjx4P/joiIiKjDCHqNSHl5Oc6cOdPiY3r27In4+MZ3zSwtLcXEiRMxZswYvPTSS1CUtncfrhEhIiLqeCK6RiQrKwtZWVltemxJSQkmTZqEESNGYN68eUGVECIiIop+EVusWlJSgokTJ6J79+546qmnUF5e3vy5nJycSD0tERERdSARKyIrV67EwYMHcfDgQeTlffOtly38PntERETUjiJ2ruT222+HlPKcH0REREQA32uGiIiITMQiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBqb2QGIgnHy8Gkse/FTlBw8ifpaLxJTEtC1dxdccddkdCnINjuepVRV1GHF4q04cuAU3HVe2BPikZXjxGXXFKJnvy5mx7MUt8eHFav3YOe+UtS5fYizqUhPc+DSi/pj2AV5EEKYHdEy/IEAVn55EBv2H4PL44UigLSkREy8oBcmDCyATeXxLQVHSCml2SHOp6amBk6nEy6XC6mpqWbHIRNt+3gH3n7qPWxZ+SUURYGUElKXEIqAEAK6rmPk5cNww8+uRuHkwWbHNdVXe0qwcN5arF25G1JKCAC6LiGEgKIIaJqOfoPzcN1t43Hx5YNjepAtPVWNN5ZsxtJPd8PvD0Ao4uy+AhRFgabp6NY1HbOuGoEZUwbDZlPNjmyayjoPXl29DW9/vgM19T6oioCmNw4fTX/ulJqEGy8ailsmFCIpId7kxGSmYMZvFhGyNCkl5v/hXbz06zehqAp0TT/vY5s+f9cfbsacX8yMyQF21Ydf4s+/WggBQGtpX50dcK+4fiQe+K+rocbgAFu0+zh+8ftF8PkamgfUc2l6GY
0Y0h2//8U1cCTG3gB75HQlfvD8Oyh3uaG3MmQoQqCgcwaev+c6ZKclt1NCsppgxm/OoZGlvfHEIrz06zcBoMUS8vXP/+uX8/HWk0sins1q1izfiT89vAC6prdYQoDGGRIAWP7uFvzv44th4eORiNh94CQeenwhvN6WSwgASNn4sW1nMf7zd+/A3xBop5TWUFpZg9ufXYCKmtZLCADoUuJoeSXu+OsCVLvr2yEhdXQsImRZ2z/diXn/9UZIX/uvR17Hl6t3G5zIukqPn8GTDy8AgpwEkhJYsXgblr2zJTLBLKje68d//u4daJrepoG1ia5L7Nhbin+8vi6C6axFSokf/WsJauq9rRa2r9N0iZNVNfjl68simI6iBYsIWdbCp9+HEuLCN9WmYOHT7xucyLo+fGtT46AaysSGAN6etzZmZkVWrtmLmlpv86xQMKSUWLysCJ56fwSSWc/WQyU4UFoRVAlpoukS6/YexdGyqggko2jCIkKWdOpoGTYt3d7q6Zjz0QI6vvhgG8qKyw1OZj0+bwOWLtwMXQuxSEigtPgMdmw5YmwwC5JS4u0PtiGc5UNeXwAr1+w1LpSFvbGuCKoS+s5SFYG31+8wMBFFIxYRsqTl81ZBUcJ7eQpFYPm8z4wJZGHrP9kDj9sX1jZUVcHShZsNSmRd+w+dxtHjZxDO5I8QwOJlRYZlsiqX24tPdhwMaTakiaZLvLNxJwIhHlBQbGARIUsqPXQq7G0IACePnA4/jMWVFp+BagvvR1nTdJw4WmFQIusqOVUd9jakNGY7VldaVRPUGprz8fga4PJ4DUhE0YpFhCypvs4b8mmZJpquw1Mb/av26+v9EMGuUj0HT114syodQb23wZDt+HzRf+WMx2fcOhgjt0XRh0WELMmRkhjyQtUmqqrAkZJoUCLrSkyMhwxpleo3OZLtBqSxtsSEOEO2k2DQdqzMyBuS8eZm1BIWEbKkvL65YW9DSqBrn+i/lXleQSdogfBmj1RVQfdenQ1KZF3dumaEvQ0hgPzcdAPSWFtueipsYa7TAoCUBDucjgQDElG0YhEhS7r8jklhX04qpcTld0wyKJF1jZ08EMkp4f2i1zQdV84eZVAi6+pT0Bl9CjqHddddKYFrrxhmXCiLSnUkYOqwvmFdNaMIgVnjBkM1oNBQ9OKrgywpKy8TY64aASXERZiqTcH4maPQKTf8I2Cri4+34crZo6CEOGAIAeT3zMLAYd0MTmZN108vDKvkOhLicOmE/gYmsq4544eEddWMlBKzx8b2ez9R61hEyLJm//Tq0O8joumY9dAMgxNZ1/Q5o6Da1JDujyElcMOdF8fMe/NMuag/MtIcIRU3IYDrpg9Hgj3614gAwLCCXAzqlh3SrIgiBCYN7oW8TmnGB6OowiJCljV4wgDc+z+3h/S19z1zJy4Y18/YQBaWnZuOX/3PjQBE0IVixo2jMeXqwsgEsyC7PQ5PPToLcTY1qDKiCIGRQ7vjrhvHRTCdtQgh8MydVyMj2RFUGVEVge5Z6fjtTZdHMB1FCxYRsrTrHpyOe/9yOyDQ6r0yVJsCCOC+/70TMx+Y1j4BLWTMxAH49V9uhmpToLZyxZGiNg4q1946Hvc8fFXMzIY06VPQGf/vt3OQ5LC3Wkaa9s34C3vhDw/PhC3G3qm4szMZL/9oDnLTU6G08joRZz/65mbhxftnIyUx+q/EovAJaeE3mAjmbYQpuu1evx/v/OUDfL54E4DGwUHXdSiKAqnrgBCYcN1oXPfgdAwcGzszIedy7FAZFr/6OT5+vwiBhgAUVYGuSyhCQKLxXYoLx/TCzFvHYfTFsbHW4XzKz9Ri4Yfb8N7yHajz+KCe3VdCNL7GNE1Hv17ZmDV9OC67eECrBS+a1dR7seDzHXhjbRHKa9ywKUrzDc8UIRDQdeRlOnHzhGGYNXYIEuJtJicmMwUzfrOIUIdy5mQVVr78GUoOnoKnth6OlER07dMFU+degoyc6L+kMhh1NfX49I
MiHDlwCu46H+wJccjKceLSGcPQtXsns+NZis8fwKr1+7FrbylqPV7E2VRkpCXh0ov6oV+vHLPjWYqm61i75wjW7z8Gl8cLRQg4HQmYNLgXRvXOj7nZNTo3FhEiIiIyTTDjd+zOMxIREZHpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZxtIXejdd0FNTU2NyEiIiImqrpnG7LRfmWrqI1NbWAgDy8/NNTkJERETBqq2thdPpbPExlr6PiK7rKC0tRUpKiuE3yampqUF+fj6OHz/Oe5S0gvuq7biv2o77qu24r4LD/dV2kdpXUkrU1tYiNzcXitLyKhBLz4goioK8vLyIPkdqaipfqG3EfdV23Fdtx33VdtxXweH+artI7KvWZkKacLEqERERmYZFhIiIiEwTs0XEbrfjscceg93Ot6luDfdV23FftR33VdtxXwWH+6vtrLCvLL1YlYiIiKJbzM6IEBERkflYRIiIiMg0LCJERERkGhYRIiIiMg2LCICrr74a3bp1Q0JCArp06YJbb70VpaWlZseynKNHj+Kuu+5CQUEBEhMT0atXLzz22GPw+/1mR7Ok3//+9xg3bhwcDgfS0tLMjmM5zz33HHr06IGEhASMHj0amzZtMjuS5axZswYzZsxAbm4uhBBYvHix2ZEs64knnsCFF16IlJQUdO7cGTNnzsT+/fvNjmVJf/vb3zBkyJDmm5iNHTsWS5cuNS0PiwiASZMmYcGCBdi/fz/eeecdHDp0CLNmzTI7luXs27cPuq7jhRdewO7du/GXv/wFzz//PH75y1+aHc2S/H4/Zs+ejXvvvdfsKJbz1ltv4aGHHsJjjz2Gbdu2YejQobj88stRVlZmdjRLcbvdGDp0KJ577jmzo1je6tWrcd9992Hjxo1YuXIlGhoaMHXqVLjdbrOjWU5eXh7++Mc/YuvWrdiyZQsmT56Ma665Brt37zYnkKTvWLJkiRRCSL/fb3YUy3vyySdlQUGB2TEsbd68edLpdJodw1JGjRol77vvvua/a5omc3Nz5RNPPGFiKmsDIBctWmR2jA6jrKxMApCrV682O0qHkJ6eLv/5z3+a8tycEfmWyspKvP766xg3bhzi4uLMjmN5LpcLGRkZZsegDsTv92Pr1q2YMmVK878pioIpU6Zgw4YNJiajaOJyuQCAv59aoWka3nzzTbjdbowdO9aUDCwiZ/3iF79AUlISMjMzUVxcjCVLlpgdyfIOHjyIZ599Fj/84Q/NjkIdSEVFBTRNQ3Z29jf+PTs7G6dOnTIpFUUTXdfx4IMPYvz48Rg0aJDZcSxp586dSE5Oht1uxz333INFixZh4MCBpmSJ2iLy8MMPQwjR4se+ffuaH//zn/8c27dvx4oVK6CqKm677TbIGLnpbLD7CgBKSkpwxRVXYPbs2bj77rtNSt7+QtlXRNS+7rvvPuzatQtvvvmm2VEsq1+/figqKsIXX3yBe++9F3PnzsWePXtMyRK1t3gvLy/HmTNnWnxMz549ER8f/51/P3HiBPLz87F+/XrTpqraU7D7qrS0FBMnTsSYMWPw0ksvQVGits9+Ryivq5deegkPPvggqqurI5yuY/D7/XA4HFi4cCFmzpzZ/O9z585FdXU1ZyPPQwiBRYsWfWOf0Xfdf//9WLJkCdasWYOCggKz43QYU6ZMQa9evfDCCy+0+3Pb2v0Z20lWVhaysrJC+lpd1wEAPp/PyEiWFcy+KikpwaRJkzBixAjMmzcvpkoIEN7rihrFx8djxIgR+OSTT5oHVV3X8cknn+D+++83Nxx1WFJKPPDAA1i0aBE+++wzlpAg6bpu2pgXtUWkrb744gts3rwZF110EdLT03Ho0CH8+te/Rq9evWJiNiQYJSUlmDhxIrp3746nnnoK5eXlzZ/LyckxMZk1FRcXo7KyEsXFxdA0DUVFRQCA3r17Izk52dxwJnvooYcwd+5cjBw5EqNGjcIzzzwDt9uNO+64w+xollJXV4eDBw
82//3IkSMoKipCRkYGunXrZmIy67nvvvswf/58LFmyBCkpKc3rjZxOJxITE01OZy2PPPIIpk2bhm7duqG2thbz58/HZ599huXLl5sTyJRrdSxkx44dctKkSTIjI0Pa7XbZo0cPec8998gTJ06YHc1y5s2bJwGc84O+a+7cuefcV6tWrTI7miU8++yzslu3bjI+Pl6OGjVKbty40exIlrNq1apzvobmzp1rdjTLOd/vpnnz5pkdzXLuvPNO2b17dxkfHy+zsrLkpZdeKlesWGFanqhdI0JERETWF1sn+ImIiMhSWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyzf8HSkww4J2r1zoAAAAASUVORK5CYII=", "text/plain": [ "<Figure size 640x480 with 1 Axes>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plotPinPow(multiPinBlock)" ] }, { "cell_type": "code", "execution_count": 29, "id": "aec706b3", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,\n", " 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], dtype=uint16)" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "primaryFuel: Circle = multiPinBlock.getComponent(Flags.PRIMARY)\n", "primaryFuel.getPinIndices()" ] }, { "cell_type": "markdown", "id": "5c98dcb0", "metadata": {}, "source": [ "The ordering is worth discussing. `primaryFuel.getPinIndices()` being sequential `[0, 29]` would imply, at first, that all the `primaryFuel` pins reside in some sequence adjacent to each other. However, the lattice map has `primaryFuel` in the center of the block, and then in the second and third full rings. This ordering is still consistent with `Block.getPinLocations` and is a side-effect of\n", "\n", "1. How the hexagonal ascii maps are processed,\n", "2. How pin locations are discovered within a block,\n", " - For each clad component, extend it's spatial locators\n", "\n", "We can see the first \"pin\" location in our block is not the center, but the north west pin in the block." 
] }, { "cell_type": "code", "execution_count": 30, "id": "34f9f9ce", "metadata": {}, "outputs": [], "source": [ "assert multiPinBlock.getPinLocations()[0].getRingPos() == (4, 4)" ] }, { "cell_type": "code", "execution_count": 31, "id": "388c2c64", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([30, 31, 32, 33, 34, 35, 36], dtype=uint16)" ] }, "execution_count": 31, "metadata": {}, "output_type": "execute_result" } ], "source": [ "secondaryFuel: Circle = multiPinBlock.getComponent(Flags.SECONDARY)\n", "secondaryFuel.getPinIndices()" ] }, { "cell_type": "markdown", "id": "99bff8cd", "metadata": {}, "source": [ "The component level pin plotter shows that we can still collect the same power profile by connecting\n", "\n", "1. `Block.getPinLocations`\n", "2. `Block.p.linPowByPin`\n", "3. `Circle.getPinIndices`" ] }, { "cell_type": "code", "execution_count": 32, "id": "125a9793", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "<matplotlib.legend.Legend at 0x1baf72d34d0>" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAOQNJREFUeJzt3Xl4VOXd//HPmUkme8IWCIEAYVFAFimKIlVRKC51QSpYWisguP3ASuljC60bj1ZsxaXlse4GulC1VMANRSloKwqyRERwARMSwhZISCB7Zs7vDyQaEUgyZ3KfzLxf1zVXneTM93xzd8J8cp/7nGPZtm0LAADAAI/pBgAAQOQiiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwJsp0AycSCAS0a9cuJSUlybIs0+0AAIAGsG1bhw4dUnp6ujyeE895uDqI7Nq1SxkZGabbAAAATZCfn6/OnTufcBtXB5GkpCRJR36Q5ORkw90AAICGKC0tVUZGRt3n+Im4OogcPRyTnJxMEAEAoIVpyLIKFqsCAABjCCIAAMAYgggAADDG1WtEAAAtl23bqq2tld/vN90KQiA6OlperzfoOgQRuNaOwwdUVlsVVI2EqBh1TWzrUEfutadyjyr9lUHViPXGKi02zaGO3Otw9Q7V2OVB1Yi24pXo6+pQR+5l1+ZKdlmTXltdY2v33lpVVHgkK9rZxuAKlmWpc+fOSkxMDKoOQQSutOPwAV26Yp4jtV4fcWtYh5E9lXs06+NZjtSa039OWIeRw9U7tDx/tCO1RmUsCeswYtfmyt4/qkmvDdhe5Rz4X0X5MpSeFqfo2HR5PDEOdwiTbNtWYWGhdu7cqV69egU1M0IQgSsFOxMSqlpuFOxMSKhquVGwMyGhquVKTZwJkaRqf6oCaqWOaXGKj/NIXp8sT6yDzcENUlNTlZubq5qaGoIIAMBFbI8kS54G3pqDw7Atk1O3XiGIAACM4TAsOH0XAGBMOByGzc3NlWVZys7ONrL/lo4ZEQAAgpCRkaHdu3erXbt2pltpkZgRAQCgiaqrq+X1epWWlqaoqOb/276mpqbZ9+k0gggAAF8ZPny4pk2bpmnTpiklJUXt2rXTnXfeKdu2JUndunXTvffeq+uuu07Jycm68cYbjzk0s2rVKlmWpTfffFODBg1SXFycLrzwQu3bt0/Lli1Tnz59lJycrJ/85CcqL//67Ks33nhD3//+99WqVSu1bdtWl112mbZv3173/aP7eeGFF3T++ecrNjZWTz31lJKTk7Vo0aJ6P8eSJUuUkJCgQ4cOhX7QgkQQAQDgGxYsWKCoqCitXbtWf/zjH/Xwww/rmWeeqfv+3LlzNXDgQG3cuFF33nnncevcc889+r//+z+tXr1a+fn5GjdunB599FEtXLhQr732mpYvX655875eqFtWVqYZM2Zo3bp1WrFihTwej6666ioFAoF6dWfOnKnbbrtNW7du1ZgxY/TjH/9YWVlZ9bbJysrS1VdfraSkJIdGJXRYIwIAwDdkZGTokUcekWVZOvXUU/Xxxx/rkUce0Q033CBJuvDCC/XLX/6ybvvc3NzvrHPfffdp2LBhkqTJkydr1qxZ2r59u7p37y5Juvrqq7Vy5Ur9+te/liT96Ec/qvf65557TqmpqdqyZYv69etX9/Xp06drzJgxdc+nTJmic845R7t371bHjh21b98+vf7663r77beDH4xmwIwIAADfcPbZZ9e7RsbQoUP1xRdf1N0z54wzzmhQnQEDBtT9d4cOHRQfH18XQo5+bd++fXXPv/jiC40fP17du3dXcnKyunXrJknKy8urV/fb+x8yZIhOO+00LViwQJL0t7/9TV27dtV5553XoD5NI4gAANAICQkJDdouOvrre+xYllXv+dGvffOwy+WXX66ioiI9/fTTWrNmjdasWSPpyILYk+1/ypQpmj9/vqQjh2UmTZrk2AXHQo0gAgDANxwNAEd98ME
HQd9P5WQOHDigzz77THfccYdGjBihPn36qLi4uMGvv/baa7Vjxw796U9/0pYtWzRhwoSQ9eo01ogAAPANeXl5mjFjhm666SZt2LBB8+bN00MPPRTSfbZu3Vpt27bVU089pY4dOyovL08zZ85s1OvHjBmj22+/XaNGjVLnzp1D2K2zmBEBAOAbrrvuOlVUVGjIkCGaOnWqbrvtNt14440h3afH49Hzzz+v9evXq1+/fvrFL36hBx98sFE1Jk+erOrqal1//fUh6jI0mBEBAOAboqOj9eijj+rxxx8/5nvfdYZMt27d6q4zIh25Fsk3n0vSxIkTNXHixHpfu+eee3TPPffUPR85cqS2bNlSb5tv1vn2fr6toKBAbdu21ZVXXnncbdyIIAIAQAtWXl6u3bt364EHHtBNN90kn89nuqVG4dAMXCkhKsaVtdwo1hvrylpuFG3Fu7KWK1kNOzOkYbX4qAmlP/zhD+rdu7fS0tI0a9Ys0+00mmWfaJ7HsNLSUqWkpKikpETJycmm20Ez23H4QNB300yIiomI24LvqdyjSn9lUDVivbFKi01zqCP3Oly9QzV2+ck3PIFoK16Jvq4OdeRedm2uZJc1+nWVlQHl5tvq1q2LYuPiZFnH/2Ngy8FdGvvOU0F0+bV/nn+j+rZKd6QWTq6yslI5OTnKzMxUbGz9P2Ia8/nNoRm4ViQECKdEQoBwSiQECKdYUd2a9jp/pWTlyPLEnjCEABKHZgAABnEYFsyIAACM6ZrYVq+PuJXDsBGMIAIAMIoAEdkIIgAA1zlYWaFFX3yij/bv0aGqSsVERalDfKKu6tlXp6d2bDH3UcHJEUQAAK7xefF+PbXpQy3ZvkW1gcCRG8PZtixJXsujBVs2qk+bVF3fb7Cu7tVPHgJJi8diVQCAK6zI267LlvxFL237RDWBgGxJga+uMGFLqrWP3Kn2s6JC3f7uG5q64mVV1taaa9gFVq1aJcuydPDgQdOtNBlBBABg3Ls7czTlrcWq9vvlP8nlrQJf/e8buV9o2r9flj8QOOH2cDeCCADAqP0VZbrx7SWSbasxV9gMyNZbedv1xKa1oWotIlRXVxvdP0EEAGDUC599rKpav5o6r/Hs5nWqCfgd6WXRokXq37+/4uLi1LZtW40cOVJlZUeuLvvMM8+oT58+io2NVe/evfXnP/+53mt37typ8ePHq02bNkpISNAZZ5yhNWvW1H3/8ccfV48ePeTz+XTqqafqr3/9a73XW5alZ555RldddZXi4+PVq1cvvfzyy/W2ef3113XKKacoLi5OF1xwwTE34Ttw4IDGjx+vTp06KT4+Xv3799c//vGPetsMHz5c06ZN0/Tp09WuXTtddNFFuv7663XZZZfV266mpkbt27fXs88+26SxbCiCCADAGH8goAVbNirQqLmQ+g5UVmj5jm1B97J7926NHz9e119/vbZu3apVq1ZpzJgxsm1bf//733XXXXfpd7/7nbZu3ar7779fd955pxYsWCBJOnz4sM4//3wVFBTo5Zdf1kcffaRf/epXCnx12Gjx4sW67bbb9Mtf/lKbN2/WTTfdpEmTJmnlypX1epg9e7bGjRunTZs26dJLL9VPf/pTFRUVSZLy8/M1ZswYXX755crOztaUKVM0c+bMeq+vrKzU4MGD9dprr2nz5s268cYb9bOf/Uxr19afNVqwYIF8Pp/ee+89PfHEE5oyZYreeOMN7d69u26bV199VeXl5brmmmuCHtsT4V4zAABHnegeJN/2zs4cXffGoqD257Usnd0xQwsvDe4Dc8OGDRo8eLByc3PVtWv9WwH07NlT9957r8aPH1/3tfvuu0+vv/66Vq9eraeeekr/8z//o9zcXLVp0+aY2sOGDdNpp52mp576+r4648aNU1lZmV577TVJR2ZE7rjjDt17772SpLKyMiUmJmrZsmW6+OKL9Zvf/EZLly7VJ598Uldj5syZ+v3vf6/i4mK1atXqO3+uyy67TL1799b
cuXMlHZkRKS0t1YYNG+ptd9ppp2nChAn61a9+JUm64oor1LZtW2VlZX1nXafuNcOMCADAmNzSYgV7Aq7ftpVTUhx0LwMHDtSIESPUv39/jR07Vk8//bSKi4tVVlam7du3a/LkyUpMTKx73Hfffdq+fbskKTs7W4MGDfrOECJJW7du1bBhw+p9bdiwYdq6dWu9rw0YMKDuvxMSEpScnKx9+/bV1TjrrLPqbT906NB6z/1+v+699171799fbdq0UWJiot58803l5eXV227w4MHH9DhlypS60LF3714tW7ZM119//XHHyylcRwQAYEx5TY08lnXSM2VOpqy2JuhevF6v3nrrLa1evVrLly/XvHnz9Nvf/lavvPKKJOnpp58+Jgh4vV5JUlxcXND7l6To6Oh6zy3Lqju80xAPPvig/vjHP+rRRx9V//79lZCQoOnTpx+zIDUhIeGY11533XWaOXOm3n//fa1evVqZmZk699xzm/aDNAIzIgAAYxKifXXXCglGYrTPgW6OfPAPGzZMs2fP1saNG+vWUaSnp+vLL79Uz5496z0yMzMlHZnJyM7OrlvP8W19+vTRe++9V+9r7733nvr27dvg3vr06XPMWo8PPvjgmJpXXnmlrr32Wg0cOFDdu3fX559/3qD6bdu21ejRo5WVlaX58+dr0qRJDe4tGMyIAACM6Z7SJohlqkd4LUs9W333IZHGWLNmjVasWKFRo0apffv2WrNmjQoLC9WnTx/Nnj1bP//5z5WSkqKLL75YVVVVWrdunYqLizVjxgyNHz9e999/v0aPHq05c+aoY8eO2rhxo9LT0zV06FDdfvvtGjdunAYNGqSRI0fqlVde0UsvvaS33367wf3dfPPNeuihh3T77bdrypQpWr9+vebPn19vm169emnRokVavXq1WrdurYcfflh79+5tcOCZMmWKLrvsMvn9fk2YMKExw9dkzIgAAIw5J72L0hOSgqrht21d2+f0oHtJTk7Wu+++q0svvVSnnHKK7rjjDj300EO65JJLNGXKFD3zzDPKyspS//79df7552v+/Pl1MyI+n0/Lly9X+/btdemll6p///564IEH6g7djB49Wn/84x81d+5cnXbaaXryySeVlZWl4cOHN7i/Ll266F//+peWLFmigQMH6oknntD9999fb5s77rhD3/ve93TRRRdp+PDhSktL0+jRoxu8j5EjR6pjx4666KKLlJ6e3uDXBYOzZgAAjmrMWTOS9OSmtZqz9p0mz4y0j0vQ++NvVpSHv62DdfjwYXXq1ElZWVkaM2bMCbflrBkAQFgYd0p/JUb7mnwDuxsHnEkICVIgENC+fft07733qlWrVrriiiuabd/8PwcAMKp1bJyeu+hH8siSpxEn81qSLu/eW5P7nRG65iJEXl6eOnTooIULF+q5555TVFTzLSFlsSoAwLghaZ31l0uu1pTli1Xlrz3h6bwey1LAtnV1r36ac+6oJs+k4GvdunWTqZUazIgAAFxhWHpXvTlmoq7rO0jxUUeupxFleeSxLHm/ekjS4Pbp+vOFV+jB8y5WtMdrsmU4gBkRAIBrdElupXuGjtDtZ5yrl7dv1ab9e1RaVaXYqCh1iE/U6J59dUrrdqbbhIMIIgCAkAhmqj8h2qfxvQdqvAY62BGc5NShnJAempkzZ47OPPNMJSUlqX379ho9erQ+++yzUO4SAGDY0cuUl5eXG+4EoXT0svFHr5XSVCGdEXnnnXc0depUnXnmmaqtrdVvfvMbjRo1Slu2bPnO69wDAFo+r9erVq1a1d2sLT4+XhYLSsNKIBBQYWGh4uPjgz7DplkvaFZYWKj27dvrnXfe0XnnnXfS7bmgGQC0TLZta8+ePTp48KDpVhAiHo9HmZmZ8vmOvc9PYz6/m3WNSElJiSQd9zbJVVVVqqqqqnteWlraLH0BAJxlWZY6duyo9u3bq6Ym+Dvjwn18Pp88DlxIrtmCSCAQ0PTp0zVs2DD169fvO7eZM2e
OZs+e3VwtAQBCzOv1Br2GAOGt2Q7N3HLLLVq2bJn++9//qnPnzt+5zXfNiGRkZHBoBgCAFsR1h2amTZumV199Ve++++5xQ4gkxcTEKCYmpjlaAgAALhDSIGLbtm699VYtXrxYq1atqrtdMgAAgBTiIDJ16lQtXLhQS5cuVVJSkvbs2SNJSklJUVxcXCh3DQAAWoCQrhE53nnjWVlZmjhx4klfz+m7AAC0PK5ZI2LqTn4AAKBl4O67AADAGG56B+UWFevwV/cMaKpEn0/d2rR2qCN3yyktUlltcOOVEOVTZvJ3X9gvnBRU7FNFbdXJNzyBuKgYdYpr71BH7lVcvVM1gYqgakR74tTad/wzE8NFTe2XsgOHg6pheRIVHdXdoY4QDIJIhMstKtbIp+c7UuvtGyaGfRjJKS3SiFefcKTWistuDuswUlCxTzd+eJ8jtZ46846wDiPF1Tv1ly8nOlLruu7zwzqM1NR+qT17hzlSK63De4QRF+DQTIQLdiYkVLXcKtiZkFDVcqNgZ0JCVcuNgp0JCVUtNwp2JiRUtdB0BBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRCJcos/nylpulRDl3M/oZC03iouKcWUtN4r2xLmylhtZnkRX1kLTWbZt26abOJ7S0lKlpKSopKREycnJptsJW7lFxTpcXR1UjUSfT93atHaoI3fLKS1SWW1w45UQ5VNmchuHOnKvgop9qqitCqpGXFSMOsW1d6gj9yqu3qmaQEVQNaI9cWrt6+xQR+5VU/ul7MDhoGpYnkRFR3V3qCN8W2M+v6OaqSe4WKQECKdEQoBwSiQECKdEQoBwCgEivHBoBgAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGBMSIPIu+++q8svv1zp6emyLEtLliwJ5e4AAEALE9IgUlZWpoEDB+qxxx4L5W4AAEALFRXK4pdccokuueSSUO4CAAC0YCENIo1VVVWlqqqquuelpaUGuwmNvD3FKq+sDqpGfKxPXdJaO9SRe+UeKFZZVXBjlRDjU7e24T9W36WwvEz//GKzthYV6lBVpeKio9UxIUlX9+qnvm3bm27PVQ7XVuqNXdnaVJynQ7UVivZ41caXqFEdB2hQ60xZlmW6RSMKq3aryl8RVI0Yb5xSYzo61JG7VdTkyB8oC6qG15OguOhMhzpqGVwVRObMmaPZs2ebbiNk8vYU6+qZWY7UWvTApLAOI7kHinXxn+Y7UuuNn0+MqDDy8f69enLTWr2e85lsW5IlBWxbliSP5dGzm9fr9NSOmtLvDF3W/dSI/ZCVpILyIv0t5z96tWCDqgM1smQpoK/H6qX8teqa0E7XdD1HozufqSiP13TLzaaward+/+l0R2r9uvejYR9GKmpytHHXCEdqDUpfEVFhxFVnzcyaNUslJSV1j/z8fNMtOSrYmZBQ1XKjYGdCQlXL7ZZs26Irl/5Vr+d8Lr9tKyBbAduWJNmS/HZAkrRp/x5NW/mKZv13uWoDAYMdm7OhKEc/fW+eluz8UFWBGtmSAjp2rPLK9uvBLS9r+voFKqutOn7BMBPsTEioarlVsDMhoarVErgqiMTExCg5ObneA0DDvPblZ7pt1Wvy23bdh+jxHA0nz3+2SbP++6bsr55His0H83XruudU6a8+6VjZXz3WHdiuX6xfoOpAbbP0CEQKVwURAE2zo7RYt616VY09yGJLevH
zzXr+s02haMuVKmqr9Yv1C+QPBOpmQBoiIFsfFe/Q458vD2F3QOQJaRA5fPiwsrOzlZ2dLUnKyclRdna28vLyQrlbIOL8dWu2ArbdiI/Vr1mSntz0YcTMiryxO1slNeWNCiFH2bL1r7w1Ko+gQzRAqIU0iKxbt06DBg3SoEGDJEkzZszQoEGDdNddd4Vyt0BEqayt0T8+3SR/E4OELSmntFgf7A6vNVnfxbZtPb9jdaNnjr6pMlCjN3ZlO9USEPFCetbM8OHDI+avLMCUN3K/0OGa4Bbkei1L//hsk4amd3GoK3faWlqgnMP7gqphSfpX/hqN6XKWM00BEY41IkALt6P0oKKs4H6V/bat7QeLHOrIvQrKg/8ZbYfqADiCIAK0cGU11XLiUiCHa8J/3UO535lTuSv9NY7UAUAQAVq8hGifnDgCmuSLCb6Iy8V7fY7UiXWoDgCCCNDidU9po9qTXAvjZLyWpV6t2jrUkXt1TWgXdA1Llro4UAfAEQQRoIUb1a2nkoOczfDbtn7Se6BDHbnXKcnpOiWpozxBnDdjy9bVLFQFHEMQAVq4GG+Uftp7oLxNXChiSerZqo3O6NDJ2cZc6pqu5zTpGiJHxXt9GtVxgIMdAZGNIAKEgWv7nK4oT9P+zrcl3TLgrIi5+d0POg5QG19ik2ZFLEnjug5ljQjgIIIIEAY6J6XozxdeIUmN+ni1JF3X53T9qNdpIenLjWK90frTGRMV7YlqVBjxyNKQtj11Y8+RIewOiDwEESBMjOzaU0+OHK0oj+ekh2mOfv/6foN1z9ARETMbctQpyel6YsgUJUbHnnSsrK/Cyrnte+vB712rKI+3OVoEIkZIr6wKoHld1K2XXr9qgp7bvF4vbftE1X6/vJZHAdv+KmzY8tu2hnbsouv7DdaILj1Mt2zMaa0ytHDYz/XCjve1OH+tDtdWKsryyG/bR/5Csyz57YBOTU7XNV2H6uL00+UN8sJxAI5FEGlG8bHOHVd2spYbJcQ49/M5WaslOKV1Oz1w7kWaNeR8Ld62RZ8WFepQdZXioqLVMTFJY3qepsyU1qbbdIX2sSm69dSLdWPPEVqxZ7M+OrhDh2sqFOWJUtuYRP0gbYD6pETGIt5vi/HGubKWW3k9Ca6s1RJYtotvBlNaWqqUlBSVlJQoOTnZdDuOyNtTrPLK4K7uGB/rU5e08P8gyT1QrLKq4MYqIcanbm3Df6yAUCis2q0qf0VQNWK8cUqN6ehQR+5WUZMjf6AsqBpeT4LiojMd6sicxnx+MyPSzCIhQDiFAAGYFSkBwinhECBM4IAnAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAY6JMNxAqBTsOqLy8Kqga8fEx6tS1rUMduVf+7mKVV1QHVSM+zqeMjq0d6qhl8QcC+s/WHK3c/KWKy8oVsKWU+BidfUpXjRrQSzHRYftrdlI5B4tVVh3ceyvB51Nmq8h8b31WsleLd3yk3RUlqqitUWJ0jE5Jbq8fdRuk1NhE0+0Zs6dyryr8lUHViPPGKi22g0MduVdpdZ5qA+VB1YjyxCvZ18Whjo5l2bZth6x6kEpLS5WSkqKSkhIlJyc3+HUFOw5o8pV/dKSHZ5feFtZhJH93sX5867OO1Hp+3uSICiPlVdVa+N9sLfxvtgpLy+T1WPIHjvw6Hf3vpLgYjRs6QD8773tqmxRvuOPmlXOwWCP+8pwjtVZcd33EhBHbtvVmwVbN3/aBsot2ymt5FLADsiV5ZB3ZyJIuSu+j6085R/1
bpxvtt7ntqdyrX350hyO1Hhp4X1iHkdLqPL2aN9aRWpd1+WejwkhjPr/D8k+1YGdCQlXLjYKdCQlVLbcrLD2sm55arO17DijwVZY/GkK++d+HKqo0f+U6vbxui56+6UfqkRa+ofbbgp0JCVUtN/PbAd2bvUzP56yXx7LqvnbUkTgiyZaW7/pUb+7aqvu/d4VGdx1ool0jgp0JCVUtNwp2JiRUtb6NNSJAI5WUV2rSY//Ul3u/DiEn4rdtFR0u14THXtTOAyXN0CFaItu2NXvj63ohZ70knfS95bcDCti2Zq5fqlfzNzdHi0BIEESARrrz+Te1s6ik3gzIyfgDtg5XVmnas0vk4qOhMGhx3kd6MXeDGvvusCTNXLdEOYcOhKItIOSaJYg89thj6tatm2JjY3XWWWdp7dq1zbFbwHH5+w9q1SdfNiqEHOUP2Nq+t0hrvsgPQWdoyWzb1rOfrz66AqRxr5Vky9bCLz90ui2gWYQ8iLzwwguaMWOG7r77bm3YsEEDBw7URRddpH379oV614Dj/vn+Jnk8Tfm4OMLrsfSP97KdawhhYcOBfG0/tL/RsyFH+W1bi3I3qrw2MtbSILyEPIg8/PDDuuGGGzRp0iT17dtXTzzxhOLj4/Xcc86spgeaSyBga9EHm5s0G3KUP2Br1Sdf6sCh0C38Qsvzz9wN8lrB/XNc4a/RGwVbHOoIaD4hDSLV1dVav369Ro4c+fUOPR6NHDlS77///jHbV1VVqbS0tN4DcItDlVU6VBn8WVQB29buYt7b+NqXhw7UOzumKaIsj3aWFTvUEdB8QhpE9u/fL7/frw4d6p+n3aFDB+3Zs+eY7efMmaOUlJS6R0ZGRijbAxqlvMrB01EdrIWWr6zWmcsElHFoBi2Qq86amTVrlkpKSuoe+fks6oN7xMf4HKuVGOtcLbR8idGxjtRJiOJ9hZYnpBc0a9eunbxer/bu3Vvv63v37lVaWtox28fExCgmJiaULQFNlhQbo5T4WJWUB3cRJI9lKb11ikNdIRz0TGqnzcW7gjo8U2sH1C0xci6Yh/AR0hkRn8+nwYMHa8WKFXVfCwQCWrFihYYOHRrKXQOO83gsjRs6oO6Kl03h9Vj6wYBeap0Y52BnaOnGZX4v6DUiCVE+jerUx6GOgOYT8kMzM2bM0NNPP60FCxZo69atuuWWW1RWVqZJkyaFeteA464+u39QFyTzB2xdMyxyLseNhhnQupNOSW7fpOuISJLXsjQu83uK9UY72hfQHEIeRK655hrNnTtXd911l04//XRlZ2frjTfeOGYBK9ASpLdJ1g8G9mrSrIjXY6l3eqrO6N4pBJ2hJbMsSzecOqxJ1xGxJHksj37S/Uyn2wKaRbMsVp02bZp27NihqqoqrVmzRmeddVZz7BYIif8dN0o9OrSRtxEXNvN6LLVKiNO8yVfKCuLQDsLX5Rn9NaFn4/5tPPpOevjMHykjITLuTozw46qzZoCWICHWp2dvGas+nY9MpZ8sVngsS2mtkvSXadcorVVSc7SIFurX/Udpcq8j6+e8JwmsXssjr+XRI2ddrR906t0c7QEhEdKzZoBw1ToxTvP/3zgt+uBjLfxvtvL2H1SUx1N3x1SPZak2EFC75ASNHzZQ15wzUCnxzpyiifDlsSzd3v8HOiu1m/6yba3e27f9q8OAlgK2La9lyW8HFO3x6oqMAZrQ6yz1Sm5vum0gKGEZROLjnTsF2MlabhQf59x1B5ys1RLEREfpp+cO0k++f7rWbd+pf2/eruKyCgVsWynxsTq7Vxed37e7oryROfGY4HPu/eBkrZbgvLReOi+tl/IOF2lJ3ibtLi9Rhb9GSdGxOiWlva7I6K8UX2SeeRXndS7QO1nLjaI88a6s9W2W7eJ7kpeWliolJUUlJSVKTk5u1GsLdhxQeXlwVyuMj49Rp67hf15+/u5ilVcEd0XG+DifMjpyjBr15RwsVll1cO+tBJ9Pma14b+Freyr3qsIf3PV84ry
xSosN/5MmSqvzVBsI7t5WUZ54Jfu6NG6/jfj8DssZEUkRESCcQoBAqBAgEAqRECCc0tgAYUJkzhkDAABXIIgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAmCjTDUSagpxCVZRVBVUjLiFGnTJTHerIvXYWFKm8vDqoGvHxPnXu1MahjlqWA6Vlevn9T/TFzv06VFGluJhodWidpMvP7qtTOof/+6cxDldVaenmT7Vx5y6VVFbJ5/WqXUK8ftj3VJ3ZpZMsyzLdohE5pUUqqwnudzAh2qfM5Mj4HdxZXqgKf2VQNeK8seocH1m/n5Zt27bpJo6ntLRUKSkpKikpUXJysul2glaQU6gpF85xpNYz/54V1mFkZ0GRrp38tCO1/vbsDREVRrbm7dVf3lqntzd8oYAtWZICti3LkjyWJX/AVr9uafrpiO9p1OBTIvZDVpLyig/quTXr9dKmLaqqrZVlWUfGSpLH45E/EFBmm9aacOYgjT29n6K9XtMtN5uc0iJdsPQpR2qtvPLGsA8jO8sLNWnt/Y7UyhrymxYfRhrz+c2MSDMKdiYkVLXcKNiZkFDVcrtlaz/VXQvekCT5A0f+xjj6l4ZtS/6v/u7YsmOvZj37utZ+mqdZ40coyht5R2k/zNupG19cosqa2rpxOfp3mS3JHwhIknKLijX7zX/rrc+3ad6Yy5UY4zPVcrMKdiYkVLXcKtiZkFDVagki718fIEy9tf5z/TZrmfwBuy6EHE/gqw/cJas3676/vy0XT4yGxEcFuzXpHy+p4hsh5Hjsrx7v5+brxheXqLq2tll6BCIFQQQIA/mFB/XbrGVq7EEW25Zefv8TLX5vc0j6cqPy6hrd8OIS1QYCdYGsIQK2rQ07d+nhd1aHsDsg8hBEgDCw6N1Nsm1bTZnXsCT95a11ETMr8vInW3WworJRIeSogG1r4YaPVFYd/ocagOZCEAFauMrqWr30349PejjmeGxJefsOav0XO51tzIVs29ZfPtzY6Jmjb6qsqdUrmz91rCcg0hFEgBZuZfY2lVUG9xe612Pppf987FBH7rV5z15t21/UpJmjoyxJCzd85FRLQMQjiAAtXH7hwaDPevEHbO3YV+xQR+6VV1wSdA1bUt7B4OsAOIIgArRw5VXOrFc4XBH+6x7Kq2scqVNZw5kzgFMIIkALFx/jU1DHGr6SGBf+18eI90U7Uicu2pk6AAgiQIvXtUNr1X518a2m8nosde/Y1qGO3Cuzbeuga1iSurVpFXQdAEcQRIAW7oKBPZQUFxNUDX/A1o/O7e9QR+7Vt0N79emQKk8Qp83Ykn46eKBjPQGRjiACtHC+6Cj96Nz+8jTx09WypMy0NhrYPd3hztzpujMGqYlnOkuSEnzR+mHfU51rCIhwIQsiv/vd73TOOecoPj5erVq1CtVuAEi6+ryBivJ4mnR9DNuWJo46I2JufvfDvqeqXUK8vE34eS1J155xOmtEAAeFLIhUV1dr7NixuuWWW0K1CwBfSW+brN/f8EPJOjLD0Rjjzh+oy87uG5rGXCg2OkrP/PgqRXu98jRisDyWpXMyu+jn5w4NYXdA5AlZEJk9e7Z+8YtfqH//8D/uDLjB+QN6aO6NlyvK45H3JIdpjn7/pyO+p9vHDY+Y2ZCj+nZor79eO1ZJMb6TzowcDSsX9uquP199haK93uZoEYgYUaYb+KaqqipVVX19e/vS0lKD3QAtzwWn99Q/fnut/v7vDXrtg62qqfXL6/EoYNt1MyX+gK0zTsnQTy4cpHP7dzf
bsEED09P0yg3X6a8fbtTzGz/WoaoqRR0dK0myLPkDAfXtkKqfnTlIV5zWW14Py+oAp7kqiMyZM0ezZ8823UbIxCUEd2ZDqGq5UXy8c9e0cLJWS9C9Y1vd+dMfaPpV5+q1tZ/qi4JCHa6oVqwvSmmtk/TDs/qoS/vgT2MNB2lJibr9wnP18/OGatnWz7Vh5y6VVlYp2utVamK8Lu1zqvp17GC6TSMSop37vXGyllvFeWNdWaslsOxG3HJz5syZ+v3vf3/CbbZu3arevXvXPZ8/f76mT5+ugwcPnrT+d82IZGRkqKSkRMnJyQ1t09UKcgpVUVZ18g1PIC4hRp0yUx3qyL12FhSpvDy4q33Gx/vUuVMbhzoCIktOaZHKaoL7HUyI9ikzOTJ+B3eWF6rCXxlUjThvrDrHt/x/30tLS5WSktKgz+9GzYj88pe/1MSJE0+4TffuTZ/qjYmJUUxMeP+lHwkBwikECMCsSAkQTgmHAGFCo4JIamqqUlMZaAAA4IyQrRHJy8tTUVGR8vLy5Pf7lZ2dLUnq2bOnEhMTQ7VbAADQgoQsiNx1111asGBB3fNBgwZJklauXKnhw4eHarcAAKAFadRi1ebWmMUuAADAHRrz+c1J8QAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMiTLdAMwr2L5X5Ycrg6oRnxirTj06ONSRu+3ML1JFeVVQNeLiY9Q5o41DHblX3p5ilVdWB1UjPtanLmmtHerIvXL3F6u8Osix8vnUrV34j1VOSbHKghyrBJ9PmSnhP1YtAUEkwhVs36vJZ97hSK1nP7wv7MPIzvwiTbrmz47Uynrh/4V1GMnbU6yxt2c5UuufD04K6zCSu79Ylz4635Far0+fGNZhJKekWBc8/6wjtVb+eDJhxAU4NBPhgp0JCVUttwp2JiRUtdwo2JmQUNVyo2BnQkJVy42CnQkJVS00HUEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBJEIF58Y68pabhUXH+PKWm4UH+tzZS03ivc5OFYO1nKjBAd/Pidroeks27Zt000cT2lpqVJSUlRSUqLk5GTT7YStgu17VX64Mqga8Ymx6tSjg0MdudvO/CJVlFcFVSMuPkadM9o41JF75e0pVnlldVA14mN96pLW2qGO3Ct3f7HKq4McK59P3dqF/1jllBSrLMixSvD5lJkS/mNlSmM+vwkiAADAUY35/ObQDAAAMIYgAgAAjAlZEMnNzdXkyZOVmZmpuLg49ejRQ3fffbeqgzyuBwAAwkdUqAp/+umnCgQCevLJJ9WzZ09t3rxZN9xwg8rKyjR37txQ7RYAALQgzbpY9cEHH9Tjjz+uL7/8skHbs1gVAICWpzGf3yGbEfkuJSUlatPm+KcsVlVVqarq69MiS0tLm6MtAABgSLMtVt22bZvmzZunm2666bjbzJkzRykpKXWPjIyM5moPAAAY0OggMnPmTFmWdcLHp59+Wu81BQUFuvjiizV27FjdcMMNx609a9YslZSU1D3y8/Mb/xMBAIAWo9FrRAoLC3XgwIETbtO9e3f5vrp07q5duzR8+HCdffbZmj9/vjyehmcf1ogAANDyhHSNSGpqqlJTUxu0bUFBgS644AINHjxYWVlZjQohAAAg/IVssWpBQYGGDx+url27au7cuSosLKz7XlpaWqh2CwAAWpCQBZG33npL27Zt07Zt29S5c+d633Px7W0
AAEAzCtmxkokTJ8q27e98AAAASNxrBgAAGEQQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMZEmW4AOJ6dX+xWxaGKoGrEJcWpc6+ODnXUshTvP6zlS9Yr5/M9KjtcqZhYn1LTUvSDKwep+6mROSaSlL+rWOUV1UHViI/zKSO9tUMdudeOwmKVVQY3VgmxPnVNDf+xQtMRROBKO7/YrUmn/tyRWlmf/SmiwsgXWwq0KOs/+s9bn8i2bVmSAgFblmXJ47G0+K/v6dT+nTXmumE676L+sizLdMvNJn9XsX4y9VlHai18bHJYh5EdhcW6/P75jtR65TcTCSM4LoIIXCnYmZBQ1XK7la99pAd/u+hI+PAHJEn2V9+zbVt+/5FnX3xSoDm3v6CNH2zXrXdcIW+U10zDzSzYmZBQ1XKjYGdCQlUL4YcgAoSJd9/8WL+f+WKDtg0EjgSSN19ap4A/oF/875iImhkB4B4sVgXCwK78A/rDzBelRmYJ25aWL9mgN/61LjSNAcBJEESAMPDaC2sVsO2vj8M0hiX9M+s/su2mvBgAgkMQAVq4qsoaLVv0oQL+JgYJW9qVd0Cb1uU42xgANABBBGjhVq/YovKyqqBqeL0eLVv0oUMdAUDDEUSAFm5X3gF5o4L7Vfb7A9qZu9+hjgCg4QgiQAtXUVEtq7GrVL9D+eHgZlUAoCkIIkALFxfnk92kVar1xSfGONANADQOQQRo4TpntpO/NhBUDa/Xo6492jvUEQA0HEEEaOGGXthXiUmxQdXw+wO6dOwQhzoCgIYjiAAtnM8XpUvHDpHH07R1IpYlZXRPVd/TuzjcGQCcHEEECAM/vGaIvFFeNeUq7bYtjbv+PC7xDsAIgggQBjqkt9ZvH/qxJKvRgeLyH5+lkVcMCk1jAHASBBEgTJw9vI/ufOQn8kZ55PWe+Ffb4z0SVq762TDdPPMyZkMAGMPdd4Ewcs6Ivnrsn9O05K/v6e1XslVbUyuP16NAwJbHsmRLCvgDGnhmd43+2Tk667zeplsGEOEIInCluKQ4V9ZqCbr2aK/b7rlKk2dcrH+/mq2cz/eo7HCVYmKjlZqWohGXn65OXduZbtOI+DifK2u5UUKscz+fk7UQfizbxbfcLC0tVUpKikpKSpScnGy6HTSznV/sVsWhiqBqxCXFqXOvjg51hHCQv6tY5RXVQdWIj/MpI721Qx25147CYpVVBjdWCbE+dU0N/7FCfY35/GZGBK5FgEAoREKAcAoBAs2BxaoAAMAYgggAADCGIAIAAIwhiAAAAGNcvVj16Ak9paWlhjsBAAANdfRzuyEn5ro6iBw6dEiSlJGRYbgTAADQWIcOHVJKSsoJt3H1dUQCgYB27dqlpKQkxy9BXVpaqoyMDOXn53ONkpNgrBqOsWo4xqrhGKvGYbwaLlRjZdu2Dh06pPT0dHk8J14F4uoZEY/Ho86dO4d0H8nJybxRG4ixajjGquEYq4ZjrBqH8Wq4UIzVyWZCjmKxKgAAMIYgAgAAjInYIBITE6O7775bMTExpltxPcaq4RirhmOsGo6xahzGq+HcMFauXqwKAADCW8TOiAAAAPMIIgAAwBiCCAAAMIYgAgAAjCGISLriiivUpUsXxcbGqmPHjvrZz36mXbt2mW7LdXJzczV58mRlZmYqLi5OPXr00N13363q6mrTrbnS7373O51zzjmKj49Xq1atTLfjOo899pi6deum2NhYnXXWWVq7dq3pllzn3Xff1eWXX6709HRZlqUlS5aYbsm15syZozPPPFNJSUlq3769Ro8erc8++8x0W670+OOPa8CAAXUXMRs6dKiWLVtmrB+CiKQLLrhAL774oj777DP961//0vbt23X11Vebbst1Pv3
0UwUCAT355JP65JNP9Mgjj+iJJ57Qb37zG9OtuVJ1dbXGjh2rW265xXQrrvPCCy9oxowZuvvuu7VhwwYNHDhQF110kfbt22e6NVcpKyvTwIED9dhjj5luxfXeeecdTZ06VR988IHeeust1dTUaNSoUSorKzPdmut07txZDzzwgNavX69169bpwgsv1JVXXqlPPvnETEM2jrF06VLbsiy7urradCuu94c//MHOzMw03YarZWVl2SkpKabbcJUhQ4bYU6dOrXvu9/vt9PR0e86cOQa7cjdJ9uLFi0230WLs27fPlmS/8847pltpEVq3bm0/88wzRvbNjMi3FBUV6e9//7vOOeccRUdHm27H9UpKStSmTRvTbaAFqa6u1vr16zVy5Mi6r3k8Ho0cOVLvv/++wc4QTkpKSiSJf59Owu/36/nnn1dZWZmGDh1qpAeCyFd+/etfKyEhQW3btlVeXp6WLl1quiXX27Ztm+bNm6ebbrrJdCtoQfbv3y+/368OHTrU+3qHDh20Z88eQ10hnAQCAU2fPl3Dhg1Tv379TLfjSh9//LESExMVExOjm2++WYsXL1bfvn2N9BK2QWTmzJmyLOuEj08//bRu+9tvv10bN27U8uXL5fV6dd1118mOkIvONnasJKmgoEAXX3yxxo4dqxtuuMFQ582vKWMFoHlNnTpVmzdv1vPPP2+6Fdc69dRTlZ2drTVr1uiWW27RhAkTtGXLFiO9hO0l3gsLC3XgwIETbtO9e3f5fL5jvr5z505lZGRo9erVxqaqmlNjx2rXrl0aPny4zj77bM2fP18eT9jm2WM05X01f/58TZ8+XQcPHgxxdy1DdXW14uPjtWjRIo0ePbru6xMmTNDBgweZjTwOy7K0ePHiemOGY02bNk1Lly7Vu+++q8zMTNPttBgjR45Ujx499OSTTzb7vqOafY/NJDU1VampqU16bSAQkCRVVVU52ZJrNWasCgoKdMEFF2jw4MHKysqKqBAiBfe+whE+n0+DBw/WihUr6j5UA4GAVqxYoWnTppltDi2Wbdu69dZbtXjxYq1atYoQ0kiBQMDYZ17YBpGGWrNmjT788EN9//vfV+vWrbV9+3bdeeed6tGjR0TMhjRGQUGBhg8frq5du2ru3LkqLCys+15aWprBztwpLy9PRUVFysvLk9/vV3Z2tiSpZ8+eSkxMNNucYTNmzNCECRN0xhlnaMiQIXr00UdVVlamSZMmmW7NVQ4fPqxt27bVPc/JyVF2drbatGmjLl26GOzMfaZOnaqFCxdq6dKlSkpKqltvlJKSori4OMPducusWbN0ySWXqEuXLjp06JAWLlyoVatW6c033zTTkJFzdVxk06ZN9gUXXGC3adPGjomJsbt162bffPPN9s6dO0235jpZWVm2pO984FgTJkz4zrFauXKl6dZcYd68eXaXLl1sn89nDxkyxP7ggw9Mt+Q6K1eu/M730IQJE0y35jrH+7cpKyvLdGuuc/3119tdu3a1fT6fnZqaao8YMcJevny5sX7Cdo0IAABwv8g6wA8AAFyFIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMCY/w+CptFf/M5eDgAAAABJRU5ErkJggg==", "text/plain": [ "<Figure size 640x480 with 1 Axes>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plotCompPinPow(primaryFuel, marker=\"s\", label=\"primary\")\n", "plotCompPinPow(secondaryFuel, marker=\"o\", label=\"secondary\")\n", "pyplot.legend()" ] }, { "cell_type": "markdown", "id": "eb6f7e6a", "metadata": 
{}, "source": [ "Rotate 60 degrees CCW or pi/3" ] }, { "cell_type": "code", "execution_count": 33, "id": "00afb1b7", "metadata": {}, "outputs": [], "source": [ "multiPinBlock.rotate(math.pi / 3)" ] }, { "cell_type": "code", "execution_count": 34, "id": "f89722ec", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "<matplotlib.legend.Legend at 0x1baf73611d0>" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAORRJREFUeJzt3Xl8VOXd9/HvmUkmKwkQwhIJu7KLFEURF1AeUSuKVrS0yiK4PWCleNuCtxuPVmzFqvW2rjVQ69aq4FJRFFHvioIscWFRQCALW4CQkD2ZOc8fSDSyJTlncp3JfN6v17xqJmd+55erE+ab65xzHcu2bVsAAAAG+Ew3AAAAohdBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxMaYbOJpQKKRt27apRYsWsizLdDsAAKAebNvW/v37lZGRIZ/v6HMeng4i27ZtU2Zmpuk2AABAI+Tm5qpjx45H3cbTQaRFixaSDvwgKSkphrsBAAD1UVxcrMzMzNrP8aPxdBA5eDgmJSWFIAIAQISpz2kVnKwKAACMIYgAAABjCCIAAMAYT58jAgCIXLZtq6amRsFg0HQrCIPY2Fj5/X7HdQgi8Cy7ZotklzorYiXJiuniRjueVl3znexQiaMali9ZsTHdXOrIu8qrNysYcva+8vuSlBDb1aWOvKu4Kkc1obJGvbamOqR9BdWqrvDJZ/FR0xxZlqWOHTsqOTnZUR3eHfAku2aL7N3nuVOszaJmHUaqa77Tjp1DXanVvt0nzTqMlFdv1upt57pSa2DG4mYdRoqrcvRWzpjGvdj2q0vZrWoRn6FWbZPUMiFDMf44dxuEUbZtq6CgQHl5eTr++OMdzYwQROBNTmdCwlXLg5zOhISrlhc5nQkJVy0vauxMiCTFhlor1kpVWttkxSX4FB8IKMYX72J38IL09HRt2bJF1dXVBBEAgHdY8smSZNXzcoitJXtUWlPpaJ9JMXHqnJzmqAYaxq1brxBEAADGbC3ZowsXP+pKrbfPvYkwEoG4fBcAYIzTmZBw1WqILVu2yLIsZWdnG9l/pGNGBAAABzIzM7V9+3a1adPGdCsRiRkRAAAaqaqqSn6/X+3bt1dMTNP/bV9dXd3k+3QbQQQAgO8NGzZMU6dO1dSpU5Wamqo2bdrojjvukG3bkqQuXbronnvu0bhx45SSkqLrrrvukEMzH374oSzL0rvvvquBAwcqISFB55xzjnbt2qWFCxeqd+/eSklJ0a9+9SuVlf1wddI777yjM844Qy1btlRaWpouuugibdq0qfb7B/fz8ssv6+yzz1Z8fLyeeuoppaSk6JVXXqnzcyxYsEBJSUnav39/+AfNIYIIAAA/Mm/ePMXExGj58uV65JFH9Oc//1nPPPNM7ffnzJmjAQMGaPXq1brjjjuOWOfuu+/W//zP/2jp0qXKzc3VFVdcoYcfflgvvPCC/v3vf2vRokV69NEfTtQtLS3V9OnTtWLFCi1evFg+n0+XXnqpQqFQnbozZszQzTf
frHXr1umyyy7TL3/5S2VlZdXZJisrS5dffrlatGjh0qiED+eIAADwI5mZmXrooYdkWZZ69uypr776Sg899JCuvfZaSdI555yjW265pXb7LVu2HLbOvffeq6FDDyw2OGnSJM2cOVObNm1St24HFg28/PLLtWTJEv3+97+XJP3iF7+o8/pnn31W6enpWrt2rfr161f7/LRp03TZZZfVfj158mSdfvrp2r59uzp06KBdu3bp7bff1vvvv+98MJoAMyIAAPzIaaedVmeNjCFDhmjDhg2198w5+eST61XnxBNPrP3vdu3aKTExsTaEHHxu165dtV9v2LBBY8eOVbdu3ZSSkqIuXbpIknJycurU/en+Bw8erL59+2revHmSpH/84x/q3LmzzjrrrHr1aRpBBACABkhKSqrXdrGxsbX/bVlWna8PPvfjwy6jRo3S3r179fTTT2vZsmVatmyZpAMnxB5r/5MnT9bcuXMlHTgsM3HiRNcWHAs3gggAAD9yMAAc9Nlnnzm+n8qx7NmzR998841uv/12nXvuuerdu7cKCwvr/fqrrrpKW7du1V/+8hetXbtW48ePD1uvbuMcEQAAfiQnJ0fTp0/X9ddfr1WrVunRRx/Vgw8+GNZ9tmrVSmlpaXrqqafUoUMH5eTkaMaMGQ16/WWXXaZbb71V5513njp27BjGbt3FjAgAAD8ybtw4lZeXa/DgwZoyZYpuvvlmXXfddWHdp8/n00svvaSVK1eqX79++u1vf6sHHnigQTUmTZqkqqoqXXPNNWHqMjyYEQEA4EdiY2P18MMP6/HHHz/ke4e7QqZLly6164xIB9Yi+fHXkjRhwgRNmDChznN333237r777tqvR4wYobVr19bZ5sd1frqfn8rPz1daWpouueSSI27jRQQRAAAiWFlZmbZv3677779f119/vQKBgOmWGoRDM/Amq35npTd5LQ+yfMmerOVFfp977wU3a3lRjC/RtVoWHzVh9ac//Um9evVS+/btNXPmTNPtNJhlH22ex7Di4mKlpqaqqKhIKSkppttBE7Nrtkh2qbMiVpKsmC5utONp1TXfyQ6VOKph+ZIVG9Pt2BtGuPLqzQqGnL2v/L4kJcR2dakj7yquylFNqOzYG/5EVWVIe/Jr1LlLJyXEJ8jvizvitmv3bdOYj55y0matf519nfq0zHClFo6toqJCmzdvVteuXRUfH1/new35/ObQDDwrGgKEW6IhQLglGgKEW1ICnRr1ugpVqNDarBhf/FFDCCBxaAYAYFBSjHtBxc1aaDrMiAAAjOmcnKa3z71JpTWVjuokxcSpc3KaS12hKRFEAABGESCiG0EEAOA5+yrK9cqGNfpi9w7tr6xQXEyM2iUm69IefXRSeoeIuY8Kjo0gAgDwjG8Ld+upLz/Xgk1rVRMKHbgxnG3LkuS3fJq3drV6t07XNf0G6fLj+8lHIIl4nKwKAPCExTmbdNGCv+u1jWtUHQrJlhT6foUJW1KNfeBOtd/sLdCtH7+jKYvfUEVNjbmGPeDDDz+UZVnat2+f6VYajSACADDu47zNmvzefFUFgwoeY3mr0Pf/+86WDZr6wRsKhkJH3R7eRhABABi1u7xU172/QLJtNWSFzZBsvZezSU98uTxcrUWFqqoqo/sniAAAjHr5m69UWRNUY+c1/vb1ClWHgq708sorr6h///5KSEhQWlqaRowYodLSAyvxPvPMM+rdu7fi4+PVq1cv/fWvf63z2ry8PI0dO1atW7dWUlKSTj75ZC1btqz2+48//ri6d++uQCCgnj176rnnnqvzesuy9Mwzz+jSSy9VYmKijj/+eL3xxht1tnn77bd1wgknKCEhQcOHDz/kJnx79uzR2LFjddxxxykxMVH9+/fXiy++WGebYcOGaerUqZo2bZratGmjkSNH6pprrtFFF11UZ7vq6mq1bdtWf/vb3xo1lvVFEAEAGBMMhTRv7WqFGjQXUteeinIt2rrRcS/bt2/X2LFjdc0112j
dunX68MMPddlll8m2bT3//PO688479Yc//EHr1q3TfffdpzvuuEPz5s2TJJWUlOjss89Wfn6+3njjDX3xxRf63e9+p9D3h43mz5+vm2++Wbfccou+/vprXX/99Zo4caKWLFlSp4dZs2bpiiuu0JdffqkLL7xQv/71r7V3715JUm5uri677DKNGjVK2dnZmjx5smbMmFHn9RUVFRo0aJD+/e9/6+uvv9Z1112nq6++WsuX1501mjdvngKBgD755BM98cQTmjx5st555x1t3769dpu33npLZWVluvLKKx2P7dFwrxkAgKuOdg+Sn/oob7PGvfOKo/35LUundcjUCxc6+8BctWqVBg0apC1btqhz5851vtejRw/dc889Gjt2bO1z9957r95++20tXbpUTz31lP7rv/5LW7ZsUevWrQ+pPXToUPXt21dPPfXDfXWuuOIKlZaW6t///rekAzMit99+u+655x5JUmlpqZKTk7Vw4UKdf/75uu222/T6669rzZo1tTVmzJihP/7xjyosLFTLli0P+3NddNFF6tWrl+bMmSPpwIxIcXGxVq1aVWe7vn37avz48frd734nSbr44ouVlpamrKysw9Z1614zzIgAAIzZUlwopxfgBm1bm4sKHfcyYMAAnXvuuerfv7/GjBmjp59+WoWFhSotLdWmTZs0adIkJScn1z7uvfdebdq0SZKUnZ2tgQMHHjaESNK6des0dOjQOs8NHTpU69atq/PciSeeWPvfSUlJSklJ0a5du2prnHrqqXW2HzJkSJ2vg8Gg7rnnHvXv31+tW7dWcnKy3n33XeXk5NTZbtCgQYf0OHny5NrQsXPnTi1cuFDXXHPNEcfLLawjAgAwpqy6Wj7LOuaVMsdSWlPtuBe/36/33ntPS5cu1aJFi/Too4/qv//7v/Xmm29Kkp5++ulDgoDf75ckJSQkON6/JMXGxtb52rKs2sM79fHAAw/okUce0cMPP6z+/fsrKSlJ06ZNO+SE1KSkpENeO27cOM2YMUOffvqpli5dqq5du+rMM89s3A/SAMyIAACMSYoN1K4V4kRybMCFbg588A8dOlSzZs3S6tWra8+jyMjI0HfffacePXrUeXTteuBuzieeeKKys7Nrz+f4qd69e+uTTz6p89wnn3yiPn361Lu33r17H3Kux2effXZIzUsuuURXXXWVBgwYoG7duunbb7+tV/20tDSNHj1aWVlZmjt3riZOnFjv3pxgRgQAYEy31NYOTlM9wG9Z6tHy8IdEGmLZsmVavHixzjvvPLVt21bLli1TQUGBevfurVmzZuk3v/mNUlNTdf7556uyslIrVqxQYWGhpk+frrFjx+q+++7T6NGjNXv2bHXo0EGrV69WRkaGhgwZoltvvVVXXHGFBg4cqBEjRujNN9/Ua6+9pvfff7/e/d1www168MEHdeutt2ry5MlauXKl5s6dW2eb448/Xq+88oqWLl2qVq1a6c9//rN27txZ78AzefJkXXTRRQoGgxo/fnxDhq/RmBEBABhzekYnZSS1cFQjaNu6qvdJjntJSUnRxx9/rAsvvFAnnHCCbr/9dj344IO64IILNHnyZD3zzDPKyspS//79dfbZZ2vu3Lm1MyKBQECLFi1S27ZtdeGFF6p///66//77aw/djB49Wo888ojmzJmjvn376sknn1RWVpaGDRtW7/46deqkV199VQsWLNCAAQP0xBNP6L777quzze23366f/exnGjlypIYNG6b27dtr9OjR9d7HiBEj1KFDB40cOVIZGRn1fp0TXDUDAHBVQ66akaQnv1yu2cs/avTMSNuEJH069gbF+Pjb2qmSkhIdd9xxysrK0mWXXXbUbblqBgDQLFxxQn8lxwYafQO76048hRDiUCgU0q5du3TPPfeoZcuWuvjii5ts3/w/BwAwqlV8gp4d+Qv5ZMnXgIt5LUmjuvXSpH4nh6+5KJGTk6N27drphRde0LPPPquYmKY7hZSTVQEAxg1u31F
/v+ByTV40X5XBmqNezuuzLIVsW5cf30+zzzyv0TMp+EGXLl1k6kwNZkQAAJ4wNKOz3r1sgsb1GajEmAPracRYPvksS/7vH5I0qG2G/nrOxXrgrPMV6/ObbBkuYEYEAOAZnVJa6u4h5+rWk8/UG5vW6cvdO1RcWan4mBi1S0zW6B59dEKrNqbbhIsIIgCAsHAy1Z8UG9DYXgM0VgNc7AhucutQTlgPzcyePVunnHKKWrRoobZt22r06NH65ptvwrlLAIBhB5cpLysrM9wJwungsvEH10pprLDOiHz00UeaMmWKTjnlFNXU1Oi2227Teeedp7Vr1x52nXsAQOTz+/1q2bJl7c3aEhMTZXFCabMSCoVUUFCgxMREx1fYNOmCZgUFBWrbtq0++ugjnXXWWcfcngXNACAy2batHTt2aN++faZbQZj4fD517dpVgcCh9/lpyOd3k54jUlRUJElHvE1yZWWlKisra78uLi5ukr4AAO6yLEsdOnRQ27ZtVV3t/M648J5AICCfCwvJNVkQCYVCmjZtmoYOHap+/foddpvZs2dr1qxZTdUSACDM/H6/43MI0Lw12aGZG2+8UQsXLtR//vMfdezY8bDbHG5GJDMzk0MzAABEEM8dmpk6dareeustffzxx0cMIZIUFxenuLi4pmgJAAB4QFiDiG3buummmzR//nx9+OGHtbdLBgAAkMIcRKZMmaIXXnhBr7/+ulq0aKEdO3ZIklJTU5WQkBDOXQMAgAgQ1nNEjnTdeFZWliZMmHDM13P5LgAAkccz54iYupMfAACIDNx9FwAAGMNN76CSqq2qtp3dEyLWSlRyoLNLHXlbYVWeqkPljmrE+hLUKnDkK8iai4LK7aoMOhurOH+C0uM6uNSRd+2o2KnyYIWjGgn+eLWPb+dSR96VV1bgylh1TEx3qSM4QRCJciVVW7Uod7Qrtc7LXNDsw0hhVZ7+/t0EV2qN6za3WYeRgsrt+uP6aa7U+n2vh5t1GNlRsVO3fHG7K7UeHHBvsw4jeWUFmrj8PldqZQ2+jTDiARyaiXJOZ0LCVcurnM6EhKuWFzmdCQlXLS9y+td9uGp5EWPV/BBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxCJcrFWoidreVWsL8GTtbwozu/ez+dmLS9K8Md7spYXMVbNj2Xbtm26iSMpLi5WamqqioqKlJKSYrqdZqukaquq7TJHNWKtRCUHOrvUkbcVVuWpOlTuqEasL0GtAh1d6si7Ciq3qzLobKzi/AlKj+vgUkfetaNip8qDFY5qJPjj1T6+nUsdeVdeWYErY9UxMd2ljvBTDfn8jmminuBh0RIg3BINAcIt0RAg3BINAcItBIjmhUMzAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGPCGkQ+/vhjjRo1ShkZGbIsSwsWLAjn7gAAQIQJaxApLS3VgAED9Nhjj4VzNwAAIELFhLP4BRdcoAsuuCCcuwAAABEsrEGkoSorK1VZWVn7dXFxscFuwmNHxQ5VBCsc1Yj3x6t9fHuXOvKu/PJdKq+pPPaGR5EQE6fjEtq61FFk2VO5X2/lr9KG/dtVUl2heH9A7eJT9fPjfqYTUjqYbs9TSqor9UbOl1q9N0/FVRUK+P1Ki0vShR376pQ2nWVZlukWjdhcvFel1VWOaiTFBtQ1pbVLHXnb5qJClVY5HK9AQF1TW7nUUWT
wVBCZPXu2Zs2aZbqNsNlRsUMzv5rpSq3Z/Wc36zCSX75L131+ryu1njrl9qgKI+uL8vXc5v/VBzu/Vsi2ZUkKyZYlSz7L0otbP1Hf1I76VZczNKJ9/6j9kJWk3NJCPfvtp5qfk63KYI0sy6odM5/l00ubV6prcprG9ThVl3cZqFif33TLTWZz8V4Nf/0pV2otueS6Zh9GNhcVavhLf3Ol1pJfToqqMOKpq2ZmzpypoqKi2kdubq7pllzldCYkXLW8yOlMSLhqed0727I18bPH9cHOrxW0Q7JlKyRbkmTLVtAOSZLWFeXrv794Sfetma+aUNBky8Z8vnurRi9+Uv/cskoVwRrZkkL2wbFS7VhtKdmj/5f9tq5f+qJKqqPnveR0JiRctbzK6UxIuGpFAk8Fkbi4OKWkpNR5AKif93d8pTu//KeCdqj2Q/RIDoaTN/JWaPaa+bK//wCOFl/szdOk//xD5TXVxxwr+/vHZwWbdcPSF1UVrGmSHoFo4akgAqBx8sr26M4v/qmGHmSxJb2Zv0qv560IR1ueVFZTpeuXvqiaUKg2kNVHyLa1ck+uHlr7QRi7A6JPWINISUmJsrOzlZ2dLUnavHmzsrOzlZOTE87dAlHn1ZxlatjHal3Pbf44amZF3sz9Svuqyhs1WrZsvfjdSpXWRNfUORBOYQ0iK1as0MCBAzVw4EBJ0vTp0zVw4EDdeeed4dwtEFUqgtVakPt57fkNjZFbtker9m52sStvsm1bz21c3uCZox+rCFbrzZyvXOsJiHZhvWpm2LBhUfNXFmDKhzvXqDTo7CRKv+XT/LzlGpTWzaWuvOnrfdu1cX+BoxqWpJc2r9Avuw1ypykgynGOCBDh8sr2yG85+1UO2iHllO52qSPvyi0tdFzDlpTjQh0ABxBEgAhXFqyS5ehgwwGlUXCZc5lL53ZU1FS7UgcAQQSIeIn+gOxGn6b6g6SYOBe68bbEmIArdRJiYl2pA4AgAkS8zknpx1wL41j8lk/dkpv/6rNdk9Mc17AkdXGhDoADCCJAhDu7XR+1iIl3VCNoh3Rp5qkudeRdvVu2V+/U9vI5OJRlS/pVt5PdawqIcgQRIMIFfDG6NHNwoz9cLUldktJ1YstO7jbmUVd1H+xgxRUpKSagCzv2c7EjILoRRIBm4BedTlWMz9+oKGJLGtft7Ki5+d3PM/uqTVySfI34eS1Jv+52CueIAC4iiADNQIeEVpp90lhJVoOvoLk881T9PGNgeBrzoHh/rJ4e+msFfP4GzSL5ZOn0tt10U59h4WsOiEIEEaCZOLNtb/1x4K8VY/mOua6I//vZgLGdh+qWPqOiZjbkoN4t2+vvZ45Xi9i42rE4koNhZXiHE/Q/p12pWJ+/KVoEokZYV1YF0LSGteuj54ZO1UtblurtbatVHaqRz/IpZNu1hyKCdkiDWnfXLzufrjPa9jLcsTkntj5Ob4y4Qc9tWq6XN6/U/upKxXw/VgeGylLQDql3y/a6uvtgjerU3/HCcQAORRBpQvF+Z1c2hKuWFyW4uKaFm7UiQbfkdrqt36W6qef5WrgtWxv371BJTYXi/bFqF5+qCzIGqlNSG9NtekK7hBT9V78Ruqn3ML2Tv1ar9uRqf3WFYn1+tYlL0gUd+6pfqwzTbRqRFOvOmitu1/KqpICL4+VirUhg2R6+GUxxcbFSU1NVVFSklJQU0+24YkfFDlUEKxzViPfHq318e5c68q788l0qd7jaZ0JMnI5LaP7rYwDhsLl4r0qrna1GmxQbUNeU1i515G2biwpVWuVwvAIBdU1t5VJH5jTk85sZkSYWDQHCLQQIwKxoCRBuaQ4BwgQOeAIAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCI
AAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADAmxnQD4bK1ZI9Kayod1UiKiVPn5DSXOvKuzcV7VVpT5ahGUkxAXVNau9RRZAmGQlqS+50Wbd2ovRXlsmUrNS5eZ2R01oVdeyo+ptn+mh3T5n2FKq1y+N4KBNS1ZSuXOoos3+zarde+XKPtxftVXl2t5Lg4nZDeRpcP6Kv05CTT7RmzZXehyhy+rxIDAXVp0/zfV1sLClVa4fB3MD6gzunhGyvLtm07bNUdKi4uVmpqqoqKipSSklLv120t2aMLFz/qSg9vn3tTsw4jm4v36ty3nnCl1uKLboiqMFJaXaW5a1Zp3trV2llWIr/lU9AOSZL8lqWgbSslEKerep+kSf0GqU1CdH1wbN5XqHP//qwrtRaPuyZqwoht23pn/QZlLV+l7Pzt8vsshUK2bEk+y6rdbmSv4zX5tEHq36G9uWYN2LK7UBc+PNeVWm9Pm9Csw8jWgkKNum+uK7XevG1Cg8JIQz6/m+Wfak5nQsJVy4uczoSEq5bX7Swr0biFr+jbfbsV+j7LHwwhB/77wHPFVZV68svlenXDGv3jgjE6oVUbI/2a4HQmJFy1vCwYCun/vbtEL67+sjZ0BEM//K0Y+tHfjYvWb9C76zdo9kXn6dL+fZq8V1OczoSEq5YXOZ0JCVetn+IcEaCBiiordOVbL2nDj0LI0QRtW7vLSzXmrReVs39f+BtERLJtW3e/s1gvrf5Sko753gratkK2rd+/+a7eWrO+KVoEwoIgAjTQLR8tVM7+fbWzHvURtG3tr6rUNe++Jg8fDYVB879aq5ezv1ZD3x2WpN+9+a427ykMR1tA2DVJEHnsscfUpUsXxcfH69RTT9Xy5cubYreA67YWF+r9nI0NCiEHBW1bG/bt0SfbcsLQGSKZbdt6+tMVso696aGv/f71z6/MdrkroGmEPYi8/PLLmj59uu666y6tWrVKAwYM0MiRI7Vr165w7xpw3fPrvqhzwmBD+S1Lf1+7ysWO0ByszNumTXv2Nng25KCgbetfX6xRWVW1q30BTSHsQeTPf/6zrr32Wk2cOFF9+vTRE088ocTERD37rDtn0wNNJWTbevGbLxs1G3JQ0Lb1Xs4m7S4vdbEzRLp/ZX8lv6/xAVeSyqur9c76b13qCGg6YQ0iVVVVWrlypUaMGPHDDn0+jRgxQp9++ukh21dWVqq4uLjOA/CK4soKFVc5v4oqZNvKL+G9jR98t6ewztUxjRHj8ym3sMiljoCmE9Ygsnv3bgWDQbVr167O8+3atdOOHTsO2X727NlKTU2tfWRmZoazPaBBSmvcm/YuqW7elw2iYdy6PDlaLnNG8+Kpq2ZmzpypoqKi2kdubq7ploBaybEB12q1iI1zrRYiX3KcO++tpIB771GgqYR1QbM2bdrI7/dr586ddZ7fuXOn2rc/dDXAuLg4xcXxDzS8qUUgTi3j4rWvssJRHZ9lqWOL+q8UjOavR5s0fbV9p6PDMzWhkLqkNd9VQtF8hXVGJBAIaNCgQVq8eHHtc6FQSIsXL9aQIUPCuWvAdT7L0q97nSS/w6tmLuxyglrHJ7rYGSLdlSf1d3yOSHIgoJE9j3epI6DphP3QzPTp0/X0009r3rx5WrdunW688UaVlpZq4sSJ4d414Lpf9TqxXqupHknQtnV1n4EudoTm4MSM9johvU2j1hGRDgTcKwb2V3xss7xrB5q5sAeRK6+8UnPmzNGdd96pk046SdnZ2XrnnXcOOYEViAQdW6Tqwq49G7WWiN+y1Kd1W53avmMYOkMksyxL159+SqPWEbEk+XyWfvWzE91uC2gSTXKy6tSpU7V161ZVVlZq2bJlOvXUU5tit0BY/OnM83V8y7QGHaLxW5ZaxSfo2ZGXyXJwaAfN16i+vTThlIbNlh18Jz00+ufq1Kql6z0
BTcFTV80AkSA5ENBLP79S/dq0kyUdczrdZ1nKSE7Rqxf9Sh2SWjRFi4hQM0acrcmnDZKkYwZdv8+S3+fTI5f+XOf17NEU7QFhwQFFoBFaxyfqnz8fqxe/+VLz1qzS5uJCxVg+2bJlS/LJUo0dUtuEJI3rM1Dj+gxUaly86bbhcT7L0u/OOUunds7UvM9X65Pvth44DGgdWAjPb/kUDIUU6/frkn69NWHwQB2f3sZ024AjzTKIJMW4dwmwm7W8KCnGvXUH3KwVCeJjYjSx7880oc9AfbY9V4u2btTeijKFJLWMi9cZGZ11bqfuivFF58Sjm2taRNv6GGd376qzu3dVTuE+zf9qrbYX71d5VbVaxMfphPQ2uqRfb6UmRGewTXTxveBmLS9Kinfxd9DFWj9l2R6+J3lxcbFSU1NVVFSklJSGrbuwtWSPSmucLcedFBOnzslpjmpEgs3Fe1Va42xFxqSYgLqmtHapIzQXm/cVOl7tMykQUNeWrI+BH2zZXagyh++rxEBAXdo0//fV1oJClVY4/B2MD6hzesPGqiGf381yRkRSVAQItxAgEC4ECIRDNAQItzQ0QJgQnXPGAADAEwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMCbGdAPRZsveQpVUVTmqkRwIqEvrVi515F1b9hSqtNLZWCXFBdQlrfmP1eHs3l+qBcvX6Nvtu1VSXqn4QKzat2yhS07po54Z6abb85SS8kq9vXy9vvhum/aXVSo2xq+0FokaeXJP/ez442RZlukWjcjZUaiyCme/g4nxAXVqHx2/g7nbClVW7nC8EgLKzIiO8TrIsm3bNt3EkRQXFys1NVVFRUVKSUkx3Y5jW/YWasTTc12p9f61E5p1GNmyp1Dn/2WuK7Xe+c2EqAoja/N2KmvJCr335QYd/O0O2bYsS/JZloIhW/07tde4s36mkSedELUfspKUV7BPz72/Um98ulZV1TWyfJZCIVuWJJ/Pp2AopM7tWulXwwdq9Bn9FOv3m265yeTsKNSYW7NcqfWvByY2+zCSu61Qv5ryN1dqvfDYpIgPIw35/GZGpAk5nQkJVy0vcjoTEq5aXvfvVev13y++I0kKhur+jWHbUvD7ZLImd6du/cfb+mxDjm7/xbmK8UffUdqVG/J082MLVFldUztW9sH/lRQMhSRJOTsLdf9LH+iD7I2ac/0oJcUHTLXcpJzOhISrllc5nQkJV61IEH3/+gDN1LtffKsZzy9UMGQfEkJ+KvR9IHlt2dea9a/35eGJ0bD4avN2/d+/vKaKqppjjpX9/ePzb3P1m8cWqKq6pkl6BKIFQQRoBnJ379OM5xeqoQdZbEkLPl+jV5d9HY62PKm8slq/eWyBgsFQbSCrj1DIVvambXrsjaVh7A6IPgQRoBl4eemXsm1bjZnXsCRlLVkRNbMiby9fp6LSigaFkINs29a/Pv4iKg41AE2FIAJEuIrqGr3y2VfHPMRwJLaknN37tGJTnruNeZBt23pxyWo5OT+3oqpGb3++3r2mgChHEAEi3OKvNjo+Idfvs/TKZ1+51JF3rcvZqe+275WTyR/Lkl756Av3mgKiHEEEiHA5u/cpxufsVzkYsrWloNCljrwrt6DIcQ3blvJ2O68D4ACCCBDhyiur1OCzVA+jJArOeyivrHalTkUVV84AbiGIABEuIS6gRp2l+hPJUbA+RkJcrKfqACCIABGva3or1Xy/+FZj+X2WurdLc6kj7+rSzvlqlZYldWrb0nkzACQRRICId07/7mq
REOeoRjBka8yQ/i515F09M9uqZ8d0+RwcyrJtaczZA9xrCohyBBEgwgViYjTmtP7yNfKaVEtSt7atdVKXDHcb86hfDh+oRl7pLElKjIvVyJN7utcQEOXCFkT+8Ic/6PTTT1diYqJatmwZrt0AkHTl6QMU4/c16pxVW9I155wcNTe/G3lyT6WlJMrXiGkRS9KVw05SQoBzRAC3hC2IVFVVacyYMbrxxhvDtQsA38tonaIHx/1cstTgxbp+efoAXXxyn/A05kHxgRj9z9RLFev3N2gWyWdZOrV3J90wakgYuwOiT9iCyKxZs/Tb3/5W/fs3/+POgBcM69tdD40fpRifT/5j/LXv//4D+OqzfqYZlw6LmtmQg3pmttXT08coOSFwzLE6GFbOOrGbHrzhYsX6/U3RIhA1Ykw38GOVlZWqrKys/bq4uNhgN0DkObd/D/1r+lV67n9X6c0V61RdE5Tf51PItmtnSoIhW6ccn6mrzxyos/p0M9uwQf26tNc/7xinF5es1qv/+5VKyisVU2esLAVDIfXMTNfY4QN1weBe8jtcOA7AoTwVRGbPnq1Zs2aZbiNskgPurdPgZi0vSopz7+dzs1Yk6N4+TXeP+T+aftGZemvlen27vUAl5VWKD8SofcsWGjWotzqnO7+MtTlo2zJZN196pm64aIjeW/Wtvti0TfvLKhUb41daSqLOG9RTfTq3M92mEYkurivjZi2vSkxwcbxcrBUJLLsBt9ycMWOG/vjHPx51m3Xr1qlXr161X8+dO1fTpk3Tvn37jln/cDMimZmZKioqUkpKSn3b9LQtewtVUuVsBcvkQEBdWjf/D5Itewod30MlKS6gLmnNf6yAcMjZUej4TsOJ8QF1ah8dv4O52wpVVu5wvBICysyI/PEqLi5WampqvT6/GzQjcsstt2jChAlH3aZbt8ZP9cbFxSkuztl6CF4XDQHCLQQIwKxoCRBuaQ4BwoQGBZH09HSlp6eHqxcAABBlwnaOSE5Ojvbu3aucnBwFg0FlZ2dLknr06KHk5ORw7RYAAESQsAWRO++8U/Pmzav9euDAgZKkJUuWaNiwYeHaLQAAiCANOlm1qTXkZBcAAOANDfn85qJ4AABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMbEmG4A5uXsKFRZRZWjGonxAXVq38qljrwtd3uhysodjldCQJkdmv945eXvVVmZw7FKDKjjca1d6si78nL3qrys0lGNhMQ4dcxs/mN1ODvyC7Vo/krlb92tstJKJSTGqUNma428dJAyOqWZbg9HQRCJcjk7CnX5jCxXar1y/8RmH0Zytxfqlzf9zZVaLz06qVmHkbz8vbpq0tOu1PrH365t1mEkL3evJl75V1dqZb38f6MqjHzx+Xd6Jet/9fl/vpXPZ8m2JTtky/JZsixLLz/zkQae1l2/mHCmTh56vOl2cRgEkSjndCYkXLW8yulMSLhqeZHTmZBw1fIipzMh4arlZbZt69V5/9EzD74jn98n2VIoaP/w/ZAtWwe+/uLz77T6s0266v+eo1/fcI4syzLVNg6Dc0QAABHntb9/omcefEeSFAqGjrrtwYDyj79+oOef+CDsvaFhCCIAgIiyZvVWPT1nYaNe+4+/fqAVn2xwuSM4QRABAESU1/7+ifz+xn18+fyWXpv3H5c7ghMEEQBAxNizq1hLP1ir4DEOxxxJKGhr1acbtS13j8udobEIIgCAiPHeG6vl9FRTn9+nd19b6Uo/cI4gAgCIGNty9ji+6sW2bW3P3etSR3CKIAIAiBgVZVUK2faxNzw
KO2SrrKTCpY7gFEEEABAx4hMD8jmcEbF8lpJaxLvUEZwiiAAAIkZmlzaOZ0QsScd1buNOQ3CMIAIAiBgjLv6Z43NEQratkZcNcqkjOEUQAQBEjFZtknXG/+nrYB0Rn0454wS1y2i+93mKNAQRAEBE+cW4oQqFGnd4JhQM6fIJZ7rcEZwgiAAAIkrP/pmaevuoRr120vTzNWBwN5c7ghMEEQBAxPn5Fadq6u0Xy7KsYx6mOfj9ybecr8snnNEU7aEBYkw3AABAY1x05anq0TtD85/7RP/73hrZti2fZSkUsuXzWbJtW7ak04b10qVXD1W/QV1Mt4zDIIhEucT4gCdreVVigovj5WItL0pMdHGsXKzlRQmJcZ6sFQl6nZipmQ/8UjfsLtH7b6xS3tbdKiupVEJSnDIyW2vExQPVpl2q6TZxFJZtO7wgO4yKi4uVmpqqoqIipaSkmG6n2crZUaiyiipHNRLjA+rUPjrOQs/dXqiycofjlRBQZofmP155+XtVVuZwrBID6nhca5c68q683L0qL6t0VCMhMU4dM5v/WMH7GvL5zYwIoiZAuCUaAoRboiFAuIUAgWjFyaoAAMAYgggAADAmbEFky5YtmjRpkrp27aqEhAR1795dd911l6qqnB0vBgAAzUfYzhFZv369QqGQnnzySfXo0UNff/21rr32WpWWlmrOnDnh2i0AAIggTXrVzAMPPKDHH39c3333Xb2256oZAAAij2evmikqKlLr1kc+M7yyslKVlT9cvlZcXNwUbQEAAEOa7GTVjRs36tFHH9X1119/xG1mz56t1NTU2kdmZmZTtQcAAAxocBCZMWOGLMs66mP9+vV1XpOfn6/zzz9fY8aM0bXXXnvE2jNnzlRRUVHtIzc3t+E/EQAAiBgNPkekoKBAe/bsOeo23bp1UyBwYEnmbdu2adiwYTrttNM0d+5c+Xz1zz6cIwIAQOQJ6zki6enpSk9Pr9e2+fn5Gj58uAYNGqSsrKwGhRAAAND8he1k1fz8fA0bNkydO3fWnDlzVFBQUPu99u3bh2u3AAAggoQtiLz33nvauHGjNm7cqI4dO9b5nofvswcAAJpQ2I6VTJgwQbZtH/YBAAAgca8ZAABgEEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGBNjugHgSPK37lFZWaWjGomJcTquc5pLHXlX/uYClZc6G6uEpDgd1zXdpY68K3/TTpWVVDiqkZgcr+O6t3OpI+/K27Bd5fvLHdVIaJGgjsd3cKkjNEcEEXhS/tY9mnTJI67U+tvrNzfrMJK/uUCTz5ntSq1nPpjZrMNI/qadmnTK7a7U+tvn9zbrMJK3Ybsm9vyNK7WyvvkLYQRHxKEZeJLTmZBw1fIipzMh4arlRU5nQsJVy4uczoSEqxaaH4IIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCLwpMTEOE/W8qKEJPd+PjdreVFicrwna3lRQosET9ZC82PZtm2bbuJIiouLlZqaqqKiIqWkpJhuB00sf+selZVVOqqRmBin4zqnudSRd+VvLlB5qbOxSkiK03Fd013qyLvyN+1UWUmFoxqJyfE6rns7lzryrrwN21W+v9xRjYQWCep4fAeXOkKkaMjnd0wT9QQ0WDQECLdEQ4BwSzQECLcQINAUODQDAACMIYgAAABjCCIAAMAYgggAADDG0yerHrygp7i42HAnAACgvg5+btfnwlxPB5H9+/dLkjIzMw13AgAAGmr//v1KTU096jaeXkckFApp27ZtatGihSzLcrV2cXGxMjM
zlZubyxolx8BY1R9jVX+MVf0xVg3DeNVfuMbKtm3t379fGRkZ8vmOfhaIp2dEfD6fOnbsGNZ9pKSk8EatJ8aq/hir+mOs6o+xahjGq/7CMVbHmgk5iJNVAQCAMQQRAABgTNQGkbi4ON11112Ki2veN/lyA2NVf4xV/TFW9cdYNQzjVX9eGCtPn6wKAACat6idEQEAAOYRRAAAgDEEEQAAYAxBBAAAGEMQkXTxxRerU6dOio+PV4cOHXT11Vdr27ZtptvynC1btmjSpEnq2rWrEhIS1L17d911112qqqoy3Zon/eEPf9Dpp5+uxMREtWzZ0nQ7nvPYY4+pS5cuio+P16mnnqrly5ebbslzPv74Y40aNUoZGRmyLEsLFiww3ZJnzZ49W6eccopatGihtm3bavTo0frmm29Mt+VJjz/+uE488cTaRcyGDBmihQsXGuuHICJp+PDh+uc//6lvvvlGr776qjZt2qTLL7/cdFues379eoVCIT355JNas2aNHnroIT3xxBO67bbbTLfmSVVVVRozZoxuvPFG0614zssvv6zp06frrrvu0qpVqzRgwACNHDlSu3btMt2ap5SWlmrAgAF67LHHTLfieR999JGmTJmizz77TO+9956qq6t13nnnqbS01HRrntOxY0fdf//9WrlypVasWKFzzjlHl1xyidasWWOmIRuHeP31123LsuyqqirTrXjen/70J7tr166m2/C0rKwsOzU11XQbnjJ48GB7ypQptV8Hg0E7IyPDnj17tsGuvE2SPX/+fNNtRIxdu3bZkuyPPvrIdCsRoVWrVvYzzzxjZN/MiPzE3r179fzzz+v0009XbGys6XY8r6ioSK1btzbdBiJIVVWVVq5cqREjRtQ+5/P5NGLECH366acGO0NzUlRUJEn8+3QMwWBQL730kkpLSzVkyBAjPRBEvvf73/9eSUlJSktLU05Ojl5//XXTLXnexo0b9eijj+r666833QoiyO7duxUMBtWuXbs6z7dr1047duww1BWak1AopGnTpmno0KHq16+f6XY86auvvlJycrLi4uJ0ww03aP78+erTp4+RXpptEJkxY4YsyzrqY/369bXb33rrrVq9erUWLVokv9+vcePGyY6SRWcbOlaSlJ+fr/PPP19jxozRtddea6jzpteYsQLQtKZMmaKvv/5aL730kulWPKtnz57Kzs7WsmXLdOONN2r8+PFau3atkV6a7RLvBQUF2rNnz1G36datmwKBwCHP5+XlKTMzU0uXLjU2VdWUGjpW27Zt07Bhw3Taaadp7ty58vmabZ49RGPeV3PnztW0adO0b9++MHcXGaqqqpSYmKhXXnlFo0ePrn1+/Pjx2rdvH7ORR2BZlubPn19nzHCoqVOn6vXXX9fHH3+srl27mm4nYowYMULdu3fXk08+2eT7jmnyPTaR9PR0paenN+q1oVBIklRZWelmS57VkLHKz8/X8OHDNWjQIGVlZUVVCJGcva9wQCAQ0KBBg7R48eLaD9VQKKTFixdr6tSpZptDxLJtWzfddJPmz5+vDz/8kBDSQKFQyNhnXrMNIvW1bNkyff755zrjjDPUqlUrbdq0SXfccYe6d+8eFbMhDZGfn69hw4apc+fOmjNnjgoKCmq/1759e4OdeVNOTo727t2rnJwcBYNBZWdnS5J69Oih5ORks80ZNn36dI0fP14nn3yyBg8erIcfflilpaWaOHGi6dY8paSkRBs3bqz9evPmzcrOzlbr1q3VqVMng515z5QpU/TCCy/o9ddfV4sWLWrPN0pNTVVCQoLh7rxl5syZuuCCC9SpUyft379fL7zwgj788EO9++67Zhoycq2Oh3z55Zf28OHD7datW9txcXF2ly5d7BtuuMHOy8sz3ZrnZGVl2ZIO+8Chxo8ff9ixWrJkienWPOHRRx+1O3XqZAcCAXvw4MH2Z599Zrolz1myZMlh30Pjx4833ZrnHOnfpqysLNOtec4111xjd+7c2Q4EAnZ
6erp97rnn2osWLTLWT7M9RwQAAHhfdB3gBwAAnkIQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYMz/BwKyucymvqv/AAAAAElFTkSuQmCC", "text/plain": [ "<Figure size 640x480 with 1 Axes>" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "plotCompPinPow(primaryFuel, marker=\"s\", label=\"primary\")\n", "plotCompPinPow(secondaryFuel, marker=\"o\", label=\"secondary\")\n", "pyplot.legend()" ] }, { "cell_type": "markdown", "id": "0b146c61", "metadata": {}, "source": [ "## Bringing it all together.\n", "\n", "Pin-like parameters are ordered by a pin-index, not strictly a spatial ordering. Therefore they are invariant of rotation; `Block.p.linPowByPin[i]` is the linear power for pin `i`, wherever it may be in the block. \n", "\n", "Without looking into the components, pin `i` is located at `Block.getPinLocations()[i]`. If the block is rotated, the locator `Block.getPinLocations()[i]` will indicate a new location, but it still represents pin `i`." ] } ], "metadata": { "kernelspec": { "display_name": "armi", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.13.3" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: armi/tests/zpprTest.yaml ================================================ metadata: version: uncontrolled settings: # global Tin: 20.0 Tout: 20.0 buGroups: - 100 burnSteps: 0 comment: ZPPR test case cycleLength: 365.25 loadingFile: zpprTestGeom.yaml nTasks: 12 outputFileExtension: pdf power: 75000000.0 sortReactor: false # zpprs dont sor the right way. need better component sorting for slab... 
verbosity: extra # cross section crossSectionControl: AA: geometry: 1D slab externalDriver: true useHomogenizedBlockComposition: false numInternalRings: 1 numExternalRings: 1 meshSubdivisionsPerCm: 10 AC: geometry: 1D slab externalDriver: true useHomogenizedBlockComposition: false numInternalRings: 1 numExternalRings: 1 meshSubdivisionsPerCm: 10 AZ: geometry: 1D slab externalDriver: true useHomogenizedBlockComposition: false numInternalRings: 1 numExternalRings: 1 meshSubdivisionsPerCm: 10 # neutronics epsEig: 1e-10 genXS: Neutron xsBlockRepresentation: ComponentAverage1DSlab ================================================ FILE: armi/tests/zpprTestGeom.yaml ================================================ !include 1DslabXSByCompTest.yaml systems: core: grid name: core origin: x: 0.0 y: 0.0 z: 0.0 grids: core: geom: cartesian symmetry: full grid contents: [0, 0]: D2 [1, 0]: D1 [2, 0]: D1 [3, 0]: D1 [0, 1]: D2 [1, 1]: D2 [2, 1]: D2 [3, 1]: D2 [0, 2]: D2 [1, 2]: D2 [2, 2]: D2 [3, 2]: D2 ================================================ FILE: armi/utils/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Generic ARMI utilities.""" # ruff: noqa: F405 import collections import getpass import hashlib import math import os import pickle import re import shutil import sys import time from armi import runLog from armi.utils import iterables from armi.utils.flags import Flag # noqa: F401 from armi.utils.mathematics import * # noqa: F403 # Read in file 1 MB at a time to reduce memory burden of reading entire file at once _HASH_BUFFER_SIZE = 1024 * 1024 def getFileSHA1Hash(filePath, digits=40): """ Generate a SHA-1 hash of input files. Parameters ---------- filePath : str Path to file or directory to obtain the SHA-1 hash digits : int, optional Number of digits to include in the hash (40 digit maximum for SHA-1) """ sha1 = hashlib.sha1() filesToHash = [] if os.path.isdir(filePath): for root, _, files in os.walk(filePath): for file in sorted(files): filesToHash.append(os.path.join(root, file)) else: filesToHash.append(filePath) for file in filesToHash: with open(file, "rb") as f: while True: data = f.read(_HASH_BUFFER_SIZE) if not data: break sha1.update(data) return sha1.hexdigest()[:digits] def getPowerFractions(cs): """ Return the power fractions for each cycle. Parameters ---------- cs : case settings object Returns ------- powerFractions : 2-list A list with nCycles elements, where each element is itself a list of the power fractions at each step of the cycle. Notes ----- This is stored outside of the Operator class so that it can be easily called to resolve case settings objects in other contexts (i.e. in the preparation of restart runs). 
""" if cs["cycles"] != []: return [ expandRepeatedFloats( (cycle["power fractions"]) if "power fractions" in cycle.keys() else [1] * getBurnSteps(cs)[cycleIdx] ) for (cycleIdx, cycle) in enumerate(cs["cycles"]) ] else: valuePerCycle = ( expandRepeatedFloats(cs["powerFractions"]) if cs["powerFractions"] not in [None, []] else [1.0] * cs["nCycles"] ) return [[value] * (cs["burnSteps"] if cs["burnSteps"] is not None else 0) for value in valuePerCycle] def getCycleNames(cs): """ Return the names of each cycle. If a name is omitted, it is `None`. Parameters ---------- cs : case settings object Returns ------- cycleNames : list A list of the availability factors. Notes ----- This is stored outside of the Operator class so that it can be easily called to resolve case settings objects in other contexts (i.e. in the preparation of restart runs). """ if cs["cycles"] != []: return [(cycle["name"] if "name" in cycle.keys() else None) for cycle in cs["cycles"]] else: return [None] * cs["nCycles"] def getAvailabilityFactors(cs): """ Return the availability factors for each cycle. Parameters ---------- cs : case settings object Returns ------- availabilityFactors : list A list of the availability factors. Notes ----- This is stored outside of the Operator class so that it can be easily called to resolve case settings objects in other contexts (i.e. in the preparation of restart runs). """ if cs["cycles"] != []: availabilityFactors = [] for cycle in cs["cycles"]: if "availability factor" in cycle.keys(): availabilityFactors.append(cycle["availability factor"]) else: availabilityFactors.append(1) return availabilityFactors else: return ( expandRepeatedFloats(cs["availabilityFactors"]) if cs["availabilityFactors"] not in [None, []] else ([cs["availabilityFactor"]] * cs["nCycles"] if cs["availabilityFactor"] is not None else [1]) ) def _getStepAndCycleLengths(cs): r""" Get both steps and lengths together to prevent chicken/egg problem. 
def getStepLengths(cs):
    """
    Return the length of each step in each cycle.

    Parameters
    ----------
    cs : case settings object

    Returns
    -------
    stepLengths : 2-list
        One entry per cycle; each entry is itself the list of step lengths
        (in days) within that cycle.

    Notes
    -----
    Kept outside the Operator class so it can resolve case settings objects in
    other contexts (e.g. while preparing restart runs).
    """
    return _getStepAndCycleLengths(cs)[0]


def getCycleLengths(cs):
    """
    Return the lengths of each cycle in days.

    Parameters
    ----------
    cs : case settings object

    Returns
    -------
    cycleLengths : list
        The cycle lengths in days, one entry per cycle.

    Notes
    -----
    Kept outside the Operator class so it can resolve case settings objects in
    other contexts (e.g. while preparing restart runs).
    """
    return _getStepAndCycleLengths(cs)[1]


def getBurnSteps(cs):
    """
    Return the number of burn steps for each cycle.

    Parameters
    ----------
    cs : case settings object

    Returns
    -------
    burnSteps : list
        Number of burn steps, one entry per cycle.

    Notes
    -----
    Kept outside the Operator class so it can resolve case settings objects in
    other contexts (e.g. while preparing restart runs).
    """
    return [len(cycleSteps) for cycleSteps in getStepLengths(cs)]


def hasBurnup(cs):
    """Test if depletion is being modeled.

    Parameters
    ----------
    cs : case settings object

    Returns
    -------
    bool
        Are there any burnup steps?
    """
    return any(nSteps > 0 for nSteps in getBurnSteps(cs))


def getMaxBurnSteps(cs):
    """Return the largest number of burn steps in any one cycle."""
    return max(getBurnSteps(cs))


def getCumulativeNodeNum(cycle, node, cs):
    """
    Return the cumulative node number associated with a cycle and time node.

    Note that a cycle with n time steps has n+1 nodes, and for cycle m with n
    steps, nodes (m, n+1) and (m+1, 0) are counted separately.

    Parameters
    ----------
    cycle : int
        The cycle number
    node : int
        The intra-cycle time node (0 for BOC, etc.)
    cs : Settings object
    """
    nodesBeforeThisCycle = sum(getNodesPerCycle(cs)[:cycle])
    return nodesBeforeThisCycle + node


def getCycleNodeFromCumulativeStep(timeStepNum, cs):
    """
    Return the (cycle, node) corresponding to a cumulative time step number.

    "Node" refers to the node at the start of the time step.

    Parameters
    ----------
    timeStepNum : int
        The cumulative number of time steps since the beginning
    cs : case settings object
        A case settings object to get the steps-per-cycle from

    Notes
    -----
    Time steps are the spaces between time nodes, and are 1-indexed.

    To get the (cycle, node) from a cumulative time node, see instead
    getCycleNodeFromCumulativeNode.
    """
    stepsEachCycle = getBurnSteps(cs)
    if timeStepNum < 1:
        raise ValueError("Cumulative time step cannot be less than 1.")

    runningTotal = 0
    for cycleIdx, nSteps in enumerate(stepsEachCycle):
        runningTotal += nSteps
        if timeStepNum <= runningTotal:
            # Offset within this cycle; steps are 1-indexed, nodes 0-indexed.
            return (cycleIdx, timeStepNum - (runningTotal - nSteps) - 1)

    # Past the end of the schedule: express relative to the final cycle.
    lastIdx = len(stepsEachCycle) - 1
    return (lastIdx, timeStepNum - (runningTotal - stepsEachCycle[lastIdx]) - 1)


def getCycleNodeFromCumulativeNode(timeNodeNum, cs):
    """
    Return the (cycle, node) corresponding to a cumulative time node number.

    Parameters
    ----------
    timeNodeNum : int
        The cumulative number of time nodes since the beginning
    cs : case settings object
        A case settings object to get the nodes-per-cycle from

    Notes
    -----
    Time nodes are the start/end of time steps, and are 0-indexed. For a cycle
    with n steps, there will be n+1 nodes (one at the start of the cycle and
    another at the end, plus those separating the steps). For cycle m with n
    steps, nodes (m, n+1) and (m+1, 0) are counted separately.

    To get the (cycle, node) from a cumulative time step, see instead
    getCycleNodeFromCumulativeStep.
    """
    nodesEachCycle = getNodesPerCycle(cs)
    if timeNodeNum < 0:
        raise ValueError("Cumulative time node cannot be less than 0.")

    runningTotal = 0
    for cycleIdx, nNodes in enumerate(nodesEachCycle):
        runningTotal += nNodes
        if timeNodeNum < runningTotal:
            return (cycleIdx, timeNodeNum - (runningTotal - nNodes))

    # Past the end of the schedule: express relative to the final cycle.
    lastIdx = len(nodesEachCycle) - 1
    return (lastIdx, timeNodeNum - (runningTotal - nodesEachCycle[lastIdx]))


def getNodesPerCycle(cs):
    """Return the number of nodes per cycle for the case settings object."""
    # A cycle with n burn steps has n+1 time nodes.
    return [nSteps + 1 for nSteps in getBurnSteps(cs)]


def getPreviousTimeNode(cycle, node, cs):
    """Return the (cycle, node) before the specified (cycle, node)."""
    if (cycle, node) == (0, 0):
        raise ValueError("There is no time step before (0, 0)")

    if node != 0:
        return (cycle, node - 1)

    # Node 0: wrap back to the final node of the previous cycle.
    nodesInLastCycle = getNodesPerCycle(cs)[cycle - 1]
    indexOfLastNode = nodesInLastCycle - 1  # zero based indexing for nodes
    return (cycle - 1, indexOfLastNode)


def tryPickleOnAllContents(obj, ignore=None, verbose=False):
    r"""
    Attempts to pickle all members of this object and identifies those who cannot be pickled.

    Useful for debugging MPI-bcast errors.

    Parameters
    ----------
    obj : object
        Any object to be tested.
    ignore : iterable
        list of string variable names to ignore.
    verbose : bool, optional
        Print all objects whether they fail or not
    """
    skip = set(ignore) if ignore is not None else set()
    # pickle gives better error messages than cPickle
    for name, member in obj.__dict__.items():
        if name in skip:
            continue
        if verbose:
            print(f"Checking {name}...")
        try:
            pickle.dumps(member)  # dump as a string
        except Exception:
            print(f"{name} in {obj} cannot be pickled.")
def classesInHierarchy(obj, classCounts, visited=None):
    """
    Count the number of instances of each class contained in an objects hierarchy.

    Parameters
    ----------
    obj : object
        Root of the hierarchy to walk; children are discovered via ``__dict__``.
    classCounts : collections.defaultdict
        Out-parameter mapping type -> number of instances counted so far.
    visited : set, optional
        ``id()`` values of objects already counted; prevents double counting
        and infinite recursion on reference cycles.
    """
    if not isinstance(classCounts, collections.defaultdict):
        raise TypeError("Need to pass in a default dict for classCounts (it's an out param)")

    if visited is None:
        # First call: count the root object itself before descending.
        classCounts[type(obj)] += 1
        visited = set()
        visited.add(id(obj))

    try:
        for c in obj.__dict__.values():
            if id(c) not in visited:
                classCounts[type(c)] += 1
                visited.add(id(c))
                classesInHierarchy(c, classCounts, visited=visited)
    except AttributeError:
        # Objects without a __dict__ (e.g. ints, slotted classes) are leaves.
        pass


def slantSplit(val, ratio, nodes, order="low first"):
    """
    Return a list of ``nodes`` values whose sum is equal to ``val``.

    The ratio between the highest and lowest value is equal to the specified
    ratio, and the middle values trend linearly between them.

    Parameters
    ----------
    val : float
        The total to be distributed across the returned values.
    ratio : float
        Ratio of the largest returned value to the smallest.
    nodes : int
        Number of values to return.
    order : str, optional
        "low first" (default) returns ascending values; "high first" descending.

    Returns
    -------
    list of float
    """
    val = float(val)
    ratio = float(ratio)
    nodes = int(nodes)

    # BUGFIX: a single node previously raised ZeroDivisionError (nodes - 1 == 0);
    # with one node the entire value belongs to it and the slope is meaningless.
    if nodes == 1:
        return [val]

    # Lowest value, chosen so the linear ramp from v0 to v0*ratio sums to val.
    v0 = 2.0 * val / (nodes * (1.0 + ratio))
    X = []
    for i in range(nodes):
        X.append(v0 + i * (v0 * ratio - v0) / (nodes - 1))
    if order == "high first":
        X.reverse()
    return X


def prependToList(originalList, listToPrepend):
    """
    Add a new list to the beginning of an original list.

    Parameters
    ----------
    originalList : list
        The list to prepend to. Modified in place.
    listToPrepend : list
        The list to add to the beginning of (prepend) the originalList.
        Left unchanged.

    Returns
    -------
    originalList : list
        The original list with the listToPrepend at it's beginning.
    """
    # Slice assignment inserts in place in one step; the previous implementation
    # achieved the same effect by reversing both lists, extending, and reversing back.
    originalList[:0] = listToPrepend
    return originalList


def capStrLen(s: str, length: int) -> str:
    """
    Truncates a string to a certain length.

    Adds '...' if it's too long.

    Parameters
    ----------
    s : str
        The string to cap at length l.
    length : int
        The maximum length of the string s. Must be at least 3 to leave room
        for the ellipsis.

    Raises
    ------
    ValueError
        If ``length`` is 2 or less.
    """
    if length <= 2:
        # BUGFIX: the message referenced the old parameter name "l"; also raise
        # the specific ValueError (still caught by callers expecting Exception).
        raise ValueError("length must be at least 3 in utils.capStrLen")

    if len(s) <= length:
        return s

    return s[0 : length - 3] + "..."


def list2str(strings, width=None, preStrings=None, fmt=None):
    """
    Turn a list of strings into one string, applying the specified format to each.

    Parameters
    ----------
    strings : list
        The items to create centered strings in the line for.
        Can be str, float, int, etc.
    width : int, optional
        The maximum width that the strings are allowed to take up.
        Only strings are affected by this parameter, because it does not make
        sense to truncate ints or floats.
    preStrings : list of str, optional
        Any strings that come before the centered strings.
        NOTE: this list is extended in place with the formatted strings.
    fmt : str, optional
        The format to apply to each string, such as ' >4d', '^12.4E'.
    """
    if preStrings is None:
        preStrings = []

    if fmt is None:
        fmt = ""

    newStrings = []
    for string in strings:
        # Only true strings get width-capped; numbers are formatted as-is.
        if isinstance(string, str) and width is not None:
            string = capStrLen(str(string), width)
        string = "{0:{fmt}}".format(string, fmt=fmt)
        newStrings.append(string)

    preStrings.extend(newStrings)
    return "".join(preStrings)


def createFormattedStrWithDelimiter(dataList, maxNumberOfValuesBeforeDelimiter=9, delimiter="\n"):
    r"""
    Return a formatted string with delimiters from a list of data.

    Parameters
    ----------
    dataList : list
        List of data that will be formatted into a string
    maxNumberOfValuesBeforeDelimiter : int
        maximum number of values to have before the delimiter is added
    delimiter : str
        A delimiter on the formatted string (default: "\n")

    Notes
    -----
    As an example::

        >>> createFormattedStrWithDelimiter(['hello', 'world', '1', '2', '3', '4'],
        ...     maxNumberOfValuesBeforeDelimiter=3, delimiter='\n')
        "hello, world, 1,\n2, 3, 4\n"

    (The example output was corrected: every row but the last ends with a comma,
    and only the input values appear.)
    """
    formattedString = ""
    if not dataList:
        return formattedString

    if not maxNumberOfValuesBeforeDelimiter:
        numRows = 1
    else:
        numRows = int(math.ceil(float(len(dataList)) / float(maxNumberOfValuesBeforeDelimiter))) or 1

    # Create a list of string delimiters to use when joining the strings
    commaList = ["," for d in dataList]
    commaList[-1] = ""
    dataList = [str(d) + commaList[i] for i, d in enumerate(dataList)]
    for splitList in iterables.split(dataList, n=numRows, padWith=""):
        formattedString += " ".join(splitList) + delimiter
    return formattedString
maxNumberOfValuesBeforeDelimiter=3, delimiter = '\n') "hello, world, 1, \n2, 3, \n4, 5\n" """ formattedString = "" if not dataList: return formattedString if not maxNumberOfValuesBeforeDelimiter: numRows = 1 else: numRows = int(math.ceil(float(len(dataList)) / float(maxNumberOfValuesBeforeDelimiter))) or 1 # Create a list of string delimiters to use when joining the strings commaList = ["," for d in dataList] commaList[-1] = "" dataList = [str(d) + commaList[i] for i, d in enumerate(dataList)] for splitList in iterables.split(dataList, n=numRows, padWith=""): formattedString += " ".join(splitList) + delimiter return formattedString def plotMatrix( matrix, fName, minV=None, maxV=None, show=False, title=None, xlabel=None, ylabel=None, xticks=None, yticks=None, cmap=None, figsize=None, ): """Plots a matrix.""" import matplotlib import matplotlib.pyplot as plt if figsize: plt.figure(figsize=figsize) else: plt.figure() if cmap is None: cmap = plt.cm.jet cmap.set_bad("w") try: matrix = matrix.todense() except Exception: pass if minV: norm = matplotlib.colors.Normalize(minV, maxV) else: norm = None if title is None: title = fName # or bicubic or nearest#,vmin=0, vmax=300) plt.imshow(matrix, cmap=cmap, norm=norm, interpolation="nearest") plt.colorbar() plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) if xticks: plt.xticks(*xticks, rotation=90) if yticks: plt.yticks(*yticks) plt.grid() plt.savefig(fName) if show: plt.show() plt.close() def userName() -> str: """ Return a database-friendly username. This will return the current user's username, removing any prefix like ``pre-``, if present. Notes ----- ARMI uses the user name in a number of places, namely in the database names, which cannot contain hyphens. """ return re.sub("^[a-zA-Z]-", "", getpass.getuser()) class MergeableDict(dict): """ Overrides python dictionary and implements a merge method. 
class MergeableDict(dict):
    """
    Overrides python dictionary and implements a merge method.

    Notes
    -----
    Allows multiple dictionaries to be combined in a single line
    """

    def merge(self, *otherDictionaries) -> None:
        """Update this dict with each given dict in order; later dicts win on key clashes."""
        for dictionary in otherDictionaries:
            self.update(dictionary)


def safeCopy(src: str, dst: str) -> None:
    """
    Check that copy operation is truly completed before continuing.

    Parameters
    ----------
    src : str
        Path of the file to copy.
    dst : str
        Destination path; if it is a directory, the source basename is appended.

    Notes
    -----
    Polls the destination until its size matches the source, warning and giving
    up after 5 minutes.
    """
    # Convert files to OS-independence
    src = os.path.abspath(src)
    dst = os.path.abspath(dst)
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    srcSize = os.path.getsize(src)
    if "win" in sys.platform:
        # this covers Windows ("win32") and MacOS ("darwin" contains "win")
        shutil.copyfile(src, dst)
        shutil.copymode(src, dst)
    elif "linux" in sys.platform:
        cmd = f'cp "{src}" "{dst}"'
        os.system(cmd)
    else:
        raise OSError("Cannot perform ``safeCopy`` on files because ARMI only supports Linux, MacOs, and Windows.")
    waitTime = 0.01  # 10 ms
    maxWaitTime = 300  # 5 min
    totalWaitTime = 0
    while True:
        dstSize = os.path.getsize(dst)
        if srcSize == dstSize:
            break
        time.sleep(waitTime)
        totalWaitTime += waitTime
        if totalWaitTime > maxWaitTime:
            # BUGFIX: the original message had src and dst swapped
            runLog.warning(
                f"File copy from {src} to {dst} has failed due to exceeding a maximum wait time of {maxWaitTime / 60} "
                "minutes."
            )
            return
    runLog.extra(f"Copied {src} -> {dst}")


def safeMove(src: str, dst: str) -> None:
    """
    Check that a file has been successfully moved before continuing.

    Parameters
    ----------
    src : str
        Path of the file to move.
    dst : str
        Destination path; if it is a directory, the source basename is appended.

    Returns
    -------
    str or None
        The absolute destination path on success; ``None`` if the move timed out.
    """
    # Convert files to OS-independence
    src = os.path.abspath(src)
    dst = os.path.abspath(dst)
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    srcSize = os.path.getsize(src)
    if "win" in sys.platform:
        # this covers Windows ("win32") and MacOS ("darwin" contains "win")
        shutil.move(src, dst)
    elif "linux" in sys.platform:
        cmd = f'mv "{src}" "{dst}"'
        os.system(cmd)
    else:
        raise OSError("Cannot perform ``safeMove`` on files because ARMI only supports " + "Linux, MacOS, and Windows.")
    waitTime = 0.01  # 10 ms
    # NOTE: totalWaitTime accumulates in seconds, so 6000 is 100 minutes; the
    # original comment claimed "1 min". TODO confirm intended timeout.
    maxWaitTime = 6000
    totalWaitTime = 0
    while True:
        try:
            dstSize = os.path.getsize(dst)
            if srcSize == dstSize:
                break
        except FileNotFoundError:
            # destination may not exist yet while the move is in flight
            pass
        time.sleep(waitTime)
        totalWaitTime += waitTime
        if totalWaitTime > maxWaitTime:
            # BUGFIX: the original message had src and dst swapped
            runLog.warning(
                f"File move from {src} to {dst} has failed due to exceeding a maximum wait time of {maxWaitTime / 60} "
                "minutes."
            )
            return
    runLog.extra(f"Moved {src} -> {dst}")
    return dst
This is an attempted reimplementation of AsciiMaps aiming for simplicity, though inherently this work is complex.
Also holds the size of ascii window for each value""" self._asciiMaxCol = 0 """max number of text columns in text representation""" self._asciiMaxLine = 0 """max number of text lines in text representation""" self._ijMax = 0 """max num of i+j indices (max(i) + max(j)), needed mostly for hex""" self._asciiLinesOffCorner = 0 """Number of ascii lines chopped of corners""" self.endsWithPlaceholder = False """Handling a special case where we don't want to trim a trailing placeholder from a ASCII map.""" def writeAscii(self, stream): """Write out the ascii representation.""" stream.write(self.__str__()) def __str__(self): """Build the human-readable ASCII string representing the lattice map. This method is useful for quickly printing out a lattice map. Returns ------- str : The custom ARMI ASCII-art-style string representing the map. """ # Do some basic validation if not self.asciiLines: raise ValueError("Cannot write ASCII map before ASCII lines are processed.") if len(self.asciiOffsets) != len(self.asciiLines): runLog.error(f"AsciiLines: {self.asciiLines}") runLog.error(f"Offsets: {self.asciiOffsets}") raise ValueError(f"Inconsistent lines ({len(self.asciiLines)}) and offsets ({len(self.asciiOffsets)})") # Finally, build the string representation. txt = "" fmt = f"{{val:{len(self._placeholder)}s}}" for offset, line in zip(self.asciiOffsets, self.asciiLines): data = [fmt.format(val=v) for v in line] line = self._spacer * offset + self._spacer.join(data) + "\n" txt += line return txt def readAscii(self, text): """ Read ascii representation from a stream. Update placeholder size according to largest thing read. Parameters ---------- text : str Custom string that describes the ASCII map of the core. 
""" text = text.strip().splitlines() self.endsWithPlaceholder = text[-1].rstrip().endswith(PLACEHOLDER) self.asciiLines = [] self._asciiMaxCol = 0 for li, line in enumerate(text): columns = line.split() self.asciiLines.append(columns) if len(columns) > self._asciiMaxCol: self._asciiMaxCol = len(columns) self._asciiMaxLine = li + 1 self._updateDimensionsFromAsciiLines() self._asciiLinesToIndices() self._makeOffsets() self._updateSlotSizeFromData() def _updateSlotSizeFromData(self): """After reading data, update slot size for writing.""" slotSize = max(len(v) for v in self.asciiLabelByIndices.values()) self._spacer = " " * slotSize fmt = f"{{placeholder:{slotSize}s}}" self._placeholder = fmt.format(placeholder=PLACEHOLDER) def _updateDimensionsFromAsciiLines(self): """ When converting ascii to data we need to infer the ijMax before reading the ij indices. See Also -------- _updateDimensionsFromData : used to infer this information when loading from i,j data """ raise NotImplementedError def _updateDimensionsFromData(self): """ Before converting data to ascii, inspect the data and set some map dimensions. See Also -------- _updateDimensionsFromAsciiLines : used when reading info from ascii lines """ self._ijMax = max(sum(key) for key in self.asciiLabelByIndices) @staticmethod def fromReactor(reactor): """Populate mapping from a reactor in preparation of writing out to ascii.""" raise NotImplementedError def _getLineNumsToWrite(self): """ Get order of lines to write. Most maps index from bottom to top. """ return reversed(range(self._asciiMaxLine)) def gridContentsToAscii(self): """ Convert a prepared asciiLabelByIndices to ascii lines and offsets. This is used when you have i,j/specifier data and want to create a ascii map from it as opposed to reading a ascii map from a stream. As long as the map knows how to convert lineNum and colNums into ij indices, this is universal. In some implementations, this operation is in a different method for efficiency. 
""" self._updateDimensionsFromData() self.asciiLines = [] for lineNum in self._getLineNumsToWrite(): line = [] for colNum in range(self._asciiMaxCol): ij = self._getIJFromColRow(colNum, lineNum) # convert to string and strip any whitespace in thing we're representing line.append(str(self.asciiLabelByIndices.get(ij, PLACEHOLDER)).replace(" ", "")) self.asciiLines.append(line) # clean data noDataLinesYet = True # handle all-placeholder rows newLines = [] lastLine = len(self.asciiLines) - 1 for i, line in enumerate(self.asciiLines): if re.search(f"^[{PLACEHOLDER}]+$", "".join(line)) and noDataLinesYet: continue noDataLinesYet = False newLine = self._removeTrailingPlaceholders(line) if newLine: if i == lastLine and self.endsWithPlaceholder and newLine[-1] != PLACEHOLDER: newLine.append(PLACEHOLDER) newLines.append(newLine) else: # If entire newline is wiped out, it's a full row of placeholders. That seems wrong. raise ValueError("Cannot write asciimaps with blank rows from pure data.") if not newLines: raise ValueError("No data found") self.asciiLines = newLines self._updateSlotSizeFromData() self._makeOffsets() @staticmethod def _removeTrailingPlaceholders(line): newLine = [] noDataYet = True for col in reversed(line): if col == PLACEHOLDER and noDataYet: continue noDataYet = False newLine.append(col) newLine.reverse() return newLine def _asciiLinesToIndices(self): """Convert read in ASCII lines to a asciiLabelByIndices structure.""" def _getIJFromColRow(self, columnNum: int, lineNum: int) -> tuple: """Get ij data indices from ascii map text coords.""" raise NotImplementedError def __getitem__(self, ijKey): """Get ascii item by grid i,j index.""" return self.asciiLabelByIndices[ijKey] def __setitem__(self, ijKey, item): self.asciiLabelByIndices[ijKey] = item def _makeOffsets(self): """Build offsets.""" raise NotImplementedError def items(self): return self.asciiLabelByIndices.items() def keys(self): return self.asciiLabelByIndices.keys() class 
AsciiMapCartesian(AsciiMap): """ Cartesian ascii map. Conveniently simple because offsets are always 0 i and j are equal to column, row """ def _asciiLinesToIndices(self): self.asciiLabelByIndices = {} # read from bottom to top to be consistent # with cartesian grid indexing for li, line in enumerate(reversed(self.asciiLines)): for ci, asciiLabel in enumerate(line): ij = self._getIJFromColRow(ci, li) self.asciiLabelByIndices[ij] = asciiLabel def _updateDimensionsFromData(self): AsciiMap._updateDimensionsFromData(self) self._asciiMaxCol = max(key[0] for key in self.asciiLabelByIndices) + 1 self._asciiMaxLine = max(key[1] for key in self.asciiLabelByIndices) + 1 iMin = min(key[0] for key in self.asciiLabelByIndices) jMin = min(key[1] for key in self.asciiLabelByIndices) if iMin > 0 or jMin > 0: raise ValueError( "Asciimaps only supports sets of indices that start at less than or equal to zero, got {}, {}".format( iMin, jMin ) ) def _getIJFromColRow(self, columnNum, lineNum): return columnNum, lineNum def _makeOffsets(self): """Cartesian grids have 0 offset on all lines.""" self.asciiOffsets = [] for _line in self.asciiLines: self.asciiOffsets.append(0) def _updateDimensionsFromAsciiLines(self): pass class AsciiMapHexThirdFlatsUp(AsciiMap): """ Hex ascii map for 1/3 core flats-up map. - Indices start with (0,0) in the bottom left (origin). - i increments on the 30-degree ray - j increments on the 90-degree ray In all flats-up hex maps, i increments by 2*col for each col and j decrements by col from the base. These are much more complex maps than the tips up ones because there are 2 ascii lines for every j index (jaggedly). Lines are read from the bottom of the ascii map up in this case. 
""" def _asciiLinesToIndices(self): self.asciiLabelByIndices = {} # read from bottom to top so we know that first item is at i,j = 0,0 for li, line in enumerate(reversed(self.asciiLines)): iBase, jBase = self._getIJBaseByAsciiLine(li) for ci, asciiLabel in enumerate(line): ij = self._getIJFromColAndBase(ci, iBase, jBase) self.asciiLabelByIndices[ij] = asciiLabel def _getIJBaseByAsciiLine(self, asciiLineNum): """ Get i,j base (starting point) for a row from bottom. These are the indices of the far-left item in a row as a function of line number from the bottom. These are used in the process of computing the indices of items while reading the ascii map. For 1/3 symmetric cases, the base is a constant pattern vs. row number at least until the top section where the hexagon comes off the 1/3 symmetry line. The base hexes (LHS) as a function of rows from bottom are: Row: 0 1 2 3 4 5 6 7 8 9 10 11 12 Base: (0,0), (1,0) (0,1), (1,1), (0,2), (-1,3), (0,3), (-1,4), (-2,5), (-1,5), (-2,6), (-3,7) (-2,7) Looking graphically, there are basically 3 rays going up at 120 degrees. So we can find a consistent pattern for each ray and use a modulus to figure out which ray we're on. """ if asciiLineNum == 0: return 0, 0 rayNum = (asciiLineNum - 1) % 3 indexOnRay = (asciiLineNum - 1) // 3 if rayNum == 0: # middle ray: (1,0), (0,2), (-1,4), (-2,6) return 1 - indexOnRay, 2 * indexOnRay elif rayNum == 1: # leftmost ray: (0,1), (-1,3), (-2,5), ... return -indexOnRay, 2 * indexOnRay + 1 else: # innermost ray: (1,1), (0,3), (-1,5) return 1 - indexOnRay, 2 * indexOnRay + 1 def _getIJFromColAndBase(self, columnNum, iBase, jBase): """Map ascii column and base to i,j hex indices.""" # To move n columns right, i increases by 2n, j decreases by n return iBase + 2 * columnNum, jBase - columnNum def _getIJFromColRow(self, columnNum, lineNum): """ Map ascii column and row to i,j hex indices. 
# renormalize the offsets to start at 0
# The cap is basically the distance from the (I, 0) or (0, J) loc to self._ijMax iWithData = [i for i, j in self.asciiLabelByIndices if j == 0] maxIWithData = max(iWithData) if iWithData else -1 self._asciiLinesOffCorner = (self._ijMax - maxIWithData) * 2 - 1 # in jagged systems we have to also check the neighbor nextIWithData = [i for i, j in self.asciiLabelByIndices if j == 1] nextMaxIWithData = max(nextIWithData) if nextIWithData else -1 if nextMaxIWithData == maxIWithData - 1: # the jagged edge is lopped off too. self._asciiLinesOffCorner += 1 # now that we understand how many corner positions are truncated, # we can fully determine the size of the ascii map self._asciiMaxCol = self._ijMax + 1 self._asciiMaxLine = self._ijMax * 2 + 1 - self._asciiLinesOffCorner class AsciiMapHexFullFlatsUp(AsciiMapHexThirdFlatsUp): """ Full core flats up ascii map. Notes ----- Rather than making a consistent base, we switch base angles with this one because otherwise there would be a ridiculous number of placeholders on the left. This makes this one's base computation more complex. We also allow all corners to be cut off on these, further complicating things. """ def _getIJBaseByAsciiLine(self, asciiLineNum): """ Get i,j base (starting point) for a row from bottom. Starts out in simple pattern and then shifts. Recall that there are 2 ascii lines per j index because jagged. If hex corners are omitted, we must offset the line num to get the base right (complexity!) In this orientation, we need the _ijMax to help orient us. This represents the number of ascii lines between the center of the core and the top (or bottom) """ # handle potentially-omitted corners asciiLineNum += self._asciiLinesOffCorner if asciiLineNum < self._ijMax: # goes from (0,-9), (-1,-8), (-2,7)... i, j = -asciiLineNum, -self._ijMax + asciiLineNum elif not (asciiLineNum - self._ijMax) % 2: # goes JAGGED from (-9,0), (-8, 0), (-9,2)... 
# since we allow cut-off corners, we must truncate the offsets; the number of items in the last line indicates how many need to be cut (the first line has placeholders...)
""" def _asciiLinesToIndices(self): """Read lines in from top to bottom.""" self.asciiLabelByIndices = {} for li, line in enumerate(self.asciiLines): iBase, jBase = self._getIJBaseByAsciiLine(li) for ci, asciiLabel in enumerate(line): ij = self._getIJFromColAndBase(ci, iBase, jBase) self.asciiLabelByIndices[ij] = asciiLabel self.asciiOffsets.append(li) def _getIJFromColAndBase(self, columnNum, iBase, jBase): """ Map ascii column and base to i,j hex indices. Indices simply increment from the base across the rows. """ return iBase + columnNum + jBase, -(iBase + columnNum) def _getIJFromColRow(self, columnNum, lineNum): """ Map indices from ascii. Notes ----- Not used in reading from file b/c inefficient/repeated base calc but required for writing from ij data. """ iBase, jBase = self._getIJBaseByAsciiLine(lineNum) return self._getIJFromColAndBase(columnNum, iBase, jBase) def _getIJBaseByAsciiLine(self, asciiLineNum): """ Get i,j base (starting point) for a row counting from the top. Upper left is shifted by (size-1)//2 for a 19-line grid, we have the top left as (-18,9) and then: (-17, 8), (-16, 7), ... """ shift = self._ijMax iBase = -shift * 2 + asciiLineNum jBase = shift - asciiLineNum return iBase, jBase def _updateDimensionsFromAsciiLines(self): """Update dimension metadata when reading ascii.""" # ijmax here can be inferred directly from the max number of columns in the asciimap text self._ijMax = (self._asciiMaxCol - 1) // 2 def _updateDimensionsFromData(self): """Update asciimap dimensions from data before writing ascii.""" AsciiMap._updateDimensionsFromData(self) self._asciiMaxCol = self._ijMax * 2 + 1 self._asciiMaxLine = self._ijMax * 2 + 1 def _getLineNumsToWrite(self): """ Get order of lines to write. This map indexes lines from top to bottom. 
""" return range(self._asciiMaxLine) def _makeOffsets(self): """Full hex tips-up grids have linearly incrementing offset.""" self.asciiOffsets = [] for li, _line in enumerate(self.asciiLines): self.asciiOffsets.append(li) def asciiMapFromGeomAndDomain( geomType: Union[str, geometry.GeomType], domain: Union[str, geometry.DomainType] ) -> "AsciiMap": """Get a ASCII map class from a geometry and domain type.""" from armi.reactor import geometry if ( str(geomType) == geometry.HEX_CORNERS_UP and geometry.DomainType.fromAny(domain) == geometry.DomainType.FULL_CORE ): return AsciiMapHexFullTipsUp mapFromGeom = { ( geometry.GeomType.HEX, geometry.DomainType.THIRD_CORE, ): AsciiMapHexThirdFlatsUp, (geometry.GeomType.HEX, geometry.DomainType.FULL_CORE): AsciiMapHexFullFlatsUp, (geometry.GeomType.CARTESIAN, None): AsciiMapCartesian, (geometry.GeomType.CARTESIAN, geometry.DomainType.FULL_CORE): AsciiMapCartesian, ( geometry.GeomType.CARTESIAN, geometry.DomainType.QUARTER_CORE, ): AsciiMapCartesian, } return mapFromGeom[ ( geometry.GeomType.fromAny(geomType), geometry.DomainType.fromAny(domain), ) ] ================================================ FILE: armi/utils/codeTiming.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities related to profiling code.""" import copy import functools import os import time def timed(*args): """ Decorate functions to measure how long they take. 
Examples -------- Here are some examples of using this method:: @timed # your timer will be called the module+method name def mymethod(stuff): do stuff @timed('call my timer this instead') def mymethod2(stuff) do even more stuff """ def time_decorator(func): @functools.wraps(func) def time_wrapper(*args, **kwargs): generated_name = "::".join( [ os.path.split(func.__code__.co_filename)[1], str(func.__code__.co_firstlineno), func.__code__.co_name, ] ) MasterTimer.startTimer(label or generated_name) return_value = func(*args, **kwargs) MasterTimer.endTimer(label or generated_name) return return_value return time_wrapper if len(args) == 1 and callable(args[0]): label = None return time_decorator(args[0]) elif len(args) == 1 and isinstance(args[0], str): label = args[0] return time_decorator else: raise ValueError(f"The timed decorator has been misused. Input args were {args}") class MasterTimer: """A code timing interface, this class is designed to be a singleton.""" _instance = None def __init__(self): if MasterTimer._instance is not None: raise RuntimeError( "{} is a pseudo singleton, do not attempt to make more than one.".format(self.__class__.__name__) ) MasterTimer._instance = self self.timers = {} self.start_time = time.time() self.end_time = None @staticmethod def getMasterTimer(): """Primary method that users need get access to the MasterTimer singleton.""" if MasterTimer._instance is None: MasterTimer() return MasterTimer._instance @staticmethod def getTimer(eventName): """Return a timer with no special action take. ``with timer: ...`` friendly! """ master = MasterTimer.getMasterTimer() if eventName in master.timers: timer = master.timers[eventName] else: timer = _Timer(eventName, False) master.timers[eventName] = timer return timer @staticmethod def startTimer(eventName): """Return a timer with a start call, or a newly made started timer. ``with timer: ...`` unfriendly! 
""" master = MasterTimer.getMasterTimer() if eventName in master.timers: timer = master.timers[eventName] timer.start() else: timer = _Timer(eventName, True) master.timers[eventName] = timer return timer @staticmethod def endTimer(eventName): """Return a timer with a stop call, or a newly made unstarted timer. ``with timer: ...`` unfriendly! """ master = MasterTimer.getMasterTimer() if eventName in master.timers: timer = master.timers[eventName] timer.stop() else: timer = _Timer(eventName, False) master.timers[eventName] = timer return timer @staticmethod def time(): """System time offset by when this master timer was initialized.""" master = MasterTimer.getMasterTimer() if master.end_time: return master.end_time - master.start_time else: return time.time() - master.start_time @staticmethod def startAll(): """Starts all timers, won't work after a stopAll command.""" master = MasterTimer.getMasterTimer() for timer in master.timers.values(): timer.start() @staticmethod def stopAll(): """Kills the timer run, can't easily be restarted.""" master = MasterTimer.getMasterTimer() for timer in master.timers.values(): timer.overStart = 0 # deal with what recursion may have caused timer.stop() _Timer._frozen = True master.end_time = time.time() @staticmethod def getActiveTimers(): """Get all the timers for processes that are still active.""" master = MasterTimer.getMasterTimer() return [t for t in master.timers.values() if t.isActive] def __str__(self): t = self.time() return "{:55s} {:>14.2f} {:>14.2f} {:11}".format("TOTAL TIME", t, t, 1) @staticmethod def report(inclusionCutoff=0.1, totalTime=False): """ Write a string report of the timers. This report prints a table that looks something like this: TIMER REPORTS CUMULATIVE (s) AVERAGE (s) NUM ITERS thing1 0.01 0.01 1 thing2 0.01 0.01 1 TOTAL TIME 0.02 0.02 1 Parameters ---------- inclusionCutoff : float, optional Will not show results that have less than this fraction of the total time. 
totalTime : bool, optional Use the ratio of total time or time since last report to compare against the cutoff. See Also -------- armi.utils.codeTiming._Timer.__str__ : prints out the results for each individual line item Returns ------- str : Plain-text table report on the timers. """ master = MasterTimer.getMasterTimer() table = [ "{:55s} {:^15} {:^15} {:9}".format( "TIMER REPORTS", "CUMULATIVE (s)", "AVERAGE (s)", "NUM ITERS".rjust(9, " "), ) ] for timer in sorted(master.timers.values(), key=lambda x: x.time): if totalTime: timeRatio = timer.time / master.time() else: timeRatio = timer.timeSinceReport / master.time() if timeRatio < inclusionCutoff: continue table.append(str(timer)) # add the total time as the last row table.append(str(master)) return "\n".join(table) @staticmethod def timeline(baseFileName, inclusionCutoff=0.1, totalTime=False): """Produces a timeline graphic of the timers. Parameters ---------- baseFileName : str Whatever the leading file path should be. This method generates the same file extension for every image to add to the base. inclusionCutoff : float, optional Will not show results that have less than this fraction of the total time. totalTime : bool, optional Use the ratio of total time or time since last report to compare against the cutoff. Returns ------- str : Path to the saved plot file. 
""" import matplotlib.pyplot as plt import numpy as np # initial set up master = MasterTimer.getMasterTimer() curTime = master.time() color_map = plt.cm.jet colors = [] names = [] xStarts = [] xStops = [] yLevel = 0 # height of the timelines yValues = [] # list of heights # plot content gather for timer in sorted(master.timers.values(), key=lambda x: x.name): if totalTime: timeRatio = timer.time / master.time() else: timeRatio = timer.timeSinceReport / master.time() if timeRatio < inclusionCutoff: continue yLevel += 1 names.append(timer.name) for timePair in timer.times: colors.append(color_map(timeRatio)) xStarts.append(timePair[0]) xStops.append(timePair[1]) yValues.append(yLevel) # plot set up: might not be necessary to scale the width with the height like this plt.figure(figsize=(3 + len(master.timers.values()), (3 + len(master.timers.values())))) plt.axis([0.0, curTime, 0.0, yLevel + 1]) plt.xlabel("Time (s)") plt.yticks(np.arange(yLevel + 1), [""] + names) _loc, labels = plt.yticks() for tick in labels: tick.set_fontsize(40) plt.tight_layout() # plot content draw plt.hlines(yValues, xStarts, xStops, colors) def flatMerge(l1, l2=None): # duplicate a list flatly or merge them flatly (no tuples compared to zip) return [item for sublist in zip(l1, l2 or l1) for item in sublist] ymin = [y - 0.3 for y in yValues] ymax = [y + 0.3 for y in yValues] plt.vlines( flatMerge(xStarts, xStops), flatMerge(ymin), flatMerge(ymax), flatMerge(colors), ) # save and close filename = f"{baseFileName}.code-timeline.png" plt.savefig(filename) plt.close() return os.path.join(os.getcwd(), filename) class _Timer: """Code timer to call at various points to measure performance. See Also -------- MasterTimer.getTimer() for construction """ # If the master timer stops, all timers must freeze with no thaw. _frozen = False def __init__(self, name, start): self.name = name self._active = False self._times = [] # [(start, end), (start, end)...] 
        self.overStart = 0  # necessary for recursion tracking
        self.reportedTotal = 0.0  # time elapsed since last asked to report time in __str__
        if start:
            self.start()

    def __repr__(self):
        return "<{} name:'{}' num iterations:{} time:{}>".format(
            self.__class__.__name__, self.name, self.numIterations, self.time
        )

    def __str__(self):
        # fixed-width row whose columns line up with the header built in MasterTimer.report()
        s = "{:55s} {:>14.2f} {:>14.2f} {:11}".format(
            self.name[:55],
            self.time,
            self.time / (self.numIterations + 1),
            self.numIterations + 1,
        )
        # needs to come after str generation because it resets the timeSinceReport
        self.reportedTotal = self.time
        return s

    def __enter__(self):
        # NOTE(review): no return value means `with timer as t:` binds t to None;
        # consider returning self -- confirm no caller relies on current behavior.
        self.start()

    def __exit__(self, *args, **kwargs):
        self.stop()

    @property
    def isActive(self):
        """Return True if the code for this timer still running."""
        return self._active

    @property
    def numIterations(self):
        """If this number seems high, remember .start() twice in a row adds an iteration to numIterations."""
        # a single start/stop pair counts as zero *additional* iterations
        return len(self._times) - 1 if self._times else 0

    @property
    def time(self):
        """Total time value."""
        # goes through the `times` property so an active timer is counted up to "now"
        return sum([t[1] - t[0] for t in self.times])

    @property
    def timeSinceReport(self):
        """The elapsed time since this timer was asked to report itself."""
        return self.time - self.reportedTotal

    @property
    def times(self):
        """List of time start / stop pairs, if active the current time is used as the last stop."""
        if self.isActive:
            # deep copy so synthesizing a stop for the open pair doesn't mutate the record
            times = copy.deepcopy(self._times)
            times[-1] = (self._times[-1][0], MasterTimer.time())
            return times
        else:
            return self._times

    def _openTimePair(self, curTime):
        # a pair with a None end marks this timer as mid-measurement
        self._times.append((curTime, None))

    def _closeTimePair(self, curTime):
        self._times[-1] = (self._times[-1][0], curTime)

    def start(self):
        """Start this Timer.

        Returns
        -------
        float : Time stamp for the current time / start time.
        """
        curTime = MasterTimer.time()

        if self._frozen:
            # the master timer has ended: all timers freeze with no thaw
            return curTime
        elif self.isActive:
            # call was made on an active timer, we're now over-started
            self.overStart += 1
            self._closeTimePair(curTime)

        self._active = True
        self._openTimePair(curTime)
        return curTime

    def stop(self):
        """Stop this Timer.

        Returns
        -------
        float : Time stamp for the current time / stop time.
        """
        curTime = MasterTimer.time()

        if self._frozen:
            return curTime
        if self.overStart:
            # can't end the timer as it's over-started
            self.overStart -= 1
        elif self.isActive:
            self._active = False
            self._closeTimePair(curTime)
        return curTime
""" from inspect import getframeinfo, stack from armi import runLog def info(func): """Decorator to write to current log, using the info method.""" def decorated(*args, **kwargs): r"""Decorated method.""" runLog.info(func(*args, **kwargs)) return decorated def important(func): """Decorator to write to current log, using the important method.""" def decorated(*args, **kwargs): """Decorated method.""" runLog.important(func(*args, **kwargs)) return decorated def warn(func): """Decorates a method to produce a repeatable warning message.""" def decorated(*args, **kwargs): """Decorated method.""" runLog.warning(func(*args, **kwargs)) return decorated def _message_when_root(func): """Do not use this decorator.""" def decorated(*args, **kwargs): from armi import MPI_RANK if MPI_RANK == 0: func(*args, **kwargs) return decorated def warn_when_root(func): """Decorates a method to produce a warning message only on the root node.""" return _message_when_root(warn(func)) # --------------------------------------------------- class InputError(Exception): """An error found in an ARMI input file.""" def __init__(self, msg): self.msg = msg self.caller = getframeinfo(stack()[1][0]) def __str__(self): # Check if the call site is sensible enough to warrant printing. # In the past, we assumed cython would wrap the fake stack filename in <> callSiteIsFake = self.caller.filename.startswith("<") and self.caller.filename.endswith(">") if callSiteIsFake: return self.msg else: return self.caller.filename + ":" + str(self.caller.lineno) + " - " + self.msg # --------------------------------------------------- class SettingException(Exception): """Standardize behavior of setting-family errors.""" def __init__(self, msg): Exception.__init__(self, msg) class InvalidSettingsStopProcess(SettingException): """ Exception raised when setting file contains invalid settings and user aborts or process is uninteractive. 
""" def __init__(self, reader): msg = "Input settings file {}".format(reader.inputPath) if reader.liveVersion != reader.inputVersion: msg += ( '\n\twas made with version "{0}" which differs from the current version "{1}." ' 'Either create the input file with the "{1}", or switch to a development version ' "of ARMI.".format(reader.inputVersion, reader.liveVersion) ) if reader.invalidSettings: msg += "\n\tcontains the following {} invalid settings:\n\t\t{}".format( len(reader.invalidSettings), "\n\t\t".join(reader.invalidSettings) ) SettingException.__init__(self, msg) class NonexistentSetting(SettingException): """Exception raised when a non existent setting is asked for.""" def __init__(self, setting): SettingException.__init__(self, "Attempted to locate non-existent setting {}.".format(setting)) class InvalidSettingsFileError(SettingException): """Not a valid settings file.""" def __init__(self, path, customMsgEnd=""): msg = "Attempted to load an invalid settings file from: {}. ".format(path) msg += customMsgEnd SettingException.__init__(self, msg) class NonexistentSettingsFileError(SettingException): """Settings file does not exist.""" def __init__(self, path): SettingException.__init__(self, "Attempted to load settings file, cannot locate file: {}".format(path)) ================================================ FILE: armi/utils/densityTools.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Assorted utilities to help with basic density calculations.""" from typing import Dict, List, Tuple, Union import numpy as np from armi import runLog from armi.nucDirectory import elements, nucDir, nuclideBases from armi.utils import units def getNDensFromMasses(rho, massFracs, normalize=False): """ Convert density (g/cc) and massFracs vector into a number densities vector (#/bn-cm). .. impl:: Number densities are retrievable from masses. :id: I_ARMI_UTIL_MASS2N_DENS :implements: R_ARMI_UTIL_MASS2N_DENS Loops over all provided nuclides (given as keys in the ``massFracs`` vector) and calculates number densities of each, at a given material ``density``. Mass fractions can be provided either as normalized to 1, or as unnormalized with subsequent normalization calling ``normalizeNuclideList`` via the ``normalize`` flag. Parameters ---------- rho : float density in (g/cc) massFracs : dict vector of mass fractions -- normalized to 1 -- keyed by their nuclide name Returns ------- nuclides : np.ndarray[np.bytes_] vector of nuclide names as byte strings numberDensities : np.ndarray[np.float64] vector of number densities (#/bn-cm) for each nuclide in nuclides """ if normalize: massFracs = normalizeNuclideList(massFracs, normalization=normalize) nuclides = [] numberDensities = [] rho = rho * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM for nucName, massFrac in massFracs.items(): atomicWeight = nuclideBases.byName[nucName].weight nuclides.append(nucName.encode()) numberDensities.append(massFrac * rho / atomicWeight) return np.array(nuclides), np.array(numberDensities) def getMassFractions(numberDensities): """ Convert number densities (#/bn-cm) into mass fractions. 
Parameters ---------- numberDensities : dict number densities (#/bn-cm) keyed by their nuclide name Returns ------- massFracs : dict mass fractions -- normalized to 1 -- keyed by their nuclide name """ nucMassFracs = {} totalWeight = 0.0 for nucName, numDensity in numberDensities.items(): weightI = numDensity * nucDir.getAtomicWeight(nucName) nucMassFracs[nucName] = weightI # will be normalized at end totalWeight += weightI if totalWeight != 0: for nucName in numberDensities: nucMassFracs[nucName] /= totalWeight else: for nucName in numberDensities: nucMassFracs[nucName] = 0.0 return nucMassFracs def calculateMassDensity(numberDensities): """ Calculates the mass density. Parameters ---------- numberDensities : dict vector of number densities (atom/bn-cm) indexed by nuclides names Returns ------- rho : float density in (g/cc) """ rho = 0 for nucName, nDensity in numberDensities.items(): atomicWeight = nuclideBases.byName[nucName].weight rho += nDensity * atomicWeight / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM return rho def calculateNumberDensity(nucName, mass, volume): """ Calculates the number density. Parameters ---------- mass : float volume : volume nucName : armi nuclide name -- e.g. 'U235' Returns ------- number density : float number density (#/bn-cm) See Also -------- armi.reactor.blocks.Block.setMass """ A = nucDir.getAtomicWeight(nucName) try: return units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * mass / (volume * A) except ZeroDivisionError: if mass == 0 and volume == 0: return 0 raise ValueError( "Could not calculate number density with input.\nmass : {}\nvolume : {}\natomic weight : {}\n".format( mass, volume, A ) ) def getMassInGrams(nucName: str, volume: float, numberDensity: Union[float, None] = None) -> float: """ Gets mass of a nuclide of a known volume and know number density. Parameters ---------- nucName name of nuclide -- e.g. 
def getMassInGrams(nucName: str, volume: float, numberDensity: Union[float, None] = None) -> float:
    """
    Gets mass of a nuclide of a known volume and known number density.

    Parameters
    ----------
    nucName
        name of nuclide -- e.g. 'U235'
    volume
        volume in (cm3)
    numberDensity
        number density in (at/bn-cm)

    Returns
    -------
    mass
        mass of nuclide (g)
    """
    # None and 0.0 both mean "no material present"; skip the atomic-weight lookup
    if not numberDensity:
        return 0.0
    A = nucDir.getAtomicWeight(nucName)
    return numberDensity * volume * A / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM


def formatMaterialCard(
    densities,
    matNum=0,
    minDens=1e-15,
    sigFigs=8,
    mcnp6Compatible=False,
    mcnpLibrary=None,
):
    """
    Formats nuclides and densities into a MCNP material card.

    .. impl:: Create MCNP material card.
        :id: I_ARMI_UTIL_MCNP_MAT_CARD
        :implements: R_ARMI_UTIL_MCNP_MAT_CARD

        Loops over a vector of nuclides (of type ``nuclideBase``) provided in ``densities``
        and formats them into a list of strings consistent with MCNP material card syntax,
        skipping dummy nuclides and LFPs.

        A ``matNum`` may optionally be provided for the created material card: if not
        provided, it is left blank. The desired number of significant figures for the created
        card can be optionally provided by ``sigFigs``. Nuclides whose number density falls
        below a threshold (optionally specified by ``minDens``) are set to the threshold
        value.

        The boolean ``mcnp6Compatible`` may optionally be provided to include the nuclide
        library at the end of the vector of individual nuclides using the "nlib=" syntax
        leveraged by MCNP. If this boolean is turned on, the associated value
        ``mcnpLibrary`` should generally also be provided, as otherwise, the library will be
        left blank in the resulting material card string.

    Parameters
    ----------
    densities : dict
        number densities indexed by nuclideBase
    matNum : int
        mcnp material number
    minDens : float
        minimum density
    sigFigs : int
        significant figures for the material card
    mcnp6Compatible : bool, optional
        if True, append an " nlib=" entry naming ``mcnpLibrary``
    mcnpLibrary : str, optional
        MCNP cross-section library identifier; used only with ``mcnp6Compatible``

    Returns
    -------
    mCard : list
        list of material card strings
    """
    if all(isinstance(nuc, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase)) for nuc in densities):
        return []  # no valid nuclides to write

    if matNum >= 0:
        mCard = ["m{matNum}\n".format(matNum=matNum)]
    else:
        # NOTE(review): a negative matNum emits the literal placeholder "m{}" — presumably
        # to be filled in later by the caller; confirm this is intended.
        mCard = ["m{}\n"]
    for nuc, dens in sorted(densities.items()):
        # skip LFPs and Dummies.
        if isinstance(nuc, (nuclideBases.LumpNuclideBase)):
            runLog.important("The material card returned will ignore LFPs.", single=True)
            continue
        elif isinstance(nuc, nuclideBases.DummyNuclideBase):
            runLog.info("Omitting dummy nuclides such as {}".format(nuc), single=True)
            continue
        mcnpNucName = nuc.getMcnpId()
        newEntry = (" {nucName:5d} {ndens:." + str(sigFigs) + "e}\n").format(
            nucName=int(mcnpNucName), ndens=max(dens, minDens)
        )  # 0 dens is invalid
        mCard.append(newEntry)
    if mcnp6Compatible:
        mCard.append(" nlib={lib}c\n".format(lib=mcnpLibrary))
    return mCard


def filterNuclideList(nuclideVector, nuclides):
    """
    Filter out nuclides not in the nuclide vector.

    Parameters
    ----------
    nuclideVector : dict
        dictionary of values indexed by nuclide identifiers -- e.g. nucNames or nuclideBases
    nuclides : list
        list of nuclide identifiers

    Returns
    -------
    nuclideVector : dict
        the same dictionary, mutated in place, keeping only keys present in ``nuclides``

    Raises
    ------
    ValueError
        if the keys of ``nuclideVector`` are a different type than the ``nuclides`` entries
    """
    # nothing to type-check or filter in an empty vector
    if not nuclideVector:
        return nuclideVector

    # Bug fix: dict.keys() is not subscriptable in Python 3, so the original error path
    # (`nuclideVector.keys()[0]`) raised TypeError instead of the intended ValueError.
    firstKey = next(iter(nuclideVector))
    if not isinstance(firstKey, nuclides[0].__class__):
        raise ValueError(
            "nuclide vector is indexed by {} where as the nuclides list is {}".format(
                firstKey.__class__, nuclides[0].__class__
            )
        )

    # materialize the keys so we can delete while iterating
    for nucName in list(nuclideVector.keys()):
        if nucName not in nuclides:
            del nuclideVector[nucName]

    return nuclideVector


def normalizeNuclideList(nuclideVector, normalization=1.0):
    """
    Normalize the nuclide vector so its values sum to ``normalization``.

    Parameters
    ----------
    nuclideVector : dict
        dictionary of values -- e.g. floats, ints -- indexed by nuclide identifiers -- e.g.
        nucNames or nuclideBases
    normalization : float
        desired sum of the returned values (default 1.0)

    Returns
    -------
    nuclideVector : dict
        the same dictionary, mutated in place, with normalized values
    """
    # note: values summing to zero will raise ZeroDivisionError below
    normalizationFactor = sum(nuclideVector.values()) / normalization

    for nucName, mFrac in nuclideVector.items():
        nuclideVector[nucName] = mFrac / normalizationFactor

    return nuclideVector
def expandElementalMassFracsToNuclides(
    massFracs: dict,
    elementExpansionPairs: Tuple[elements.Element, List[nuclideBases.NuclideBase]],
):
    """
    Expand elemental mass fractions to natural nuclides.

    Modifies the input ``massFracs`` in place to contain nuclides.

    Notes
    -----
    This indirectly updates number densities through mass fractions.

    .. impl:: Expand mass fractions to nuclides.
        :id: I_ARMI_UTIL_EXP_MASS_FRACS
        :implements: R_ARMI_UTIL_EXP_MASS_FRACS

        Given a vector of elements and nuclides with associated mass fractions
        (``massFracs``), expands the elements in-place into a set of nuclides using
        ``expandElementalNuclideMassFracs``. Isotopes to expand into are provided for each
        element by specifying them with ``elementExpansionPairs``, which maps each element
        to a list of particular NuclideBases; if left unspecified, all naturally-occurring
        isotopes are included.

        Explicitly specifying the expansion isotopes provides a way for particular
        naturally-occurring isotopes to be excluded from the expansion, e.g. excluding O-18
        from an expansion of elemental oxygen.

    Parameters
    ----------
    massFracs : dict(str, float)
        dictionary of nuclide or element names with mass fractions. Elements will be
        expanded in place using natural isotopics.
    elementExpansionPairs : (Element, [NuclideBase])
        pairs element objects to expand (from nuclidBase.element) and list of NuclideBases
        to expand into (or None for all natural)
    """
    for element, isotopicSubset in elementExpansionPairs:
        elementMassFrac = massFracs.pop(element.symbol, None)
        if elementMassFrac is None:
            # this element is not in the composition; nothing to expand
            continue

        isotopeFracs = expandElementalNuclideMassFracs(element, elementMassFrac, isotopicSubset)
        massFracs.update(isotopeFracs)

        # sanity check: the expansion must conserve the element's total mass fraction
        expandedTotal = sum(isotopeFracs.values())
        if elementMassFrac > 0.0 and abs(expandedTotal - elementMassFrac) / elementMassFrac > 1e-6:
            raise ValueError("Mass fractions not normalized properly {}!".format((expandedTotal, elementMassFrac)))


def expandElementalNuclideMassFracs(
    element: elements.Element,
    massFrac: float,
    isotopicSubset: List[nuclideBases.NuclideBase] = None,
):
    """
    Return a dictionary of nuclide names to isotopic mass fractions.

    If an isotopic subset is passed in, the mass fractions get scaled up s.t. the total mass
    fraction remains constant.

    Parameters
    ----------
    element : Element
        The element to expand to natural isotopics
    massFrac : float
        Mass fraction of the initial element
    isotopicSubset : list of NuclideBases
        Natural isotopes to include in the expansion. Useful e.g. for excluding O18 from an
        expansion of Oxygen.
    """
    naturalIsotopics = element.getNaturalIsotopics()
    if isotopicSubset:
        selected = [nb for nb in naturalIsotopics if nb in isotopicSubset]
    else:
        selected = naturalIsotopics

    # abundance-weighted g/mole of the (possibly restricted) isotope set
    elementalWeightGperMole = sum(nb.weight * nb.abundance for nb in selected)
    if not any(selected):
        raise ValueError("Cannot expand element `{}` into isotopes: `{}`".format(element, selected))

    return {nb.name: massFrac * nb.abundance * nb.weight / elementalWeightGperMole for nb in selected}
def getChemicals(nuclideInventory):
    """
    Groups the inventories of nuclides by their elements.

    Parameters
    ----------
    nuclideInventory : dict
        nuclide inventories indexed by nuc -- either nucNames or nuclideBases

    Returns
    -------
    chemicals : dict
        inventory of elements indexed by element symbol -- e.g. 'U' or 'PU'
    """
    chemicals = {}
    for nuc, N in nuclideInventory.items():
        # accept either a nuclide base object or a nuclide name string as the key
        nb = nuc if isinstance(nuc, nuclideBases.INuclide) else nuclideBases.byName[nuc]

        if nb.element.symbol in chemicals:
            chemicals[nb.element.symbol] += N
        else:
            chemicals[nb.element.symbol] = N

    return chemicals


def applyIsotopicsMix(material, enrichedMassFracs: Dict[str, float], fertileMassFracs: Dict[str, float]):
    """
    Update material heavy metal mass fractions based on its enrichment and two nuclide feeds.

    This will remix the heavy metal in a Material object based on the object's
    ``class1_wt_frac`` parameter and the input nuclide information.

    This can be used for inputting mixtures of two external custom isotopic feeds as well as
    for fabricating assemblies from two closed-cycle collections of material.

    See Also
    --------
    armi.materials.material.FuelMaterial

    Parameters
    ----------
    material : material.Material
        The object to modify. Must have a ``class1_wt_frac`` param set
    enrichedMassFracs : dict
        Nuclide names and weight fractions of the class 1 nuclides
    fertileMassFracs : dict
        Nuclide names and weight fractions of the class 2 nuclides
    """
    total = sum(material.massFrac.values())
    hm = 0.0
    for nucName, massFrac in material.massFrac.items():
        nb = nuclideBases.byName[nucName]
        if nb.isHeavyMetal():
            hm += massFrac
    # fraction of the material's total mass that is heavy metal; preserved by the remix
    hmFrac = hm / total
    hmEnrich = material.class1_wt_frac
    # visit every nuclide present in either feed or already in the material
    for nucName in (
        set(enrichedMassFracs.keys()).union(set(fertileMassFracs.keys())).union(set(material.massFrac.keys()))
    ):
        nb = nuclideBases.byName[nucName]
        if nb.isHeavyMetal():
            # linear mix of the two feeds, scaled back to the heavy-metal share of mass
            material.massFrac[nucName] = hmFrac * (
                hmEnrich * enrichedMassFracs.get(nucName, 0.0) + (1 - hmEnrich) * fertileMassFracs.get(nucName, 0.0)
            )


def _changeDirectory(destination):
    # central chdir helper: fail loudly rather than silently staying in the current dir
    if os.path.exists(destination):
        os.chdir(destination)
    else:
        raise IOError("Cannot change directory to non-existent location: {}".format(destination))
    def __init__(
        self,
        destination,
        filesToMove=None,
        filesToRetrieve=None,
        dumpOnException=True,
        outputPath=None,
    ):
        """Establish the new and return directories."""
        self.initial = pathTools.armiAbsPath(os.getcwd())
        self.destination = None
        self.outputPath = None
        if destination is not None:
            self.destination = pathTools.armiAbsPath(destination)
        if outputPath is not None:
            self.outputPath = pathTools.armiAbsPath(outputPath)
        else:
            # default retrieval target: wherever we were when this changer was built
            self.outputPath = self.initial
        self._filesToMove = filesToMove or []
        self._filesToRetrieve = filesToRetrieve or []
        self._dumpOnException = dumpOnException

    def __enter__(self):
        """At the inception of a with command, navigate to a new directory if one is supplied."""
        runLog.debug("Changing directory to {}".format(self.destination))
        self.moveFiles()
        self.open()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """At the termination of a with command, navigate back to the original directory."""
        runLog.debug("Returning to directory {}".format(self.initial))
        self._createOutputDirectory()
        if exc_type is not None and self._dumpOnException:
            # something went wrong inside the with-block: rescue the whole folder
            runLog.info("An exception was raised within a DirectoryChanger. Retrieving entire folder for debugging.")
            self._retrieveEntireFolder()
        else:
            self.retrieveFiles()
        self.close()

    def __repr__(self):
        """Print the initial and destination paths."""
        return "<{} {} to {}>".format(self.__class__.__name__, self.initial, self.destination)

    def open(self):
        """
        User requested open, used to stall the close from a with statement.

        This method has been made for old uses of :code:`os.chdir()` and is not recommended.
        Please use the with statements.
        """
        if self.destination:
            _changeDirectory(self.destination)

    def close(self):
        """User requested close."""
        # only chdir back if we actually moved somewhere else
        if self.initial != os.getcwd():
            _changeDirectory(self.initial)

    def moveFiles(self):
        """Copy ``filesToMove`` into the destination directory on entry."""
        initialPath = self.initial
        destinationPath = self.destination
        self._transferFiles(initialPath, destinationPath, self._filesToMove, moveFiles=False)
        # also mirror the inputs into the output folder when a separate one was requested
        if self.outputPath != self.initial:
            destinationPath = self.outputPath
            self._transferFiles(initialPath, destinationPath, self._filesToMove, moveFiles=False)

    def retrieveFiles(self):
        """Copy ``filesToRetrieve`` back into the initial directory on exit."""
        if self.outputPath != self.initial:
            # copy (not move) so the same files can still be moved back below
            self._transferFiles(
                self.destination,
                self.outputPath,
                self._filesToRetrieve,
                moveFiles=False,
            )
        self._transferFiles(self.destination, self.initial, self._filesToRetrieve, moveFiles=True)

    def _retrieveEntireFolder(self):
        """
        Retrieve all files to a dump directory.

        This is used when an exception is caught by the DirectoryChanger to rescue the
        entire directory to aid in debugging. Typically this is only called if
        ``dumpOnException`` is True.
        """
        folderName = os.path.split(self.destination)[1]
        recoveryPath = os.path.join(self.initial, f"dump-{folderName}")
        shutil.copytree(self.destination, recoveryPath)

    def _createOutputDirectory(self):
        # only needed when an explicit, distinct output path was requested
        if self.outputPath == self.initial:
            return

        if not os.path.exists(self.outputPath):
            runLog.extra(f"Creating output folder: {self.outputPath}")
            try:
                os.makedirs(self.outputPath)
            except OSError as ee:
                # even though we checked exists, this still fails
                # sometimes when multiple MPI nodes try
                # to make the dirs due to I/O delays
                runLog.error(f"Failed to make output folder: {self.outputPath}. Exception: {ee}")
        else:
            runLog.extra(f"Output folder already exists: {self.outputPath}")

    @staticmethod
    def _transferFiles(initialPath, destinationPath, fileList, moveFiles=False):
        """
        Transfer files into or out of the directory.

        This is used in ``moveFiles`` and ``retrieveFiles`` to shuffle files about when
        creating a target directory or when coming back, respectively.

        Parameters
        ----------
        initialPath : str
            Path to the folder to find files in.
        destinationPath: str
            Path to the folder to move file to.
        fileList : list of str or list of tuple
            File names to move from initial to destination. If this is a simple list of
            strings, the files will be transferred. Alternatively tuples of
            (initialName, finalName) are allowed if you want the file renamed during
            transit. In the non-tuple option, globs/wildcards are allowed.
        moveFiles: bool, optional
            Controls whether the files are "moved" (``mv``) or "copied" (``cp``)

        Warning
        -------
        On Windows the max number of characters in a path is 260. If you exceed this you
        will see FileNotFound errors here.
        """
        if not fileList:
            return
        if not os.path.exists(destinationPath):
            os.makedirs(destinationPath)
        for pattern in fileList:
            if isinstance(pattern, tuple):
                # allow renames in transit
                fromName, destName = pattern
                copies = [(fromName, destName)]
            else:
                # expand globs if they're given
                copies = []
                for ff in glob.glob(pattern):
                    # renaming not allowed with globs
                    copies.append((ff, ff))

            for fromName, destName in copies:
                fromPath = os.path.join(initialPath, fromName)
                if not os.path.exists(fromPath):
                    # best-effort: missing files are skipped with a warning, not an error
                    runLog.warning(f"{fromPath} does not exist and will not be copied.")
                    continue

                toPath = os.path.join(destinationPath, destName)
                if moveFiles:
                    runLog.extra("Moving {} to {}".format(fromPath, toPath))
                    safeMove(fromPath, toPath)
                else:
                    runLog.extra("Copying {} to {}".format(fromPath, toPath))
                    safeCopy(fromPath, toPath)
class TemporaryDirectoryChanger(DirectoryChanger):
    """
    Create a temporary directory, change into it, and if there is no error/exception
    generated when using a :code:`with` statement, delete the directory.

    Notes
    -----
    If there is an error/exception generated while in a :code:`with` statement, the
    temporary directory contents will be copied to the original directory and then the
    temporary directory will be deleted.

    There is the ability for a user to set the environment variable ARMI_TEMP_ROOT_PATH,
    which will globally override the `root` argument being passed in. This is a useful tool
    for running code or tests in a read-only environment.
    """

    def __init__(
        self,
        root=None,
        filesToMove=None,
        filesToRetrieve=None,
        dumpOnException=True,
        outputPath=None,
    ):
        DirectoryChanger.__init__(
            self,
            root,
            filesToMove,
            filesToRetrieve,
            dumpOnException,
            outputPath,
        )

        # If an application sets this environment variable, all root args in all
        # `TempDirChanger` uses are overriden with a different root path. This is useful
        # for running unit tests in a read-only environment.
        if os.environ.get("ARMI_TEMP_ROOT_PATH"):
            root = os.environ["ARMI_TEMP_ROOT_PATH"]

        # If no root dir is given, the default path comes from context.getFastPath, which
        # *might* be relative to the cwd, making it possible to delete unintended files.
        # So this check is here to ensure that if we grab a path from context, it is a
        # proper temp dir.
        # That said, since the TemporaryDirectoryChanger *always* responsible for
        # creating its destination directory, it may always be safe to delete it
        # regardless of location.
        if root is None:
            root = context.getFastPath()

            # ARMIs temp dirs are in an context.APP_DATA directory: validate this is a temp dir.
            if pathlib.Path(context.APP_DATA) not in pathlib.Path(root).parents:
                raise ValueError("Temporary directory not in a safe location for deletion.")

        # make the tmp dir, if necessary
        if not os.path.exists(root):
            try:
                os.makedirs(root)
            except FileExistsError:
                # ignore the obvious race condition
                pass

        # init the important path attributes
        self.initial = os.path.abspath(os.getcwd())
        self.destination = TemporaryDirectoryChanger.GetRandomDirectory(root)
        # re-roll until we land on a directory name that does not already exist
        while os.path.exists(self.destination):
            self.destination = TemporaryDirectoryChanger.GetRandomDirectory(root)

    @classmethod
    def GetRandomDirectory(cls, root):
        # 10 random alphanumerics: collisions are unlikely and re-checked by the caller
        return os.path.join(
            root,
            "temp-" + "".join(random.choice(string.ascii_letters + string.digits) for _ in range(10)),
        )

    def __enter__(self):
        os.makedirs(self.destination)
        return DirectoryChanger.__enter__(self)

    def __exit__(self, exc_type, exc_value, traceback):
        DirectoryChanger.__exit__(self, exc_type, exc_value, traceback)
        try:
            pathTools.cleanPath(self.destination, mpiRank=context.MPI_RANK, forceClean=True)
        except PermissionError:
            # best-effort cleanup: leave the temp dir behind rather than crash on Windows
            if os.name == "nt":
                runLog.warning(
                    "There is an issue where Windows will not agree to delete private directories."
                    "That is, if you create a directory with a name starting with a period, the "
                    "TempDirChanger will not be able to clean it (for instance, a '.git' dir)."
                )


class ForcedCreationDirectoryChanger(DirectoryChanger):
    """Creates the directory tree necessary to reach your desired destination."""

    def __init__(
        self,
        destination,
        filesToMove=None,
        filesToRetrieve=None,
        dumpOnException=True,
        outputPath=None,
    ):
        # unlike the base class, a destination is mandatory here
        if not destination:
            raise ValueError("A destination directory must be provided.")
        DirectoryChanger.__init__(
            self,
            destination,
            filesToMove,
            filesToRetrieve,
            dumpOnException,
            outputPath,
        )

    def __enter__(self):
        if not os.path.exists(self.destination):
            runLog.extra(f"Creating destination folder: {self.destination}")
            try:
                os.makedirs(self.destination)
            except OSError as ee:
                # even though we checked exists, this still fails
                # sometimes when multiple MPI nodes try
                # to make the dirs due to I/O delays
                runLog.error(f"Failed to make destination folder: {self.destination}. Exception: {ee}")
        else:
            runLog.extra(f"Destination folder already exists: {self.destination}")
        DirectoryChanger.__enter__(self)
        return self


def directoryChangerFactory():
    # returns a class (not an instance): MPI runs need every rank to change together
    if context.MPI_SIZE > 1:
        from armi.utils.directoryChangersMpi import MpiDirectoryChanger

        return MpiDirectoryChanger
    else:
        return DirectoryChanger
"""
MPI Directory changers.

This is a separate module largely to minimize potential cyclic imports because the mpi
action stuff requires an import of the reactor framework.
"""

from armi import mpiActions
from armi.utils import directoryChangers


class MpiDirectoryChanger(directoryChangers.DirectoryChanger):
    """Change all nodes to specified directory.

    Notes
    -----
    `filesToMove` and `filesToRetrieve` do not get broadcasted to worker nodes. This is
    intended since this would cause a race condition between deleting and moving files.
    """

    def __init__(self, destination, outputPath=None):
        """Establish the new and return directories.

        Parameters
        ----------
        destination : str
            destination directory
        outputPath : str, optional
            directory for outputs
        """
        directoryChangers.DirectoryChanger.__init__(self, destination, outputPath=outputPath)

    def open(self):
        # broadcast a change-directory action so every MPI rank follows along
        cdma = _ChangeDirectoryMpiAction(self.destination)
        # line below looks a little weird, but it returns the instance
        cdma = cdma.broadcast(cdma)
        cdma.invoke(None, None, None)

    def close(self):
        # broadcast the return trip to the initial directory on all ranks
        cdma = _ChangeDirectoryMpiAction(self.initial)
        cdma = cdma.broadcast(cdma)
        cdma.invoke(None, None, None)


class _ChangeDirectoryMpiAction(mpiActions.MpiAction):
    """Change directory action."""

    def __init__(self, destination):
        mpiActions.MpiAction.__init__(self)
        self._destination = destination

    def invokeHook(self):
        # executed on each rank after the broadcast
        directoryChangers._changeDirectory(self._destination)
        return True
# See the License for the specific language governing permissions and # limitations under the License. """Dynamic importing help.""" def getEntireFamilyTree(cls): """Returns a list of classes subclassing the input class. One large caveat is it can only locate subclasses that had been imported somewhere Look to use importEntirePackage before searching for subclasses if not all children are being found as expected. """ return cls.__subclasses__() + [ grandchildren for child in cls.__subclasses__() for grandchildren in getEntireFamilyTree(child) ] ================================================ FILE: armi/utils/flags.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A Flag class, similar to ``enum.Flag``. This is an alternate implementation of the standard-library ``enum.Flag`` class. We use this to implement :py:class:`armi.reactor.flags.Flags`. We used to use the standard-library implementation, but that became limiting when we wanted to make it possible for plugins to define their own flags; the standard implementation does not support extension. We also considered the ``aenum`` package, which permits extension of ``Enum`` classes, but unfortunately does not support extension of ``Flags``. So, we had to make our own. This is a much simplified version of what comes with ``aenum``, but still provides most of the safety and functionality. 
""" import math from typing import Dict, List, Sequence, Tuple, Union from armi import runLog class auto: # noqa: N801 """ Empty class for requesting a lazily-evaluated automatic field value. This can be used to automatically provision a value for a field, when the specific value does not matter. In the future, it would be nice to support some arithmetic for these so that automatically-derived combinations of other automatically defined fields can be specified as well. """ def __iter__(self): """ Dummy __iter__ implementation. This is only needed to make mypy happy when it type checks things that have FlagTypes in them, since these can normally be iterated over, but mypy doesn't know that the metaclass consumes the autos. """ raise NotImplementedError( f"__iter__() is not actually implemented on {type(self)}; it is only defined to appease mypy." ) class _FlagMeta(type): """ Metaclass for defining new Flag classes. This attempts to do the minimum required to make the Flag class and its subclasses function properly. It mostly digests the class attributes, resolves automatic values and creates instances of the class as it's own class attributes for each field. The rest of the functionality lives in the base ``Flag`` class as plain-old code. .. tip:: Because individual flags are defined as *class* attributes (as opposed to instance attributes), we have to customize the way a Flag subclass itself is built, which requires a metaclass. 
""" def __new__(cls, name, bases, attrs): autoAt = 1 explicitFields = [(attr, val) for attr, val in attrs.items() if isinstance(val, int)] explicitValues = set(val for name, val in explicitFields) flagClass = type.__new__(cls, name, bases, attrs) # Make sure that none of the values collide assert len(explicitValues) == len(explicitFields) # Assign numeric values to the autos for aName, aVal in attrs.items(): if isinstance(aVal, auto): while autoAt in explicitValues: autoAt *= 2 attrs[aName] = autoAt autoAt *= 2 # Auto fields have been resolved, so now collect all ints allFields = {name: val for name, val in attrs.items() if isinstance(val, int)} allFields = {n: v for n, v in allFields.items() if not _FlagMeta.isdunder(n)} flagClass._nameToValue = allFields flagClass._valuesTaken = set(val for _, val in allFields.items()) flagClass._autoAt = autoAt flagClass._width = math.ceil(len(flagClass._nameToValue) / 8) # Replace the original class attributes with instances of the class itself. for name, value in allFields.items(): instance = flagClass() instance._value = value setattr(flagClass, name, instance) return flagClass @staticmethod def isdunder(s): return s.startswith("__") and s.endswith("__") def __getitem__(cls, key): """ Implement indexing at the class level. This has to be done at the metaclass level, since the python interpreter looks to ``type(klass).__getitem__(klass, key)``, which for an implementation of Flag is this metaclass. """ return cls(cls._nameToValue[key]) class Flag(metaclass=_FlagMeta): """ A collection of bitwise flags. This is intended to emulate ``enum.Flag``, except with the possibility of extension after the class has been defined. Most docs for ``enum.Flag`` should be relevant here, but there are sure to be occasional differences. .. impl:: No two flags have equivalence. 
:id: I_ARMI_FLAG_DEFINE :implements: R_ARMI_FLAG_DEFINE A bitwise flag class intended to emulate the standard library's ``enum.Flag``, with the added functionality that it allows for extension after the class has been defined. Each Flag is unique; no two Flags are equivalent. Note that while Python allows for arbitrary-width integers, exceeding the system-native integer size can lead to challenges in storing data, e.g. in an HDF5 file. In this case, the ``from_bytes()`` and ``to_bytes()`` methods are provided to represent a Flag's values in smaller chunks so that writeability can be maintained. .. warning:: Python features arbitrary-width integers, allowing one to represent an practically unlimited number of fields. *However*, including more flags than can be represented in the system-native integer types may lead to strange behavior when interfacing with non-pure Python code. For instance, exceeding 64 fields makes the underlying value not trivially-storable in an HDF5 file. In such circumstances, the ``from_bytes()`` and ``to_bytes()`` methods are available to represent a Flag's values in smaller chunks. """ _autoAt = None _nameToValue = dict() _valuesTaken = set() _width = None def __init__(self, init=0): self._value = int(init) def _flagsOn(self): flagsOn = set() for k, v in self._nameToValue.items(): if self._value & v: flagsOn.add(k) return flagsOn def __repr__(self): return f"<{type(self).__name__}.{'|'.join(self._flagsOn())}: {self._value}>" def __str__(self): return f"{type(self).__name__}.{'|'.join(self._flagsOn())}" def __getstate__(self): return self._value def __setstate__(self, state: int): self._value = state @classmethod def _registerField(cls, name, value): """ Plug a new field into the Flags. This makes sure everything is consistent and does error/collision checks. Mostly useful for extending an existing class with more fields. 
""" if name in cls._nameToValue: runLog.debug(f"The flag {name} already exists and does not need to be recreated.") return cls._valuesTaken.add(value) cls._nameToValue[name] = value cls._width = math.ceil(len(cls._nameToValue) / 8) instance = cls(value) setattr(cls, name, instance) @classmethod def _resolveAutos(cls, fields: Sequence[str]) -> List[Tuple[str, int]]: """Assign values to autos, based on the current state of the class.""" # There is some opportunity for code reuse between this and the metaclass... resolved = [] for field in fields: while cls._autoAt in cls._valuesTaken: cls._autoAt *= 2 value = cls._autoAt resolved.append((field, value)) cls._autoAt *= 2 return resolved @classmethod def width(cls): """Return the number of bytes needed to store all of the flags on this class.""" return cls._width @classmethod def fields(cls): """Return a dictionary containing a mapping from field name to integer value.""" return cls._nameToValue @classmethod def sortedFields(cls): """Return a list of all field names, sorted by increasing integer value.""" return [i[0] for i in sorted(cls._nameToValue.items(), key=lambda item: item[1])] @classmethod def extend(cls, fields: Dict[str, Union[int, auto]]): """ Extend the Flags object with new fields. .. warning:: This alters the class that it is called upon! Existing instances should see the new data, since classes are mutable. .. impl:: Set of flags are extensible without loss of uniqueness. :id: I_ARMI_FLAG_EXTEND0 :implements: R_ARMI_FLAG_EXTEND A class method to extend a ``Flag`` with a vector of provided additional ``fields``, with field names as keys, without loss of uniqueness. Values for the additional ``fields`` can be explicitly specified, or an instance of ``auto`` can be supplied. Parameters ---------- fields : dict A dictionary containing field names as keys, and their desired values, or an instance of ``auto`` as values. Example ------- >>> class MyFlags(Flags): ... FOO = auto() ... BAR = 1 ... 
BAZ = auto() >>> MyFlags.extend({"SUPER": auto()}) >>> print(MyFlags.SUPER) <MyFlags.SUPER: 8> """ # add explicit values first, so that autos know about them for field, value in ((f, v) for f, v in fields.items() if isinstance(v, int)): cls._registerField(field, value) # find auto values (ignore if they already exist) toResolve = [field for field, val in fields.items() if isinstance(val, auto)] toResolve = [field for field in toResolve if field not in cls._nameToValue] resolved = cls._resolveAutos(toResolve) for field, value in resolved: cls._registerField(field, value) def to_bytes(self, byteorder="little"): """ Return a byte stream representing the flag. This is useful when storing Flags in a data type of limited size. Python ints can be of arbitrary size, while most other systems can only represent integers of 32 or 64 bits. For compatibility, this function allows to convert the flags to a sequence of single-byte elements. Note that this uses snake_case to mimic the method on the Python-native int type. """ return self._value.to_bytes(self.width(), byteorder=byteorder) @classmethod def from_bytes(cls, bytes, byteorder="little"): """Return a Flags instance given a byte stream.""" return cls(int.from_bytes(bytes, byteorder=byteorder)) def __int__(self): return self._value def __and__(self, other): return type(self)(self._value & other._value) def __or__(self, other): return type(self)(self._value | other._value) def __xor__(self, other): return type(self)(self._value ^ other._value) def __invert__(self): """ Implement unary ~. Note ---- This is avoiding just ~ on the ``_value`` because it might not be safe. Using the int directly is slightly dangerous in that python ints are not of fixed width, so the result of inverting one Flag might not be as wide as the result of inverting another Flag. Typically, one would want to invert a Flag to create a mask for unsetting a bit on another Flag, like ``f1 &= ~f2``. 
If ``f2`` is narrower than ``f1`` the field of ones that you need to keep ``f1`` bits on might not cover the width of ``f1``, erroneously turning off its upper bits. Not sure if this was an issue before or not. Once things are working, might makes sense to play with this more. """ new = self._value for _, val in self._nameToValue.items(): if val & new: new -= val else: new += val return type(self)(new) def __iter__(self): for _, value in self._nameToValue.items(): if value & self._value: yield type(self)(value) def __bool__(self): return bool(self._value) def __eq__(self, other): return self._value == other._value def __contains__(self, other): return bool(other & self) def __hash__(self): return hash(self._value) # Type alias to reliably check for a proper Flag type. This cannot just be `Flag`, since mypy gets confused by `auto` # because it doesn't go to the trouble of resolving them in the metaclass. FlagType = Union[Flag, auto] ================================================ FILE: armi/utils/gridEditor.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ GUI elements for manipulating grid layout and contents. This provides a handful of classes which provide wxPython Controls for manipulating grids and grid Blueprints. 
The grid editor may be invoked with the :py:mod:`armi.cli.gridGui` entry point:: $ python -m armi grids If you have an existing set of input files, pass in the blueprints input file as the first argument and the system will load up the associated grid, e.g.:: $ python -m armi grids FFTF-blueprints.yaml .. figure:: /.static/gridEditor.png :align: center An example of the Grid Editor being used on a FFTF input file **Known Issues** * There is no action stack or undo functionality. Save frequently if you want to recover previous states * Cartesian grids are supported, but not rendered as nicely as their Hex counterparts. The "through center assembly" case is not rendered properly with the half-assemblies that lie along the edges. * The controls are optimized for manipulating a Core layout, displaying an "Assembly palette" that contains the Assembly designs found in the top-level blueprints. A little extra work and this could also be made to manipulate block grids or other things. * Assembly colors are derived from the set of flags applied to them, but the mapping of colors to flags is not particularly rich, and there isn't anything to disambiguate between assemblies of different design, but the same flags. * No proper zoom support, and object sizes are fixed and don't accommodate long specifiers. 
""" import colorsys import enum import io import os import pathlib import sys from typing import Dict, Optional, Sequence, Tuple, Union import numpy as np import numpy.linalg import wx import wx.adv from armi.reactor import geometry, grids from armi.reactor.blueprints import Blueprints, gridBlueprint, migrate from armi.reactor.blueprints.assemblyBlueprint import AssemblyBlueprint from armi.reactor.blueprints.gridBlueprint import GridBlueprint, saveToStream from armi.reactor.flags import Flags from armi.settings.caseSettings import Settings from armi.utils import hexagon, textProcessors UNIT_SIZE = 50 # pixels per assembly UNIT_MARGIN = 40 # offset applied to the draw area margins # The color to use for each object is based on the flags that that object has. All applicable colors # will be blended together to produce the final color for the object. There are also plans to apply # brush styles like cross-hatching or the like, which is what the Nones are for below. Future work # to employ these. Colors are RGB fractions. FLAG_STYLES = { # Red Flags.FUEL: (np.array([1.0, 0.0, 0.0]), None), # Green Flags.CONTROL: (np.array([0.0, 1.0, 0.0]), None), # Gray Flags.SHIELD: (np.array([0.4, 0.4, 0.4]), None), # Yellow Flags.REFLECTOR: (np.array([0.5, 0.5, 0.0]), None), # Paisley? Flags.INNER: (np.array([0.5, 0.5, 1.0]), None), # We shouldn't see many SECONDARY, OUTER, MIDDLE, etc. on their own, so these # will just darken or brighten whatever color we would otherwise get) Flags.SECONDARY: (np.array([0.0, 0.0, 0.0]), None), Flags.OUTER: (np.array([0.0, 0.0, 0.0]), None), # WHITE (same as above, this will just lighten anything that it accompanies) Flags.MIDDLE: (np.array([1.0, 1.0, 1.0]), None), Flags.ANNULAR: (np.array([1.0, 1.0, 1.0]), None), Flags.IGNITER: (np.array([0.2, 0.2, 0.2]), None), Flags.STARTER: (np.array([0.4, 0.4, 0.4]), None), Flags.FEED: (np.array([0.6, 0.6, 0.6]), None), Flags.DRIVER: (np.array([0.8, 0.8, 0.8]), None), } # RGB weights for calculating luminance. 
# We use this to decide whether we should put white or black
# text on top of the color. These come from CCIR 601
LUMINANCE_WEIGHTS = np.array([0.3, 0.59, 0.11])


def _translationMatrix(x, y):
    """Return an affine transformation matrix representing an x- and y-translation."""
    return np.array([[1.0, 0.0, x], [0.0, 1.0, y], [0.0, 0.0, 1.0]])


def _boundingBox(points: Sequence[np.ndarray]) -> wx.Rect:
    """Return the smallest wx.Rect that contains all of the passed points."""
    xmin = np.amin([p[0] for p in points])
    xmax = np.amax([p[0] for p in points])
    ymin = np.amin([p[1] for p in points])
    ymax = np.amax([p[1] for p in points])
    return wx.Rect(wx.Point(int(xmin), int(ymin)), wx.Point(int(xmax), int(ymax)))


def _desaturate(c: Sequence[float]):
    """Lighten an RGB color by moving its HLS lightness halfway toward 1.0."""
    r, g, b = tuple(c)
    hue, lig, sat = colorsys.rgb_to_hls(r, g, b)
    lig = lig + (1.0 - lig) * 0.5
    return np.array(colorsys.hls_to_rgb(hue, lig, sat))


def _getColorAndBrushFromFlags(f, bold=True):
    """Given a set of Flags, return a wx.Pen and wx.Brush with which to draw a shape."""
    # average the colors of every style flag present in `f`
    c = np.array([0.0, 0.0, 0.0])
    nColors = 0
    for styleFlag, style in FLAG_STYLES.items():
        if not styleFlag & f:
            continue
        color, brush = style
        if color is not None:
            c += color
            nColors += 1
    if nColors:
        c /= nColors
    if not bold:
        # round-trip the rgb color through hsv so that we can desaturate
        c = _desaturate(c)
    # pick white text on dark fills, black on light (CCIR 601 luminance)
    luminance = c.dot(LUMINANCE_WEIGHTS)
    dark = luminance < 0.5
    c = tuple(int(255 * ci) for ci in c)
    brush = wx.Brush(wx.Colour(*c, 255))
    pen = wx.WHITE if dark else wx.BLACK
    return pen, brush


def _drawShape(
    dc: wx.DC,
    geom: geometry.GeomType,
    view: np.ndarray,
    model: Optional[np.ndarray] = None,
    label: str = "",
    description: Optional[str] = None,
    bold: bool = True,
):
    """
    Draw a shape to the passed DC, given its GeomType and other relevant information.

    Return the bounding box.

    Parameters
    ----------
    dc: wx.DC
        The device context to draw to
    geom: geometry.GeomType
        The geometry type, which defines the shape to be drawn
    view: np.ndarray
        A 3x3 matrix defining the world transform
    model: np.ndarray, optional
        A 3x3 matrix defining the model transform. No transform is made to the "unit" shape if no
        model transform is provided.
    label: str, optional
        A string label to draw on the shape
    description: str, optional
        A string containing metadata for determining how to style to shape
    bold: bool, optional
        Whether the object should be drawn with full saturation. Default ``True``
    """
    if description is None:
        # no metadata: neutral gray fill with black text
        dc.SetBrush(wx.Brush(wx.Colour(200, 200, 200, 255)))
        color = wx.BLACK
    else:
        aFlags = Flags.fromStringIgnoreErrors(description)
        color, brush = _getColorAndBrushFromFlags(aFlags, bold=bold)
        dc.SetBrush(brush)

    if geom == geometry.GeomType.HEX:
        primitive = hexagon.corners(rotation=0)
    elif geom == geometry.GeomType.CARTESIAN:
        primitive = [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)]
    else:
        raise ValueError("Geom type `{}` unsupported".format(geom))

    # Appending 1 to each coordinate since the transformation matrix is 3x3
    poly = np.array([np.append(vertex, 1) for vertex in primitive]).transpose()
    model = model if model is not None else np.eye(3)
    poly = view.dot(model).dot(poly).transpose()
    poly = [wx.Point(int(vertex[0]), int(vertex[1])) for vertex in poly]
    boundingBox = _boundingBox(poly)
    dc.SetTextForeground(color)
    dc.DrawPolygon(poly)
    dc.DrawLabel(label, boundingBox, wx.ALIGN_CENTRE)
    return boundingBox


class _GridControls(wx.Panel):
    """Collection of controls for the main Grid editor.
Save/Open, num rings, etc.""" def __init__(self, parent): wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize) self.parent = parent sizer = wx.BoxSizer(wx.HORIZONTAL) self.ringControl = wx.SpinCtrl(self, id=wx.ID_ANY, initial=5, min=1, max=20) self.ringControl.SetToolTip("Select how many rings of the grid to display") self.ringApply = wx.Button(self, id=wx.ID_ANY, label="Apply") self.ringApply.SetToolTip( "Apply the number of rings to the current grid. " "Assemblies outside of the displayed region will not be removed." ) self.expandButton = wx.Button(self, id=wx.ID_ANY, label="Expand to full core") self.labelMode = wx.Choice( self, id=wx.ID_ANY, choices=[mode.label for mode in GridGui.Mode if mode is not GridGui.Mode.PATH], ) self.labelMode.SetSelection(int(GridGui.Mode.SPECIFIER)) self.labelMode.SetToolTip("Select what to display in each grid region.") self.saveButton = wx.Button(self, id=wx.ID_ANY, label="Save grid blueprints...") self.saveButton.SetToolTip("Save just the grids section to its own file. ") self.openButton = wx.Button(self, id=wx.ID_ANY, label="Open blueprints...") self.openButton.SetToolTip( "Open a new top-level blueprints file. Top-level is needed to populate the assembly palette on the right." 
) self.newButton = wx.Button(self, id=wx.ID_ANY, label="New grid blueprints...") self.newButton.SetToolTip("Create a new Grid blueptint.") self.helpButton = wx.Button(self, id=wx.ID_ANY, label="Help") self.saveImgButton = wx.Button(self, id=wx.ID_ANY, label="Save image...") self.saveImgButton.SetToolTip("Save the grid layout to an image file.") self.Bind(wx.EVT_BUTTON, self.onChangeRings, self.ringApply) self.Bind(wx.EVT_BUTTON, self.onExpand, self.expandButton) self.Bind(wx.EVT_BUTTON, self.onSave, self.saveButton) self.Bind(wx.EVT_BUTTON, self.onOpen, self.openButton) self.Bind(wx.EVT_BUTTON, self.onNew, self.newButton) self.Bind(wx.EVT_BUTTON, self.onHelp, self.helpButton) self.Bind(wx.EVT_BUTTON, self.onSaveImage, self.saveImgButton) self.Bind(wx.EVT_CHOICE, self.onLabelMode, self.labelMode) self.help = HelpDialog(self) ringBox = wx.BoxSizer(wx.VERTICAL) ringLabel = wx.StaticText(self, wx.ID_ANY, "Num. Rings", style=wx.ALIGN_CENTRE_HORIZONTAL) ringBox.Add(ringLabel, 1, wx.EXPAND) ringBox.Add(self.ringControl, 1, wx.EXPAND) ringBox.Add(self.ringApply, 1, wx.EXPAND) sizer.Add(ringBox, 0, wx.ALL, 0) auxButtons = wx.BoxSizer(wx.VERTICAL) auxButtons.Add(self.expandButton, 1, wx.EXPAND) auxButtons.Add(self.labelMode, 1, wx.EXPAND) sizer.Add(auxButtons) fileBox = wx.BoxSizer(wx.VERTICAL) fileBox.Add(self.saveButton, 1, wx.EXPAND) fileBox.Add(self.openButton, 1, wx.EXPAND) fileBox.Add(self.newButton, 1, wx.EXPAND) sizer.Add(fileBox) sizer.Add(self.helpButton) sizer.Add(self.saveImgButton) self.SetSizerAndFit(sizer) def setNumRings(self, numRings): self.ringControl.SetValue(numRings) def onChangeRings(self, _event): self.parent.setNumRings(self.ringControl.GetValue()) def onHelp(self, _event): self.help.Show() def onLabelMode(self, _event): newMode = GridGui.Mode(self.labelMode.GetSelection()) self.parent.setMode(newMode) def onExpand(self, event): self.parent.expandToFullCore(event) def onSave(self, event): self.parent.save() def onSaveImage(self, event): 
self.parent.saveImage() def onOpen(self, event): self.parent.open(event) def onNew(self, event): self.parent.new(event) class _PathControl(wx.Panel): """Collection of controls for manipulating fuel shuffling paths.""" def __init__(self, parent, viewer=None): wx.Panel.__init__(self, parent, id=wx.ID_ANY) # Direct link to the main viz control. This avoids having to reach up and back down for an # instance, with all of the structural assumptions that that requires. self._viewer = viewer self._needsIncrement = False self.activateButton = wx.ToggleButton(self, label="Fuel Path") self.clearButton = wx.ToggleButton(self, label="Remove From Path") sizer = wx.BoxSizer(wx.VERTICAL) pathSizer = wx.BoxSizer(wx.HORIZONTAL) indexSizer = wx.BoxSizer(wx.HORIZONTAL) self.pathSpinner = wx.SpinCtrl(self, id=wx.ID_ANY, initial=0, min=0) self.indexSpinner = wx.SpinCtrl(self, id=wx.ID_ANY, initial=0, min=0) self.autoIncrement = wx.CheckBox(self, id=wx.ID_ANY, label="Increment") pathSizer.Add(wx.StaticText(self, wx.ID_ANY, "Path: ")) pathSizer.Add(self.pathSpinner, 1) indexSizer.Add(wx.StaticText(self, wx.ID_ANY, "Index: ")) indexSizer.Add(self.indexSpinner, 1) buttonSizer = wx.BoxSizer(wx.HORIZONTAL) buttonSizer.Add(self.activateButton) buttonSizer.Add(self.clearButton) buttonSizer.AddSpacer(20) sizer.Add(buttonSizer, 1, wx.EXPAND) sizer.Add(pathSizer, 1) sizer.Add(indexSizer, 1) sizer.Add(self.autoIncrement) self.Bind(wx.EVT_TOGGLEBUTTON, parent.onToggle, self.activateButton) self.Bind(wx.EVT_TOGGLEBUTTON, parent.onToggle, self.clearButton) self.Bind(wx.EVT_CHECKBOX, self.onAutoIncrement, self.autoIncrement) self.Bind(wx.EVT_SPINCTRL, self.onPathChange, self.pathSpinner) self.SetSizerAndFit(sizer) def onPathChange(self, event): self.indexSpinner.SetValue(0) if self._viewer is not None: self._viewer.drawGrid() self._viewer.drawArrows() self._viewer.Refresh() def onAutoIncrement(self, event): self.indexSpinner.Enable(not self.autoIncrement.GetValue()) def getActivateButtons(self): return 
{ self.activateButton.GetId(): self.activateButton, self.clearButton.GetId(): self.clearButton, } def getIndices(self, clear=False) -> Tuple[Optional[int], Optional[int]]: if self.clearButton.GetValue() and clear: return None, None path, index = self.pathSpinner.GetValue(), self.indexSpinner.GetValue() if self._needsIncrement: self._needsIncrement = False self.indexSpinner.SetValue(index + 1) return path, index def maybeIncrement(self): self._needsIncrement = self.autoIncrement.GetValue() and self.activateButton.GetValue() class _AssemblyPalette(wx.ScrolledWindow): """ Collection of toggle controls for each defined AssemblyBlueprint, as well as some extra controls for configuring fuel shuffling paths. """ def __init__( self, parent, geomType: Optional[geometry.GeomType], assemDesigns=None, viewer=None, ): wx.ScrolledWindow.__init__(self, parent, wx.ID_ANY, (0, 0), size=(250, 150), style=wx.SUNKEN_BORDER) self.parent = parent self.geomType = geomType self.assemDesigns = assemDesigns or dict() self.SetScrollRate(0, 20) # None -> None is useful for propagating a None to other components without # special branching self.assemDesignsById: Dict[Optional[int], Optional[AssemblyBlueprint]] = {None: None} sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add( wx.StaticText(self, wx.ID_ANY, "Assemblies:"), 0, wx.ALIGN_CENTRE | wx.ALL, 5, ) # keyed on ID self.assemButtons = dict() self.buttonIdBySpecifier = {None: None} self.activeAssemID: Optional[int] = None for key, design in self.assemDesigns.items(): # flip y-coordinates, enlarge, offset flip_y = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]]) scale = np.array( [ [UNIT_SIZE * 0.8, 0.0, 0.0], [0.0, UNIT_SIZE * 0.8, 0.0], [0.0, 0.0, 1.0], ] ) translate = np.array( [ [1.0, 0.0, UNIT_SIZE * 0.5], [0.0, 1.0, UNIT_SIZE * 0.5], [0.0, 0.0, 0.0], ] ) transform = translate.dot(flip_y).dot(scale) bmap = wx.Bitmap(UNIT_SIZE, UNIT_SIZE) dc = wx.MemoryDC() dc.SelectObject(bmap) brush = wx.Brush(self.GetBackgroundColour()) 
            dc.SetBackground(brush)
            dc.Clear()
            # render a thumbnail of the assembly shape onto the palette button
            _drawShape(
                dc,
                self.geomType,
                transform,
                label=design.specifier,
                description=design.flags or key,
            )
            dc.SelectObject(wx.NullBitmap)
            img = wx.StaticBitmap(self, bitmap=bmap)
            button = wx.ToggleButton(self, wx.ID_ANY, key)
            self.assemButtons[button.GetId()] = button
            self.buttonIdBySpecifier[design.specifier] = button.GetId()
            self.Bind(wx.EVT_TOGGLEBUTTON, self.onToggle, button)
            buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
            buttonSizer.Add(img)
            buttonSizer.Add(button, 1, wx.EXPAND)
            buttonSizer.AddSpacer(20)
            sizer.Add(buttonSizer, 1, wx.EXPAND)
            self.assemDesignsById[button.GetId()] = design

        sizer.Add(wx.StaticText(self, wx.ID_ANY, "Equilibrium Fuel Path:"), 0, wx.ALIGN_CENTRE)
        self.pathControl = _PathControl(self, viewer)
        sizer.Add(self.pathControl)
        # path-mode toggles participate in the same mutually-exclusive button group
        self.assemButtons.update(self.pathControl.getActivateButtons())
        self.SetSizerAndFit(sizer)

    def _setActiveAssemID(self, id: Optional[int]):
        """Make sure the appropriate button is on, but none others."""
        if self.activeAssemID is not None and self.activeAssemID != id:
            # there is currently an active assem, and it isn't the requested one. Turn
            # its button off.
            self.assemButtons[self.activeAssemID].SetValue(False)
        if id is not None:
            # we are activating an assem ID. Turn its button on
            self.assemButtons[id].SetValue(True)
        self.activeAssemID = id

    def onToggle(self, event):
        """
        Respond to toggle events.

        This makes sure that the right selector button is activated, and switches the GUI mode
        into the proper one based on whether an assembly design is selected, or the fuel path
        controls.
        """
        if self.assemButtons[event.GetId()].GetValue():
            # The button that generated the event is "on" (the ToggleButton assumes its new value
            # before the event is propagated). We need to select whichever button it was.
            setTo = event.GetId()
        else:
            # The button that generated the event is off, implying that the user clicked on the
            # previously-selected button. Clear the active selection
            setTo = None
        self._setActiveAssemID(setTo)

        mode = (
            GridGui.Mode.PATH
            if event.GetId() in self.pathControl.getActivateButtons() and setTo is not None
            else GridGui.Mode.SPECIFIER
        )
        self.parent.setMode(mode)

    def editorClicked(self):
        """Notify the path control that the editor was clicked, arming auto-increment."""
        self.pathControl.maybeIncrement()

    def getSelectedAssem(self) -> Optional[Union[AssemblyBlueprint, Tuple[int, int]]]:
        """Return the currently-selected assembly design or fuel path indices."""
        if self.activeAssemID in self.assemDesignsById:
            # We have an assembly design activated. return it
            return self.assemDesignsById[self.activeAssemID]
        elif self.activeAssemID in self.pathControl.getActivateButtons():
            # we are in path selection mode, return stuff from the pathControl
            return self.pathControl.getIndices(clear=False)
        else:
            return None

    def getAssemToSet(self) -> Optional[Union[AssemblyBlueprint, Tuple[int, int]]]:
        """
        Return the assembly design of fuel path tuple that a client should set.

        This differs from ``getSelectedAssem`` in that it can incorporate more logic to enforce
        certain rules, such as performing increments, masking things off based on other state
        etc., whereas ``getSelectedAssem`` should be more dumb and just return the state of the
        controls themselves.
        """
        if self.activeAssemID in self.assemDesignsById:
            # We have an assembly design activated.
return it return self.assemDesignsById[self.activeAssemID] elif self.activeAssemID in self.pathControl.getActivateButtons(): # we are in path selection mode, return stuff from the pathControl return self.pathControl.getIndices(clear=True) else: return None def setActiveAssem(self, assemDesign: Optional[Union[AssemblyBlueprint, tuple]]): """Override the selected assembly design from above.""" specifier = None if isinstance(assemDesign, AssemblyBlueprint): specifier = assemDesign.specifier self._setActiveAssemID(self.buttonIdBySpecifier[specifier]) elif isinstance(assemDesign, tuple): self._setActiveAssemID(self.pathControl.activateButton.GetId()) elif assemDesign is None: self._setActiveAssemID(None) class GridGui(wx.ScrolledWindow): """ Visual editor for grid blueprints. This is the actual viewer that displays the grid and grid blueprints contents, and responds to mouse events. Under the hood, it uses a wx.PseudoDC to handle the drawing, which provides the following benefits over a regular DC: * Drawn objects can be associated with an ID, allowing parts of the drawing to be modified or cleared without having to re-draw everything. * The IDs associated with the objects can be used to distinguish what was clicked on in a mouse event (though the support for this isn't super great, so we do have to do some of our own object disambiguation). The ``drawGrid()`` method is used to re-draw the entire geometry, whereas the ``applyAssem()`` method may be used to update a single assembly. """ class Mode(enum.IntEnum): """ Enumeration for what type of objects are currently being manipulated. This can either be SPECIFIER, for laying out the initial core layout, or PATH for manipulating fuel shuffling paths. """ # We use these values to map between selections in GUI elements, so do not go changing them # willy-nilly. 
SPECIFIER = 0 POSITION_IJ = 1 POSITION_RINGPOS = 2 PATH = 3 @property def label(self): if self == self.SPECIFIER: return "Specifier" elif self == self.PATH: return "Shuffle Path" elif self == self.POSITION_IJ: return "(i, j)" else: return "(Ring, Position)" @property def isPosition(self): return self in (self.POSITION_IJ, self.POSITION_RINGPOS) def __init__(self, parent, bp=None, defaultGeom=geometry.CARTESIAN): """ Create a new GridGui. Parameters ---------- parent : wx.Window The parent control bp : set of grid blueprints, optional This should be the ``gridDesigns`` section of a root Blueprints object. If not provided, a dictionary will be created with an empty "core" grid blueprint. """ wx.ScrolledWindow.__init__(self, parent, wx.ID_ANY, (0, 0), size=(250, 150), style=wx.BORDER_DEFAULT) self.parent = parent if bp is None: bp = {"core": GridBlueprint(name="core", gridContents=dict(), geom=defaultGeom)} self.bp = bp self.coreBp = bp["core"] self.eqFuelPathBp = bp.get("coreEqPath", None) self.numRings = 7 self._grid = None self._geomType = None # What are we displaying/modifying self._mode = GridGui.Mode.SPECIFIER grid = self.coreBp.construct() if self.coreBp.gridContents: maxRings = max(grid.getRingPos(idx)[0] for idx in self.coreBp.gridContents.keys()) self.numRings = max(7, maxRings) # Need to assign this after setting numRings, since we need a grid to # determine numRings, but need numRings to properly set the self.grid # property. 
self.grid = grid # If we are in the middle of handling some click events, what are the indices of # the clicked-on region self.clickIndices = None self.Bind(wx.EVT_PAINT, self.onPaint) self.Bind(wx.EVT_MOUSE_EVENTS, self.onMouse) self.contextMenuIDs = { item[0]: (wx.NewIdRef(), item[1]) for item in [ ("Select assembly type", self.onSelectAssembly), ("Make ring like this", self.onFillRing), ("Clear ring", self.onClearRing), ] } self.contextMenu = wx.Menu() for text, info in self.contextMenuIDs.items(): self.contextMenu.Append(info[0], text) self.Bind(wx.EVT_MENU, info[1], info[0]) self.pdc = wx.adv.PseudoDC() # Might be a good idea to implement this with bidict, but maybe not worth the # dependency self.pdcIdToIndices: Dict[int, Tuple[int, int, int]] = dict() self.indicesToPdcId: Dict[Tuple[int, int, int], int] = dict() # map from a PeudoDC ID (e.g. a hex) to the pixel location of the shapes # center. This is used to distinguish between a multi-object hit on click # events. While the FindObjects docs purport to distinguish objects # pixel-by-pixel, it seems like this is a lie, and that they simply use the # bounding boxes provided by the drawer. Laaaaame. self.pdcIdToCenter: Dict[int, wx.Point] = dict() # The ID to use for all arrow drawing. 
lets us clear and re-draw easily self._arrowPdcId = wx.NewIdRef() self.drawGrid() @property def grid(self): return self._grid @grid.setter def grid(self, newGrid): self._grid = newGrid self._geomType = self._grid.geomType self._idxByRing = [list() for _ in range(self.numRings)] for idx, loc in self._grid.items(): ring, _pos = self._grid.getRingPos(idx) if not self._grid.locatorInDomain(loc, symmetryOverlap=False) or ring > self.numRings: continue self._idxByRing[ring - 1].append(idx) @property def mode(self): return self._mode @mode.setter def mode(self, newMode): if self.mode == newMode: return self._mode = newMode self.drawGrid() if self._mode == GridGui.Mode.PATH: self.drawArrows() self.Refresh() @property def activeBlueprints(self): if self.mode == GridGui.Mode.SPECIFIER: return self.coreBp elif self.mode == GridGui.Mode.PATH: return self.eqFuelPathBp elif self.mode.isPosition: return self.coreBp else: raise ValueError("Unsupported mode `{}`".format(self.mode)) def growToFullCore(self): if geometry.FULL_CORE not in self.coreBp.symmetry: self.coreBp.expandToFull() if self.eqFuelPathBp is not None: self.eqFuelPathBp.expandToFull() self.grid = self.coreBp.construct() self.drawGrid() self.Refresh() def _getWindowCoordinates(self, event): xv, yv = self.GetViewStart() dx, dy = self.GetScrollPixelsPerUnit() xOffset = dx * xv yOffset = dy * yv x = event.GetX() y = event.GetY() xScrolled = x + xOffset yScrolled = y + yOffset return xScrolled, yScrolled def _getIndicesFromEvent(self, event) -> Optional[Tuple[int, int, int]]: obj = self._getObjectFromEvent(event) if obj is None: return None return self.pdcIdToIndices[obj] def _getObjectFromEvent(self, event) -> Optional[int]: def _distanceish(p1, p2): return (p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2 x, y = self._getWindowCoordinates(event) objs = self.pdc.FindObjects(x, y, radius=1) if not objs: return None if len(objs) == 1: return objs[0] # list of tuples with (distance, ID) sortableObjectIds = 
[(_distanceish(wx.RealPoint(x, y), self.pdcIdToCenter[obj]), obj) for obj in objs] return min(sortableObjectIds)[1] def drawGrid(self): """Wipe out anything in the drawing and re-draw everything.""" self.pdc.Clear() self.pdc.RemoveAll() self.pdcIdToIndices = dict() self.indicesToPdcId = dict() self.pdcIdToCenter = dict() self.pdc.SetPen(wx.Pen("BLACK", 1)) gridScale = self._gridScale(self.grid) # flip y-coordinates, enlarge flip_y = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]]) scale = np.array( [ [UNIT_SIZE / gridScale[0], 0.0, 0.0], [0.0, UNIT_SIZE / gridScale[1], 0.0], [0.0, 0.0, 1.0], ] ) # uniform grid, so all shapes have the same scale model = np.array([[gridScale[0], 0.0, 0.0], [0.0, gridScale[1], 0.0], [0.0, 0.0, 1.0]]) self.transform = flip_y.dot(scale) rect = self._calcGridBounds() self.SetVirtualSize((rect.Width, rect.Height)) self.SetScrollRate(20, 20) # Global translation used to center the view translate = _translationMatrix(-1 * rect.Left, -1 * rect.Top) self.transform = translate.dot(self.transform) brush = wx.Brush(wx.Colour(128, 128, 128, 0)) self.pdc.SetBrush(brush) for idx, loc in self.grid.items(): ring, _ = self.grid.getRingPos(idx) if not self.grid.locatorInDomain(loc) or ring > self.numRings: continue id = wx.NewIdRef() self.pdcIdToIndices[id] = idx self.indicesToPdcId[idx] = id self.pdc.SetId(id) label, description, bold = self._getLabel(idx) coords = np.array(self.grid.getCoordinates(idx))[:2] offset = _translationMatrix(*coords) boundingBox = _drawShape( self.pdc, self._geomType, self.transform, model=offset.dot(model), label=label, description=description, bold=bold, ) center = (boundingBox.TopLeft + boundingBox.BottomRight) / 2 self.pdcIdToCenter[id] = center self.pdc.SetIdBounds(id, boundingBox) def drawArrows(self): """Draw fuel path arrows.""" if self.mode != GridGui.Mode.PATH: return self.pdc.ClearId(self._arrowPdcId) self.pdc.SetId(self._arrowPdcId) goodPen = wx.Pen(wx.Colour(0, 0, 0), width=1, 
style=wx.PENSTYLE_DOT) badPen = wx.Pen(wx.Colour(255, 0, 0)) thisPath = self.parent.getSelectedPath() stuffInPath = sorted( [(index, idx) for idx, (path, index) in self.eqFuelPathBp.gridContents.items() if path == thisPath] ) touchedIndices = {entry[0] for entry in stuffInPath} indexGraph = {index: list() for index in touchedIndices} for index, location in stuffInPath: indexGraph[index].append(location) # python 3.6+ should maintain key order from the sorted stuffInPath keys = list(indexGraph.keys()) for i1, i2 in zip(keys[:-1], keys[1:]): pen = goodPen if i2 - i1 != 1: pen = badPen if len(indexGraph[i1]) > 1 or len(indexGraph[i2]) > 1: pen = badPen self.pdc.SetPen(pen) for fromIdx in indexGraph[i1]: for toIdx in indexGraph[i2]: p1 = self.grid.getCoordinates(fromIdx + (0,)) p2 = self.grid.getCoordinates(toIdx + (0,)) p1[2] = 1.0 p2[2] = 1.0 p1 = self.transform.dot(p1)[0:2] p2 = self.transform.dot(p2)[0:2] p1 = [int(v) for v in p1] p2 = [int(v) for v in p2] self.pdc.DrawLines([wx.Point(*p1), wx.Point(*p2)]) def _getLabel(self, idx) -> Tuple[str, Optional[str], bool]: """ Given (i, j, k) indices, return information about the object at that location. 
This will return a tuple containing: - The label to actually display in the GUI - Optionally, a description that can be turned into Flags and used to determine what the object should look like - Whether the object should be drawn in its full/bold representation """ ring, pos = self.grid.getRingPos(idx) specifier = self.coreBp.gridContents.get(tuple(idx[0:2]), None) aDesign = None description = None bold = True if specifier is not None: aDesign = self.parent.getAssemDesignBySpecifier(specifier) description = aDesign.flags or aDesign.name if self.mode == GridGui.Mode.SPECIFIER: if aDesign is not None: label = specifier else: label = "{}, {}".format(ring, pos) elif self.mode == GridGui.Mode.PATH: selectedPath = self.parent.getSelectedPath() if self.eqFuelPathBp is None: # We need to add a grid blueprint for the equilibrium fuel path self.bp["coreEqPath"] = GridBlueprint("coreEqPath", self.coreBp.geom) self.eqFuelPathBp = self.bp["coreEqPath"] if self.eqFuelPathBp.gridContents is None: _grid = self.eqFuelPathBp.construct() path, index = self.eqFuelPathBp.gridContents.get(idx[0:2], (None, None)) if path != selectedPath: bold = False if path is not None and index is not None: label = "({}, {})".format(path, index) else: label = "-" elif self.mode == GridGui.Mode.POSITION_RINGPOS: label = "{}, {}".format(ring, pos) elif self.mode == GridGui.Mode.POSITION_IJ: label = "{}, {}".format(*idx[0:2]) else: raise ValueError("Unsupported mode `{}`".format(self.mode)) return label, description, bold def setNumRings(self, n: int): """Change the number of rings that should be drawn.""" self.numRings = n if self.grid.geomType == geometry.GeomType.HEX: grid = grids.HexGrid.fromPitch(1, numRings=self.numRings) elif self.grid.geomType == geometry.GeomType.CARTESIAN: rectangle = [1.0, 1.0] if self.coreBp.latticeDimensions is not None: rectangle = [ self.coreBp.latticeDimensions.x, self.coreBp.latticeDimensions.y, ] grid = grids.CartesianGrid.fromRectangle(*rectangle, 
numRings=self.numRings) else: raise ValueError("Only support Hex and Cartesian grids, not {}".format(self.grid.geomType)) grid.symmetry = self.grid.symmetry grid.geomType = self.grid.geomType self.grid = grid self.drawGrid() self.Refresh() def onPaint(self, event, dc=None): selfPaint = dc is None dc = dc or wx.BufferedPaintDC(self) dc.SetBackground(wx.Brush(wx.Colour(255, 255, 255, 255))) dc.Clear() self.DoPrepareDC(dc) if selfPaint: xv, yv = self.GetViewStart() dx, dy = self.GetScrollPixelsPerUnit() region = self.GetUpdateRegion() region.Offset(dx * xv, dy * yv) _ = region.GetBox() self.pdc.DrawToDC(dc) def onMouse(self, event): if event.RightUp(): self.onContextMenu(event) return event.Skip() if event.LeftDown(): _ = event.GetX() _ = event.GetY() objId = self._getObjectFromEvent(event) if objId is None: return event.Skip() idx = tuple(self.pdcIdToIndices[objId])[0:2] self.parent.objectClicked(idx) assem = self.parent.getAssemToSet() self.applyAssem(objId, assem) if event.LeftUp(): pass return event.Skip() def onContextMenu(self, event): self.clickIndices = self._getIndicesFromEvent(event) self.PopupMenu(self.contextMenu) self.clickIndices = None def onSelectAssembly(self, event): specifier = self.coreBp.gridContents.get(self.clickIndices[0:2], None) aDesign = self.parent.getAssemDesignBySpecifier(specifier) if specifier is not None else None self.parent.setActiveAssem(aDesign) def onFillRing(self, event): ring, _ = self.grid.getRingPos(self.clickIndices) specifier = self.coreBp.gridContents.get(self.clickIndices[0:2], None) aDesign = self.parent.getAssemDesignBySpecifier(specifier) if specifier is not None else None for idx in self._idxByRing[ring - 1]: self.applyAssem(self.indicesToPdcId[idx], aDesign) def onClearRing(self, event): ring, _ = self.grid.getRingPos(self.clickIndices) for idx in self._idxByRing[ring - 1]: self.applyAssem(self.indicesToPdcId[idx], None) def applyAssem(self, pdcId, value: Optional[Union[AssemblyBlueprint, Tuple[int, int]]]): """ Apply 
the passed assembly design or equilibrium path indices for the desired object and redraw it. """ if self.activeBlueprints is None: return # uniform grid, so all shapes have the same scale gridScale = self._gridScale(self.grid) model = np.array([[gridScale[0], 0.0, 0.0], [0.0, gridScale[1], 0.0], [0.0, 0.0, 1.0]]) idx = tuple(self.pdcIdToIndices[pdcId]) idx2 = idx[0:2] if value is not None: if isinstance(value, AssemblyBlueprint): assert self.mode in { GridGui.Mode.SPECIFIER, GridGui.Mode.POSITION_IJ, GridGui.Mode.POSITION_RINGPOS, } self.activeBlueprints.gridContents[idx2] = value.specifier elif isinstance(value, tuple): assert self.mode == GridGui.Mode.PATH self.activeBlueprints.gridContents[idx2] = value else: # Clear whatever we clicked on if idx2 in self.activeBlueprints.gridContents: del self.activeBlueprints.gridContents[idx2] self.pdc.ClearId(pdcId) self.pdc.SetId(pdcId) coords = np.array(self.grid.getCoordinates(idx)) model = _translationMatrix(*coords[0:2]).dot(model) label, description, bold = self._getLabel(idx) boundingBox = _drawShape( self.pdc, self._geomType, self.transform, model=model, label=label, description=description, bold=bold, ) self.pdc.SetIdBounds(pdcId, boundingBox) self.drawArrows() self.Refresh() @staticmethod def _gridScale(grid): if isinstance(grid, grids.HexGrid): # Unit steps aren't aligned with the x,y coordinate system for Hex, so just # use the y dimension, assuming that's the proper flat-to-flat dimension coordScale = np.array([grid._unitSteps[1][1]] * 2) elif isinstance(grid, grids.CartesianGrid): # Cartesian grids align with the GUI coordinates, so just use unit steps # directly coordScale = np.array([grid._unitSteps[0][0], grid._unitSteps[1][1]]) return coordScale def _calcGridBounds(self) -> wx.Rect: """ Return the width and height (in pixels) that are needed to display the passed grid. This allows us to dynamically size the scrolled area, and to offset the geometry properly into the center of the screen. 
""" inDomain = { idx: loc for idx, loc in self.grid.items() if self.grid.locatorInDomain(loc) and self.grid.getRingPos(loc)[0] <= self.numRings } _ = self._gridScale(self.grid) allCenters = np.array([self.grid.getCoordinates(idx)[:2] for idx in inDomain]) minXY = np.amin(allCenters, axis=0) maxXY = np.amax(allCenters, axis=0) topRight = np.append([maxXY[1], maxXY[1]], 1.0) bottomLeft = np.append([minXY[0], minXY[1]], 1.0) nudge = np.array([UNIT_MARGIN, -UNIT_MARGIN, 0.0]) bottomRight = (self.transform.dot(topRight) + nudge).tolist() topLeft = (self.transform.dot(bottomLeft) - nudge).tolist() bottomRight = [int(v) for v in bottomRight] topLeft = [int(v) for v in topLeft] return wx.Rect(wx.Point(*topLeft[:2]), wx.Point(*bottomRight[:2])) class GridBlueprintControl(wx.Panel): """ A GUI for manipulating core layouts. The original intent of this is to serve as a stand-in replacement for the current "HexDragger". With further work, this could be made to function as a more general tool for manipulating grids of any sort. """ _wildcard = "YAML blueprints (*.yaml)|*.yaml|All files (*.*)|*.*" _defaultGeom = geometry.CARTESIAN def __init__(self, parent): wx.Panel.__init__(self, parent, wx.ID_ANY, size=(200, 30)) bp = Blueprints() bp.gridDesigns = gridBlueprint.Grids() # cs only needed for migrations. Realistically, this would be set from a # higher-level GUI container. If it is not set and migrations are needed # anyways, the user will be prompted. 
self._cs = None self._fName = None self._bp = bp self.clicker = GridGui(self, defaultGeom=self._defaultGeom) self.assemblyPalette = _AssemblyPalette(self, None, dict(), self.clicker) self.controls = _GridControls(self) self.controls.setNumRings(self.clicker.numRings) sizer = wx.BoxSizer(wx.VERTICAL) hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(self.clicker, 1, wx.EXPAND) hsizer.Add(self.assemblyPalette, 0) sizer.Add(hsizer, 1, wx.EXPAND) sizer.Add(self.controls, 0) self.sizer = sizer self.SetSizerAndFit(self.sizer) @property def bp(self): return self._bp @bp.setter def bp(self, bp): self._bp = bp geomType = geometry.GeomType.fromStr(bp.gridDesigns["core"].geom) # Make new assembly palette and editor newClicker = GridGui(self, bp=self.bp.gridDesigns) newPalette = _AssemblyPalette(self, geomType, bp.assemDesigns, newClicker) self.sizer.Replace(self.assemblyPalette, newPalette, recursive=True) self.sizer.Replace(self.clicker, newClicker, recursive=True) self.assemblyPalette.Destroy() self.clicker.Destroy() self.assemblyPalette = newPalette self.clicker = newClicker self.controls.setNumRings(self.clicker.numRings) self.sizer.Layout() self.SendSizeEventToParent() @property def grid(self): return self.clicker.grid def setNumRings(self, n: int): self.clicker.setNumRings(n) def setActiveAssem(self, aDesign): self.assemblyPalette.setActiveAssem(aDesign) self.clicker.mode = ( GridGui.Mode.SPECIFIER if isinstance(aDesign, (AssemblyBlueprint, type(None))) else GridGui.Mode.PATH ) def setMode(self, mode: GridGui.Mode): self.clicker.mode = mode # make sure that gui elements that have to do with mode setting are consistent if mode == GridGui.Mode.isPosition: self.assemblyPalette.setActiveAssem(None) self.controls.labelMode.SetSelection(mode) def expandToFullCore(self, event): self.clicker.growToFullCore() def objectClicked(self, _idx): """ Notify relevant controls that the object at the passed indices has been activated. 
This is needed to make the auto-increment stuff work in the fuel path editor. Without some sort of event that provides a positive assertion that the user is trying to interact with the layout, we can't know when to increment. """ self.assemblyPalette.editorClicked() def saveImage(self): """ Save the core layout to an image. Currently this only supports PNG images for simplicity. wxpython does not attempt to infer the file type based on extension, so we would need to make a file extension-to-format mapping. """ dlg = wx.FileDialog( self, message="Save image to...", style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT, wildcard="PNG images (.png)|*.png", ) if dlg.ShowModal() == wx.ID_OK: path = dlg.GetPath() else: return size = self.clicker.GetVirtualSize() image = wx.Bitmap(size) dc = wx.MemoryDC() dc.SelectObject(image) self.clicker.onPaint(None, dc=dc) dc.SelectObject(wx.NullBitmap) image.SaveFile(path, wx.BITMAP_TYPE_PNG) def save(self, stream=None, full=False): """ Save the blueprints to the passed stream, if provided. Otherwise prompt for a file to save to. This can save either the entire blueprints, or just the `grids:` section of the blueprints, based on the passed ``full`` argument. Saving just the grid blueprints can be useful when cobbling blueprints together with !include flags. """ if stream is None: self._saveNoStream(full) else: saveToStream(stream, self.bp, full, tryMap=True) def _saveNoStream(self, full=False): """Prompt for a file to save to. This can save either the entire blueprints, or just the `grids:` section of the blueprints, based on the passed ``full`` argument. Saving just the grid blueprints can be useful when cobbling blueprints together with !include flags. 
""" # Prompt the user for a file name, open it, and call ourself again with that # as the stream argument if self._fName is None: wd = os.getcwd() else: wd = os.path.split(self._fName)[0] # Don't use the blueprints filename as the default if we are only saving the # grids section; doing so may encourage users to overwrite their main # blueprints file. if full: fName = self._fName or "" else: fName = "" title = "Save blueprints to..." if full else "Save grid designs to..." dlg = wx.FileDialog( self, message=title, defaultDir=wd, defaultFile=fName, wildcard=self._wildcard, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT, ) if dlg.ShowModal() == wx.ID_OK: path = dlg.GetPath() else: return # Disallow overwriting the main blueprints with the grids section if not full and pathlib.Path(path).exists() and pathlib.Path(path).samefile(self._fName): message = ( "The chosen path, `{}` is the same as the main blueprints " 'file. This tool only saves the "grids" section of the ' "blueprints file, so saving over the original top-level blueprints " "will lead to data loss. Try again with a different name.".format(path) ) with wx.MessageDialog( self, message, "Overwriting top-level blueprints!", style=wx.ICON_WARNING, ) as dlg: dlg.ShowModal() return # Try writing to an internal buffer before opening the file for write. This # way to don't destroy anything unless we know we have something with which # to replace it. 
bpStream = io.StringIO() saveToStream(bpStream, self.bp, full, tryMap=True) with open(path, "w") as stream: stream.write(bpStream.getvalue()) def open(self, _event): if self._fName is None: wd = os.getcwd() else: wd = os.path.split(self._fName)[0] dlg = wx.FileDialog( self, message="Open blueprints file...", defaultDir=wd, defaultFile="", wildcard=self._wildcard, style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, ) if dlg.ShowModal() == wx.ID_OK: path = dlg.GetPath() self.loadFile(path) def new(self, _event): """ Create a Dialog with options to make a new grid blueprint, then make it and rejigger everything to use it. """ with NewGridBlueprintDialog(self) as dlg: if dlg.ShowModal() == wx.ID_OK: # Make new bp gridBp = dlg.getGridBlueprint() if self.bp is not None: self.bp.gridDesigns[gridBp.name] = gridBp self.bp = self.bp def loadFile(self, fName, cs=None): """Load a new blueprints file, refreshing pretty much everything.""" self._fName = fName self._cs = cs with open(fName, "r") as bpYaml: bpYaml = textProcessors.resolveMarkupInclusions(bpYaml, root=pathlib.Path(fName).parent) bp = Blueprints.load(bpYaml) if bp.gridDesigns is None or "core" not in bp.gridDesigns: cs = self._cs or self._promptForCs() if cs is None: # We didn't get a CS from the user, so cannot migrate old # blueprints. Give up. return migrate(bp, cs) self.bp = bp def getAssemDesignBySpecifier(self, specifier): for _key, design in self.bp.assemDesigns.items(): if design.specifier == specifier: return design raise KeyError("Could not find an Assembly design with specifier `{}`".format(specifier)) def getAssemToSet(self): return self.assemblyPalette.getAssemToSet() def getSelectedPath(self): """ Return the fuel path index that is currently selected. This is used to route the state of the _AssemblyPalette controls to things that need to know about such things (arrow drawing, whether objects should be bold, etc.) 
""" assem = self.assemblyPalette.getSelectedAssem() assert isinstance(assem, tuple) return assem[0] def _promptForCs(self) -> Optional[Settings]: """ Ask the user for a case settings file to locate the appropriate geom file to perform blueprint migrations. """ if self._fName is None: wd = os.getcwd() else: wd = os.path.split(self._fName)[0] dlg = wx.FileDialog( self, message="Migrations needed. Please provide a settings file...", defaultDir=wd, defaultFile="", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, ) if dlg.ShowModal() == wx.ID_OK: path = dlg.GetPath() return Settings(path) return None class HelpDialog(wx.Dialog): def __init__(self, parent): wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title="About the grid editor...") helpTxt = """ The Grid Editor is a rudimentary tool for manipulating the contents and basic structure of Grid Blueprints. Rather than being a full-featured reactor editor GUI, it intends to help in the specific task of laying objects out in a grid, which can be frustrating or tedious to do by hand in a text editor. Since this is not a general-purpose blueprint editor, this will only save the "grids" section of a blueprints file, which will then need to be incorporated into a top-level blueprints input, typically by !include-ing from the host blueprints. When opening a blueprints file, the root blueprints should be provided, since the Editor uses the assembly designs to populate the assembly palette on the right. Controls -------- Left-click in map: Apply the selected assembly design to the clicked location. If no assembly is selected, clear the assembly design in the licked location. Right-click in map: Summon context menu with useful tools. "Num. Rings" spinner: Modify the number of rings that the displayed grid should span. Reducing the number of rings below the region with defined assemblies *will not* clear those regions. "Expand to full core": Expand a 1/N-th reactor map into full symmetry. 
This will honor the periodic/reflective boundary conditions as specified. Label display drop-down: Select what should be displayed at each grid location. Save grid blueprints: Save just the grid blueprints to a file. This will need to be incorporated into a top-level blueprints file. To prevent loss of data, it will try to prevent overwriting the original blueprints file that was opened. Open blueprints: Open a new top-level blueprints file. New grid blueprints: Create a new grid blueprint, allowing configuration of the geometry type, domain, and boundary conditions. """ self.Sizer = wx.BoxSizer(wx.VERTICAL) txt = wx.StaticText(self, label=helpTxt) ok = wx.Button(self, id=wx.ID_OK) self.Sizer.Add(txt) self.Sizer.Add(ok) self.Fit() class NewGridBlueprintDialog(wx.Dialog): """Dialog box for configuring a new grid blueprint.""" # these provide stable mappings from the wx.Choice control indices to the respective geom types _geomFromIdx = {i: geomType for i, geomType in enumerate({geometry.GeomType.CARTESIAN, geometry.GeomType.HEX})} _idxFromGeom = {geomType: i for i, geomType in _geomFromIdx.items()} def __init__(self, parent): wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title="New Grid Blueprint...") nameLabel = wx.StaticText(self, label="Grid name:") self.gridName = wx.TextCtrl(self, value="core") nameSizer = wx.BoxSizer(wx.HORIZONTAL) nameSizer.Add(nameLabel, 0) nameSizer.Add(self.gridName, 1, wx.EXPAND) self.geomType = wx.Choice( self, id=wx.ID_ANY, choices=[gt.label for gt in self._geomFromIdx.values()], ) self.Bind(wx.EVT_CHOICE, self.onSelectGeomType, self.geomType) # Domain controls self.throughCenter = wx.CheckBox(self, id=wx.ID_ANY, label="Through Center Assembly") self.domainFull = wx.RadioButton(self, id=wx.ID_ANY, label="Full Core", style=wx.RB_GROUP) self.domain3 = wx.RadioButton(self, id=wx.ID_ANY, label="1/3 Core") self.domain4 = wx.RadioButton(self, id=wx.ID_ANY, label="1/4 Core") domainBox = wx.StaticBoxSizer(wx.VERTICAL, self, label="Domain") 
domainBox.Add(self.domainFull, 0) domainBox.Add(self.domain3, 0) domainBox.Add(self.domain4, 0) domainBox.Add(self.throughCenter, 0) self.Bind(wx.EVT_RADIOBUTTON, self.onDomainChange) # Symmetry controls self.symmetryFull = wx.RadioButton(self, id=wx.ID_ANY, style=wx.RB_GROUP, label="Full") self.periodic = wx.RadioButton(self, id=wx.ID_ANY, label="Periodic") self.reflective = wx.RadioButton(self, id=wx.ID_ANY, label="Reflective") symmetryBox = wx.StaticBoxSizer(wx.VERTICAL, self, label="Symmetry") symmetryBox.Add(self.symmetryFull, 0) symmetryBox.Add(self.periodic, 0) symmetryBox.Add(self.reflective, 0) # arrange the two boxes horizontally gridControls = wx.BoxSizer(wx.HORIZONTAL) gridControls.Add(domainBox, 0) gridControls.Add(symmetryBox, 0) ok = wx.Button(self, wx.ID_OK) cancel = wx.Button(self, wx.ID_CANCEL) self.Sizer = wx.BoxSizer(wx.VERTICAL) self.Sizer.Add(nameSizer, 1, wx.ALL, 0) self.Sizer.Add(self.geomType, 0, wx.ALL, 0) self.Sizer.Add(gridControls, 0, wx.ALL, 0) self.Sizer.Add(wx.StaticLine(self), 0, wx.EXPAND) okCancelSizer = wx.BoxSizer(wx.HORIZONTAL) okCancelSizer.Add(ok) okCancelSizer.Add(cancel) self.Sizer.Add(okCancelSizer, 0, wx.EXPAND | wx.ALL, 10) self.selectGeomType(geometry.GeomType.HEX) self.Fit() def selectGeomType(self, geom): """Enable/disable relevant controls for the selected geom type.""" # make sure the geom type Choice is in sync. This function doesn't have to be # called from the event handler. 
self.geomType.SetSelection(self._idxFromGeom[geom]) # switch to full-core, since it's always available self.domainFull.SetValue(True) self.symmetryFull.SetValue(True) self._toggleControls() def onSelectGeomType(self, _event): self.selectGeomType(self._geomFromIdx[self.geomType.GetSelection()]) def _toggleControls(self): """Make sure that the appropriate controls are enabled/disabled.""" geom = self._geomFromIdx[self.geomType.GetSelection()] full = self.domainFull.GetValue() self.throughCenter.Enable(enable=geom == geometry.GeomType.CARTESIAN) self.symmetryFull.Enable(enable=full) self.domain3.Enable(enable=geom == geometry.GeomType.HEX) self.domain4.Enable(enable=geom == geometry.GeomType.CARTESIAN) self.periodic.Enable(enable=not full) self.reflective.Enable(enable=not full and geom == geometry.GeomType.CARTESIAN) if full: self.symmetryFull.SetValue(True) def onDomainChange(self, event): if event.EventObject in {self.domainFull, self.domain3, self.domain4}: if self.domainFull.GetValue(): self.symmetryFull.SetValue(True) else: self.periodic.SetValue(True) self._toggleControls() def getGridBlueprint(self): """Using the state of the dialog controls, return a corresponding GridBlueprint.""" name = self.gridName.GetValue() geom = self._geomFromIdx[self.geomType.GetSelection()] if self.domainFull.GetValue(): domain = geometry.DomainType.FULL_CORE elif self.domain3.GetValue(): domain = geometry.DomainType.THIRD_CORE elif self.domain4.GetValue(): domain = geometry.DomainType.QUARTER_CORE else: raise ValueError("Couldn't map selection to supported fractional domain") if self.periodic.GetValue(): bc = geometry.BoundaryType.PERIODIC elif self.reflective.GetValue(): bc = geometry.BoundaryType.REFLECTIVE else: bc = geometry.BoundaryType.NO_SYMMETRY symmetry = geometry.SymmetryType(domain, bc, self.throughCenter.GetValue()) assert symmetry.checkValidSymmetry() bp = GridBlueprint(name=name, geom=str(geom), symmetry=str(symmetry)) return bp if __name__ == "__main__": app = 
wx.App() frame = wx.Frame(None, wx.ID_ANY, title="Grid Blueprints GUI", size=(1000, 1000)) gui = GridBlueprintControl(frame) frame.Show() if len(sys.argv) > 1: gui.loadFile(sys.argv[1]) app.MainLoop() ================================================ FILE: armi/utils/hexagon.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Generic hexagon math. Hexagons are fundamental to advanced reactors. .. image:: /.static/hexagon.png :width: 100% """ import math import numpy as np SQRT3 = math.sqrt(3.0) def area(pitch): """ Area of a hex given the flat-to-flat pitch. Notes ----- The pitch is the distance between the center of the hexagons in the lattice. """ return SQRT3 / 2.0 * pitch**2 def side(pitch): r""" Side length of a hex given the flat-to-flat pitch. Pythagorean theorem says: .. math:: \frac{s}{2}^2 + \frac{p}{2}^2 = s^2 which you can solve to find p = sqrt(3)*s Notes ----- The pitch is the distance between the center of the hexagons in the lattice. """ return pitch / SQRT3 def corners(rotation=0): """ Return the coordinates of a unit hexagon, rotated as requested. Zero rotation implies flat-to-flat aligned with y-axis. Origin in the center. 
""" points = np.array( [ (1.0 / (2.0 * math.sqrt(3.0)), 0.5), (1.0 / math.sqrt(3.0), 0.0), (1.0 / (2.0 * math.sqrt(3.0)), -0.5), (-1.0 / (2.0 * math.sqrt(3.0)), -0.5), (-1.0 / math.sqrt(3.0), 0.0), (-1.0 / (2.0 * math.sqrt(3.0)), 0.5), ] ) rotation = rotation / 180.0 * math.pi rotation = np.array( [ [math.cos(rotation), -math.sin(rotation)], [math.sin(rotation), math.cos(rotation)], ] ) return np.array([tuple(rotation.dot(point)) for point in points]) def pitch(side): """ Calculate the pitch from the length of a hexagon side. Notes ----- The pitch is the distance between the center of the hexagons in the lattice. """ return side * SQRT3 def numRingsToHoldNumCells(numCells): """ Determine the number of rings in a hexagonal grid with this many hex cells. If the number of pins don't fit exactly into any ring, returns the ring just large enough to fit them. Parameters ---------- numCells : int The number of hex cells in a hex lattice Returns ------- numRings : int Number of rings required to contain numCells items. Notes ----- The first hex ring (center) holds 1 position. Each subsequent hex ring contains 6 more positions than the last. This method works by incrementing ring numbers until the number of items is reached or exceeded. It could easily be replaced by a lookup table if so desired. """ if numCells == 0: return 0 nPinRings = int(math.ceil(0.5 * (1 + math.sqrt(1 + 4 * (numCells - 1) // 3)))) return nPinRings def numPositionsInRing(ring): """Number of positions in ring (starting at 1) of a hex lattice.""" return (ring - 1) * 6 if ring != 1 else 1 def totalPositionsUpToRing(ring: int) -> int: """Return the number of positions in a hexagon with a given number of rings.""" return 1 + 3 * ring * (ring - 1) def getIndexOfRotatedCell(initialCellIndex: int, orientationNumber: int) -> int: """Obtain a new cell number after placing a hexagon in a new orientation. Parameters ---------- initialCellIndex : int Positive number for this cell's position in a hexagonal lattice. 
    orientationNumber : int
        Orientation in number of 60 degree, counter clockwise rotations. An orientation
        of zero means the first cell in each ring of a flags up hexagon is in the upper
        right corner.

    Returns
    -------
    int
        New cell number across the rotation

    Raises
    ------
    ValueError
        If ``initialCellIndex`` is not positive.
        If ``orientationNumber`` is less than zero or greater than five.
    """
    if orientationNumber < 0 or orientationNumber > 5:
        raise ValueError(f"Orientation number must be in [0:5], got {orientationNumber}")
    if initialCellIndex > 1:
        if orientationNumber == 0:
            return initialCellIndex
        # Ring this cell sits on; each 60-degree rotation advances a cell by one
        # sixth of its ring, i.e. (ring - 1) positions.
        ring = numRingsToHoldNumCells(initialCellIndex)
        tot_pins = totalPositionsUpToRing(ring)
        newPinLocation = initialCellIndex + (ring - 1) * orientationNumber
        # Wrap around the end of the ring. The shift is at most 5 * (ring - 1),
        # less than the 6 * (ring - 1) positions in the ring, so wrapping once
        # is always sufficient.
        if newPinLocation > tot_pins:
            newPinLocation -= (ring - 1) * 6
        return newPinLocation
    elif initialCellIndex == 1:
        # The center cell is invariant under rotation.
        return initialCellIndex
    raise ValueError(f"Cell number must be positive, got {initialCellIndex}")


================================================
FILE: armi/utils/iterables.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module of utilities to help dealing with iterable objects in Python."""

import struct
from itertools import chain, filterfalse, tee

import numpy as np


def flatten(lst):
    """Flattens an iterable of iterables by one level.
def chunk(lst, n):
    r"""Return a generator that yields length-`n` chunks of `lst`.

    The last chunk may have a length less than `n` if `n` doesn't divide ``len(lst)``.

    Examples
    --------
    >>> list(chunk([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4))
    [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]
    """
    for start in range(0, len(lst), n):
        yield lst[start : start + n]


def split(a, n, padWith=()):
    r"""
    Split an iterable `a` into `n` roughly-even sublists.

    Parameters
    ----------
    a : iterable
        The list to be broken into chunks
    n : int
        The number of "even" chunks to break this into. There will be this many entries
        in the returned list no matter what. If len(a) < n, error unless `padWith` has
        been set, in which case the output is padded with it.
    padWith : object, optional
        If n > len(a), the result will be padded to length-n by appending `padWith`.

    Returns
    -------
    chunked : list[len=n] of lists

    Examples
    --------
    >>> split([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4)
    [[1, 2, 3], [4, 5, 6], [7, 8], [9, 10]]

    >>> split([0, 1, 2], 5, padWith=None)
    [[0], [1], [2], None, None]
    """
    a = list(a)  # in case `a` is not list-like
    N = len(a)
    assert n > 0, "Cannot chunk into less than 1 chunks. You requested {0}".format(n)
    k, m = divmod(N, n)
    # first m chunks receive k+1 items, the rest receive k; empty chunks become padWith
    chunked = [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] or padWith for i in range(n)]
    return chunked


def unpackBinaryStrings(binaryRow):
    """Unpack a row of binary strings to a list of floats.

    Raises
    ------
    ValueError
        If the row's length is not a multiple of 8 (one little-endian double per chunk).
    """
    if len(binaryRow) % 8:
        raise ValueError("Cannot unpack binary strings from misformatted row. Expected chunks of size 8.")
    return [struct.unpack("<d", barray)[0] for barray in chunk(binaryRow, 8)]


def packBinaryStrings(valueDict):
    """Convert a dictionary of lists of floats into a dictionary of lists of byte arrays."""
    bytearrays = {}
    for entry, values in valueDict.items():
        packed = bytearray()
        for value in values:
            packed.extend(struct.pack("<d", value))
        bytearrays[entry] = [packed]
    return bytearrays


def unpackHexStrings(hexRow):
    """Unpack a whitespace-separated row of hex-float strings to a list of floats."""
    return [float.fromhex(ss) for ss in hexRow.split() if ss != ""]


def packHexStrings(valueDict):
    """Convert a dictionary of lists of floats into a dictionary of lists of hex value strings."""
    hexes = {}
    for entry, values in valueDict.items():
        hexes[entry] = [" ".join(float.hex(float(value)) for value in values)]
    return hexes


class Sequence:
    """
    A partial list-like interface over a lazily-evaluated iterator.

    Supports ``append``/``extend`` and ``+``/``+=``, plus chainable, in-place
    filtering and mapping:

    >>> s = Sequence(range(1000000))
    >>> tuple(s.drop(lambda i: i % 2 == 0).select(lambda i: i < 20).transform(lambda i: i * 10))
    (10, 30, 50, 70, 90, 110, 130, 150, 170, 190)

    Only one element is loaded into memory at a time; nothing is evaluated until
    the sequence is consumed (e.g. by ``tuple``). ``drop``, ``select`` and
    ``transform`` act in place and return ``self``, so chaining and separate
    statements are equivalent.

    Note: intended for finite sequences only. ``select`` cannot know that a
    predicate is a terminal condition, so filtering an infinite generator will
    never finish; use ``itertools.takewhile``/``dropwhile`` for that.
    """

    def __init__(self, seq=None):
        """Construct a new Sequence object from an iterable.

        Also serves as a copy constructor if ``seq`` is an instance of Sequence.
        """
        if seq is None:
            seq = []
        elif isinstance(seq, Sequence):
            seq = seq.copy()
        self._iter = iter(seq)

    def copy(self):
        """Return a new iterator that is a copy of self without consuming self."""
        # tee replaces our iterator so the original is not consumed by the copy
        self._iter, duplicate = tee(self._iter, 2)
        return Sequence(duplicate)

    def __iter__(self):
        return self

    def __repr__(self):
        return "<{:s} at 0x{:x}>".format(self.__class__.__name__, id(self))

    def __next__(self):
        return next(self._iter)

    def select(self, pred):
        """Keep only items for which pred(item) evaluates to True.

        Note: returns self so it can be chained with other filters, e.g.,
        newseq = seq.select(...).drop(...).transform(...)
        """
        self._iter = filter(pred, self._iter)
        return self

    def drop(self, pred):
        """Drop items for which pred(item) evaluates to True.

        Note: returns self so it can be chained with other filters, e.g.,
        newseq = seq.select(...).drop(...).transform(...)
        """
        self._iter = filterfalse(pred, self._iter)
        return self

    def transform(self, func):
        """Apply func to this sequence."""
        self._iter = map(func, self._iter)
        return self

    def extend(self, seq):
        self._iter = chain(self._iter, seq)

    def append(self, item):
        self.extend([item])

    def __radd__(self, other):
        """Basic sequence addition: s1 += s2."""
        new = Sequence(other)
        new += Sequence(self)
        return new

    def __add__(self, other):
        new = Sequence(self)
        new += Sequence(other)
        return new

    def __iadd__(self, other):
        self.extend(Sequence(other))
        return self


def pivot(items, position: int):
    """Pivot the items in an iterable to start at a given position.

    Functionally just ``items[position:] + items[:position]`` with some logic to
    handle numpy arrays (concatenation, not summation).

    Parameters
    ----------
    items : list or numpy.ndarray
        Sequence to be re-ordered
    position : int
        Position that will be the first item in the sequence after the pivot

    Returns
    -------
    list or numpy.ndarray
    """
    if isinstance(items, np.ndarray):
        return np.concatenate((items[position:], items[:position]))
    elif isinstance(items, list):
        return items[position:] + items[:position]
    raise TypeError(f"Pivoting {type(items)} not supported : {items}")


# ================================================
# FILE: armi/utils/mathematics.py
# ================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various math utilities."""
import math
import operator  # the python package, not the ARMI module
import re

import numpy as np
import scipy.optimize as sciopt

# special pattern to deal with FORTRAN-produced scipats without E, like 3.2234-234
SCIPAT_SPECIAL = re.compile(r"([+-]?\d*\.\d+)[eEdD]?([+-]\d+)")


def average1DWithinTolerance(vals, tolerance=0.2):
    """
    Compute the average of a series of 1D arrays with a tolerance.

    Tuned for averaging assembly meshes or block heights.

    Parameters
    ----------
    vals : 2D np.array
        could be assembly x axial mesh tops or heights
    tolerance : float
        The accuracy to which we need to know the average.

    Returns
    -------
    1D np.array
        The average of all the input 1D NumPy arrays.

    Raises
    ------
    ValueError
        If no rows are within tolerance of the mean, or a non-positive average results.
    """
    vals = np.array(vals)

    filterOut = np.array([False])  # this gets discarded
    while not filterOut.all():  # 20% difference is the default tolerance
        avg = vals.mean(axis=0)  # average over all columns
        diff = abs(vals - avg) / avg  # no nans, because all vals are non-zero
        # True = 1, sum across axis means any height in assem is off
        filterOut = (diff > tolerance).sum(axis=1) == 0
        vals = vals[filterOut]  # filter anything that is skewing

    if vals.size == 0:
        raise ValueError("Nothing was near the mean, there are no acceptable values!")

    if (avg <= 0.0).any():
        raise ValueError(
            "A non-physical value (<=0) was computed, but this is not possible.\nValues: {}\navg: {}".format(vals, avg)
        )

    return avg


def convertToSlice(x, increment=0):
    """
    Convert a int, float, list of ints or floats, None, or slice to a slice.

    Also optionally increments that slice to make it easy to line up lists that
    don't start with 0. Use this with np.array (np.ndarray) types to easily get
    selections of its elements.

    Parameters
    ----------
    x : multiple types allowed.
        int: select one index.
        list of int: select these index numbers.
        None: select all indices.
        slice: select this slice
    increment : integer (or boolean), optional
        Step size, when taking your slices. (`False` is zero.)

    Returns
    -------
    slice : slice
        Returns a slice object that can be used in an array like a[x] to select from
        its members. Also, the slice has its index numbers decremented by 1. It can
        also return a numpy array, which can be used to slice other numpy arrays in
        the same way as a slice.

    Raises
    ------
    TypeError
        If ``x`` is not one of the supported types.

    Examples
    --------
    >>> a = np.array([10, 11, 12, 13])

    >>> convertToSlice(2)
    slice(2, 3, None)

    >>> a[convertToSlice(2)]
    array([12])

    >>> convertToSlice(2, increment=-1)
    slice(1, 2, None)

    >>> a[convertToSlice(2, increment=-1)]
    array([11])

    >>> a[convertToSlice(None)]
    array([10, 11, 12, 13])

    >>> a[utils.convertToSlice([1, 3])]
    array([11, 13])

    >>> a[utils.convertToSlice([1, 3], increment=-1)]
    array([10, 12])

    >>> a[utils.convertToSlice(slice(2, 3, None), increment=-1)]
    array([11])
    """
    if x is None:
        x = np.s_[:]

    if isinstance(x, list):
        x = np.array(x)

    if isinstance(x, (int, np.integer, float, np.floating)):
        x = slice(int(x), int(x) + 1, None)

    # Correct the slice indices to be group instead of index based.
    # The energy groups are 1..x and the indices are 0..x-1.
    if isinstance(x, slice):
        if x.start is not None:
            jstart = x.start + increment
        else:
            jstart = None

        if x.stop is not None:
            if isinstance(x.stop, list):
                jstop = [x + increment for x in x.stop]
            else:
                jstop = x.stop + increment
        else:
            jstop = None

        jstep = x.step

        return np.s_[jstart:jstop:jstep]
    elif isinstance(x, np.ndarray):
        return np.array([i + increment for i in x])
    else:
        # narrowed from a generic Exception so callers can catch a meaningful type;
        # anything that previously caught Exception still catches TypeError
        raise TypeError(f"It is not known how to handle x type: {type(x)} in utils.convertToSlice")


def efmt(a: str) -> str:
    """Convert a string exponential number to another string with just 2 digits in the exponent."""
    # this assumes that none of our numbers will be more than 1e100 or less than 1e-100...
    two = a.split("E")
    if len(two) != 2:
        two = a.split("e")
    exp = two[1]  # this is '+002' or '+02' or something
    if len(exp) == 4:  # it has 3 digits of exponent
        exp = exp[0] + exp[2:]  # gets rid of the hundred's place digit
    return two[0] + "E" + exp


def expandRepeatedFloats(repeatedList):
    """
    Return an expanded repeat list.

    Notes
    -----
    R char is valid for showing the number of repeats in MCNP. For example the list:
    [150, 200, '9R'] indicates a 150 day cycle followed by 10 200 day cycles.
    """
    nonRepeatList = []
    for val in repeatedList:
        isRepeat = False
        if isinstance(val, str):
            val = val.upper()
            if val.count("R") > 1:
                raise ValueError("List had strings that were not repeats")
            elif "R" in val:
                val = val.replace("R", "")
                isRepeat = True
        if isRepeat:
            # repeat the most recent value `val` more times
            nonRepeatList += [nonRepeatList[-1]] * int(val)
        else:
            nonRepeatList.append(float(val))
    return nonRepeatList


def findClosest(listToSearch, val, indx=False):
    """
    Find closest item in a list.

    Parameters
    ----------
    listToSearch : list
        The list to search through
    val : float
        The target value that is being searched for in the list
    indx : bool, optional
        If true, returns minVal and minIndex, otherwise, just the value

    Returns
    -------
    minVal : float
        The item in the listToSearch that is closest to val
    minI : int
        The index of the item in listToSearch that is closest to val. Returned if indx=True.
    """
    d = float("inf")
    minVal = None
    minI = None
    for i, item in enumerate(listToSearch):
        if abs(item - val) < d:
            d = abs(item - val)
            minVal = item
            minI = i
    if indx:
        return minVal, minI
    else:
        # backwards compatibility
        return minVal


def findNearestValue(searchList, searchValue):
    """Search a given list for the value that is closest to the given search value."""
    return findNearestValueAndIndex(searchList, searchValue)[0]


def findNearestValueAndIndex(searchList, searchValue):
    """Search a given list for the value that is closest to the given search value.

    Return a tuple containing the value and its index in the list.
    """
    searchArray = np.array(searchList)
    closestValueIndex = (np.abs(searchArray - searchValue)).argmin()
    return searchArray[closestValueIndex], closestValueIndex


def fixThreeDigitExp(strToFloat: str) -> float:
    """
    Convert FORTRAN numbers that cannot be converted into floats.

    Notes
    -----
    Converts a number like "9.03231714805651-101" (no e or E) to "9.03231714805651e-101".
    Some external depletion kernels currently need this fix. From contact with developer:
    The notation like 1.0-101 is a FORTRAN thing, with history going back to the 60's.
    They will only put E before an exponent 99 and below. Fortran will also read these
    guys just fine, and they are valid floating point numbers. It would not be a useful
    effort, in terms of time, trying to get FORTRAN to behave differently. The approach
    has been to write a routine in the reading code which will interpret these.

    This helps when the scientific number exponent does not fit.
    """
    match = SCIPAT_SPECIAL.match(strToFloat)
    return float("{}E{}".format(*match.groups()))


def getFloat(val):
    """Return float version of val, or None if it's impossible.

    Useful for converting user-input into floats when '' might be possible.
    """
    try:
        newVal = float(val)
        return newVal
    except Exception:
        return None


def getStepsFromValues(values, prevValue=0.0):
    """Convert list of floats to list of steps between each float."""
    steps = []
    for val in values:
        currentVal = float(val)
        steps.append(currentVal - prevValue)
        prevValue = currentVal
    return steps


def isMonotonic(inputIter, relation):
    """
    Check if an iterable contains elements that are monotonically increasing or
    decreasing, whatever that might mean for the specific types of the elements.

    Parameters
    ----------
    inputIter : list
        Some list to check. Values in the list should have a defined relation to each other.
    relation : {'<=', '<', '>=', '>'}
        The relation between the elements to check, from left to right through the iterable.

    Returns
    -------
    bool

    Raises
    ------
    ValueError
        If ``relation`` is not one of the four supported comparison strings.
    """
    operatorDict = {
        "<=": operator.le,
        "<": operator.lt,
        ">=": operator.ge,
        ">": operator.gt,
    }
    try:
        op = operatorDict[relation]
    except KeyError:
        raise ValueError(f"Valid relation not specified: {relation}")
    return all([op(x, y) for x, y in zip(inputIter, inputIter[1:])])
def linearInterpolation(x0, y0, x1, y1, targetX=None, targetY=None):
    """
    Do a linear interpolation (or extrapolation) for y=f(x).

    Parameters
    ----------
    x0,y0,x1,y1 : float
        Coordinates of two points to interpolate between
    targetX : float, optional
        X value to evaluate the line at
    targetY : float, optional
        Y value we want to find the x value for (inverse interpolation)

    Returns
    -------
    interpY : float
        The value of y(targetX), if targetX is not None
    interpX : float
        The value of x where y(x) = targetY (if targetY is not None)

    Raises
    ------
    ZeroDivisionError
        If ``x0 == x1`` (vertical line; slope undefined).

    Notes
    -----
    y = m(x-x0) + b

    x = (y-b)/m
    """
    if x1 == x0:
        raise ZeroDivisionError("The x-values are identical. Cannot interpolate.")

    m = (y1 - y0) / (x1 - x0)
    b = -m * x0 + y0

    if targetX is not None:
        return m * targetX + b
    else:
        return (targetY - b) / m


def minimizeScalarFunc(
    func,
    goal,
    guess,
    maxIterations=None,
    cs=None,
    positiveGuesses=False,
    method=None,
    tol=1.0e-3,
):
    """
    Use SciPy minimize with the given function, goal value, and first guess.

    Parameters
    ----------
    func : function
        The function that guess will be changed to try to make it return the goal value.
    goal : float
        The function will be changed until its return equals this value.
    guess : float
        The first guess value to do Newton's method on the func.
    maxIterations : int
        The maximum number of iterations that the Newton's method will be allowed to perform.
    cs : optional
        Case settings; ``cs["maxNewtonsIterations"]`` supplies maxIterations when it is None.
    positiveGuesses : bool, optional
        If True, the absolute value of the guess (and answer) is used.
    method : str, optional
        Passed through to ``scipy.optimize.minimize``.
    tol : float, optional
        Convergence tolerance passed through to ``scipy.optimize.minimize``.

    Returns
    -------
    ans : float
        The guess that when input to the func returns the goal.
    """

    def goalFunc(guess, func, positiveGuesses):
        # minimized quantity: distance from the goal value
        if positiveGuesses is True:
            guess = abs(guess)
        funcVal = func(guess)
        val = abs(goal - funcVal)
        return val

    if (maxIterations is None) and (cs is not None):
        maxIterations = cs["maxNewtonsIterations"]

    X = sciopt.minimize(
        goalFunc,
        guess,
        args=(func, positiveGuesses),
        method=method,
        tol=tol,
        options={"maxiter": maxIterations},
    )
    # X returns `[num]` instead of `num`, so we have to grab the first/only element in that list
    ans = float(X["x"][0])
    if positiveGuesses is True:
        ans = abs(ans)

    return ans


def newtonsMethod(func, goal, guess, maxIterations=None, cs=None, positiveGuesses=False):
    r"""
    Solve a Newton's method with the given function, goal value, and first guess.

    Parameters
    ----------
    func : function
        The function that guess will be changed to try to make it return the goal value.
    goal : float
        The function will be changed until its return equals this value.
    guess : float
        The first guess value to do Newton's method on the func.
    maxIterations : int
        The maximum number of iterations that the Newton's method will be allowed to perform.
    cs : optional
        Case settings; ``cs["maxNewtonsIterations"]`` supplies maxIterations when it is None.
    positiveGuesses : bool, optional
        If True, the absolute value of the guess (and answer) is used.

    Returns
    -------
    ans : float
        The guess that when input to the func returns the goal.
    """

    def goalFunc(guess, func, positiveGuesses):
        # root-found quantity: distance from the goal value
        if positiveGuesses is True:
            guess = abs(guess)
        funcVal = func(guess)
        val = abs(goal - funcVal)
        return val

    if (maxIterations is None) and (cs is not None):
        maxIterations = cs["maxNewtonsIterations"]

    ans = float(
        sciopt.newton(
            goalFunc,
            guess,
            args=(func, positiveGuesses),
            tol=1.0e-3,
            maxiter=maxIterations,
        )
    )

    if positiveGuesses is True:
        ans = abs(ans)

    return ans


def parabolaFromPoints(p1, p2, p3):
    r"""
    Find the parabola that passes through three points.

    We solve a simultaneous equation with three points.

    A = x1**2 x1 1
        x2**2 x2 1
        x3**2 x3 1

    b = y1
        y2
        y3

    find coefficients Ax=b

    Parameters
    ----------
    p1 : tuple
        first point (x,y) coordinates
    p2 : tuple
        second (x,y) points
    p3 : tuple
        third (x,y) points

    Returns
    -------
    tuple
        3 floats: a,b,c coefficients of y=ax^2+bx+c
    """
    A = np.array([[p1[0] ** 2, p1[0], 1], [p2[0] ** 2, p2[0], 1], [p3[0] ** 2, p3[0], 1]])

    b = np.array([[p1[1]], [p2[1]], [p3[1]]])
    try:
        x = np.linalg.solve(A, b)
    except np.linalg.LinAlgError:
        # narrowed from a bare `except:` — only singular systems (e.g. duplicate
        # x-values) are expected here; log context and re-raise
        print("Error in parabola {} {}".format(A, b))
        raise

    # x[#] returns `[num]` instead of `num`, so we have to grab the first/only element in that list
    return float(x[0][0]), float(x[1][0]), float(x[2][0])
def parabolicInterpolation(ap, bp, cp, targetY):
    """
    Given parabola coefficients, interpolate the time that would give k=targetK.

    keff = at^2+bt+c

    We want to solve a*t^2+bt+c-targetK = 0.0 for time. If there are real roots, we
    should probably take the smallest one because the larger one might be at very
    high burnup. If there are no real roots, just take the point where the deriv
    == 0, i.e. 2at+b=0, so t = -b/2a. The slope of the curve is the solution to
    2at+b at whatever t has been determined.

    Parameters
    ----------
    ap : float
        coefficients ap of a parabola y = ap*x^2 + bp*x + cp
    bp : float
        coefficients bp of a parabola y = ap*x^2 + bp*x + cp
    cp : float
        coefficients cp of a parabola y = ap*x^2 + bp*x + cp
    targetY : float
        The keff to find the cycle length of

    Returns
    -------
    realRoots : list of tuples (root, slope)
        The best guess of the cycle length that will give k=targetK.
        If no positive root was found, this is the maximum of the curve. In that
        case, it will be a negative number. If there are two positive roots, there
        will be two entries.

        slope : float
            The slope of the keff vs. time curve at t=newTime

    Raises
    ------
    RuntimeError
        If the curve has neither a positive real root nor a positive maximum.
    """
    candidates = np.roots([ap, bp, cp - targetY])
    realRoots = []
    for root in candidates:
        if root.imag == 0 and root.real > 0:
            realRoots.append((root.real, 2.0 * ap * root.real + bp))

    if realRoots:
        return realRoots

    # no positive real roots. Take maximum and give up for this cycle.
    peakTime = -bp / (2 * ap)
    if peakTime < 0:
        raise RuntimeError("No positive roots or maxima.")
    peakSlope = 2.0 * ap * peakTime + bp
    # negative time signals that it is not expected to be critical
    return [(-peakTime, peakSlope)]


def relErr(v1: float, v2: float) -> float:
    """Find the relative error between two numbers."""
    return (v2 - v1) / v1 if v1 else -1e99


def resampleStepwise(xin, yin, xout, avg=True):
    """
    Resample a piecewise-defined step function from one set of mesh points to another.

    This is useful for reallocating values along a given axial mesh (or assembly of
    blocks).

    Parameters
    ----------
    xin : list
        interval points / mesh points
    yin : list
        interval values / inter-mesh values
    xout : list
        new interval points / new mesh points
    avg : bool
        By default, this is set to True, forcing the resampling to be done by
        averaging. But if this is False, the resampling will be done by summation,
        to try and preserve the totals after resampling.
    """
    # validation: there must be one more mesh point than inter-mesh values
    assert (len(xin) - 1) == len(yin)

    # find out in which xin bin each xout value lies
    bins = np.digitize(xout, bins=xin)

    yout = []
    # loop through xout / the xout bins
    for i in range(1, len(bins)):
        start = bins[i - 1]
        end = bins[i]
        chunk = yin[start - 1 : end]
        length = xin[start - 1 : end + 1]
        length = [length[j] - length[j - 1] for j in range(1, len(length))]

        # if the xout lies outside the xin range
        if not len(chunk):
            yout.append(0)
            continue

        # trim any partial right-side bins
        if xout[i] < xin[min(end, len(xin) - 1)]:
            fraction = (xout[i] - xin[end - 1]) / (xin[end] - xin[end - 1])
            if fraction == 0:
                chunk = chunk[:-1]
                length = length[:-1]
            elif avg:
                length[-1] *= fraction
            else:
                chunk[-1] *= fraction

        # trim any partial left-side bins
        if xout[i - 1] > xin[start - 1]:
            fraction = (xin[start] - xout[i - 1]) / (xin[start] - xin[start - 1])
            if fraction == 0:
                chunk = chunk[1:]
                length = length[1:]
            elif avg:
                length[0] *= fraction
            else:
                chunk[0] *= fraction

        # return the sum or the average
        if [1 for c in chunk if (not hasattr(c, "__len__") and c is None)]:
            # any scalar None in the chunk poisons this bin
            yout.append(None)
        elif avg:
            weighted_sum = sum([ch * ln for ch, ln in zip(chunk, length)])
            yout.append(weighted_sum / sum(length))
        else:
            yout.append(sum(chunk))

    return yout


def rotateXY(x, y, degreesCounterclockwise=None, radiansCounterclockwise=None):
    """
    Rotate x, y coordinates.

    Parameters
    ----------
    x : float
        X coordinates, array-like
    y : float
        Y coordinates, array-like
    degreesCounterclockwise : float
        Degrees to rotate in the CCW direction
    radiansCounterclockwise : float
        Radians to rotate in the CCW direction

    Returns
    -------
    tuple
        xr, yr: the rotated coordinates
    """
    if radiansCounterclockwise is None:
        radiansCounterclockwise = degreesCounterclockwise * math.pi / 180.0

    sinT = math.sin(radiansCounterclockwise)
    cosT = math.cos(radiansCounterclockwise)
    rotationMatrix = np.array([[cosT, -sinT], [sinT, cosT]])
    xr, yr = rotationMatrix.dot(np.vstack((x, y)))
    if len(xr) > 1:
        # Convert to lists because everyone prefers lists for some reason
        return xr.tolist(), yr.tolist()
    else:
        # Convert to scalar for consistency with old implementation
        return xr[0], yr[0]


# ================================================
# FILE: armi/utils/outputCache.py
# ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TerraPower Calculation Results Cache (CRC).

This helps avoid duplicated time/energy in running cases. In test systems and analysis, it's
possible that the same calc will be done over and over, always giving the same result. This system
allows the results to be cached and returned instantly instead of re-running, for example, MC2.

API usage
---------
Getting a cached file::

    exe = "MC2-2018-blah.exe"
    inpFiles = ["mccAA.inp", "rmzflx"]
    outputFound = crc.retrieveOutput(exe, inp, output)
    if not outputFound:
        mc2.run(exe, inp, output)

Storing a file to the cache::

    crc.store(exe, inp, outFiles)

Notes
-----
Could probably be, like, a decorator on subprocess but we call subprocess a bunch of
different ways.
"""
import hashlib
import json
import os
import subprocess

from armi import runLog
from armi.utils import safeCopy
from armi.utils.pathTools import cleanPath

MANIFEST_NAME = "CRC-manifest.json"


def retrieveOutput(exePath, inputPaths, cacheDir, locToRetrieveTo=None):
    """
    Check the cache for a valid file and copy it if it exists.

    Returns True when valid cached outputs were found and copied, False otherwise.

    Notes
    -----
    Input paths need to be in the same order each time if the same cached folder is
    expected to be found.
    """
    cachedFolder = _getCachedFolder(exePath, inputPaths, cacheDir)
    if os.path.exists(cachedFolder):
        if locToRetrieveTo is None:
            # default: place retrieved outputs next to the first input file
            locToRetrieveTo = os.path.dirname(inputPaths[0])
        successful = _copyOutputs(cachedFolder, locToRetrieveTo)
        if successful:
            runLog.info("Retrieved cached outputs for {}".format(exePath))
            return True
        else:
            # outputs didn't match manifest. Just delete to save checking next time.
            runLog.warning(
                "Outputs in {} were inconsistent with manifest. Deleting and reproducing".format(cachedFolder)
            )
            try:
                deleteCache(cachedFolder)
            except Exception as e:
                # best-effort cleanup; failure here only costs a future cache miss
                runLog.debug(e)
    return False


def _copyOutputs(cachedFolder, locToRetrieveTo):
    """Check that the outputs have the expected hashes and copy them if they do."""
    manifest = os.path.join(cachedFolder, MANIFEST_NAME)
    if not os.path.exists(manifest):
        return False

    with open(manifest) as manifestJSON:
        storedOutputNamesToHashes = json.load(manifestJSON)

    copies = []
    for storedOutputName, expectedHash in storedOutputNamesToHashes.items():
        storedOutputPath = os.path.join(cachedFolder, storedOutputName)
        try:
            if _hashFiles([storedOutputPath]) != expectedHash:
                return False
        except FileNotFoundError:
            return False
        copyPath = os.path.join(locToRetrieveTo, storedOutputName)
        copies.append([storedOutputPath, copyPath])

    # only copy after every stored file validated, so a corrupt cache copies nothing
    for storedOutputPath, copyPath in copies:
        safeCopy(storedOutputPath, copyPath)
    return True


def _getCachedFolder(exePath, inputPaths, cacheDir):
    """Return the folder name expected for this executable and set of inputs."""
    exeName = os.path.basename(os.path.splitext(exePath)[0])
    exeHash = _hashFiles([exePath])
    inputHash = _hashFiles(inputPaths)
    # first 2 helps with reducing the number of folders in a folder
    first2, remainder = (inputHash[:2], inputHash[2:])
    return os.path.join(cacheDir, exeName, exeHash, first2, remainder)


def _hashFiles(paths):
    """Return an MD5 hash of the contents of the given files (cache keying, not security)."""
    with open(paths[0], "rb") as binaryF:
        md5Hash = hashlib.md5(binaryF.read())
    for path in paths[1:]:
        with open(path, "rb") as binaryF:
            md5Hash.update(binaryF.read())
    return md5Hash.hexdigest()


def _makeOutputManifest(outputFiles, folderLocation):
    """Make a json file with the output names and expected hash."""
    manifest = {outputFile: _hashFiles([outputFile]) for outputFile in outputFiles}
    with open(os.path.join(folderLocation, MANIFEST_NAME), "w") as manifestJSON:
        json.dump(manifest, manifestJSON)


def store(exePath, inputPaths, outputFiles, cacheDir):
    """
    Store an output file in the cache.

    Notes
    -----
    Input paths need to be in the same order each time if the same cached folder is
    expected to be found. It is difficult to know what outputs will exist from a
    specific run, so only outputs that do exist will attempt to be copied. This
    function should be supplied with a greedy list of outputs.
    """
    # outputFilePaths is a greedy list and they might not all be produced
    outputsThatExist = [outputFile for outputFile in outputFiles if os.path.exists(outputFile)]
    folderLoc = _getCachedFolder(exePath, inputPaths, cacheDir)
    if os.path.exists(folderLoc):
        deleteCache(folderLoc)
    os.makedirs(folderLoc)
    _makeOutputManifest(outputsThatExist, folderLoc)

    for outputFile in outputsThatExist:
        baseName = os.path.basename(outputFile)
        cachedLoc = os.path.join(folderLoc, baseName)
        safeCopy(outputFile, cachedLoc)

    runLog.info("Added outputs for {} to the cache.".format(exePath))


def deleteCache(cachedFolder):
    """
    Remove a cached-results folder.

    As a guard against accidental mass deletion, the path must contain the keyword
    ``cache``; anything else is refused.
    """
    if "cache" not in str(cachedFolder).lower():
        raise RuntimeError("Cache location must contain keyword: `cache`.")

    # Output caches need to pass in `forceClean` in order to greenlight the deletion.
    cleanPath(cachedFolder, forceClean=True)


def cacheCall(cacheDir, executablePath, inputPaths, outputFileNames, execute=None, tearDown=None):
    """
    Check the cache for outputs of this run and return them; otherwise run and store.

    Notes
    -----
    It is non-trivial to determine the exact set of outputs an executable will produce
    without running the executable. Therefore, ``outputFileNames`` is expected to be a
    greedy list and cache will attempt to copy all the files, but not fail if the file
    is not present. When copying outputs back, all files copied previously will be
    targeted.
    """
    if execute is None:
        # def instead of an assigned lambda (PEP8 E731); same call behavior
        def execute():
            return subprocess.call([executablePath] + inputPaths)

    if not cacheDir:
        runLog.info("Executing {}".format(executablePath))
        execute()
        return

    try:
        if retrieveOutput(executablePath, inputPaths, cacheDir):
            return
    except Exception as e:
        runLog.warning(
            "Outputs existed in cache, but failed to retrieve outputs from: {} \nerror: {}".format(
                _getCachedFolder(executablePath, inputPaths, cacheDir), e
            )
        )

    runLog.warning("Cached outputs were not found, executing {}".format(executablePath))
    execute()
    if tearDown is not None:
        tearDown()
    try:
        store(executablePath, inputPaths, outputFileNames, cacheDir)
    except Exception as e:
        # something went wrong in storage.
        # This is okay as the manifest will be inconsistent with the outputs and not used in the future.
        runLog.warning(
            "Failed to store outputs in: {}\nerror: {}".format(
                _getCachedFolder(executablePath, inputPaths, cacheDir), e
            )
        )


# ================================================
# FILE: armi/utils/parsing.py
# ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains tools for common tasks in parsing in python strings into non-string values."""
import ast
import copy


def tryLiteralEval(source):
    """Return ``ast.literal_eval(source)`` when it parses; otherwise return ``source`` unchanged."""
    try:
        source = ast.literal_eval(source)
    except (ValueError, SyntaxError):
        pass
    return source


# the following dict helps avoid the need for an eval() statement
# Is there no better way to go 'bool' -> bool !?
# name -> type for the primitive types parseValue can target
_str_types = {tp.__name__: tp for tp in (type(None), bool, int, complex, float, str, bytes, list, tuple, dict)}
# inverse mapping: type -> name
_type_strs = {v: k for k, v in _str_types.items()}

# python's matching truth evaluations of Nones in different primitive types
# str's and unicodes omitted because parseValue denies their use.
_none_types = {
    type(None): None,
    bool: False,
    int: 0,
    complex: 0j,
    float: 0.0,
    list: [],
    tuple: (),
    dict: {},
}


def _numericSpecialBehavior(source, rt):
    """Attempt numeric conversion to ``rt``; return (converted, True) or (source, False)."""
    try:
        return rt(source), True  # convert, report success
    except (ValueError, TypeError):
        return source, False  # fail, report failure


def parseValue(source, requestedType, allowNone=False, matchingNonetype=True):
    """Try to parse a python value, expecting input to be the right type or a string."""
    # misuse prevention
    if requestedType is str:
        raise TypeError(
            "Unreliable and unnecessary to use parseValue for strs and unicodes. "
            "Given parameters are {}, {}, {}.".format(source, requestedType, allowNone)
        )

    # evaluation and special evaluation for numbers
    evaluated_source, skip_instance_check = tryLiteralEval(source), False
    if requestedType in [int, float, complex]:
        evaluated_source, skip_instance_check = _numericSpecialBehavior(evaluated_source, requestedType)

    # none logic: any falsy evaluation counts as "None-ish" here, on purpose
    if allowNone and not evaluated_source:
        if matchingNonetype:
            # deepcopy so callers never share the mutable sentinels ([], {})
            return copy.deepcopy(_none_types[requestedType])
        return evaluated_source

    # assert everything went well
    if not skip_instance_check and not isinstance(evaluated_source, requestedType):
        msg = "Could not parse {} from source {}."
        if allowNone:
            msg += " Nor could None be parsed from source."
        raise ValueError(msg.format(requestedType, evaluated_source))

    return evaluated_source


# ================================================
# FILE: armi/utils/pathTools.py
# ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains commonly used functions relating to directories, files and path manipulations. """ import importlib import os import pathlib import shutil from time import sleep from armi import context, runLog from armi.utils import safeCopy def armiAbsPath(*pathParts): """Convert a list of path components to an absolute path, without drive letters if possible.""" return os.path.abspath(os.path.join(*pathParts)) def copyOrWarn(filepathDescription, sourcePath, destinationPath): """Copy a file or directory, or warn if the filepath doesn't exist. Parameters ---------- filepathDescription : str a description of the file and/or operation being performed. sourcePath : str Filepath to be copied. destinationPath : str Copied filepath. """ try: if os.path.isdir(sourcePath): shutil.copytree(sourcePath, destinationPath, dirs_exist_ok=True) else: safeCopy(sourcePath, destinationPath) runLog.debug("Copied {}: {} -> {}".format(filepathDescription, sourcePath, destinationPath)) except shutil.SameFileError: pass except Exception as e: runLog.warning( "Could not copy {} from {} to {}\nError was: {}".format(filepathDescription, sourcePath, destinationPath, e) ) def isFilePathNewer(path1, path2): """Returns true if path1 is newer than path2. Returns true if path1 is newer than path2, or if path1 exists and path2 does not, otherwise raises an IOError. 
""" exist1 = os.path.exists(path1) exist2 = os.path.exists(path2) if exist1 and exist2: path1stat = os.stat(path1) path2stat = os.stat(path2) return path1stat.st_mtime > path2stat.st_mtime elif exist1 and not exist2: return True else: raise IOError("Path 1 does not exist: {}".format(path1)) def isAccessible(path): """Check whether user has access to a given path. Parameters ---------- path : str a directory or file """ return os.path.exists(path) def separateModuleAndAttribute(pathAttr): """ Return True of the specified python module, and attribute of the module exist. Parameters ---------- pathAttr : str Path to a python module followed by the desired attribute. e.g.: `/path/to/my/thing.py:MyClass` Notes ----- The attribute of the module could be a class, function, variable, etc. Raises ------ ValueError: If there is no `:` separating the path and attr. """ # rindex gives last index. # The last is needed because the first colon index could be mapped drives in windows. lastColonIndex = pathAttr.rindex(":") # this raises a valueError # there should be at least 1 colon. 2 is possible due to mapped drives in windows. return (pathAttr[:lastColonIndex]), pathAttr[lastColonIndex + 1 :] def importCustomPyModule(modulePath): """ Dynamically import a custom module. Parameters ---------- modulePath : str Path to a python module. Returns ------- userSpecifiedModule : module The imported python module. 
""" modulePath = pathlib.Path(modulePath) if not modulePath.exists() or not modulePath.is_file(): raise IOError(r"Cannot import module from the given path: `{modulePath}`") _dir, moduleName = os.path.split(modulePath) moduleName = os.path.splitext(moduleName)[0] # take off the extension spec = importlib.util.spec_from_file_location(moduleName, modulePath) userSpecifiedModule = importlib.util.module_from_spec(spec) spec.loader.exec_module(userSpecifiedModule) return userSpecifiedModule def moduleAndAttributeExist(pathAttr): """ Return True if the specified python module, and attribute of the module exist. Parameters ---------- pathAttr : str Path to a python module followed by the desired attribute. e.g.: `/path/to/my/thing.py:MyClass` Returns ------- bool True if the specified python module, and attribute of the module exist. Notes ----- The attribute of the module could be a class, function, variable, etc. """ try: modulePath, moduleAttributeName = separateModuleAndAttribute(pathAttr) except ValueError: return False modulePath = pathlib.Path(modulePath) if not modulePath.is_file(): return False try: userSpecifiedModule = importCustomPyModule(modulePath) # Blanket except is okay since we are checking to see if a custom import will work. except Exception: return False return moduleAttributeName in userSpecifiedModule.__dict__ def cleanPath(path, mpiRank=0, forceClean=False): """Recursively delete a path. This function checks for a few cases we know to be OK to delete: (1) Any `TemporaryDirectoryChanger` or output cache instance and (2) anything under the ARMI `_FAST_PATH`. Be careful with editing this! Do not make it a generic can-delete-anything function, because it could in theory delete anything a user has write permissions on. Returns ------- success : bool True if file was deleted. False if it was not. 
""" valid = False if not os.path.exists(path): return True if forceClean: # Any forceClean can be deleted valid = True elif pathlib.Path(path).is_relative_to(pathlib.Path(context.getFastPath())): # If the path slated for deletion is a subdirectory of _FAST_PATH, then cool, delete. # _FAST_PATH itself gets deleted on program exit. valid = True if not valid: raise Exception(f"You tried to delete {path}, but it does not seem safe to do so.") # Delete the file/directory from only one process if mpiRank == context.MPI_RANK: if os.path.exists(path) and os.path.isdir(path): shutil.rmtree(path) elif not os.path.isdir(path): # it's just a file. Delete it. os.remove(path) # Deletions may not be immediate on Windows, so wait for it to finish. maxLoops = 6 waitTime = 0.5 loopCounter = 0 while os.path.exists(path): loopCounter += 1 if loopCounter > maxLoops: break sleep(waitTime) return not os.path.exists(path) ================================================ FILE: armi/utils/plotting.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module makes heavy use of matplotlib. Beware that plots generated with matplotlib may not free their memory, even after the plot is closed, and excessive use of plotting functions may gobble up all of your machine's memory. Therefore, you should use these plotting tools judiciously. 
def colorGenerator(skippedColors=10):
    """
    Selects a color from the matplotlib css color database.

    Parameters
    ----------
    skippedColors: int
        Number of colors to skip in the matplotlib CSS color database when generating the
        next color. Without skipping colors the next color may be similar to the previous color.

    Notes
    -----
    Will cycle indefinitely to accommodate large cores. Colors will repeat.
    """
    names = list(mcolors.CSS4_COLORS)
    total = len(names)
    # Cycle the starting offset through [20, 20 + skippedColors) forever; each
    # pass strides through the palette so consecutive yields differ visibly.
    for offset in itertools.cycle(range(20, 20 + skippedColors)):
        yield from (names[idx] for idx in range(offset, total, skippedColors))
def plotFaceMap(
    core,
    param="pdens",
    vals="peak",
    data=None,
    fName=None,
    bare=False,
    cmapName="jet",
    labels=(),
    labelFmt="{0:.3f}",
    legendMap=None,
    fontSize=None,
    minScale=None,
    maxScale=None,
    axisEqual=False,
    makeColorBar=False,
    cBarLabel="",
    title="",
    shuffleArrows=False,
    titleSize=25,
    referencesToKeep=None,
):
    """
    Plot a face map of the core.

    Parameters
    ----------
    core: Core
        The core to plot.
    param : str, optional
        The block-parameter to plot. Default: pdens
    vals : str, optional
        Can be 'peak', 'average', or 'sum'. The type of vals to produce. Will find peak,
        average, or sum of block values in an assembly. Default: peak
    data : list, optional
        rather than using param and vals, use the data supplied as is. It must be in the
        same order as iter(r).
    fName : str, optional
        File name to create. If none, will show on screen.
    bare : bool, optional
        If True, will skip axis labels, etc.
    cmapName : str
        The name of the matplotlib colormap to use. Default: jet
        Other possibilities: http://matplotlib.org/examples/pylab_examples/show_colormaps.html
    labels : list of str, optional
        Data labels corresponding to data values.
    labelFmt : str, optional
        A format string that determines how the data is printed if ``labels`` is not provided.
        E.g. ``"{:.1e}"``
    legendMap : list, optional
        A tuple list of (value, label, description), to define the data in the legend.
    fontSize : int, optional
        Font size in points
    minScale : float, optional
        The minimum value for the low color on your colormap (to set scale yourself)
        Default: autoscale
    maxScale : float, optional
        The maximum value for the high color on your colormap (to set scale yourself)
        Default: autoscale
    axisEqual : Boolean, optional
        If True, horizontal and vertical axes are scaled equally such that a circle
        appears as a circle rather than an ellipse.
        If False, this scaling constraint is not imposed.
    makeColorBar : Boolean, optional
        If True, a vertical color bar is added on the right-hand side of the plot.
        If False, no color bar is added.
    cBarLabel : String, optional
        If True, this string is the color bar quantity label.
        If False, the color bar will have no label.
        When makeColorBar=False, cBarLabel affects nothing.
    title : String, optional
        If True, the string is added as the plot title.
        If False, no plot title is added.
    shuffleArrows : list, optional
        Adds arrows indicating fuel shuffling maneuvers
    titleSize : int, optional
        Size of title on plot
    referencesToKeep : list, optional
        References to previous plots you might want to plot on: patches, collection, texts.

    Examples
    --------
    Plotting a BOL assembly type facemap with a legend::

        >>> plotFaceMap(core, param='typeNumAssem', cmapName='RdYlBu')
    """
    if referencesToKeep:
        # Reuse patches/collection/texts from a previous call (e.g. plotBlockDepthMap)
        # so the same artists can be updated instead of recreated.
        patches, collection, texts = referencesToKeep
        fig, ax = plt.gcf(), plt.gca()
    else:
        fig, ax = plt.subplots(figsize=(12, 12), dpi=100)
        # set patch (shapes such as hexagon) heat map values
        patches = _makeAssemPatches(core)
        collection = PatchCollection(patches, cmap=cmapName, alpha=1.0)
        texts = []

    ax.set_title(title, size=titleSize)

    # get param vals
    if data is None:
        data = []
        for a in core:
            if vals == "peak":
                data.append(a.getMaxParam(param))
            elif vals == "average":
                data.append(a.calcAvgParam(param))
            elif vals == "sum":
                data.append(a.calcTotalParam(param))
            else:
                raise ValueError(f"{vals} is an invalid entry for `vals` in plotFaceMap. Use peak, average, or sum.")
    if not labels:
        labels = [None] * len(data)
    if len(data) != len(labels):
        raise ValueError(
            f"Data had length {len(data)}, but labels had length {len(labels)}. They should be equal length."
        )

    collection.set_array(np.array(data))
    # NOTE(review): `minScale or maxScale` treats an explicit minScale=0 as
    # "not set" and falls back to autoscaling — confirm that is intended.
    if minScale or maxScale:
        collection.set_clim([minScale, maxScale])
    else:
        collection.norm.autoscale(np.array(data))
    ax.add_collection(collection)

    # Makes text in the center of each shape displaying the values.
    # (The text is either black or white depending on the background color it is written on)
    _setPlotValText(ax, texts, core, data, labels, labelFmt, fontSize, collection)

    # allow a color bar option
    if makeColorBar:
        # A second collection is needed so the color bar scale is independent of the drawn one.
        collection2 = PatchCollection(patches, cmap=cmapName, alpha=1.0)
        if minScale and maxScale:
            collection2.set_array(np.array([minScale, maxScale]))
        else:
            collection2.set_array(np.array(data))

        if "radial" in cBarLabel:
            # integer tick marks for radial-coordinate style data (assumes int-like values)
            colbar = fig.colorbar(collection2, ticks=[x + 1 for x in range(max(data))], shrink=0.43)
        else:
            colbar = fig.colorbar(collection2, ax=ax, shrink=0.43)

        colbar.set_label(cBarLabel, size=20)
        colbar.ax.tick_params(labelsize=16)

    if legendMap is not None:
        legend = _createLegend(legendMap, collection)
    else:
        legend = None

    if axisEqual:
        # don't "squish" patches vertically or horizontally
        ax.set_aspect("equal", "datalim")

    ax.autoscale_view(tight=True)

    # make it 2-D, for now...
    shuffleArrows = shuffleArrows or []
    for sourceCoords, destinationCoords in shuffleArrows:
        ax.annotate(
            "",
            xy=destinationCoords[:2],
            xytext=sourceCoords[:2],
            arrowprops={"arrowstyle": "->", "color": "white"},
        )

    if bare:
        # strip ticks and spines for a clean image
        ax.set_xticks([])
        ax.set_yticks([])
        ax.spines["right"].set_visible(False)
        ax.spines["top"].set_visible(False)
        ax.spines["left"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
    else:
        ax.set_xlabel("x (cm)")
        ax.set_ylabel("y (cm)")

    if fName:
        if legend:
            # expand so the legend fits if necessary
            pltKwargs = {"bbox_extra_artists": (legend,), "bbox_inches": "tight"}
        else:
            pltKwargs = {}
        try:
            plt.savefig(fName, dpi=150, **pltKwargs)
        except IOError:
            runLog.warning("Cannot update facemap at {0}: IOError. Is the file open?".format(fName))
        plt.close(fig)
    elif referencesToKeep:
        # Don't show yet, since it will be updated.
        return fName
    else:
        # Never close figures after a .show()
        # because they're being used interactively e.g.
        # in a live tutorial or by the doc gallery
        plt.show()
    return fName
def _makeAssemPatches(core):
    """Return a list of assembly shaped patches for each assembly."""
    patches = []

    # Grid type decides the patch shape: hexagons for HexGrid, rectangles otherwise.
    if isinstance(core.spatialGrid, grids.HexGrid):
        nSides = 6
    elif isinstance(core.spatialGrid, grids.ThetaRZGrid):
        raise TypeError("This plot function is not currently supported for ThetaRZGrid grids.")
    else:
        nSides = 4

    pitch = core.getAssemblyPitch()
    for a in core:
        x, y, _ = a.spatialLocator.getLocalCoordinates()
        if nSides == 6:
            # Hex orientation follows the grid: flat-up vs corners-up.
            if core.spatialGrid.cornersUp:
                orientation = 0
            else:
                orientation = math.pi / 2.0
            # radius = pitch / sqrt(3): circumscribed radius of a hex with this flat-to-flat pitch
            assemPatch = matplotlib.patches.RegularPolygon(
                (x, y), nSides, radius=pitch / math.sqrt(3), orientation=orientation
            )
        elif nSides == 4:
            # for rectangle x, y is defined as sides instead of center
            assemPatch = matplotlib.patches.Rectangle((x - pitch[0] / 2, y - pitch[1] / 2), *pitch)
        else:
            raise ValueError(f"Unexpected number of sides: {nSides}.")
        patches.append(assemPatch)
    return patches


def _setPlotValText(ax, texts, core, data, labels, labelFmt, fontSize, collection):
    """Write param values down, and return text so it can be edited later."""
    # NOTE(review): return value of getAssemblyPitch is unused here — possibly vestigial.
    _ = core.getAssemblyPitch()
    for a, val, label in zip(core, data, labels):
        x, y, _ = a.spatialLocator.getLocalCoordinates()

        # Pick black or white text based on the luminance of the patch color underneath.
        cmap = collection.get_cmap()
        patchColor = np.asarray(cmap(collection.norm(val)))
        luminance = patchColor.dot(LUMINANCE_WEIGHTS)
        dark = luminance < 0.5
        if dark:
            color = "white"
        else:
            color = "black"

        # Write text on top of patch locations.
        if label is None and labelFmt is not None:
            # Write the value
            labelText = labelFmt.format(val)
            text = ax.text(
                x,
                y,
                labelText,
                zorder=1,
                ha="center",
                va="center",
                fontsize=fontSize,
                color=color,
            )
        elif label is not None:
            text = ax.text(
                x,
                y,
                label,
                zorder=1,
                ha="center",
                va="center",
                fontsize=fontSize,
                color=color,
            )
        else:
            # labelFmt was none, so they don't want any text plotted
            continue

        texts.append(text)
va="center", size=7, color=color) handlebox.add_artist(txt) return (patch, txt) ax = plt.gca() keys = [] labels = [] for value, label, description in legendMap: keys.append((label, value)) labels.append(description) legend = ax.legend( keys, labels, handler_map={tuple: AssemblyLegend()}, loc="center left", bbox_to_anchor=(1.0, 0.5), frameon=False, prop={"size": size}, ) return legend class DepthSlider(Slider): """Page slider used to view params at different depths.""" def __init__( self, ax, sliderLabel, depths, updateFunc, selectedDepthColor, fontsize=8, valInit=0, **kwargs, ): # The color of the currently displayed depth page. self.selectedDepthColor = selectedDepthColor self.nonSelectedDepthColor = "w" self.depths = depths # Make the selection depth buttons self.depthSelections = [] numDepths = float(len(depths)) rectangleBot = 0 textYCoord = 0.5 # startBoundaries go from zero to just below 1. leftBoundary = [i / numDepths for i, _depths in enumerate(depths)] for leftBoundary, depth in zip(leftBoundary, depths): # First depth (leftBoundary==0) is on, rest are off. 
if leftBoundary == 0: color = self.selectedDepthColor else: color = self.nonSelectedDepthColor depthSelectBox = matplotlib.patches.Rectangle( (leftBoundary, rectangleBot), 1.0 / numDepths, 1, transform=ax.transAxes, facecolor=color, ) ax.add_artist(depthSelectBox) self.depthSelections.append(depthSelectBox) # Make text halfway into box textXCoord = leftBoundary + 0.5 / numDepths ax.text( textXCoord, textYCoord, "{:.1f}".format(depth), ha="center", va="center", transform=ax.transAxes, fontsize=fontsize, ) # Make forward and backward button backwardArrow, forwardArrow = "$\u25c0$", "$\u25b6$" divider = axes_grid1.make_axes_locatable(ax) buttonWidthPercent = "5%" backwardAxes = divider.append_axes("right", size=buttonWidthPercent, pad=0.03) forwardAxes = divider.append_axes("right", size=buttonWidthPercent, pad=0.03) self.backButton = matplotlib.widgets.Button( backwardAxes, label=backwardArrow, color=self.nonSelectedDepthColor, hovercolor=self.selectedDepthColor, ) self.backButton.label.set_fontsize(fontsize) self.backButton.on_clicked(self.previous) self.forwardButton = matplotlib.widgets.Button( forwardAxes, label=forwardArrow, color=self.nonSelectedDepthColor, hovercolor=self.selectedDepthColor, ) self.forwardButton.label.set_fontsize(fontsize) self.forwardButton.on_clicked(self.next) # init at end since slider will set val to 0, and it needs to have state # setup before doing that Slider.__init__(self, ax, sliderLabel, 0, len(depths), valinit=0, **kwargs) self.on_changed(updateFunc) self.set_val(valInit) # need to set after updateFunc is added. # Turn off value visibility since the buttons text shows the value self.valtext.set_visible(False) def set_val(self, val): """ Set the value and update the color. Notes ----- valmin/valmax are set on the parent to 0 and len(depths). """ val = int(val) # valmax is not allowed, since it is out of the array. # valmin is allowed since 0 index is in depth array. 
if val < self.valmin or val >= self.valmax: # invalid, so ignore return # activate color is first since we still have access to self.val self.updatePageDepthColor(val) Slider.set_val(self, val) def next(self, _event): """Move forward to the next depth (page).""" self.set_val(self.val + 1) def previous(self, _event): """Move backward to the previous depth (page).""" self.set_val(self.val - 1) def updatePageDepthColor(self, newVal): """Update the page colors.""" self.depthSelections[self.val].set_facecolor(self.nonSelectedDepthColor) self.depthSelections[newVal].set_facecolor(self.selectedDepthColor) def plotAssemblyTypes( assems: list = None, fileName: str = None, maxAssems: int = None, showBlockAxMesh: bool = True, yAxisLabel: str = None, title: str = None, hot: bool = True, ) -> plt.Figure: """ Generate a plot showing the axial block and enrichment distributions of each assembly type in the core. Parameters ---------- assems: list list of assembly objects to be plotted. fileName : str or None Base for filename to write, or None for just returning the fig maxAssems: integer maximum number of assemblies to plot in the assems list. showBlockAxMesh: bool if true, the axial mesh information will be displayed on the right side of the assembly plot. yAxisLabel: str Optionally, provide a label for the Y-axis. title: str Optionally, provide a title for the plot. hot : bool, optional If True, plot the hot block heights. If False, use cold heights from the inputs. 
def _plotBlocksInAssembly(
    axis,
    assem,
    isLastAssem,
    yBlockHeights,
    yBlockAxMesh,
    xAssemLoc,
    xAssemEndLoc,
    showBlockAxMesh,
    hot,
):
    """Draw one assembly's blocks as labeled rectangles; returns updated x-location and height/mesh accumulators."""
    # Set dictionary of pre-defined block types and colors for the plot
    lightsage = "xkcd:light sage"
    blockTypeColorMap = collections.OrderedDict(
        {
            "fuel": "tomato",
            "shield": "cadetblue",
            "reflector": "darkcyan",
            "aclp": "lightslategrey",
            "plenum": "white",
            "duct": "plum",
            "control": lightsage,
            "handling socket": "lightgrey",
            "grid plate": "lightgrey",
            "inlet nozzle": "lightgrey",
        }
    )

    # Initialize block positions
    blockWidth = 5.0
    yBlockLoc = 0
    xBlockLoc = xAssemLoc
    xTextLoc = xBlockLoc + blockWidth / 20.0
    for b in assem:
        # get block height
        if hot:
            blockHeight = b.getHeight()
        else:
            try:
                blockHeight = b.getInputHeight()
            except AttributeError:
                raise ValueError(
                    f"Cannot plot cold height for block {b} in assembly {assem} because it does not have access to a "
                    "blueprints through any of its parents. Either make sure that a blueprints is accessible or plot "
                    "the hot heights instead."
                )

        # Get the basic text label for the block
        try:
            # first flag in the color map that matches this block wins
            blockType = [bType for bType in blockTypeColorMap.keys() if b.hasFlags(Flags.fromString(bType))][0]
            color = blockTypeColorMap[blockType]
        except IndexError:
            blockType = b.getType()
            color = "grey"

        # Get the detailed text label for the block
        blockXsId = b.p.xsType
        dLabel = ""
        if b.hasFlags(Flags.FUEL):
            dLabel = " {:0.2f}%".format(b.getFissileMassEnrich() * 100)
        elif b.hasFlags(Flags.CONTROL):
            blockType = "ctrl"
            dLabel = " {:0.2f}%".format(b.getBoronMassEnrich() * 100)
        dLabel += " ({})".format(blockXsId)

        # Set up block rectangle
        blockPatch = matplotlib.patches.Rectangle(
            (xBlockLoc, yBlockLoc),
            blockWidth,
            blockHeight,
            facecolor=color,
            alpha=0.7,
            edgecolor="k",
            lw=1.0,
            ls="solid",
        )
        axis.add_patch(blockPatch)
        # NOTE(review): /2.5 places the label slightly below the geometric center — presumably intentional.
        yBlockCenterLoc = yBlockLoc + blockHeight / 2.5
        axis.text(
            xTextLoc,
            yBlockCenterLoc,
            blockType.upper() + dLabel,
            ha="left",
            fontsize=10,
        )
        yBlockLoc += blockHeight
        yBlockHeights.append(yBlockLoc)

        # Add location, block heights, and axial mesh points to ordered set
        yBlockAxMesh.add((yBlockCenterLoc, blockHeight, b.p.axMesh))

    # Add the block heights, block number of axial mesh points on the far right of the plot.
    if isLastAssem and showBlockAxMesh:
        xEndLoc = 0.5 + xAssemEndLoc
        for bCenter, bHeight, axMeshPoints in yBlockAxMesh:
            axis.text(
                xEndLoc,
                bCenter,
                f"{bHeight} cm ({axMeshPoints})",
                fontsize=10,
                ha="left",
            )

    return xBlockLoc, yBlockHeights, yBlockAxMesh


def plotRadialReactorLayouts(reactor):
    """Generate a radial layout image of the converted reactor core."""
    # One representative core assembly per blueprint assembly design.
    bpAssems = list(reactor.blueprints.assemblies.values())
    assemsToPlot = []
    for bpAssem in bpAssems:
        coreAssems = reactor.core.getAssemblies(bpAssem.p.flags)
        if not coreAssems:
            continue
        assemsToPlot.append(coreAssems[0])

    # Obtain the plot numbering based on the existing files so that existing plots are not overwritten.
    start = 0
    existingFiles = glob(f"{reactor.core.name}AssemblyTypes" + "*" + ".png")
    # This loops over the existing files for the assembly types outputs and makes a unique integer value so that plots
    # are not overwritten. The regular expression here captures the first integer as AssemblyTypesX and then ensures
    # that the numbering in the next enumeration below is 1 above that.
    for f in existingFiles:
        newStart = int(re.search(r"\d+", f).group())
        if newStart > start:
            start = newStart

    figs = []
    # Plot assemblies in batches of 6 per figure.
    for plotNum, assemBatch in enumerate(iterables.chunk(assemsToPlot, 6), start=start + 1):
        assemPlotName = f"{reactor.core.name}AssemblyTypes{plotNum}-rank{armi.MPI_RANK}.png"
        fig = plotAssemblyTypes(assemBatch, assemPlotName, maxAssems=6, showBlockAxMesh=True)
        figs.append(fig)

    return figs
def plotBlockFlux(core, fName=None, bList=None, peak=False, adjoint=False, bList2=None):
    """
    Produce energy spectrum plot of real and/or adjoint flux in one or more blocks.

    Parameters
    ----------
    core : Core
        Core object
    fName : str, optional
        the name of the plot file to produce. If none, plot will be shown. A text file with
        the flux values will also be generated if this is non-empty.
    bList : iterable, optional
        is a single block or a list of blocks to average over. If no bList, full core is assumed.
    peak : bool, optional
        a flag that will produce the peak as well as the average on the plot.
    adjoint : bool, optional
        plot the adjoint as well.
    bList2 : list, optional
        a separate list of blocks that will also be plotted on a separate axis on the same plot.
        This is useful for comparing flux in some blocks with flux in some other blocks.

    Notes
    -----
    ``bList2`` defaults to None (treated as empty); it was previously a mutable
    default argument, which is a well-known Python pitfall.
    """
    # Avoid the mutable-default-argument pitfall while keeping call compatibility.
    if bList2 is None:
        bList2 = []

    class BlockListFlux:
        """Accumulates average/peak multigroup flux over a list of blocks."""

        def __init__(self, nGroup, blockList=None, adjoint=False, peak=False, primary=False):
            self.nGroup = nGroup
            self.blockList = blockList if blockList is not None else []
            self.adjoint = adjoint
            self.peak = peak
            self.avgHistogram = None
            self.eHistogram = None
            self.peakHistogram = None
            self.E = None

            self.avgFlux = np.zeros(self.nGroup)
            self.peakFlux = np.zeros(self.nGroup)
            if self.blockList:
                if self.adjoint:
                    self.labelAvg = "Average Adjoint Flux"
                    self.labelPeak = "Peak Adjoint Flux"
                else:
                    self.labelAvg = "Average Flux"
                    self.labelPeak = "Peak Flux"

            # primary series is solid; secondary is dashed red/black
            if not self.blockList or primary:
                self.lineAvg = "-"
                self.linePeak = "-"
            else:
                self.lineAvg = "r--"
                self.linePeak = "k--"

        def calcAverage(self):
            """Average the flux over this object's own block list; track the peak block."""
            for b in self.blockList:
                thisFlux = np.array(b.getMgFlux(adjoint=self.adjoint))
                self.avgFlux += np.array(thisFlux)
                if sum(thisFlux) > sum(self.peakFlux):
                    self.peakFlux = thisFlux
            # BUG FIX: previously divided by len(bList) (the closure over the outer
            # list), which was wrong whenever this object wrapped bList2.
            self.avgFlux = self.avgFlux / len(self.blockList)

        def setEnergyStructure(self, upperEnergyBounds):
            # convert eV to MeV
            self.E = [eMax / 1e6 for eMax in upperEnergyBounds]

        def makePlotHistograms(self):
            self.eHistogram, self.avgHistogram = makeHistogram(self.E, self.avgFlux)
            if self.peak:
                _, self.peakHistogram = makeHistogram(self.E, self.peakFlux)

        def checkSize(self):
            if len(self.E) != len(self.avgFlux):
                runLog.error(self.avgFlux)
                # BUG FIX: a bare `raise` outside an except block raises a confusing
                # RuntimeError; raise a meaningful exception instead.
                raise ValueError(
                    "Energy group structure size ({}) does not match flux size ({}).".format(
                        len(self.E), len(self.avgFlux)
                    )
                )

        def getTable(self):
            return enumerate(zip(self.E, self.avgFlux, self.peakFlux))

    if bList is None:
        bList = core.getBlocks()
    bList = list(bList)

    if adjoint and bList2:
        runLog.warning("Cannot plot adjoint flux with bList2 argument")
        return
    elif adjoint:
        # plot the adjoint flux on the secondary axis
        bList2 = bList

    try:
        G = len(core.lib.neutronEnergyUpperBounds)
    except Exception:
        runLog.warning("No ISOTXS library attached so no flux plots.")
        return

    BlockListFluxes = set()
    bf1 = BlockListFlux(G, blockList=bList, peak=peak, primary=True)
    BlockListFluxes.add(bf1)
    if bList2:
        bf2 = BlockListFlux(G, blockList=bList2, adjoint=adjoint, peak=peak)
        BlockListFluxes.add(bf2)

    for bf in BlockListFluxes:
        bf.calcAverage()
        bf.setEnergyStructure(core.lib.neutronEnergyUpperBounds)
        bf.checkSize()
        bf.makePlotHistograms()

    if fName:
        # write a little flux text file
        txtFileName = os.path.splitext(fName)[0] + ".txt"
        with open(txtFileName, "w") as f:
            f.write("{0:16s} {1:16s} {2:16s}\n".format("Energy_Group", "Average_Flux", "Peak_Flux"))
            for _, (eMax, avgFlux, peakFlux) in bf1.getTable():
                f.write("{0:12E} {1:12E} {2:12E}\n".format(eMax, avgFlux, peakFlux))

    if max(bf1.avgFlux) <= 0.0:
        runLog.warning(f"Cannot plot flux with maxval=={bf1.avgFlux} in {bList[0]}")
        return

    plt.figure()
    plt.plot(bf1.eHistogram, bf1.avgHistogram, bf1.lineAvg, label=bf1.labelAvg)
    if peak:
        plt.plot(bf1.eHistogram, bf1.peakHistogram, bf1.linePeak, label=bf1.labelPeak)
    ax = plt.gca()
    ax.set_xscale("log")
    ax.set_yscale("log")
    plt.xlabel("Energy (MeV)")
    plt.ylabel("Flux (n/cm$^2$/s)")
    if peak or bList2:
        plt.legend(loc="lower right")
    plt.grid(color="0.70")
    if bList2:
        if adjoint:
            # secondary y-axis for adjoint flux
            plt.twinx()
            plt.ylabel("Adjoint Flux (n/cm$^2$/s)", rotation=270)
            ax2 = plt.gca()
            ax2.set_yscale("log")
        plt.plot(bf2.eHistogram, bf2.avgHistogram, bf2.lineAvg, label=bf2.labelAvg)
        if peak and not adjoint:
            plt.plot(bf2.eHistogram, bf2.peakHistogram, bf2.linePeak, label=bf2.labelPeak)
        plt.legend(loc="lower left")
    plt.title("Group flux")

    if fName:
        plt.savefig(fName)
        report.setData(
            f"Flux Plot {os.path.split(fName)[1]}",
            os.path.abspath(fName),
            report.FLUX_PLOT,
        )
        plt.close()
    else:
        # Never close interactive plots
        plt.show()
def makeHistogram(x, y):
    """
    Take a list of x and y values, and return a histogram version.

    Each input point ``(x[i], y[i])`` becomes a horizontal step from the previous
    x-value to ``x[i]`` at height ``y[i]``; the first step starts at ``x[0] / 2``.
    Good for plotting multigroup flux spectrum or cross sections.

    Parameters
    ----------
    x : sequence of float
        Bin upper edges (e.g. group upper energy bounds).
    y : sequence of float
        Value within each bin; must be the same length as ``x``.

    Returns
    -------
    xHistogram, yHistogram : np.ndarray
        Arrays of length ``2 * len(x)`` tracing the step plot.

    Raises
    ------
    ValueError
        If ``x`` and ``y`` differ in length.
    """
    if len(x) != len(y):
        # BUGFIX: the original concatenated two strings with no separating space,
        # producing "...same size.len(x) == ...".
        raise ValueError(
            f"Cannot make a histogram unless the x and y lists are the same size. "
            f"len(x) == {len(x)} and len(y) == {len(y)}"
        )
    n = len(x)
    xHistogram = np.zeros(2 * n)
    yHistogram = np.zeros(2 * n)
    if n == 0:
        # empty input: nothing to trace (original indexed x[0] and crashed)
        return xHistogram, yHistogram
    for i in range(n):
        lower = 2 * i
        upper = 2 * i + 1
        # x[i - 1] wraps to x[-1] at i == 0; that slot is overwritten below.
        xHistogram[lower] = x[i - 1]
        xHistogram[upper] = x[i]
        yHistogram[lower] = y[i]
        yHistogram[upper] = y[i]
    # start the first step half-way down to zero energy
    xHistogram[0] = x[0] / 2.0
    return xHistogram, yHistogram
def _makeBlockPinPatches(block, cold):
    """Return lists of block component patches and corresponding data and names (which relates
    to material of the component for later plot-coloring/legend) for a single block.

    Takes in a block that must have a spatialGrid attached as well as a variable which
    signifies whether the dimensions of the components are at hot or cold temps. When cold is
    set to true, you would get the BOL cold temp dimensions.

    Parameters
    ----------
    block : Block
        Block to convert to patches; must have a ``spatialGrid``.
    cold : bool
        true for cold temps, hot = false

    Returns
    -------
    patches : list
        list of patches for block components
    data : list
        list of the materials these components are made of (parallel to ``patches``)
    name : list
        list of the names of these components (parallel to ``patches``)
    """
    patches = []
    data = []
    names = []
    cornersUp = False
    if isinstance(block.spatialGrid, grids.HexGrid):
        largestPitch, comp = block.getPitch(returnComp=True)
        cornersUp = block.spatialGrid.cornersUp
    elif isinstance(block.spatialGrid, grids.ThetaRZGrid):
        raise TypeError("This plot function is not currently supported for ThetaRZGrid grids.")
    else:
        # Cartesian grid: only square pitches are supported.
        largestPitch, comp = block.getPitch(returnComp=True)
        if block.getPitch()[0] != block.getPitch()[1]:
            raise ValueError("Only works for blocks with equal length and width.")

    # Draw larger components first so smaller ones render on top of them.
    sortedComps = sorted(block, reverse=True)

    # A DerivedShape (e.g. coolant filling the leftover space) has no geometry of its
    # own; draw it as the full pitch-defining shape underneath everything else.
    derivedComponents = block.getComponentsOfShape(DerivedShape)
    if len(derivedComponents) == 1:
        derivedComponent = derivedComponents[0]
        sortedComps.remove(derivedComponent)
        cName = derivedComponent.name
        if isinstance(derivedComponent.material, custom.Custom):
            # custom materials carry their identity in the customIsotopicsName param
            material = derivedComponent.p.customIsotopicsName
        else:
            material = derivedComponent.material.name

        location = comp.spatialLocator
        if isinstance(location, grids.MultiIndexLocation):
            location = location[0]
        x, y, _ = location.getLocalCoordinates()
        if isinstance(comp, Hexagon):
            orient = math.pi / 6 if cornersUp else 0
            derivedPatch = matplotlib.patches.RegularPolygon(
                (x, y), 6, radius=largestPitch / math.sqrt(3), orientation=orient
            )
        elif isinstance(comp, Square):
            derivedPatch = matplotlib.patches.Rectangle(
                (x - largestPitch[0] / 2, y - largestPitch[0] / 2),
                largestPitch[0],
                largestPitch[0],
            )
        else:
            raise TypeError(
                f"Shape of the pitch-defining element is not a Square or Hex it is {comp.shape}, "
                "cannot plot for this type of block."
            )
        patches.append(derivedPatch)
        data.append(material)
        names.append(cName)

    for component in sortedComps:
        locs = component.spatialLocator
        if not isinstance(locs, grids.MultiIndexLocation):
            # make a single location a list to iterate.
            locs = [locs]
        for loc in locs:
            x, y, _ = loc.getLocalCoordinates()
            # goes through each location in stack order
            blockPatches = _makeComponentPatch(component, (x, y), cold, cornersUp)
            for element in blockPatches:
                patches.append(element)
                if isinstance(component.material, custom.Custom):
                    material = component.p.customIsotopicsName
                else:
                    material = component.material.name
                # one material/name entry per patch keeps the three lists parallel
                data.append(material)
                names.append(component.name)

    return patches, data, names
def _makeComponentPatch(component, position, cold, cornersUp=False):
    """Makes a component shaped patch to later be used for making block diagrams.

    Parameters
    ----------
    component: Component
        a component of a block
    position: tuple
        (x, y) position
    cold: bool
        True if looking for dimension at cold temps
    cornersUp: bool, optional
        If this is a HexBlock, is it corners-up or flats-up?

    Returns
    -------
    blockPatch: list
        A list of Patch objects that together represent a component in the diagram.

    Notes
    -----
    Currently accepts components of shape Circle, Helix, Hexagon, or Square
    """
    x = position[0]
    y = position[1]

    if isinstance(component, Helix):
        # Helix (wire wrap): an annulus offset from the pin center by helixDiameter/2
        # along a 30-degree direction.
        helixDiameter = component.getDimension("helixDiameter", cold=cold)
        od = component.getDimension("od", cold=cold)
        innerDiameter = component.getDimension("id", cold=cold)
        blockPatch = matplotlib.patches.Wedge(
            (
                x + helixDiameter / 2 * math.cos(math.pi / 6),
                y + helixDiameter / 2 * math.sin(math.pi / 6),
            ),
            od / 2,
            0,
            360,
            width=(od / 2) - (innerDiameter / 2),
        )
    elif isinstance(component, Circle):
        # full annulus; a solid circle has id == 0
        od = component.getDimension("od", cold=cold)
        innerDiameter = component.getDimension("id", cold=cold)
        blockPatch = matplotlib.patches.Wedge(
            (x, y),
            od / 2,
            0,
            360,
            width=(od / 2) - (innerDiameter / 2),
        )
    elif isinstance(component, Hexagon):
        # NOTE(review): hexagon corners are not offset by (x, y); this assumes hex
        # components are centered at the block origin -- confirm against callers.
        angle = 0 if cornersUp else 30
        outerPoints = np.array(hexagon.corners(angle) * component.getDimension("op", cold=cold))
        blockPatch = []
        if component.getDimension("ip", cold=cold) != 0:
            # a hexagonal ring: six quads spanning inner to outer hexagon
            innerPoints = np.array(hexagon.corners(angle) * component.getDimension("ip", cold=cold))
            for n in range(6):
                corners = [
                    innerPoints[n],
                    innerPoints[(n + 1) % 6],
                    outerPoints[(n + 1) % 6],
                    outerPoints[n],
                ]
                blockPatch.append(matplotlib.patches.Polygon(corners, fill=True))
        else:
            # BUGFIX: a solid hexagon is one filled polygon through all six outer
            # corners. The original built six 2-vertex "polygons" -- degenerate line
            # segments that cannot render as a filled hexagon.
            blockPatch.append(matplotlib.patches.Polygon(outerPoints, fill=True))
    elif isinstance(component, Rectangle):
        widthInner = component.getDimension("widthInner", cold=cold)
        widthOuter = component.getDimension("widthOuter", cold=cold)
        lengthOuter = component.getDimension("lengthOuter", cold=cold)
        if widthInner != 0:
            # a rectangular ring: four quads spanning inner to outer rectangle
            lengthInner = component.getDimension("lengthInner", cold=cold)
            innerPoints = np.array(
                [
                    [x + widthInner / 2, y + lengthInner / 2],
                    [x + widthInner / 2, y - lengthInner / 2],
                    [x - widthInner / 2, y - lengthInner / 2],
                    [x - widthInner / 2, y + lengthInner / 2],
                ]
            )
            outerPoints = np.array(
                [
                    [x + widthOuter / 2, y + lengthOuter / 2],
                    [x + widthOuter / 2, y - lengthOuter / 2],
                    [x - widthOuter / 2, y - lengthOuter / 2],
                    [x - widthOuter / 2, y + lengthOuter / 2],
                ]
            )
            blockPatch = []
            for n in range(4):
                corners = [
                    innerPoints[n],
                    innerPoints[(n + 1) % 4],
                    outerPoints[(n + 1) % 4],
                    outerPoints[n],
                ]
                blockPatch.append(matplotlib.patches.Polygon(corners, fill=True))
        else:
            # Just make it a rectangle
            blockPatch = matplotlib.patches.Rectangle(
                (x - widthOuter / 2, y - lengthOuter / 2),
                widthOuter,
                lengthOuter,
            )

    if isinstance(blockPatch, list):
        return blockPatch
    return [blockPatch]
def plotBlockDiagram(block, fName, cold, cmapName="RdYlBu", materialList=None, fileFormat="svg"):
    """Given a Block with a spatial Grid, plot the diagram of it with all of its components
    (wire, duct, coolant, etc).

    Parameters
    ----------
    block : Block
        The block to plot; must have a ``spatialGrid``.
    fName : str
        Name of the file to save to
    cold : bool
        True is for cold temps, False is hot
    cmapName : str
        name of a colorMap to use for block colors
    materialList : list
        A list of material names across all blocks to be plotted so that same material on
        all diagrams will have the same color
    fileFormat : str
        The format to save the picture as, e.g. svg, png, jpg, etc.

    Returns
    -------
    str or None
        Absolute path of the saved image, or None when the block has no spatialGrid.
    """
    # BUGFIX: check before creating the figure. The original created a 20x20 dpi=200
    # figure first and leaked it on this early return (pyplot keeps figures alive
    # until they are explicitly closed).
    if block.spatialGrid is None:
        return None

    _, ax = plt.subplots(figsize=(20, 20), dpi=200)

    # building a list of materials (preserving first-seen order) if not supplied
    if materialList is None:
        materialList = []
        for component in block:
            if isinstance(component.material, custom.Custom):
                materialName = component.p.customIsotopicsName
            else:
                materialName = component.material.name
            if materialName not in materialList:
                materialList.append(materialName)

    # stable material -> integer color index mapping shared across diagrams
    materialMap = {material: ai for ai, material in enumerate(np.unique(materialList))}
    allColors = np.array(list(materialMap.values()))

    # build the geometric shapes on the plot
    patches, data, _ = _makeBlockPinPatches(block, cold)
    collection = PatchCollection(patches, cmap=cmapName, alpha=1.0)
    ourColors = np.array([materialMap[materialName] for materialName in data])
    collection.set_array(ourColors)
    ax.add_collection(collection)
    # normalize against the full material set so colors match across blocks
    collection.norm.autoscale(allColors)

    # set up plot axis, labels and legends
    legendMap = [(materialMap[materialName], "", f"{materialName}") for materialName in np.unique(data)]
    legend = _createLegend(legendMap, collection, size=50, shape=Rectangle)
    pltKwargs = {"bbox_extra_artists": (legend,), "bbox_inches": "tight"}
    ax.set_xticks([])
    ax.set_yticks([])
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.margins(0)
    plt.savefig(fName, format=fileFormat, **pltKwargs)
    plt.close()
    return os.path.abspath(fName)


def plotScatterMatrix(scatterMatrix, scatterTypeLabel="", fName=None):
    """Plots a matrix to show scattering.

    Parameters
    ----------
    scatterMatrix : sparse matrix
        Group-to-group scattering cross sections (densified for display).
    scatterTypeLabel : str, optional
        Label prefix for the plot title.
    fName : str, optional
        If given, save to this file and close; otherwise show interactively.
    """
    img = plt.imshow(scatterMatrix.todense(), interpolation="nearest")
    plt.grid(color="0.70")
    plt.xlabel("From group")
    plt.ylabel("To group")
    plt.title(f"{scatterTypeLabel} scattering XS")
    plt.colorbar()
    if fName:
        plt.savefig(fName)
        plt.close()
    else:
        plt.show()
    return img
def plotNucXs(isotxs, nucNames, xsNames, fName=None, label=None, noShow=False, title=None):
    """
    Generates a XS plot for a nuclide on the ISOTXS library.

    Parameters
    ----------
    isotxs : IsotxsLibrary
        A collection of cross sections (XS) for both neutron and gamma reactions.
    nucNames : str or list
        The nuclides to plot
    xsNames : str or list
        the XS to plot e.g. n,g, n,f, nalph, etc. see xsCollections for actual names.
    fName : str, optional
        if fName is given, the file will be written rather than plotting to screen
    label : str, optional
        is an optional label for image legends, useful in ipython sessions.
    noShow : bool, optional
        Won't finalize plot. Useful for using this to make custom plots.
    title : str, optional
        Plot title; defaults to a title naming the library.

    Examples
    --------
    >>> l = ISOTXS()
    >>> plotNucXs(l, "U238NA", "fission")

    >>> # Plot n,g for all xenon and krypton isotopes
    >>> f = lambda name: "XE" in name or "KR" in name
    >>> plotNucXs(l, sorted(filter(f, l.nuclides.keys())), itertools.repeat("nGamma"))

    See Also
    --------
    plotScatterMatrix
    """
    # normalize single names to one-element lists
    nucNames = [nucNames] if isinstance(nucNames, str) else nucNames
    xsNames = [xsNames] if isinstance(xsNames, str) else xsNames

    for nucName, xsName in zip(nucNames, xsNames):
        nuclide = isotxs[nucName]
        seriesLabel = label or f"{nucName} {xsName}"
        energiesMeV = isotxs.neutronEnergyUpperBounds / 1e6
        micros = nuclide.micros[xsName]
        plt.plot(energiesMeV, micros, "-", label=seriesLabel, drawstyle="steps-post")

    ax = plt.gca()
    ax.set_xscale("log")
    ax.set_yscale("log")
    plt.grid(color="0.70")
    plt.title(title or f"microscopic XS from {isotxs}")
    plt.xlabel("Energy (MeV)")
    plt.ylabel("microscopic XS (barns)")
    plt.legend()

    if fName:
        plt.savefig(fName)
        plt.close()
    elif not noShow:
        plt.show()
def plotConvertedBlock(sourceBlock, convertedBlock, fName=None):
    """Render an image of the converted block."""
    runLog.extra(f"Plotting equivalent cylindrical block of {sourceBlock}")
    fig, ax = plt.subplots()
    # hide the frame entirely; only the concentric rings are drawn
    fig.patch.set_visible(False)
    ax.patch.set_visible(False)
    ax.axis("off")

    rings = []
    densities = []
    for circleComp in convertedBlock:
        innerR = circleComp.getDimension("id") / 2.0
        outerR = circleComp.getDimension("od") / 2.0
        runLog.debug("Plotting {:40s} with {:10.3f} {:10.3f} ".format(circleComp, innerR, outerR))
        # each converted component is a full annulus about the origin
        rings.append(matplotlib.patches.Wedge((0.0, 0.0), outerR, 0, 360.0, width=outerR - innerR))
        densities.append(circleComp.density())

    collection = PatchCollection(rings, alpha=1.0, linewidths=0.1, cmap=cm.YlGn)
    # color each ring by its component density
    collection.set_array(np.array(densities))
    ax.add_collection(collection)
    ax.autoscale_view(True, True, True)
    ax.set_aspect("equal")
    fig.tight_layout()

    if fName:
        plt.savefig(fName)
        plt.close()
    else:
        plt.show()
    return fName
def plotConvertedRZTReactor(reactor, fNameBase=None):
    """
    Generate plots for the converted RZT reactor.

    One R-Z map is produced per theta mesh interval.

    Parameters
    ----------
    reactor : Reactor
        The converted reactor to plot.
    fNameBase : str, optional
        A name that will form the basis of the N plots that are generated by this method. Will
        get split on extension and have numbers added. Should be like ``coreMap.png``. If not
        given, the figures are returned instead of saved.

    Returns
    -------
    list
        The generated figures (empty when ``fNameBase`` was given and plots were saved).
    """
    runLog.info(f"Generating plot(s) of the converted {str(reactor.core.geomType).upper()} reactor")
    figs = []
    colConv = matplotlib.colors.ColorConverter()
    colGen = colorGenerator(5)
    blockColors = {}  # blockType -> color, filled lazily as new types are encountered
    thetaMesh, radialMesh, axialMesh = _getReactorMeshCoordinates(reactor)
    innerTheta = 0.0
    for i, outerTheta in enumerate(thetaMesh):
        fig, ax = plt.subplots(figsize=(12, 12))
        innerRadius = 0.0
        for outerRadius in radialMesh:
            innerAxial = 0.0
            for outerAxial in axialMesh:
                # one rectangle per (radius, axial) mesh cell, colored by block type
                b = _getBlockAtMeshPoint(
                    reactor,
                    innerTheta,
                    outerTheta,
                    innerRadius,
                    outerRadius,
                    innerAxial,
                    outerAxial,
                )
                blockType = b.getType()
                blockColor = _getBlockColor(colConv, colGen, blockColors, blockType)
                if blockColor is not None:
                    # None means this type already has a color assigned
                    blockColors[blockType] = blockColor
                blockPatch = matplotlib.patches.Rectangle(
                    (innerRadius, innerAxial),
                    (outerRadius - innerRadius),
                    (outerAxial - innerAxial),
                    facecolor=blockColors[blockType],
                    linewidth=0,
                    alpha=0.7,
                )
                ax.add_patch(blockPatch)
                innerAxial = outerAxial
            innerRadius = outerRadius
        ax.set_title(
            "{} Core Map from {} to {:.4f} revolutions".format(
                str(reactor.core.geomType).upper(),
                innerTheta * units.RAD_TO_REV,
                outerTheta * units.RAD_TO_REV,
            ),
            y=1.03,
        )
        ax.set_xticks([0.0] + radialMesh)
        ax.set_yticks([0.0] + axialMesh)
        ax.tick_params(axis="both", which="major", labelsize=11, length=0, width=0)
        ax.grid()
        labels = ax.get_xticklabels()
        for label in labels:
            label.set_rotation(270)
        # NOTE(review): the handles/labels built here are never passed to a legend call
        # -- looks like an unfinished legend; confirm intent.
        handles = []
        labels = []
        for blockType, blockColor in blockColors.items():
            line = matplotlib.lines.Line2D([], [], color=blockColor, markersize=15, label=blockType)
            handles.append(line)
            labels.append(line.get_label())
        ax.set_xlabel("RADIAL MESH (CM)", labelpad=20)
        ax.set_ylabel("AXIAL MESH (CM)", labelpad=20)
        if fNameBase:
            # number the file per theta slice, e.g. coreMap0.png, coreMap1.png, ...
            root, ext = os.path.splitext(fNameBase)
            fName = root + f"{i}" + ext
            plt.savefig(fName)
            plt.close()
        else:
            figs.append(fig)
        innerTheta = outerTheta
    return figs


def _getReactorMeshCoordinates(reactor):
    """A helper for plotConvertedRZTReactor: mesh points with the 0.0 origin removed."""
    thetaMesh, radialMesh, axialMesh = reactor.core.findAllMeshPoints(applySubMesh=False)
    thetaMesh.remove(0.0)
    radialMesh.remove(0.0)
    axialMesh.remove(0.0)
    return thetaMesh, radialMesh, axialMesh


def _getBlockAtMeshPoint(reactor, innerTheta, outerTheta, innerRadius, outerRadius, innerAxial, outerAxial):
    """A helper for plotConvertedRZTReactor: first block whose midpoint falls in the TRZ cell.

    Raises
    ------
    ValueError
        If no block midpoint lies within the given bounds.
    """
    for b in reactor.core.iterBlocks():
        blockMidTh, blockMidR, blockMidZ = b.spatialLocator.getGlobalCoordinates(nativeCoords=True)
        if (blockMidTh >= innerTheta) and (blockMidTh <= outerTheta):
            if (blockMidR >= innerRadius) and (blockMidR <= outerRadius):
                if (blockMidZ >= innerAxial) and (blockMidZ <= outerAxial):
                    return b
    # NOTE(review): blockMidTh/R/Z are unbound here if the core has no blocks at all;
    # the message would then raise NameError -- confirm cores are never empty here.
    raise ValueError(
        "No block found between ({}, {}), ({}, {}), ({}, {})\nLast block had TRZ= {} {} {}".format(
            innerTheta,
            outerTheta,
            innerRadius,
            outerRadius,
            innerAxial,
            outerAxial,
            blockMidTh,
            blockMidR,
            blockMidZ,
        )
    )
def _getBlockColor(colConverter, colGenerator, blockColors, blockType):
    """A helper for plotConvertedRZTReactor.

    Returns a new color for ``blockType`` if it is not yet in ``blockColors``,
    otherwise None (meaning: keep the existing assignment).
    """
    nextColor = None
    if blockType not in blockColors:
        if "fuel" in blockType:
            nextColor = "tomato"
        elif "structure" in blockType:
            nextColor = "lightgrey"
        elif "radial shield" in blockType:
            nextColor = "lightgrey"
        elif "duct" in blockType:
            nextColor = "grey"
        else:
            # pull candidates from the generator until one is a valid matplotlib color
            while True:
                try:
                    nextColor = next(colGenerator)
                    colConverter.to_rgba(nextColor)
                    break
                except ValueError:
                    continue
    return nextColor


# ================================================
# FILE: armi/utils/properties.py
# ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains methods for adding properties with custom behaviors to classes."""

import numpy as np


def areEqual(val1, val2, relativeTolerance=0.0):
    # exact comparison first; only fall through to allclose for numeric data
    hackEqual = numpyHackForEqual(val1, val2)
    if hackEqual or not relativeTolerance:
        # takes care of dictionaries and strings.
        return hackEqual
    return np.allclose(val1, val2, rtol=relativeTolerance, atol=0.0)  # does not work for dictionaries or strings


def numpyHackForEqual(val1, val2):
    """Checks lots of types for equality like strings and dicts."""
    # when doing this with numpy arrays you get an array of booleans which causes the value error
    if isinstance(val1, np.ndarray) and isinstance(val2, np.ndarray):
        if val1.size != val2.size:
            return False
    notEqual = val1 != val2
    try:
        # should work for everything but numpy arrays
        if isinstance(notEqual, np.ndarray) and notEqual.size == 0:
            # two empty arrays compare equal
            return True
        return not notEqual.__bool__()
    except (AttributeError, ValueError):
        # from comparing 2 numpy arrays
        return not notEqual.any()


def createImmutableProperty(name, dependencyAction, doc):
    """Create a property that raises useful AttributeErrors when the attribute has not been assigned.

    Parameters
    ----------
    name : str
        Name of the property. This is unfortunately necessary, because the method does not know
        the name of the property being assigned by the developer.
    dependencyAction : str
        Description of an action that needs to be performed in order to set the value of the
        property.
    doc : str
        Docstring of the property.

    See Also
    --------
    armi.utils.properties.unlockImmutableProperties
    armi.utils.properties.lockImmutableProperties

    Examples
    --------
    The following example is essentially exactly how this should be used.

    >>> class SomeClass:
    ...     myNum = createImmutableProperty("myNum", "You must invoke the initialize() method", "My random number")
    ...
    ...     def initialize(self, val):
    ...         unlockImmutableProperties(self)
    ...         try:
    ...             self.myNum = val
    ...         finally:
    ...             lockImmutableProperties(self)

    >>> sc = SomeClass()
    >>> sc.myNum.__doc__
    My random number
    >>> sc.myNum  # raises error, because it hasn't been assigned
    ImmutablePropertyError
    >>> sc.myNum = 42.1
    >>> sc.myNum
    42.1
    >>> sc.myNum = 21.05 * 2  # raises error, because the value cannot change after it has been assigned.
    ImmutablePropertyError
    >>> sc.initialize(42.1)  # this works, because the values are the same.
    >>> sc.initialize(100)  # this fails, because the value cannot change
    ImmutablePropertyError
    """
    # actual storage lives in a name-mangled private attribute on the instance
    privateName = "_" + name

    def _getter(self):
        try:
            return getattr(self, privateName)
        except AttributeError:
            # while unlocked, reading an unassigned property yields None instead of raising
            if getattr(self, "-unlocked", False):
                return None
            raise ImmutablePropertyError(
                "Attribute {} on {} has not been set, must read {} file first.".format(name, self, dependencyAction)
            )

    def _setter(self, value):
        if hasattr(self, privateName):
            currentVal = getattr(self, privateName)
            if currentVal is None or value is None:
                # a None on either side never conflicts; keep the non-None value
                setattr(self, privateName, value if currentVal is None else currentVal)
            elif not numpyHackForEqual(currentVal, value):
                # re-assigning a different value is the illegal operation being guarded
                raise ImmutablePropertyError(
                    "{} on {} has already been set by reading {} file.\n"
                    "The original value: ({})\n"
                    "does not match the new value: ({}).".format(name, self, dependencyAction, currentVal, value)
                )
        else:
            setattr(self, privateName, value)

    return property(_getter, _setter, doc=doc)


class ImmutablePropertyError(Exception):
    """Exception raised when performing an illegal operation on an immutable property."""


def unlockImmutableProperties(lib):
    """Unlock an object that has immutable properties for modification.

    This will prevent raising errors when reading or assigning values to an immutable property

    See Also
    --------
    armi.utils.properties.createImmutableProperty
    """
    setattr(lib, "-unlocked", True)


def lockImmutableProperties(lib):
    """Lock an object that has immutable properties such that accessing unassigned properties,
    or attempting to modify the properties raises an exception.

    See Also
    --------
    armi.utils.properties.createImmutableProperty
    """
    del lib.__dict__["-unlocked"]


# ================================================
# FILE: armi/utils/reportPlotting.py
# ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plotting Utils specific to reports.

This module makes heavy use of matplotlib. Beware that plots generated with matplotlib
may not free their memory, even after the plot is closed, and excessive use of
plotting functions may gobble up all of your machine's memory.

Therefore, you should use these plotting tools judiciously. It is not advisable to make
a plot in every physics interaction, for instance, plot some sequence of objects in a
loop at every time node. If you start to see your memory usage grow inexplicably, you
should question any plots that you are generating.
"""

import itertools
import math
import os

import matplotlib.path
import matplotlib.projections.polar
import matplotlib.pyplot as plt
import matplotlib.spines
import numpy as np
from matplotlib import colormaps
from matplotlib import colors as mpltcolors

from armi import runLog, settings
from armi.bookkeeping import report
from armi.reactor.flags import Flags


def plotReactorPerformance(reactor, dbi, buGroups, extension=None, history=None):
    """
    Generates a set of plots useful in reactor analysis given a populated reactor.

    Parameters
    ----------
    reactor : armi.reactor.reactors.Reactor
        The reactor to plot
    dbi : armi.bookkeeping.db.DatabaseInterface
        The DatabaseInterface object from which to pull historical data
    buGroups : list of float
        The burnup groups in the problem
    extension : str, optional
        The file extension for saving plots
    history: armi.bookkeeping.historyTracker.HistoryTrackerInterface object
        The history tracker interface
    """
    # NOTE(review): ``buGroups`` and ``history`` are not used in this body --
    # confirm whether they are still needed by the public signature.
    try:
        data = dbi.getHistory(reactor, params=["cycle", "time"])
        data.update(
            dbi.getHistory(
                reactor.core,
                params=[
                    "keff",
                    "keffUnc",
                    "maxPD",
                    "maxBuI",
                    "maxBuF",
                    "maxDPA",
                    "numMoves",
                ],
            )
        )
    except Exception as ee:
        runLog.warning(
            "Cannot plot rxPerformance without the data model present in the database.\nError: {}".format(ee)
        )
        return

    # data is a dict of OrderedDict: { <paramName> : { (<cycle>, <node>) : value } }
    # flatten the (cycle, node)-keyed dicts into plain value lists per parameter
    scalars = {key: list(timeStepDict.values()) for key, timeStepDict in data.items()}
    runLog.info("scalars for plotting {}".format(scalars))

    valueVsTime(
        reactor.name,
        scalars["time"],
        scalars["maxPD"],
        "maxPD",
        "Max Areal PD (MW/m^2)",
        "Max Areal PD vs. time",
        0.0,
        extension=extension,
    )
    keffVsTime(
        reactor.name,
        scalars["time"],
        scalars["keff"],
        scalars["keffUnc"],
        ymin=1.0,
        extension=extension,
    )
    movesVsCycle(reactor.name, scalars, extension=extension)
def valueVsTime(name, x, y, key, yaxis, title, ymin=None, extension=None):
    """
    Plots a value vs. time with a standard graph format.

    Parameters
    ----------
    name : str
        Reactor.name
    x : iterable
        The x-axis values (the abscissa)
    y : iterable
        The y-axis values (the ordinate)
    key : str
        A key word to add the item to the report interface
    yaxis : str
        The y axis label
    title : str
        the plot title
    ymin : str, optional
        The minimum y-axis value. If any ordinates are less than this value, it will be ignored.
    extension : str, optional
        The file extension for saving the figure
    """
    extension = extension or settings.Settings()["outputFileExtension"]

    fig = plt.figure()
    plt.plot(x, y, ".-")
    plt.xlabel("Time (yr)")
    plt.ylabel(yaxis)
    plt.grid(color="0.70")
    plt.title(title + " for {0}".format(name))

    if ymin is not None and all([yi > ymin for yi in y]):
        # set ymin all values are greater than it and it exists.
        ax = plt.gca()
        ax.set_ylim(bottom=ymin)

    figName = name + "." + key + "." + extension
    plt.savefig(figName)
    # BUGFIX: close the figure we just made; plt.close(1) closed figure *number 1*,
    # leaking this one whenever other figures were already open.
    plt.close(fig)
    report.setData("PlotTime", os.path.abspath(figName), report.TIME_PLOT)


def keffVsTime(name, time, keff, keffUnc=None, ymin=None, extension=None):
    """
    Plots core keff vs. time.

    Parameters
    ----------
    name : str
        reactor.name
    time : list
        Time in years
    keff : list
        Keff in years
    keffUnc : list, optional
        Uncontrolled keff or None (will be plotted as secondary series)
    ymin : float, optional
        Minimum y-axis value to target.
    extension : str, optional
        The file extension for saving the figure
    """
    extension = extension or settings.Settings()["outputFileExtension"]

    fig = plt.figure()
    # BUGFIX: keffUnc defaults to None and any(None) raises TypeError; guard first.
    hasUncontrolled = keffUnc is not None and any(keffUnc)
    if hasUncontrolled:
        label1 = "Controlled k-eff"
        label2 = "Uncontrolled k-eff"
    else:
        label1 = None

    plt.plot(time, keff, ".-", label=label1)
    if hasUncontrolled:
        plt.plot(time, keffUnc, ".-", label=label2)
        plt.legend()
    plt.xlabel("Time (yr)")
    plt.ylabel("k-eff")
    plt.grid(color="0.70")
    plt.title("k-eff vs. time" + " for {0}".format(name))

    if ymin is not None and all([yi > ymin for yi in keff]):
        # set ymin all values are greater than it and it exists.
        ax = plt.gca()
        ax.set_ylim(bottom=ymin)

    figName = name + ".keff." + extension
    plt.savefig(figName)
    # close the specific figure, not figure number 1 (see valueVsTime)
    plt.close(fig)
    report.setData("K-Eff", os.path.abspath(figName), report.KEFF_PLOT)
def movesVsCycle(name, scalars, extension=None):
    """
    Make a bar chart showing the number of moves per cycle in the full core.

    A move is defined as an assembly being picked up, moved, and put down. So if two assemblies
    are swapped, that is 2 moves. Note that it does not count temporary storage for such swaps.
    This is an approximation because in a chain of moves, only one out of the chain would have to
    be temporarily stored. So as the chains get longer, this approximation gets more accurate.

    Parameters
    ----------
    name : str
        reactor.name
    scalars : dict
        Scalar history data; this uses the ``numMoves`` and ``cycle`` entries
        (parallel per-time-node lists).
    extension : str, optional
        The file extension for saving the figure

    See Also
    --------
    FuelHandler.outage : sets the number of moves in each cycle
    """
    extension = extension or settings.Settings()["outputFileExtension"]

    # one bar per cycle: keep the first recorded move count for each cycle
    # (use the cycle scalar value itself in case burnSteps is dynamic)
    cycles = []
    yvals = []
    for moves, cycle in zip(scalars["numMoves"], scalars["cycle"]):
        if moves is None:
            moves = 0.0
        if cycle not in cycles:
            cycles.append(cycle)
            yvals.append(moves)

    fig = plt.figure(figsize=(12, 6))  # make it wide and short
    plt.bar(cycles, yvals, align="center")
    if len(cycles) > 1:
        plt.xticks(cycles)
    plt.grid(color="0.70")
    plt.xlabel("Cycle")
    plt.ylabel("Number of Moves")
    plt.title("Fuel management rate for " + name)
    figName = name + ".moves." + extension
    plt.savefig(figName)
    # BUGFIX: close this figure; plt.close(1) targeted figure number 1 and leaked
    # this one whenever other figures were already open.
    plt.close(fig)
    report.setData("Moves Plot", os.path.abspath(figName), report.MOVES_PLOT)
def plotCoreOverviewRadar(reactors, reactorNames=None):
    """
    Plot key features of a set of reactors on radar/spider plots.

    Useful for comparing reactors to one another. One radar subplot is made per
    "scraper" category (neutronics, mechanical, fuel, dimensions); each reactor
    adds one polygon per subplot, normalized against the first reactor.
    """
    runLog.info("Plotting reactor comparison.")
    fig = plt.figure(figsize=(17, 9))
    fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)
    colors = itertools.cycle(["b", "r", "g"])
    axes = {}  # physicsName -> radar axis (created on first reactor)
    thetas = {}  # physicsName -> spoke angles
    scrapers = [
        _getNeutronicVals,
        _getMechanicalVals,
        _getFuelVals,
        _getPhysicalVals,
    ]
    firstReactorVals = {}  # for normalization
    numRows, numCols = 2, (len(scrapers) + 1) // 2
    for r, color in zip(reactors, colors):
        for si, scraper in enumerate(scrapers):
            physicsName, physicsLabels, physicsVals = scraper(r)
            runLog.info("{}".format(physicsName))
            runLog.info("\n".join(["{:10s} {}".format(label, val) for label, val in zip(physicsLabels, physicsVals)]))
            physicsVals = np.array(physicsVals)
            theta = thetas.get(physicsName)
            if theta is None:
                # first time through. Build the radar, store the axis
                theta = _radarFactory(len(physicsLabels), frame="polygon")
                thetas[physicsName] = theta
                firstReactorVals[physicsName] = physicsVals
                ax = fig.add_subplot(numRows, numCols, si + 1, projection="radar")
                axes[physicsName] = ax
                ax.set_title(
                    physicsName,
                    weight="bold",
                    size="medium",
                    position=(0.5, 1.1),
                    horizontalalignment="center",
                    verticalalignment="center",
                )
                ax.set_var_labels(physicsLabels)
                plt.rgrids([0.2, 0.4, 0.6, 0.8])  # radial grid lines
            else:
                ax = axes[physicsName]
            # suppress warnings when a first-reactor value is zero
            with np.errstate(divide="ignore", invalid="ignore"):
                vals = (
                    physicsVals / firstReactorVals[physicsName]
                )  # normalize to first reactor b/c values differ by a lot.
            # 0/0 -> NaN; substitute a small fixed value so the polygon stays drawable
            vals[np.isnan(vals)] = 0.2
            ax.plot(theta, vals, color=color)
            ax.fill(theta, vals, facecolor=color, alpha=0.25)
    if reactorNames:
        plt.subplot(numRows, numCols, 1)  # legend on top-left plot
        legend = plt.legend(reactorNames, loc=(0.9, 0.95), labelspacing=0.1)
        plt.setp(legend.get_texts(), fontsize="small")
    plt.figtext(
        0.5,
        0.965,
        "Comparison",
        ha="center",
        color="black",
        weight="bold",
        size="large",
    )
    plt.savefig("reactor_comparison.png")
    plt.close()
def _getNeutronicVals(r):
    """Scrape neutronics figures of merit from the core params."""
    pairs = [
        ("Rx. Swing", r.core.p.rxSwing),
        ("Beta", r.core.p.beta),
        ("Peak flux", r.core.p.maxFlux),
    ]
    labels = tuple(pair[0] for pair in pairs)
    vals = tuple(pair[1] for pair in pairs)
    return "Neutronics", labels, vals


def _getMechanicalVals(r):
    """Scrape (currently placeholder) mechanical figures of merit."""
    pairs = [
        ("Hold down", 1.0),
        ("Distortion", 3.0),
    ]
    labels = tuple(pair[0] for pair in pairs)
    vals = tuple(pair[1] for pair in pairs)
    return "Mechanical", labels, vals


def _getPhysicalVals(r):
    """Scrape dimensional figures of merit (heights, radius, masses)."""
    fuelA = r.core.getAssemblies(Flags.FUEL)

    # average cold/input fuel height over all fuel blocks of all fuel assemblies
    avgHeight = 0.0
    for a in fuelA:
        for b in a.iterBlocks(Flags.FUEL):
            try:
                avgHeight += b.getInputHeight()
            except AttributeError:
                # fall back to the current height when no input height is available
                avgHeight += b.getHeight()
    avgHeight /= len(fuelA)
    radius = r.core.getCoreRadius()

    pairs = [
        ("Cold fuel height", avgHeight),
        ("Fuel assems", len(fuelA)),
        ("Assem weight", r.core.getFirstAssembly(Flags.FUEL).getMass()),
        ("Core radius", radius),
        ("Core aspect ratio", (2 * radius) / avgHeight),  # width/height
        ("Fissile mass", r.core.getFissileMass()),
    ]
    labels = tuple(pair[0] for pair in pairs)
    vals = tuple(pair[1] for pair in pairs)
    return "Dimensions", labels, vals


def _getFuelVals(r):
    """Scrape fuel-performance figures of merit."""
    # average clad thickness-over-diameter across all fuel blocks with clad
    tOverD = 0.0
    numClad = 0.0
    for b in r.core.iterBlocks(Flags.FUEL):
        clad = b.getComponent(Flags.CLAD)
        if clad:
            cladOD = clad.getDimension("od")
            cladID = clad.getDimension("id")
            tOverD += (cladOD - cladID) / cladOD
            numClad += 1
    tOverD /= numClad

    data = [
        (
            "Smear dens.",
            r.core.calcAvgParam("smearDensity", generationNum=2, typeSpec=Flags.FUEL),
        ),
        ("Clad T/D", tOverD),
        ("dpa", r.core.p.maxdetailedDpaPeak),
    ]
    labels = tuple(item[0] for item in data)
    vals = tuple(item[1] for item in data)
    return "Fuel Perf.", labels, vals
    # evenly spaced angles, shifted by pi/2 so the first axis points straight up
    theta = (np.linspace(0, 2 * np.pi, numVars, endpoint=False) + np.pi / 2) % (2.0 * np.pi)

    def drawPolyPatch():
        # axes patch for the "polygon" frame: polygon inscribed in the unit circle at (0.5, 0.5)
        verts = _unitPolyVerts(theta)
        return plt.Polygon(verts, closed=True, edgecolor="k")

    def drawCirclePatch():
        # unit circle centered on (0.5, 0.5)
        return plt.Circle((0.5, 0.5), 0.5)

    def close_line(line):
        """Closes the input line."""
        x, y = line.get_data()
        if x[0] != x[-1]:
            # append the first point so the radar trace forms a closed loop
            x = np.concatenate((x, [x[0]]))
            y = np.concatenate((y, [y[0]]))
            line.set_data(x, y)

    patchDict = {"polygon": drawPolyPatch, "circle": drawCirclePatch}
    if frame not in patchDict:
        raise ValueError("unknown value for `frame`: %s" % frame)

    class _RadarAxes(matplotlib.projections.polar.PolarAxes):
        """
        Radar projection.

        Note different PEP8 naming convention to comply with parent class.
        """

        name = "radar"
        # use 1 line segment to connect specified points
        RESOLUTION = 1
        # define draw_frame method
        draw_patch = staticmethod(patchDict[frame])

        def fill(self, *args, **kwargs):
            """Override fill so that line is closed by default."""
            closed = kwargs.pop("closed", True)
            return super(_RadarAxes, self).fill(closed=closed, *args, **kwargs)

        def plot(self, *args, **kwargs):
            """Override plot so that line is closed by default."""
            lines = super(_RadarAxes, self).plot(*args, **kwargs)
            for line in lines:
                close_line(line)

        def set_var_labels(self, labels):
            # place one angular tick label per variable
            self.set_thetagrids(np.degrees(theta), labels)

        def _gen_axes_patch(self):
            return self.draw_patch()

        def _gen_axes_spines(self):
            if frame == "circle":
                return matplotlib.projections.polar.PolarAxes._gen_axes_spines(self)
            # The following is a hack to get the spines (i.e. the axes frame)
            # to draw correctly for a polygon frame.
            # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.
spine_type = "circle" verts = _unitPolyVerts(theta) # close off polygon by repeating first vertex verts.append(verts[0]) path = matplotlib.path.Path(verts) spine = matplotlib.spines.Spine(self, spine_type, path) spine.set_transform(self.transAxes) return {"polar": spine} matplotlib.projections.register_projection(_RadarAxes) return theta def _unitPolyVerts(theta): """Return vertices of polygon for subplot axes. This polygon is circumscribed by a unit circle centered at (0.5, 0.5) """ x0 = y0 = r = 0.5 verts = list(zip(r * np.cos(theta) + x0, r * np.sin(theta) + y0)) return verts def createPlotMetaData(title, xLabel, yLabel, xMajorTicks=None, yMajorTicks=None, legendLabels=None): """ Create plot metadata (title, labels, ticks). Parameters ---------- title : str Plot title xLabel : str x-axis label yLabel : str y-axis label xMajorTicks : list of float List of axial position at which to insert major ticks yMajorTicks : list of float List of axial position at which to insert major ticks legendsLabels : list of str Labels to used in the plot legend Returns ------- metadata : dict Dictionary with all plot metadata information """ metadata = {} metadata["title"] = title metadata["xlabel"] = xLabel metadata["ylabel"] = yLabel metadata["xMajorTicks"] = xMajorTicks metadata["yMajorTicks"] = yMajorTicks metadata["legendLabels"] = legendLabels return metadata def plotAxialProfile(zVals, dataVals, fName, metadata, nPlot=1, yLog=False): """ Plot the axial profile of quantity zVals. Parameters ---------- zVals: list of float Axial position of the quantity to be plotted dataVals: list of float Axial quantity to be plotted fName: str The file name for the plot image file. metadata : bool Metadata (title, labels, legends, ticks) nPlot: int Number of plots to be generated yLog: bool Boolean flag indicating that y-axis is to be plotted on a log scale. 
""" plt.figure(figsize=(15, 10)) plt.xlabel(metadata["xlabel"]) plt.ylabel(metadata["ylabel"]) plt.title(metadata["title"]) if metadata["legendLabels"]: plt.legend(metadata["legendLabels"], loc=1, fontsize="small") ax = plt.gca() if yLog: # plot the axial profiles on a log scale dataVals = np.log10(abs(dataVals)) if nPlot > 1: colormap = colormaps["jet"] norm = mpltcolors.Normalize(0, nPlot - 1) # alternate between line styles to help distinguish neighboring groups (close on the color map) lineTypes = ["", ":", "--", "-."] nLineTypes = len(lineTypes) for n in range(nPlot): # reverse order for color map, so high E is red and low E is blue n_ = nPlot - n - 1 color = colormap(norm(n_)) lineTypeIndex = int(math.fmod(n, nLineTypes)) plt.plot(zVals, dataVals[:, n], lineTypes[lineTypeIndex], color=color) else: plt.plot(zVals, dataVals) ax.autoscale_view() if metadata["xMajorTicks"]: ax.set_xticks(metadata["xMajorTicks"]) ax.set_xticklabels([str(int(x)) for x in metadata["xMajorTicks"]], fontsize=12) if metadata["yMajorTicks"]: ax.set_xticks(metadata["yMajorTicks"]) ax.set_xticklabels([str(int(x)) for x in metadata["yMajorTicks"]], fontsize=12) ax.xaxis.grid() ax.yaxis.grid() plt.savefig(fName) plt.close() ================================================ FILE: armi/utils/tabulate.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Pretty-print tabular data. 
This file started out as the MIT-licensed "tabulate". Though we have made, and will continue to
make, many arbitrary changes as we need. Thanks to the tabulate team.

https://github.com/astanin/python-tabulate

Usage
-----
The module provides just one function, `tabulate`, which takes a list of lists or other tabular data
type as the first argument, and outputs a nicely-formatted plain-text table::

    >>> from armi.utils.tabulate import tabulate

    >>> table = [["Sun",696000,1989100000],["Earth",6371,5973.6],
    ...     ["Moon",1737,73.5],["Mars",3390,641.85]]

    >>> print(tabulate(table))
    -----  ------  -------------
    Sun    696000     1.9891e+09
    Earth    6371  5973.6
    Moon     1737    73.5
    Mars     3390   641.85
    -----  ------  -------------

The following tabular data types are supported:

- list of lists or another iterable of iterables
- list or another iterable of dicts (keys as columns)
- dict of iterables (keys as columns)
- list of dataclasses (field names as columns)
- two-dimensional NumPy array
- NumPy record arrays (names as columns)

Table headers
-------------
To print nice column headers, supply the second argument (`headers`):

- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used

Otherwise a headerless table is produced.

If the number of headers is less than the number of columns, they are supposed to be names of the
last columns. This is consistent with the plain-text format of R::

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...     headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19

Column and Headers alignment
----------------------------
`tabulate` tries to detect column types automatically, and aligns the values properly. By default
it aligns decimal points of the numbers (or flushes integer numbers to the right), and flushes
everything else to the left.
Possible column alignments (`numAlign`, `strAlign`) are: "right", "center", "left", "decimal" (only for `numAlign`), and None (to disable alignment). `colGlobalAlign` allows for global alignment of columns, before any specific override from `colAlign`. Possible values are: None (defaults according to coltype), "right", "center", "decimal", "left". `colAlign` allows for column-wise override starting from left-most column. Possible values are: "global" (no override), "right", "center", "decimal", "left". `headersGlobalAlign` allows for global headers alignment, before any specific override from `headersAlign`. Possible values are: None (follow columns alignment), "right", "center", "left". `headersAlign` allows for header-wise override starting from left-most given header. Possible values are: "global" (no override), "same" (follow column alignment), "right", "center", "left". Note on intended behaviour: If there is no `data`, any column alignment argument is ignored. Hence, in this case, header alignment cannot be inferred from column alignment. Table formats ------------- `intFmt` is a format specification used for columns which contain numeric data without a decimal point. This can also be a list or tuple of format strings, one per column. `floatFmt` is a format specification used for columns which contain numeric data with a decimal point. This can also be a list or tuple of format strings, one per column. `None` values are replaced with a `missingVal` string (like `floatFmt`, this can also be a list of values for different columns):: >>> print(tabulate([["spam", 1, None], ... ["eggs", 42, 3.14], ... ["other", None, 2.7]], missingVal="?")) ----- -- ---- spam 1 ? eggs 42 3.14 other ? 2.7 ----- -- ---- Various plain-text table formats (`tableFmt`) are supported: 'plain', 'simple', 'grid', 'rst', and `tsv`. Variable `tabulateFormats` contains the list of currently supported formats. 
"plain" format doesn't use any pseudographics to draw tables, it separates columns with a double space:: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "plain")) strings numbers spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tableFmt="plain")) spam 41.9999 eggs 451 "simple" format is like Pandoc simple_tables:: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "simple")) strings numbers --------- --------- spam 41.9999 eggs 451 >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tableFmt="simple")) ---- -------- spam 41.9999 eggs 451 ---- -------- "grid" is similar to tables produced by Emacs table.el package or Pandoc grid_tables:: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "grid")) +-----------+-----------+ | strings | numbers | +===========+===========+ | spam | 41.9999 | +-----------+-----------+ | eggs | 451 | +-----------+-----------+ >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tableFmt="grid")) +------+----------+ | spam | 41.9999 | +------+----------+ | eggs | 451 | +------+----------+ "rst" is like a simple table format from reStructuredText; please note that reStructuredText accepts also "grid" tables:: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "rst")) ========= ========= strings numbers ========= ========= spam 41.9999 eggs 451 ========= ========= >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tableFmt="rst")) ==== ======== spam 41.9999 eggs 451 ==== ======== Number parsing -------------- By default, anything which can be parsed as a number is a number. This ensures numbers represented as strings are aligned properly. This can lead to weird results for particular strings such as specific git SHAs e.g. "42992e1" will be parsed into the number 429920 and aligned as such. 
To completely disable number parsing (and alignment), use `disableNumParse=True`. For more fine grained control, a list column indices is used to disable number parsing only on those columns e.g. `disableNumParse=[0, 2]` would disable number parsing only on the first and third columns. Column Widths and Auto Line Wrapping ------------------------------------ Tabulate will, by default, set the width of each column to the length of the longest element in that column. However, in situations where fields are expected to reasonably be too long to look good as a single line, tabulate can help automate word wrapping long fields for you. Use the parameter `maxcolwidth` to provide a list of maximal column widths:: >>> print(tabulate( \ [('1', 'John Smith', \ 'This is a rather long description that might look better if it is wrapped a bit')], \ headers=("Issue Id", "Author", "Description"), \ maxColWidths=[None, None, 30], \ tableFmt="grid" \ )) +------------+------------+-------------------------------+ | Issue Id | Author | Description | +============+============+===============================+ | 1 | John Smith | This is a rather long | | | | description that might look | | | | better if it is wrapped a bit | +------------+------------+-------------------------------+ Header column width can be specified in a similar way using `maxheadercolwidth`. """ import dataclasses import math import re from collections import namedtuple from collections.abc import Iterable, Sized from functools import partial, reduce from itertools import chain, zip_longest from textwrap import TextWrapper from armi import runLog __all__ = ["tabulate", "tabulateFormats"] # minimum extra space in headers MIN_PADDING = 2 # Whether or not to preserve leading/trailing whitespace in data. 
PRESERVE_WHITESPACE = False

# default cell format specs, used when the caller does not supply floatFmt/intFmt/missingVal
_DEFAULT_FLOAT_FMT = "g"
_DEFAULT_INT_FMT = ""
_DEFAULT_MISSING_VAL = ""
# default align will be overwritten by "left", "center" or "decimal" depending on the formatter
_DEFAULT_ALIGN = "default"

# Constant that can be used as part of passed rows to generate a separating line. It is purposely an
# unprintable character, very unlikely to be used in a table
SEPARATING_LINE = "\001"

# a horizontal rule: begin/end delimiters, the fill character, and the column separator
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])

# a row of cell text: begin/end delimiters and the column separator
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])

# A table structure is supposed to be:
#
#     --- lineabove ---------
#         headerrow
#     --- linebelowheader ---
#         datarow
#     --- linebetweenrows ---
#     ... (more datarows) ...
#     --- linebetweenrows ---
#         last datarow
#     --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# withHeaderHide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple(
    "TableFormat",
    [
        "lineabove",
        "linebelowheader",
        "linebetweenrows",
        "linebelow",
        "headerrow",
        "datarow",
        "padding",
        "withHeaderHide",
    ],
)


def _isSeparatingLine(row):
    # a row requests a separator when its first (or second) element is the SEPARATING_LINE sentinel
    rowType = type(row)
    isSl = (rowType is list or rowType is str) and (
        (len(row) >= 1 and row[0] == SEPARATING_LINE) or (len(row) >= 2 and row[1] == SEPARATING_LINE)
    )
    return isSl


def _rstEscapeFirstColumn(rows, headers):
    """Escape empty first-column cells as ".." so reStructuredText tables stay valid."""

    def escapeEmpty(val):
        if isinstance(val, (str, bytes)) and not val.strip():
            return ".."
        else:
            return val

    newHeaders = list(headers)
    newRows = []
    if headers:
        newHeaders[0] = escapeEmpty(headers[0])
    for row in rows:
        newRow = list(row)
        if newRow:
            newRow[0] = escapeEmpty(row[0])
        newRows.append(newRow)
    return newRows, newHeaders


# registry of supported output formats; see the TableFormat structure comment above
_tableFormats = {
    "armi": TableFormat(
        lineabove=Line("", "-", "  ", ""),
        linebelowheader=Line("", "-", "  ", ""),
        linebetweenrows=None,
        linebelow=Line("", "-", "  ", ""),
        headerrow=DataRow("", "  ", ""),
        datarow=DataRow("", "  ", ""),
        padding=0,
        withHeaderHide=None,
    ),
    "simple": TableFormat(
        lineabove=Line("", "-", "  ", ""),
        linebelowheader=Line("", "-", "  ", ""),
        linebetweenrows=None,
        linebelow=Line("", "-", "  ", ""),
        headerrow=DataRow("", "  ", ""),
        datarow=DataRow("", "  ", ""),
        padding=0,
        withHeaderHide=["lineabove", "linebelow"],
    ),
    "plain": TableFormat(
        lineabove=None,
        linebelowheader=None,
        linebetweenrows=None,
        linebelow=None,
        headerrow=DataRow("", "  ", ""),
        datarow=DataRow("", "  ", ""),
        padding=0,
        withHeaderHide=None,
    ),
    "grid": TableFormat(
        lineabove=Line("+", "-", "+", "+"),
        linebelowheader=Line("+", "=", "+", "+"),
        linebetweenrows=Line("+", "-", "+", "+"),
        linebelow=Line("+", "-", "+", "+"),
        headerrow=DataRow("|", "|", "|"),
        datarow=DataRow("|", "|", "|"),
        padding=1,
        withHeaderHide=None,
    ),
    "github": TableFormat(
        lineabove=Line("|", "-", "|", "|"),
        linebelowheader=Line("|", "-", "|", "|"),
        linebetweenrows=None,
        linebelow=None,
        headerrow=DataRow("|", "|", "|"),
        datarow=DataRow("|", "|", "|"),
        padding=1,
        withHeaderHide=["lineabove"],
    ),
    "pretty": TableFormat(
        lineabove=Line("+", "-", "+", "+"),
        linebelowheader=Line("+", "-", "+", "+"),
        linebetweenrows=None,
        linebelow=Line("+", "-", "+", "+"),
        headerrow=DataRow("|", "|", "|"),
        datarow=DataRow("|", "|", "|"),
        padding=1,
        withHeaderHide=None,
    ),
    "psql": TableFormat(
        lineabove=Line("+", "-", "+", "+"),
        linebelowheader=Line("|", "-", "+", "|"),
        linebetweenrows=None,
        linebelow=Line("+", "-", "+", "+"),
        headerrow=DataRow("|", "|", "|"),
        datarow=DataRow("|", "|", "|"),
        padding=1,
        withHeaderHide=None,
    ),
    "rst": TableFormat(
        lineabove=Line("", "=", "  ", ""),
        linebelowheader=Line("", "=", "  ", ""),
        linebetweenrows=None,
        linebelow=Line("", "=", "  ", ""),
        headerrow=DataRow("", "  ", ""),
        datarow=DataRow("", "  ", ""),
        padding=0,
        withHeaderHide=None,
    ),
    "tsv": TableFormat(
        lineabove=None,
        linebelowheader=None,
        linebetweenrows=None,
        linebelow=None,
        headerrow=DataRow("", "\t", ""),
        datarow=DataRow("", "\t", ""),
        padding=0,
        withHeaderHide=None,
    ),
}

tabulateFormats = list(sorted(_tableFormats.keys()))

# The table formats for which multiline cells will be folded into subsequent table rows. The key is
# the original format, the value is the format that will be used to represent it.
multilineFormats = {
    "armi": "armi",
    "plain": "plain",
    "simple": "simple",
    "grid": "grid",
    "pretty": "pretty",
    "psql": "psql",
    "rst": "rst",
}

_multilineCodes = re.compile(r"\r|\n|\r\n")
_multilineCodesBytes = re.compile(b"\r|\n|\r\n")

# Handle ANSI escape sequences for both control sequence introducer (CSI) and operating system
# command (OSC). Both of these begin with 0x1b (or octal 033), which will be shown below as ESC.
#
# CSI ANSI escape codes have the following format, defined in section 5.4 of ECMA-48:
#
# CSI: ESC followed by the '[' character (0x5b)
# Parameter Bytes: 0..n bytes in the range 0x30-0x3f
# Intermediate Bytes: 0..n bytes in the range 0x20-0x2f
# Final Byte: a single byte in the range 0x40-0x7e
#
# Also include the terminal hyperlink sequences as described here:
# https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda
#
# OSC 8 ; params ; uri ST display_text OSC 8 ;; ST
#
# Example: \x1b]8;;https://example.com\x5ctext to show\x1b]8;;\x5c
#
# Where:
# OSC: ESC followed by the ']' character (0x5d)
# params: 0..n optional key value pairs separated by ':' (e.g. foo=bar:baz=qux:abc=123)
# URI: the actual URI with protocol scheme (e.g.
# https://, file://, ftp://)
# ST: ESC followed by the '\' character (0x5c)
_esc = r"\x1b"
_csi = rf"{_esc}\["
_osc = rf"{_esc}\]"
_st = rf"{_esc}\\"

_ansiEscapePat = rf"""
    (
        # terminal colors, etc
        {_csi}        # CSI
        [\x30-\x3f]*  # parameter bytes
        [\x20-\x2f]*  # intermediate bytes
        [\x40-\x7e]   # final byte
    |
        # terminal hyperlinks
        {_osc}8;        # OSC opening
        (\w+=\w+:?)*    # key=value params list (submatch 2)
        ;               # delimiter
        ([^{_esc}]+)    # URI - anything but ESC (submatch 3)
        {_st}           # ST
        ([^{_esc}]+)    # link text - anything but ESC (submatch 4)
        {_osc}8;;{_st}  # "closing" OSC sequence
    )
"""
_ansiCodes = re.compile(_ansiEscapePat, re.VERBOSE)
_ansiCodesBytes = re.compile(_ansiEscapePat.encode("utf8"), re.VERBOSE)
_floatWithThousandsSeparators = re.compile(r"^(([+-]?[0-9]{1,3})(?:,([0-9]{3}))*)?(?(1)\.[0-9]*|\.[0-9]+)?$")


def _isnumberWithThousandsSeparator(string):
    """Function to test of a string is a number with a thousands separator.

    >>> _isnumberWithThousandsSeparator(".")
    False
    >>> _isnumberWithThousandsSeparator("1")
    True
    >>> _isnumberWithThousandsSeparator("1.")
    True
    >>> _isnumberWithThousandsSeparator(".1")
    True
    >>> _isnumberWithThousandsSeparator("1000")
    False
    >>> _isnumberWithThousandsSeparator("1,000")
    True
    >>> _isnumberWithThousandsSeparator("1,0000")
    False
    >>> _isnumberWithThousandsSeparator(b"1,000.1234")
    True
    >>> _isnumberWithThousandsSeparator("+1,000.1234")
    True
    >>> _isnumberWithThousandsSeparator("-1,000.1234")
    True
    """
    try:
        # bytes input: compare as text
        string = string.decode()
    except (UnicodeDecodeError, AttributeError):
        pass

    return bool(re.match(_floatWithThousandsSeparators, string))


def _isconvertible(conv, string):
    # True when conv(string) succeeds (e.g. conv=float or int)
    try:
        conv(string)
        return True
    except (ValueError, TypeError):
        return False


def _isnumber(string):
    """Helper function; is this string a number.

    >>> _isnumber("123.45")
    True
    >>> _isnumber("123")
    True
    >>> _isnumber("spam")
    False
    >>> _isnumber("123e45678")
    False
    >>> _isnumber("inf")
    True
    """
    if not _isconvertible(float, string):
        return False
    elif isinstance(string, (str, bytes)) and (math.isinf(float(string)) or math.isnan(float(string))):
        # only accept the explicit spellings of inf/nan, not overflowing literals like "123e45678"
        return string.lower() in ["inf", "-inf", "nan"]
    return True


def _isint(string, inttype=int):
    """Determine if a string is an integer.

    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    return (
        type(string) is inttype
        or (
            (hasattr(string, "is_integer") or hasattr(string, "__array__"))
            and str(type(string)).startswith("<class 'numpy.int")
        )  # numpy.int64 and similar
        or (isinstance(string, (bytes, str)) and _isconvertible(inttype, string))  # integer as string
    )


def _isbool(string):
    """Test if a string is a boolean.

    >>> _isbool(True)
    True
    >>> _isbool("False")
    True
    >>> _isbool(1)
    False
    """
    return type(string) is bool or (isinstance(string, (bytes, str)) and string in ("True", "False"))


def _type(string, hasInvisible=True, numparse=True):
    r"""The least generic type (type(None), int, float, str, unicode).

    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type("\x1b[31m42\x1b[0m") is type(42)
    True
    >>> _type("\x1b[31m42\x1b[0m") is type(42)
    True
    """
    if hasInvisible and isinstance(string, (str, bytes)):
        # classify by the visible text only
        string = _stripAnsi(string)

    if string is None:
        return type(None)
    elif hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return str
    elif _isbool(string):
        return bool
    elif _isint(string) and numparse:
        return int
    elif _isnumber(string) and numparse:
        return float
    elif isinstance(string, bytes):
        return bytes
    else:
        return str


def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45") 2 >>> _afterpoint("1001") -1 >>> _afterpoint("eggs") -1 >>> _afterpoint("123e45") 2 >>> _afterpoint("123,456.78") 2 """ if _isnumber(string) or _isnumberWithThousandsSeparator(string): if _isint(string): return -1 else: pos = string.rfind(".") pos = string.lower().rfind("e") if pos < 0 else pos if pos >= 0: return len(string) - pos - 1 else: # no point return -1 else: # not a number return -1 def _padleft(width, s): r"""Flush right. >>> _padleft(6, "\u044f\u0439\u0446\u0430") == " \u044f\u0439\u0446\u0430" True """ fmt = "{0:>%ds}" % width return fmt.format(s) def _padright(width, s): r"""Flush left. >>> _padright(6, "\u044f\u0439\u0446\u0430") == "\u044f\u0439\u0446\u0430 " True """ fmt = "{0:<%ds}" % width return fmt.format(s) def _padboth(width, s): r"""Center string. >>> _padboth(6, "\u044f\u0439\u0446\u0430") == " \u044f\u0439\u0446\u0430 " True """ fmt = "{0:^%ds}" % width return fmt.format(s) def _padnone(ignoreWidth, s): return s def _stripAnsi(s): r"""Remove ANSI escape sequences, both CSI and OSC hyperlinks. CSI sequences are simply removed from the output, while OSC hyperlinks are replaced with the link text. Note: it may be desirable to show the URI instead but this is not supported. >>> repr(_stripAnsi("\x1b]8;;https://example.com\x1b\\This is a link\x1b]8;;\x1b\\")) "'This is a link'" >>> repr(_stripAnsi("\x1b[31mred\x1b[0m text")) "'red text'" """ if isinstance(s, str): return _ansiCodes.sub(r"\4", s) else: # a bytestring return _ansiCodesBytes.sub(r"\4", s) def _visibleWidth(s): r"""Visible width of a printed string. 
>>> _visibleWidth("\x1b[31mhello\x1b[0m"), _visibleWidth("world") (5, 5) """ if isinstance(s, (str, bytes)): return len(_stripAnsi(s)) else: return len(str(s)) def _isMultiline(s): if isinstance(s, str): return bool(re.search(_multilineCodes, s)) else: # a bytestring return bool(re.search(_multilineCodesBytes, s)) def _multilineWidth(multilineS, lineWidthFn=len): """Visible width of a potentially multiline content.""" return max(map(lineWidthFn, re.split("[\r\n]", multilineS))) def _chooseWidthFn(hasInvisible, isMultiline): """Return a function to calculate visible cell width.""" if hasInvisible: lineWidthFn = _visibleWidth else: lineWidthFn = len if isMultiline: widthFn = lambda s: _multilineWidth(s, lineWidthFn) else: widthFn = lineWidthFn return widthFn def _alignColumnChoosePadfn(strings, alignment, hasInvisible): if alignment == "right": if not PRESERVE_WHITESPACE: strings = [s.strip() for s in strings] padfn = _padleft elif alignment == "center": if not PRESERVE_WHITESPACE: strings = [s.strip() for s in strings] padfn = _padboth elif alignment == "decimal": if hasInvisible: decimals = [_afterpoint(_stripAnsi(s)) for s in strings] else: decimals = [_afterpoint(s) for s in strings] maxdecimals = max(decimals) strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] padfn = _padleft elif not alignment: padfn = _padnone else: if not PRESERVE_WHITESPACE: strings = [s.strip() for s in strings] padfn = _padright return strings, padfn def _alignColumnChooseWidthFn(hasInvisible, isMultiline): if hasInvisible: lineWidthFn = _visibleWidth else: lineWidthFn = len if isMultiline: widthFn = lambda s: _alignColumnMultilineWidth(s, lineWidthFn) else: widthFn = lineWidthFn return widthFn def _alignColumnMultilineWidth(multilineS, lineWidthFn=len): """Visible width of a potentially multiline content.""" return list(map(lineWidthFn, re.split("[\r\n]", multilineS))) def _flatList(nestedList): ret = [] for item in nestedList: if isinstance(item, list): for 
subitem in item: ret.append(subitem) else: ret.append(item) return ret def _alignColumn(strings, alignment, minwidth=0, hasInvisible=True, isMultiline=False): """[string] -> [padded_string].""" strings, padfn = _alignColumnChoosePadfn(strings, alignment, hasInvisible) widthFn = _alignColumnChooseWidthFn(hasInvisible, isMultiline) sWidths = list(map(widthFn, strings)) maxwidth = max(max(_flatList(sWidths)), minwidth) if isMultiline: if not hasInvisible: paddedStrings = ["\n".join([padfn(maxwidth, s) for s in ms.splitlines()]) for ms in strings] else: # enable wide-character width corrections sLens = [[len(s) for s in re.split("[\r\n]", ms)] for ms in strings] visibleWidths = [[maxwidth - (w - ll) for w, ll in zip(mw, ml)] for mw, ml in zip(sWidths, sLens)] # wcswidth and _visibleWidth don't count invisible characters; # padfn doesn't need to apply another correction paddedStrings = [ "\n".join([padfn(w, s) for s, w in zip((ms.splitlines() or ms), mw)]) for ms, mw in zip(strings, visibleWidths) ] else: # single-line cell values if not hasInvisible: paddedStrings = [padfn(maxwidth, s) for s in strings] else: # enable wide-character width corrections sLens = list(map(len, strings)) visibleWidths = [maxwidth - (w - ll) for w, ll in zip(sWidths, sLens)] # wcswidth and _visibleWidth don't count invisible characters; # padfn doesn't need to apply another correction paddedStrings = [padfn(w, s) for s, w in zip(strings, visibleWidths)] return paddedStrings def _moreGeneric(type1, type2): types = { type(None): 0, bool: 1, int: 2, float: 3, bytes: 4, str: 5, } invtypes = { 5: str, 4: bytes, 3: float, 2: int, 1: bool, 0: type(None), } moregeneric = max(types.get(type1, 5), types.get(type2, 5)) return invtypes[moregeneric] def _columnType(strings, hasInvisible=True, numparse=True): r"""The least generic type all column values are convertible to. 
    >>> _columnType([True, False]) is bool
    True
    >>> _columnType(["1", "2"]) is int
    True
    >>> _columnType(["1", "2.3"]) is float
    True
    >>> _columnType(["1", "2.3", "four"]) is str
    True
    >>> _columnType(["four", "\u043f\u044f\u0442\u044c"]) is str
    True
    >>> _columnType([None, "brux"]) is str
    True
    >>> _columnType([1, 2, None]) is int
    True
    >>> import datetime as dt
    >>> _columnType([dt.datetime(1991, 2, 19), dt.time(17, 35)]) is str
    True
    """
    types = [_type(s, hasInvisible, numparse) for s in strings]
    return reduce(_moreGeneric, types, bool)


def _format(val, valtype, floatFmt, intFmt, missingVal="", hasInvisible=True):
    r"""Format a value according to its type.

    Unicode is supported::

        >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
            tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
            good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
            tabulate(tbl, headers=hrow) == good_result
        True
    """  # noqa
    if val is None:
        return missingVal

    if valtype is str:
        return f"{val}"
    elif valtype is int:
        return format(val, intFmt)
    elif valtype is bytes:
        try:
            return str(val, "ascii")
        except (TypeError, UnicodeDecodeError):
            return str(val)
    elif valtype is float:
        isAColoredNumber = hasInvisible and isinstance(val, (str, bytes))
        if isAColoredNumber:
            # format only the visible digits, then re-wrap them in the original ANSI codes
            rawVal = _stripAnsi(val)
            formattedVal = format(float(rawVal), floatFmt)
            return val.replace(rawVal, formattedVal)
        else:
            return format(float(val), floatFmt)
    else:
        return f"{val}"


def _alignHeader(header, alignment, width, visibleWidth, isMultiline=False, widthFn=None):
    """Pad string header to width chars given known visibleWidth of the header."""
    if isMultiline:
        headerLines = re.split(_multilineCodes, header)
        paddedLines = [_alignHeader(h, alignment, width, widthFn(h)) for h in headerLines]
        return "\n".join(paddedLines)

    # compensate for ANSI codes etc. that take bytes but no display columns
    ninvisible = len(header) - visibleWidth
    width += ninvisible
    if alignment == "left":
        return _padright(width, header)
    elif alignment == "center":
        return _padboth(width, header)
    elif not alignment:
        return f"{header}"
    else:
        return _padleft(width, header)


def _removeSeparatingLines(rows):
    # split rows into (data rows, indices where separator rows were found)
    if type(rows) is list:
        separatingLines = []
        sansRows = []
        for index, row in enumerate(rows):
            if _isSeparatingLine(row):
                separatingLines.append(index)
            else:
                sansRows.append(row)
        return sansRows, separatingLines
    else:
        return rows, None


def _reinsertSeparatingLines(rows, separatingLines):
    # inverse of _removeSeparatingLines: put the separator sentinels back in place
    if separatingLines:
        for index in separatingLines:
            rows.insert(index, SEPARATING_LINE)


def _prependRowIndex(rows, index):
    """Add a left-most index column."""
    if index is None or index is False:
        return rows
    if isinstance(index, Sized) and len(index) != len(rows):
        raise ValueError(
            "index must be as long as the number of data rows: "
            + "len(index)={} len(rows)={}".format(len(index), len(rows))
        )
    sansRows, separatingLines = _removeSeparatingLines(rows)
    newRows = []
    indexIter = iter(index)
    for row in sansRows:
        indexV = next(indexIter)
        newRows.append([indexV] + list(row))
    rows = newRows
    _reinsertSeparatingLines(rows, separatingLines)
    return rows


def _bool(val):
    """A wrapper around standard bool() which doesn't throw on NumPy arrays."""
    try:
        return bool(val)
    except ValueError:
        # val is likely to be a numpy array with many elements
        return False


def _normalizeTabularData(data, headers, showIndex="default"):
    """Transform a supported data type to a list of lists & a list of headers, with header padding.
Supported tabular data types: * list-of-lists or another iterable of iterables * list of named tuples (usually used with headers="keys") * list of dicts (usually used with headers="keys") * list of OrderedDicts (usually used with headers="keys") * list of dataclasses (Python 3.7+ only, usually used with headers="keys") * 2D NumPy arrays * NumPy record arrays (usually used with headers="keys") * dict of iterables (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys". If showIndex="always", show row indices for all types of data. If showIndex="never", don't show row indices for all types of data. If showIndex is an iterable, show its values as row indices. """ try: bool(headers) except ValueError: # numpy.ndarray, ... headers = list(headers) index = None if hasattr(data, "keys"): # dict-like keys = data.keys() # fill out default values, to ensure all data lists are the same length vals = list(data.values()) maxLen = max([len(v) for v in vals], default=0) vals = [[v for v in vv] + [None] * (maxLen - len(vv)) for vv in vals] rows = [tuple(v[i] for v in vals) for i in range(maxLen)] if headers == "keys": # headers should be strings headers = list(map(str, keys)) else: # it's a usual iterable of iterables, or a NumPy array, or an iterable of dataclasses rows = list(data) if headers == "keys" and not rows: # an empty table headers = [] elif headers == "keys" and hasattr(data, "dtype") and getattr(data.dtype, "names"): # numpy record array headers = data.dtype.names elif headers == "keys" and len(rows) > 0 and isinstance(rows[0], tuple) and hasattr(rows[0], "_fields"): # namedtuple headers = list(map(str, rows[0]._fields)) elif len(rows) > 0 and hasattr(rows[0], "keys") and hasattr(rows[0], "values"): # dict-like object uniqKeys = set() # implements hashed lookup keys = [] # storage for set if headers == "firstrow": firstdict = rows[0] if len(rows) > 0 else {} 
keys.extend(firstdict.keys()) uniqKeys.update(keys) rows = rows[1:] for row in rows: for k in row.keys(): # Save unique items in input order if k not in uniqKeys: keys.append(k) uniqKeys.add(k) if headers == "keys": headers = keys elif isinstance(headers, dict): # a dict of headers for a list of dicts headers = [headers.get(k, k) for k in keys] headers = list(map(str, headers)) elif headers == "firstrow": if len(rows) > 0: headers = [firstdict.get(k, k) for k in keys] headers = list(map(str, headers)) else: headers = [] elif headers: raise ValueError("headers for a list of dicts is not a dict or a keyword") rows = [[row.get(k) for k in keys] for row in rows] elif len(rows) > 0 and dataclasses.is_dataclass(rows[0]): # Python 3.7+'s dataclass fieldNames = [field.name for field in dataclasses.fields(rows[0])] if headers == "keys": headers = fieldNames rows = [[getattr(row, f) for f in fieldNames] for row in rows] elif headers == "keys" and len(rows) > 0: # keys are column indices headers = list(map(str, range(len(rows[0])))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: if index is not None: headers = [index[0]] + list(rows[0]) index = index[1:] else: headers = rows[0] headers = list(map(str, headers)) # headers should be strings rows = rows[1:] elif headers == "firstrow": headers = [] headers = list(map(str, headers)) rows = list(map(lambda r: r if _isSeparatingLine(r) else list(r), rows)) # add or remove an index column showIndexIsSStr = type(showIndex) in [str, bytes] if showIndex == "default" and index is not None: rows = _prependRowIndex(rows, index) elif isinstance(showIndex, Sized) and not showIndexIsSStr: rows = _prependRowIndex(rows, list(showIndex)) elif isinstance(showIndex, Iterable) and not showIndexIsSStr: rows = _prependRowIndex(rows, showIndex) elif showIndex == "always" or (_bool(showIndex) and not showIndexIsSStr): if index is None: index = list(range(len(rows))) rows = _prependRowIndex(rows, index) # pad 
with empty headers for initial columns if necessary headersPad = 0 if headers and len(rows) > 0: headersPad = max(0, len(rows[0]) - len(headers)) headers = [""] * headersPad + headers return rows, headers, headersPad def _wrapTextToColWidths(listOfLists, colwidths, numparses=True): if len(listOfLists): numCols = len(listOfLists[0]) else: numCols = 0 numparses = _expandIterable(numparses, numCols, True) result = [] for row in listOfLists: newRow = [] for cell, width, numparse in zip(row, colwidths, numparses): if _isnumber(cell) and numparse: newRow.append(cell) continue if width is not None: wrapper = TextWrapper(width=width) # Cast based on our internal type handling. Any future custom formatting of types # (such as datetimes) may need to be more explicit than just `str` of the object castedCell = str(cell) if _isnumber(cell) else _type(cell, numparse)(cell) wrapped = ["\n".join(wrapper.wrap(line)) for line in castedCell.splitlines() if line.strip() != ""] newRow.append("\n".join(wrapped)) else: newRow.append(cell) result.append(newRow) return result def _toStr(s, encoding="utf8", errors="ignore"): """ A type safe wrapper for converting a bytestring to str. This is essentially just a wrapper around .decode() intended for use with things like map(), but with some specific behavior: 1. if the given parameter is not a bytestring, it is returned unmodified 2. 
       decode() is called for the given parameter and assumes utf8 encoding, but the default
       error behavior is changed from 'strict' to 'ignore'

    >>> repr(_toStr(b"foo"))
    "'foo'"
    >>> repr(_toStr("foo"))
    "'foo'"
    >>> repr(_toStr(42))
    "'42'"
    """
    if isinstance(s, bytes):
        return s.decode(encoding=encoding, errors=errors)
    return str(s)


def tabulate(
    data,
    headers=(),
    tableFmt="simple",
    floatFmt=_DEFAULT_FLOAT_FMT,
    intFmt=_DEFAULT_INT_FMT,
    numAlign=_DEFAULT_ALIGN,
    strAlign=_DEFAULT_ALIGN,
    missingVal=_DEFAULT_MISSING_VAL,
    showIndex="default",
    disableNumParse=False,
    colGlobalAlign=None,
    colAlign=None,
    maxColWidths=None,
    headersGlobalAlign=None,
    headersAlign=None,
    rowAlign=None,
    maxHeaderColWidths=None,
):
    """Format a fixed width table for pretty printing.

    Parameters
    ----------
    data : object
        The tabular data you want to print. This can be a list-of-lists/iterables,
        dict-of-lists/iterables, 2D numpy arrays, or list of dataclasses.
    headers : sequence or str, optional
        Nice column names. If this is "firstrow", the first row of the data will be used. If it
        is "keys", then dictionary keys or column indices are used.
    tableFmt : str, optional
        There are custom table formats defined in this file, and you can choose between them
        with this string: "armi", "simple", "plain", "grid", "github", "pretty", "psql", "rst",
        "tsv".
    floatFmt : str, optional
        A format specification used for columns which contain numeric data with a decimal
        point. This can also be a list or tuple of format strings, one per column.
    intFmt : str, optional
        A format specification used for columns which contain numeric data without a decimal
        point. This can also be a list or tuple of format strings, one per column.
    numAlign : str, optional
        Specially align numbers, options: "right", "center", "left", "decimal".
    strAlign : str, optional
        Specially align strings, options: "right", "center", "left".
    missingVal : str, optional
        `None` values are replaced with a `missingVal` string.
    showIndex : str, optional
        Show these rows of data. If "always", show row indices for all types of data. If
        "never", don't show row indices for all types of data. If showIndex is an iterable,
        show its values.
    disableNumParse : bool, optional
        To disable number parsing (and alignment), use `disableNumParse=True`. For more fine
        grained control, `[0, 2]` would disable number parsing on the first and third columns.
    colGlobalAlign : str, optional
        Allows for global alignment of columns, before any specific override from `colAlign`.
        Possible values are: None, "right", "center", "decimal", "left".
    colAlign : str, optional
        Allows for column-wise override starting from left-most column. Possible values are:
        "global" (no override), "right", "center", "decimal", "left".
    maxColWidths : list, optional
        A list of the maximum column widths.
    headersGlobalAlign : str, optional
        Allows for global headers alignment, before any specific override from `headersAlign`.
        Possible values are: None (follow columns alignment), "right", "center", "left".
    headersAlign : str, optional
        Allows for header-wise override starting from left-most given header. Possible values
        are: "global" (no override), "same" (follow column alignment), "right", "center",
        "left".
    rowAlign : str, optional
        How do you want to align rows: "right", "center", "decimal", "left".
    maxHeaderColWidths : list, optional
        List of column widths for the header.

    Returns
    -------
    str
        A text representation of the tabular data.
    """
    if data is None:
        data = []

    listOfLists, headers, headersPad = _normalizeTabularData(data, headers, showIndex=showIndex)
    listOfLists, separatingLines = _removeSeparatingLines(listOfLists)

    if maxColWidths is not None:
        if len(listOfLists):
            numCols = len(listOfLists[0])
        else:
            numCols = 0

        if isinstance(maxColWidths, int):
            # Expand scalar for all columns
            maxColWidths = _expandIterable(maxColWidths, numCols, maxColWidths)
        else:
            # Ignore col width for any 'trailing' columns
            maxColWidths = _expandIterable(maxColWidths, numCols, None)

        numparses = _expandNumparse(disableNumParse, numCols)
        listOfLists = _wrapTextToColWidths(listOfLists, maxColWidths, numparses=numparses)

    if maxHeaderColWidths is not None:
        numCols = len(listOfLists[0])

        if isinstance(maxHeaderColWidths, int):
            # Expand scalar for all columns
            maxHeaderColWidths = _expandIterable(maxHeaderColWidths, numCols, maxHeaderColWidths)
        else:
            # Ignore col width for any 'trailing' columns
            maxHeaderColWidths = _expandIterable(maxHeaderColWidths, numCols, None)

        numparses = _expandNumparse(disableNumParse, numCols)
        headers = _wrapTextToColWidths([headers], maxHeaderColWidths, numparses=numparses)[0]

    # empty values in the first column of RST tables should be escaped
    # "" should be escaped as "\\ " or ".."
    if tableFmt == "rst":
        listOfLists, headers = _rstEscapeFirstColumn(listOfLists, headers)

    # Pretty table formatting does not use any extra padding. Numbers are not parsed and are
    # treated the same as strings for alignment. Check if pretty is the format being used and
    # override the defaults so it does not impact other formats.
    minPadding = MIN_PADDING
    if tableFmt == "pretty":
        minPadding = 0
        disableNumParse = True
        numAlign = "center" if numAlign == _DEFAULT_ALIGN else numAlign
        strAlign = "center" if strAlign == _DEFAULT_ALIGN else strAlign
    else:
        numAlign = "decimal" if numAlign == _DEFAULT_ALIGN else numAlign
        strAlign = "left" if strAlign == _DEFAULT_ALIGN else strAlign

    # optimization: look for ANSI control codes once, enable smart width functions only if a
    # control code is found
    #
    # convert the headers and rows into a single, tab-delimited string ensuring that any
    # bytestrings are decoded safely (i.e. errors ignored)
    plainText = "\t".join(
        chain(
            # headers
            map(_toStr, headers),
            # rows: chain the rows together into a single iterable after mapping the bytestring
            # conversion to each cell value
            chain.from_iterable(map(_toStr, row) for row in listOfLists),
        )
    )
    hasInvisible = _ansiCodes.search(plainText) is not None

    if not isinstance(tableFmt, TableFormat) and tableFmt in multilineFormats and _isMultiline(plainText):
        tableFmt = multilineFormats.get(tableFmt, tableFmt)
        isMultiline = True
    else:
        isMultiline = False
    widthFn = _chooseWidthFn(hasInvisible, isMultiline)

    # format rows and columns, convert numeric values to strings
    cols = list(zip_longest(*listOfLists))
    numparses = _expandNumparse(disableNumParse, len(cols))
    coltypes = [_columnType(col, numparse=np) for col, np in zip(cols, numparses)]
    if isinstance(floatFmt, str):
        # old version: just duplicate the string to use in each column
        floatFormats = len(cols) * [floatFmt]
    else:
        # if floatFmt is list, tuple etc we have one per column
        floatFormats = list(floatFmt)
        if len(floatFormats) < len(cols):
            floatFormats.extend((len(cols) - len(floatFormats)) * [_DEFAULT_FLOAT_FMT])
    if isinstance(intFmt, str):
        # old version: just duplicate the string to use in each column
        intFormats = len(cols) * [intFmt]
    else:
        # if intFmt is list, tuple etc we have one per column
        intFormats = list(intFmt)
        if len(intFormats) < len(cols):
            intFormats.extend((len(cols) - len(intFormats)) * [_DEFAULT_INT_FMT])
    if isinstance(missingVal, str):
        missingVals = len(cols) * [missingVal]
    else:
        missingVals = list(missingVal)
        if len(missingVals) < len(cols):
            missingVals.extend((len(cols) - len(missingVals)) * [_DEFAULT_MISSING_VAL])
    cols = [
        [_format(v, ct, flFmt, intFmt, missV, hasInvisible) for v in c]
        for c, ct, flFmt, intFmt, missV in zip(cols, coltypes, floatFormats, intFormats, missingVals)
    ]

    # align columns
    # first set global alignment
    if colGlobalAlign is not None:
        # if global alignment provided
        aligns = [colGlobalAlign] * len(cols)
    else:
        # default
        aligns = [numAlign if ct in [int, float] else strAlign for ct in coltypes]

    # then specific alignments
    if colAlign is not None:
        assert isinstance(colAlign, Iterable)
        if isinstance(colAlign, str):
            runLog.warning(
                f"As a string, `colAlign` is interpreted as {[c for c in colAlign]}. Did you "
                + f'mean `colGlobalAlign = "{colAlign}"` or `colAlign = ("{colAlign}",)`?'
            )
        for idx, align in enumerate(colAlign):
            if not idx < len(aligns):
                break
            elif align != "global":
                aligns[idx] = align
    minwidths = [widthFn(h) + minPadding for h in headers] if headers else [0] * len(cols)
    cols = [_alignColumn(c, a, minw, hasInvisible, isMultiline) for c, a, minw in zip(cols, aligns, minwidths)]

    alignsHeaders = None
    if headers:
        # align headers and add headers
        tCols = cols or [[""]] * len(headers)
        # first set global alignment
        if headersGlobalAlign is not None:
            # if global alignment provided
            alignsHeaders = [headersGlobalAlign] * len(tCols)
        else:
            # default
            alignsHeaders = aligns or [strAlign] * len(headers)

        # then specific header alignments
        if headersAlign is not None:
            assert isinstance(headersAlign, Iterable)
            if isinstance(headersAlign, str):
                runLog.warning(
                    f"As a string, `headersAlign` is interpreted as {[c for c in headersAlign]}. "
                    + f'Did you mean `headersGlobalAlign = "{headersAlign}"` or `headersAlign = '
                    + f'("{headersAlign}",)`?'
                )
            for idx, align in enumerate(headersAlign):
                hidx = headersPad + idx
                if not hidx < len(alignsHeaders):
                    break
                elif align == "same" and hidx < len(aligns):
                    # same as column align
                    alignsHeaders[hidx] = aligns[hidx]
                elif align != "global":
                    alignsHeaders[hidx] = align

        minwidths = [max(minw, max(widthFn(cl) for cl in c)) for minw, c in zip(minwidths, tCols)]
        headers = [
            _alignHeader(h, a, minw, widthFn(h), isMultiline, widthFn)
            for h, a, minw in zip(headers, alignsHeaders, minwidths)
        ]
        rows = list(zip(*cols))
    else:
        minwidths = [max(widthFn(cl) for cl in c) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tableFmt, TableFormat):
        tableFmt = _tableFormats.get(tableFmt, _tableFormats["simple"])

    raDefault = rowAlign if isinstance(rowAlign, str) else None
    rowAligns = _expandIterable(rowAlign, len(rows), raDefault)
    _reinsertSeparatingLines(rows, separatingLines)

    return _formatTable(
        tableFmt,
        headers,
        alignsHeaders,
        rows,
        minwidths,
        aligns,
        isMultiline,
        rowAligns=rowAligns,
    )


def _expandNumparse(disableNumParse, columnCount):
    """
    Return a list of bools of length `columnCount` which indicates whether number parsing should
    be used on each column.

    If `disableNumParse` is a list of indices, each of those indices are False, and everything
    else is True. If `disableNumParse` is a bool, then the returned list is all the same.
    """
    if isinstance(disableNumParse, Iterable):
        numparses = [True] * columnCount
        for index in disableNumParse:
            numparses[index] = False
        return numparses
    else:
        return [not disableNumParse] * columnCount


def _expandIterable(original, numDesired, default):
    """
    Expands the `original` argument to return a list of length `numDesired`.

    If `original` is shorter than `numDesired`, it will be padded with the value in `default`.
    If `original` is not a list to begin with (i.e. scalar value) a list of length `numDesired`
    completely populated with `default` will be returned
    """
    if isinstance(original, Iterable) and not isinstance(original, str):
        return original + [default] * (numDesired - len(original))
    else:
        return [default] * numDesired


def _padRow(cells, padding):
    # surround every cell with `padding` spaces on each side; empty rows pass through
    if cells:
        pad = " " * padding
        paddedCells = [pad + cell + pad for cell in cells]
        return paddedCells
    else:
        return cells


def _buildSimpleRow(paddedCells, rowfmt):
    """Format row according to DataRow format without padding."""
    begin, sep, end = rowfmt
    return (begin + sep.join(paddedCells) + end).rstrip()


def _buildRow(paddedCells, colwidths, colAligns, rowfmt):
    """Return a string which represents a row of data cells."""
    if not rowfmt:
        return None
    if hasattr(rowfmt, "__call__"):
        # rowfmt may be a callable instead of a (begin, sep, end) triple
        return rowfmt(paddedCells, colwidths, colAligns)
    else:
        return _buildSimpleRow(paddedCells, rowfmt)


def _appendBasicRow(lines, paddedCells, colwidths, colAligns, rowfmt, rowAlign=None):
    # NOTE: rowAlign is ignored and exists for api compatibility with _appendMultilineRow
    lines.append(_buildRow(paddedCells, colwidths, colAligns, rowfmt))
    return lines


def _alignCellVeritically(textLines, numLines, columnWidth, rowAlignment):
    # pad a cell's text lines with blank lines so it is numLines tall, placing the text
    # at the bottom, center, or (default) top of the cell
    deltaLines = numLines - len(textLines)
    blank = [" " * columnWidth]
    if rowAlignment == "bottom":
        return blank * deltaLines + textLines
    elif rowAlignment == "center":
        topDelta = deltaLines // 2
        bottomDelta = deltaLines - topDelta
        return topDelta * blank + textLines + bottomDelta * blank
    else:
        return textLines + blank * deltaLines


def _appendMultilineRow(lines, paddedMultilineCells, paddedWidths, colAligns, rowfmt, pad, rowAlign=None):
    """Append one logical row whose cells may span several text lines."""
    colwidths = [w - 2 * pad for w in paddedWidths]
    cellsLines = [c.splitlines() for c in paddedMultilineCells]
    nlines = max(map(len, cellsLines))  # number of lines in the row
    cellsLines = [_alignCellVeritically(cl, nlines, w, rowAlign) for cl, w in zip(cellsLines, colwidths)]
    linesCells = [[cl[i] for cl in cellsLines] for i in range(nlines)]
    for ln in linesCells:
        paddedLn = _padRow(ln, pad)
        _appendBasicRow(lines, paddedLn, colwidths, colAligns, rowfmt)
    return lines


def _buildLine(colwidths, colAligns, linefmt):
    """Return a string which represents a horizontal line."""
    if not linefmt:
        return None
    if hasattr(linefmt, "__call__"):
        return linefmt(colwidths, colAligns)
    else:
        begin, fill, sep, end = linefmt
        cells = [fill * w for w in colwidths]
        return _buildSimpleRow(cells, (begin, sep, end))


def _appendLine(lines, colwidths, colAligns, linefmt):
    lines.append(_buildLine(colwidths, colAligns, linefmt))
    return lines


def _formatTable(fmt, headers, headersAligns, rows, colwidths, colAligns, isMultiline, rowAligns):
    """Produce a plain-text representation of the table."""
    lines = []
    hidden = fmt.withHeaderHide if (headers and fmt.withHeaderHide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow

    paddedWidths = [(w + 2 * pad) for w in colwidths]
    if isMultiline:
        # multiline cells are padded line-by-line inside _appendMultilineRow
        padRow = lambda row, _: row
        appendRow = partial(_appendMultilineRow, pad=pad)
    else:
        padRow = _padRow
        appendRow = _appendBasicRow

    paddedHeaders = padRow(headers, pad)
    paddedRows = [padRow(row, pad) for row in rows]

    if fmt.lineabove and "lineabove" not in hidden:
        _appendLine(lines, paddedWidths, colAligns, fmt.lineabove)

    if paddedHeaders:
        appendRow(lines, paddedHeaders, paddedWidths, headersAligns, headerrow)
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            _appendLine(lines, paddedWidths, colAligns, fmt.linebelowheader)

    if paddedRows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row, ralign in zip(paddedRows[:-1], rowAligns):
            appendRow(lines, row, paddedWidths, colAligns, fmt.datarow, rowAlign=ralign)
            _appendLine(lines, paddedWidths, colAligns, fmt.linebetweenrows)
        # the last row without a line below
        appendRow(
            lines,
            paddedRows[-1],
            paddedWidths,
            colAligns,
            fmt.datarow,
            rowAlign=rowAligns[-1],
        )
    else:
        # fall back to the first available line style for SEPARATING_LINE rows
        separatingLine = (
            fmt.linebetweenrows
            or fmt.linebelowheader
            or fmt.linebelow
            or fmt.lineabove
            or Line("", "", "", "")
        )
        for row in paddedRows:
            # test to see if either the 1st column or the 2nd column has the SEPARATING_LINE flag
            if _isSeparatingLine(row):
                _appendLine(lines, paddedWidths, colAligns, separatingLine)
            else:
                appendRow(lines, row, paddedWidths, colAligns, fmt.datarow)

    if fmt.linebelow and "linebelow" not in hidden:
        _appendLine(lines, paddedWidths, colAligns, fmt.linebelow)

    if headers or rows:
        return "\n".join(lines)
    else:
        return ""


================================================
FILE: armi/utils/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================ FILE: armi/utils/tests/resources/lower/includeA.yaml ================================================ full_name: Jennifer Person # some comment in includeA children: !include includeB.yaml ================================================ FILE: armi/utils/tests/resources/lower/includeB.yaml ================================================ - full_name: Elizabeth Person - full_name: Catharine Person ================================================ FILE: armi/utils/tests/resources/root.yaml ================================================ # Behold, the Person family bobby: &bobby full_name: Robert Person billy: full_name: William Person # comment children: - *bobby - !include lower/includeA.yaml ================================================ FILE: armi/utils/tests/test_asciimaps.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test ASCII maps.""" import io import unittest from armi.utils import asciimaps CARTESIAN_MAP = """2 2 2 2 2 2 2 2 2 2 2 1 1 1 2 2 1 3 1 2 2 3 1 1 2 2 2 2 2 2 """ HEX_THIRD_MAP = """- - 3 3 - 3 3 3 3 2 2 3 3 2 2 2 3 2 1 1 2 3 1 1 1 2 3 1 1 1 1 2 3 1 1 1 1 2 3 1 1 1 1 2 1 1 1 1 1 3 1 1 1 1 2 3 1 1 1 1 2 1 1 1 1 1 3 1 1 1 1 2 1 1 1 1 3 1 1 1 1 2 1 1 1 1 3 1 1 1 1 2 """ # This core map is from refTestBase, and exhibited some issues when trying to read with # an older implementation of the 1/3 hex lattice reader. 
HEX_THIRD_MAP_2 = """- - SH SH - SH SH SH SH OC OC SH SH OC OC OC SH OC EX EX OC SH EX EX EX OC SH EX MC MC EX OC SH MC HX MC EX OC SH MC MC PC EX OC MC IC MC MC EX SH IC IC MC MC OC SH PC IC MC EX OC FA FA IC TG EX SH IC FA IC MC OC IC US MC EX SH EX IC IC MC OC EX FA MC EX SH EX IC IC PC OC """ HEX_THIRD_MAP_WITH_HOLES = """- - SH SH - SH SH SH SH OC OC SH SH OC OC OC SH OC EX EX OC SH EX EX EX OC SH EX MC MC EX OC SH MC HX MC EX OC SH MC - PC EX OC MC IC MC MC EX SH IC IC MC MC OC SH PC IC MC EX OC FA FA IC TG EX SH IC FA IC - OC - US MC EX SH EX IC IC MC OC EX FA MC EX SH EX IC IC PC OC """ HEX_THIRD_MAP_WITH_EMPTY_ROW = """- - SH SH - SH SH SH SH OC OC SH SH OC OC OC SH OC EX EX OC SH EX EX EX OC SH EX MC MC EX OC SH MC HX MC EX OC SH MC - PC EX OC MC IC MC MC EX SH IC IC MC MC OC SH - - - - - FA FA IC TG EX SH IC FA IC - OC - US MC EX SH EX IC IC MC OC EX FA MC EX SH EX IC IC PC OC """ # This is a "corners-up" hexagonal map. HEX_FULL_MAP = """- - - - - - - - - 1 1 1 1 1 1 1 1 1 4 - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1 - - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1 - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 3 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 6 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 """ # This is a "flats-up" hexagonal map. 
HEX_FULL_MAP_FLAT = """- - - - ORS ORS ORS - - - ORS ORS ORS ORS - - - ORS IRS IRS IRS ORS - - ORS IRS IRS IRS IRS ORS - - ORS IRS RR89 RR89 RR89 IRS ORS - ORS IRS RR89 RR89 RR89 RR89 IRS ORS - ORS IRS RR89 RR89 RR7 RR89 RR89 IRS ORS - IRS RR89 RR89 RR7 RR7 RR89 RR89 IRS - ORS RR89 RR89 RR7 OC RR7 RR89 RR89 ORS ORS IRS RR89 RR7 OC OC RR7 RR89 IRS ORS - IRS RR89 RR7 OC OC FS RR7 RR89 IRS ORS RR89 RR7 OC OC OC OC RR7 RR89 ORS ORS IRS RR7 OC OC IC OC OC RR7 IRS ORS IRS RR89 OC SC ICS IC SC OC RR89 IRS ORS RR89 RR7 OC IC IC IC OC RR7 RR89 ORS IRS RR89 OC IC IC IC IC OC RR89 IRS ORS RR89 RR7 SC PC ICS PC SC RR7 RR89 ORS IRS RR89 OC IC IC IC IC OC RR89 IRS ORS RR89 RR7 OC IC IC IC OC RR7 RR89 ORS IRS RR89 VOTA ICS IC IRT ICS OC RR89 IRS ORS RR89 RR7 OC IC IC IC OC RR7 RR89 ORS IRS RR89 OC IC IC IC IC OC RR89 IRS ORS RR89 FS OC ICS PC ICS OC RR7 RR89 ORS IRS RR89 OC OC IC IC OC OC RR89 IRS ORS IRS RR7 OC OC IC OC OC RR7 IRS ORS ORS RR89 RR7 OC SC SC OC FS RR89 ORS - IRS RR89 RR7 OC OC OC RR7 RR89 IRS ORS IRS RR89 RR7 OC OC RR7 RR89 IRS ORS - ORS RR89 RR89 RR7 OC RR7 RR89 RR89 ORS - IRS RR89 RR89 RR7 RR7 RR89 RR89 IRS ORS IRS RR89 RR89 RR7 RR89 RR89 IRS ORS ORS IRS RR89 RR89 RR89 RR89 IRS ORS ORS IRS RR89 RR89 RR89 IRS ORS ORS IRS IRS IRS IRS ORS ORS IRS IRS IRS ORS ORS ORS ORS ORS ORS ORS ORS """ HEX_FULL_MAP_SMALL = """F F F F F F F """ class TestAsciiMaps(unittest.TestCase): """Test ascii maps.""" def test_cartesian(self): """Make sure we can read Cartesian maps.""" asciimap = asciimaps.AsciiMapCartesian() with io.StringIO() as stream: stream.write(CARTESIAN_MAP) stream.seek(0) asciimap.readAscii(stream.read()) self.assertEqual(asciimap[0, 0], "2") self.assertEqual(asciimap[1, 1], "3") self.assertEqual(asciimap[2, 2], "3") self.assertEqual(asciimap[3, 3], "1") with self.assertRaises(KeyError): asciimap[5, 2] outMap = asciimaps.AsciiMapCartesian() outMap.asciiLabelByIndices = asciimap.asciiLabelByIndices outMap.gridContentsToAscii() with io.StringIO() as stream: 
outMap.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, CARTESIAN_MAP) def test_hexThird(self): """Read 1/3 core flats-up maps.""" asciimap = asciimaps.AsciiMapHexThirdFlatsUp() with io.StringIO() as stream: stream.write(HEX_THIRD_MAP) stream.seek(0) asciimap.readAscii(stream.read()) with io.StringIO() as stream: asciimap.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_THIRD_MAP) self.assertEqual(asciimap[7, 0], "2") self.assertEqual(asciimap[8, 0], "3") self.assertEqual(asciimap[8, -4], "2") self.assertEqual(asciimap[0, 8], "3") self.assertEqual(asciimap[0, 0], "1") with self.assertRaises(KeyError): asciimap[10, 0] def test_hexWithHoles(self): """Read 1/3 core flats-up maps with holes.""" asciimap = asciimaps.AsciiMapHexThirdFlatsUp() with io.StringIO() as stream: stream.write(HEX_THIRD_MAP_WITH_HOLES) stream.seek(0) asciimap.readAscii(stream.read()) with io.StringIO() as stream: asciimap.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_THIRD_MAP_WITH_HOLES) self.assertEqual(asciimap[1, 1], asciimaps.PLACEHOLDER) self.assertEqual(asciimap[5, 0], "TG") with self.assertRaises(KeyError): asciimap[10, 0] # also test writing from pure data (vs. 
reading) gives the exact same map :o with io.StringIO() as stream: asciimap2 = asciimaps.AsciiMapHexThirdFlatsUp() asciimap2.asciiLabelByIndices = asciimap.asciiLabelByIndices asciimap2.gridContentsToAscii() asciimap2.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_THIRD_MAP_WITH_HOLES) def test_hexWithEmptyRow(self): """Read 1/3 core flats-up maps with one entirely empty row.""" asciimap = asciimaps.AsciiMapHexThirdFlatsUp() with io.StringIO() as stream: stream.write(HEX_THIRD_MAP_WITH_EMPTY_ROW) stream.seek(0) asciimap.readAscii(stream.read()) with io.StringIO() as stream: asciimap.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_THIRD_MAP_WITH_EMPTY_ROW) self.assertEqual(asciimap[1, 1], asciimaps.PLACEHOLDER) self.assertEqual(asciimap[6, 0], asciimaps.PLACEHOLDER) self.assertEqual(asciimap[5, 0], "TG") with self.assertRaises(KeyError): asciimap[10, 0] def test_troublesomeHexThird(self): asciimap = asciimaps.AsciiMapHexThirdFlatsUp() with io.StringIO() as stream: stream.write(HEX_THIRD_MAP_2) stream.seek(0) asciimap.readAscii(stream.read()) with io.StringIO() as stream: asciimap.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_THIRD_MAP_2) self.assertEqual(asciimap[5, 0], "TG") def test_hexFullCornersUpSpotCheck(self): """Spot check some hex grid coordinates are what they should be.""" # The corners and a central line of non-zero values. 
corners_map = """- - - - - - - - - 3 0 0 0 0 0 0 0 0 2 - - - - - - - - 0 0 0 0 0 0 0 0 0 0 0 - - - - - - - 0 0 0 0 0 0 0 0 0 0 0 0 - - - - - - 0 0 0 0 0 0 0 0 0 0 0 0 0 - - - - - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - - - - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - - - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 0 0 0 0 0 0 0 0 0 1 2 3 4 5 6 7 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 0 0 0 0 0 0 0 0 6 """ # hex map is 19 rows tall: from -9 to 9 asciimap = asciimaps.AsciiMapHexFullTipsUp() asciimap.readAscii(corners_map) # verify the corners self.assertEqual(asciimap[9, -9], "1") self.assertEqual(asciimap[9, 0], "2") self.assertEqual(asciimap[0, 9], "3") self.assertEqual(asciimap[-9, 9], "4") self.assertEqual(asciimap[-9, 0], "5") self.assertEqual(asciimap[0, -9], "6") # verify a line of coordinates self.assertEqual(asciimap[0, 0], "0") self.assertEqual(asciimap[1, -1], "1") self.assertEqual(asciimap[2, -2], "2") self.assertEqual(asciimap[3, -3], "3") self.assertEqual(asciimap[4, -4], "4") self.assertEqual(asciimap[5, -5], "5") self.assertEqual(asciimap[6, -6], "6") self.assertEqual(asciimap[7, -7], "7") def test_hexFullCornersUp(self): """Test sample full hex map (with hex corners up) against known answers.""" # hex map is 19 rows tall: from -9 to 9 asciimap = asciimaps.AsciiMapHexFullTipsUp() asciimap.readAscii(HEX_FULL_MAP) # spot check some values in the map self.assertIn("7 1 1 1 1 1 1 1 1 0", str(asciimap)) self.assertEqual(asciimap[-9, 9], "7") self.assertEqual(asciimap[-8, 0], "6") self.assertEqual(asciimap[-1, 0], "2") self.assertEqual(asciimap[-1, 8], "8") self.assertEqual(asciimap[0, -6], "3") self.assertEqual(asciimap[0, 0], "0") self.assertEqual(asciimap[9, 0], "4") # also test writing from pure data (vs. 
reading) gives the exact same map asciimap2 = asciimaps.AsciiMapHexFullTipsUp() for ij, spec in asciimap.items(): asciimap2.asciiLabelByIndices[ij] = spec with io.StringIO() as stream: asciimap2.gridContentsToAscii() asciimap2.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_FULL_MAP) self.assertIn("7 1 1 1 1 1 1 1 1 0", str(asciimap)) self.assertIn("7 1 1 1 1 1 1 1 1 0", str(asciimap2)) def test_hexFullFlatsUp(self): """Test sample full hex map (with hex flats up) against known answers.""" # hex map is 21 rows tall: from -10 to 10 asciimap = asciimaps.AsciiMapHexFullFlatsUp() asciimap.readAscii(HEX_FULL_MAP_FLAT) # spot check some values in the map self.assertIn("VOTA ICS IC IRT ICS OC", str(asciimap)) self.assertEqual(asciimap[-3, 10], "ORS") self.assertEqual(asciimap[0, -9], "ORS") self.assertEqual(asciimap[0, 0], "IC") self.assertEqual(asciimap[0, 9], "ORS") self.assertEqual(asciimap[4, -6], "RR7") self.assertEqual(asciimap[6, 0], "RR7") self.assertEqual(asciimap[7, -1], "RR89") # also test writing from pure data (vs. 
reading) gives the exact same map asciimap2 = asciimaps.AsciiMapHexFullFlatsUp() for ij, spec in asciimap.items(): asciimap2.asciiLabelByIndices[ij] = spec with io.StringIO() as stream: asciimap2.gridContentsToAscii() asciimap2.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_FULL_MAP_FLAT) self.assertIn("VOTA ICS IC IRT ICS OC", str(asciimap)) self.assertIn("VOTA ICS IC IRT ICS OC", str(asciimap2)) def test_hexFullFlat(self): """Test sample full hex map against known answers.""" # hex map is 19 rows tall, so it should go from -9 to 9 asciimap = asciimaps.AsciiMapHexFullFlatsUp() with io.StringIO() as stream: stream.write(HEX_FULL_MAP_FLAT) stream.seek(0) asciimap.readAscii(stream.read()) with io.StringIO() as stream: asciimap.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_FULL_MAP_FLAT) self.assertEqual(asciimap[0, 0], "IC") self.assertEqual(asciimap[-5, 2], "VOTA") self.assertEqual(asciimap[2, 3], "FS") # also test writing from pure data (vs. 
reading) gives the exact same map with io.StringIO() as stream: asciimap2 = asciimaps.AsciiMapHexFullFlatsUp() asciimap2.asciiLabelByIndices = asciimap.asciiLabelByIndices asciimap2.gridContentsToAscii() asciimap2.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_FULL_MAP_FLAT) def test_hexSmallFlat(self): asciimap = asciimaps.AsciiMapHexFullFlatsUp() with io.StringIO() as stream: stream.write(HEX_FULL_MAP_SMALL) stream.seek(0) asciimap.readAscii(stream.read()) with io.StringIO() as stream: asciimap.writeAscii(stream) stream.seek(0) output = stream.read() self.assertEqual(output, HEX_FULL_MAP_SMALL) def test_flatHexBases(self): """For the full core with 2 lines chopped, get the first 3 bases.""" asciimap = asciimaps.AsciiMapHexFullFlatsUp() with io.StringIO() as stream: stream.write(HEX_FULL_MAP_FLAT) stream.seek(0) asciimap.readAscii(stream.read()) bases = [] for li in range(3): bases.append(asciimap._getIJBaseByAsciiLine(li)) self.assertEqual(bases, [(-2, -8), (-3, -7), (-4, -6)]) # chopped ================================================ FILE: armi/utils/tests/test_codeTiming.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit tests for code timing.""" import time import unittest from armi.utils import codeTiming class CodeTimingTest(unittest.TestCase): def setUp(self): codeTiming._Timer._frozen = False codeTiming.MasterTimer._instance = None def tearDown(self): codeTiming._Timer._frozen = False codeTiming.MasterTimer._instance = None def test_methodDefinitions(self): """Test that the timer decorators work and don't interupt the code.""" @codeTiming.timed def someMethod(boop): time.sleep(0.01) return boop @codeTiming.timed("I have this name") def someOtherMethod(boop): time.sleep(0.01) return boop # verify the decorator allows the code to run x = someMethod("dingdong") y = someOtherMethod("bingbong") self.assertEqual(x, "dingdong") self.assertEqual(y, "bingbong") # verify the decorators work table = codeTiming.MasterTimer.report(inclusionCutoff=0.01, totalTime=True) self.assertIn(" AVERAGE ", table) self.assertIn(" CUMULATIVE ", table) self.assertIn(" NUM ITERS", table) self.assertIn("TIMER REPORTS ", table) self.assertIn("TOTAL TIME ", table) self.assertIn("someMethod", table) self.assertIn("I have this name", table) def test_countStartsStops(self): """Test the start and stop counting logic.""" # test the start() and stop() methods, and their side effects master = codeTiming.MasterTimer.getMasterTimer() timer = master.startTimer("bananananana") t0 = timer.stop() self.assertEqual(timer.overStart, 0) # run start a few times in a row, to trip the overstart for i in range(5): time.sleep(0.01) t1 = timer.start() self.assertGreater(t1, t0) t0 = t1 self.assertEqual(timer.overStart, i) # run stop a few times in a row, which is allowed for race conditions for i in range(5): time.sleep(0.01) t2 = timer.stop() self.assertGreater(t2, t1) t1 = t2 self.assertEqual(timer.overStart, 3 - i if 3 - i > 0 else 0) # start will always work from a stopped state time.sleep(0.01) t6 = timer.start() self.assertGreater(t6, t2) self.assertEqual(timer.overStart, 0) # start a second timer to show two can run 
at once time.sleep(0.01) timer2 = master.endTimer("wazzlewazllewazzzle") t7 = timer2.start() self.assertGreater(t7, t6) self.assertEqual(timer2.overStart, 0) # use the timers as context managers with timer2: with timer: pass # There should be one start/stop each, leaving the over start count the same self.assertEqual(timer.overStart, 0) self.assertEqual(timer2.overStart, 0) def test_propertyAccess(self): """Test property access is okay.""" master = codeTiming.MasterTimer.getMasterTimer() timer = master.startTimer("sometimer") t0 = timer.time time.sleep(0.01) self.assertGreaterEqual(t0, 0) ts = timer.times self.assertEqual(len(ts), 1) self.assertEqual(len(ts[0]), 2) self.assertGreaterEqual(ts[0][0], 0) self.assertGreaterEqual(ts[0][1], 0) tName = timer.name self.assertEqual(tName, "sometimer") tActive = timer.isActive self.assertTrue(tActive) def test_master(self): master = codeTiming.MasterTimer.getMasterTimer() _ = master.time master.startAll() actives = master.getActiveTimers() self.assertEqual(list(master.timers.values()), actives) master.stopAll() actives = master.getActiveTimers() self.assertEqual([], actives) with self.assertRaises(RuntimeError): codeTiming.MasterTimer() def test_messyStartsAndStops(self): master = codeTiming.MasterTimer.getMasterTimer() name = "sometimerthatihaventmadeyet" larger_time_start = master.time() time.sleep(0.01) timer = master.getTimer(name) time.sleep(0.01) lesser_time_start = master.time() timer.start() # 1st time pair timer.start() # 2nd time pair timer.start() # 3rd time pair timer.stop() self.assertIn(name, str(timer)) self.assertTrue(timer.isActive) timer.stop() timer.stop() self.assertFalse(timer.isActive) timer.stop() timer.stop() timer.start() # 4th time pair self.assertTrue(timer.isActive) lesser_time_end = master.time() time.sleep(0.01) timer.stop() self.assertIn(name, str(timer)) self.assertEqual(len(timer.times), 4) time.sleep(0.01) larger_time_end = master.time() # even with all the starts and stops the total time 
needs to be between these two values. self.assertGreater(timer.time, lesser_time_end - lesser_time_start) self.assertLess(timer.time, larger_time_end - larger_time_start) self.assertEqual(timer.numIterations, 3) def test_report(self): master = codeTiming.MasterTimer.getMasterTimer() name1 = "test_report1" timer1 = master.getTimer(name1) timer1.start() time.sleep(0.01) timer1.stop() name2 = "test_report2" timer2 = master.getTimer(name2) timer2.start() time.sleep(0.01) timer2.stop() # basic validation of the reports table = codeTiming.MasterTimer.report(inclusionCutoff=0.01, totalTime=True) self.assertIn(" AVERAGE ", table) self.assertIn(" CUMULATIVE ", table) self.assertIn(" NUM ITERS", table) self.assertIn("TIMER REPORTS ", table) self.assertIn(name1, table) self.assertIn(name2, table) lines = table.strip().split("\n") self.assertEqual(len(lines), 4) self.assertEqual(len(lines[1].strip().split()), 4) self.assertEqual(len(lines[2].strip().split()), 4) ================================================ FILE: armi/utils/tests/test_custom_exceptions.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Basic tests of the custom exceptions.""" import unittest from armi.tests import mockRunLogs from armi.utils.customExceptions import important, info, warn, warn_when_root class CustomExceptionTests(unittest.TestCase): @info def exampleInfoMessage(self): return "output message" def test_info_decorator(self): with mockRunLogs.BufferLog() as mock: self.assertEqual("", mock.getStdout()) for ii in range(1, 3): self.exampleInfoMessage() self.assertEqual("[info] output message\n" * ii, mock.getStdout()) @important def exampleImportantMessage(self): return "important message?" def test_important_decorator(self): with mockRunLogs.BufferLog() as mock: self.assertEqual("", mock.getStdout()) for ii in range(1, 3): self.exampleImportantMessage() self.assertEqual("[impt] important message?\n" * ii, mock.getStdout()) @warn def exampleWarnMessage(self): return "you're not tall enough to ride this elephant".format() def test_warn_decorator(self): with mockRunLogs.BufferLog() as mock: for ii in range(1, 4): self.exampleWarnMessage() self.assertEqual( "[warn] you're not tall enough to ride this elephant\n" * ii, mock.getStdout(), ) @warn_when_root def exampleWarnWhenRootMessage(self): return "warning from root".format() def test_warn_when_root_decorator(self): import armi with mockRunLogs.BufferLog() as mock: for ii in range(1, 4): self.exampleWarnWhenRootMessage() msg = "[warn] warning from root\n" * ii self.assertEqual(msg, mock.getStdout()) armi.MPI_RANK = 1 self.exampleWarnWhenRootMessage() self.assertEqual(msg, mock.getStdout()) armi.MPI_RANK = 0 ================================================ FILE: armi/utils/tests/test_densityTools.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test densityTools.""" import unittest import numpy as np from armi.materials.material import Material from armi.materials.uraniumOxide import UO2 from armi.nucDirectory.nuclideBases import NuclideBases from armi.utils import densityTools class UraniumOxide(Material): """A test material that needs to be stored in a different namespace. This is a duplicate (by name only) of :py:class:`armi.materials.uraniumOxide.UraniumOxide` and is used for testing in :py:meth:`armi.materials.tests.test_materials.MaterialFindingTests.test_namespacing` """ def pseudoDensity(self, Tk=None, Tc=None): return 0.0 def density(self, Tk=None, Tc=None): return 0.0 class TestDensityTools(unittest.TestCase): @classmethod def setUpClass(cls): cls.nuclideBases = NuclideBases() cls.elements = cls.nuclideBases.elements def test_expandElementalMassFracsToNuclides(self): """ Expand mass fraction to nuclides. .. test:: Expand mass fractions to nuclides. :id: T_ARMI_UTIL_EXP_MASS_FRACS :tests: R_ARMI_UTIL_EXP_MASS_FRACS """ element = self.elements.bySymbol["N"] mass = {"N": 1.0} densityTools.expandElementalMassFracsToNuclides(mass, [(element, None)]) self.assertNotIn("N", mass) self.assertIn("N15", mass) self.assertIn("N14", mass) self.assertAlmostEqual(sum(mass.values()), 1.0) self.assertNotIn("N13", mass) # nothing unnatural. 
def test_expandElementalZeroMassFrac(self): """As above, but try with a zero mass frac elemental.""" elementals = [(self.elements.bySymbol["N"], None), (self.elements.bySymbol["O"], None)] mass = {"N": 0.0, "O": 1.0} densityTools.expandElementalMassFracsToNuclides(mass, elementals) self.assertNotIn("N", mass) self.assertNotIn("O", mass) # Current expectation is for elements with zero mass fraction get expanded and # isotopes with zero mass remain in the dictionary. self.assertIn("N14", mass) self.assertAlmostEqual(sum(mass.values()), 1.0) def test_getChemicals(self): u235 = self.nuclideBases.byName["U235"] u238 = self.nuclideBases.byName["U238"] o16 = self.nuclideBases.byName["O16"] uo2 = UO2() uo2Chemicals = densityTools.getChemicals(uo2.massFrac) for symbol in ["U", "O"]: self.assertIn(symbol, uo2Chemicals.keys()) self.assertAlmostEqual(uo2Chemicals["U"], uo2.massFrac["U235"] + uo2.massFrac["U238"], 6) self.assertAlmostEqual(uo2Chemicals["O"], uo2.massFrac["O"], 6) # ensure getChemicals works if the nuclideBase is the dict key massFrac = {u238: 0.87, u235: 0.12, o16: 0.01} uo2Chemicals = densityTools.getChemicals(massFrac) for symbol in ["U", "O"]: self.assertIn(symbol, uo2Chemicals.keys()) self.assertAlmostEqual(uo2Chemicals["U"], massFrac[u235] + massFrac[u238], 2) self.assertAlmostEqual(uo2Chemicals["O"], massFrac[o16], 2) def test_expandElement(self): """Ensure isotopic subset feature works in expansion.""" elemental = self.elements.bySymbol["O"] massFrac = 1.0 subset = [self.nuclideBases.byName["O16"], self.nuclideBases.byName["O17"]] m1 = densityTools.expandElementalNuclideMassFracs(elemental, massFrac) m2 = densityTools.expandElementalNuclideMassFracs(elemental, massFrac, subset) self.assertIn("O18", m1) self.assertNotIn("O18", m2) self.assertAlmostEqual(1.0, sum(m1.values())) self.assertAlmostEqual(1.0, sum(m2.values())) # expect some small difference due to renormalization self.assertNotAlmostEqual(m1["O17"], m2["O17"]) self.assertAlmostEqual(m1["O17"], 
m2["O17"], delta=1e-5) def test_applyIsotopicsMix(self): """Ensure isotopc classes get mixed properly.""" uo2 = UO2() massFracO = uo2.massFrac["O"] uo2.class1_wt_frac = 0.2 enrichedMassFracs = {"U235": 0.3, "U234": 0.1, "PU239": 0.6} fertileMassFracs = {"U238": 0.3, "PU240": 0.7} densityTools.applyIsotopicsMix(uo2, enrichedMassFracs, fertileMassFracs) self.assertAlmostEqual(uo2.massFrac["U234"], (1 - massFracO) * 0.2 * 0.1) # HM blended self.assertAlmostEqual(uo2.massFrac["U238"], (1 - massFracO) * 0.8 * 0.3) # HM blended self.assertAlmostEqual(uo2.massFrac["O"], massFracO) # non-HM stays unchanged def test_getNDensFromMasses(self): """ Number densities from masses. .. test:: Number densities are retrievable from masses. :id: T_ARMI_UTIL_MASS2N_DENS :tests: R_ARMI_UTIL_MASS2N_DENS """ nucs, nDens = densityTools.getNDensFromMasses(1, {"O": 1, "H": 2}) O = np.where(nucs == "O".encode())[0] H = np.where(nucs == "H".encode())[0] self.assertAlmostEqual(nDens[O][0], 0.03764, 5) self.assertAlmostEqual(nDens[H][0], 1.19490, 5) def test_getMassFractions(self): """Number densities to mass fraction.""" numDens = {"O17": 0.1512, "PU239": 1.5223, "U234": 0.135} massFracs = densityTools.getMassFractions(numDens) self.assertAlmostEqual(massFracs["O17"], 0.006456746320668389) self.assertAlmostEqual(massFracs["PU239"], 0.9141724414849527) self.assertAlmostEqual(massFracs["U234"], 0.07937081219437897) def test_calculateNumberDensity(self): """Mass fraction to number density.""" nDens = densityTools.calculateNumberDensity("U235", 1, 1) self.assertAlmostEqual(nDens, 0.0025621344549254283) nDens = densityTools.calculateNumberDensity("PU239", 0.00012, 0.001) self.assertAlmostEqual(nDens, 0.0003023009578309138) nDens = densityTools.calculateNumberDensity("N15", 111, 222) self.assertAlmostEqual(nDens, 0.020073659896941428) def test_getMassInGrams(self): m = densityTools.getMassInGrams("N16", 1.001, None) self.assertEqual(m, 0) m = densityTools.getMassInGrams("O17", 1.001, 0.00123) 
self.assertAlmostEqual(m, 0.034754813848559635) m = densityTools.getMassInGrams("PU239", 1.001, 2.123) self.assertAlmostEqual(m, 843.5790671316283) def test_normalizeNuclideList(self): """Normalize a nuclide list.""" nList = {"PU239": 23.2342, "U234": 0.001234, "U235": 34.152} norm = densityTools.normalizeNuclideList(nList) self.assertAlmostEqual(norm["PU239"], 0.40486563661306063) self.assertAlmostEqual(norm["U234"], 2.1502965265880334e-05) self.assertAlmostEqual(norm["U235"], 0.5951128604216736) def test_formatMaterialCard(self): """Formatting material information into an MCNP input card. .. test:: Create MCNP material card :id: T_ARMI_UTIL_MCNP_MAT_CARD :tests: R_ARMI_UTIL_MCNP_MAT_CARD """ u235 = self.nuclideBases.byName["U235"] pu239 = self.nuclideBases.byName["PU239"] o16 = self.nuclideBases.byName["O16"] numDens = {o16: 0.7, pu239: 0.1, u235: 0.2} matCard = densityTools.formatMaterialCard( numDens, matNum=1, sigFigs=4, ) refMatCard = """m1 8016 7.0000e-01 92235 2.0000e-01 94239 1.0000e-01 """ self.assertEqual(refMatCard, "".join(matCard)) lfp35 = self.nuclideBases.byName["LFP35"] dump1 = self.nuclideBases.byName["DUMP1"] o16 = self.nuclideBases.byName["O16"] numDens = {o16: 0.7, pu239: 1e-8, u235: 0.2, lfp35: 1e-3, dump1: 1e-4} matCard = densityTools.formatMaterialCard( numDens, matNum=-1, minDens=1e-6, mcnp6Compatible=True, mcnpLibrary="81", ) refMatCard = """m{} 8016 7.00000000e-01 92235 2.00000000e-01 94239 1.00000000e-06 nlib=81c """ self.assertEqual(refMatCard, "".join(matCard)) numDens = {lfp35: 0.5, dump1: 0.5} matCard = densityTools.formatMaterialCard( numDens, mcnp6Compatible=False, mcnpLibrary=None, ) refMatCard = [] self.assertEqual(refMatCard, matCard) ================================================ FILE: armi/utils/tests/test_directoryChangers.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance 
with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for testing directoryChangers.""" import os import shutil import unittest from pathlib import Path from armi.utils import directoryChangers, directoryChangersMpi class ExpectedException(Exception): pass class TestDirectoryChangers(unittest.TestCase): """Tests for directory changers.""" def setUp(self): self.temp_directory = self._testMethodName + "ThisIsATemporaryDirectory-AAZZ0099" if os.path.exists(self.temp_directory): shutil.rmtree(self.temp_directory) def tearDown(self): if os.path.exists(self.temp_directory): shutil.rmtree(self.temp_directory) def test_mpiAction(self): try: os.mkdir(self.temp_directory) cdma = directoryChangersMpi._ChangeDirectoryMpiAction(self.temp_directory) self.assertTrue(cdma.invoke(None, None, None)) finally: os.chdir("..") os.rmdir(self.temp_directory) def test_mpiActionFailsOnNonexistentPath(self): with self.assertRaises(IOError): cdma = directoryChangersMpi._ChangeDirectoryMpiAction(self.temp_directory) cdma.invoke(None, None, None) def test_exception(self): """Make sure directory changers bring back full folder when an exception is raised.""" try: with directoryChangers.ForcedCreationDirectoryChanger(self.temp_directory): Path("file1.txt").touch() Path("file2.txt").touch() os.mkdir("subdir") raise ExpectedException("Ooops") except ExpectedException: pass retrievedFolder = f"dump-{self.temp_directory}" self.assertTrue(os.path.exists(os.path.join(retrievedFolder, "file1.txt"))) self.assertTrue(os.path.exists(os.path.join(retrievedFolder, "file2.txt"))) shutil.rmtree(retrievedFolder) def 
test_exception_disabled(self): """Make sure directory changers do not bring back full folder when handling is disabled.""" try: with directoryChangers.ForcedCreationDirectoryChanger(self.temp_directory, dumpOnException=False): Path("file1.txt").touch() Path("file2.txt").touch() raise ExpectedException("Ooops") except ExpectedException: pass self.assertFalse(os.path.exists(os.path.join(f"dump-{self.temp_directory}", "file1.txt"))) def test_change_to_nonexisting_fails(self): """Fail if destination doesn't exist.""" with self.assertRaises(OSError): with directoryChangers.DirectoryChanger(self.temp_directory): pass def test_change_to_nonexisting_works_forced(self): """Succeed with forced creation even when destination doesn't exist.""" with directoryChangers.ForcedCreationDirectoryChanger(self.temp_directory): pass def test_temporary_cleans(self): """Make sure Temporary cleaner cleans up temporary files.""" with directoryChangers.TemporaryDirectoryChanger() as dc: Path("file1.txt").touch() Path("file2.txt").touch() tempName = dc.destination self.assertFalse(os.path.exists(tempName)) def test_file_retrieval(self): """ Make sure requested files and/or globs get copied back. 
* Checks basic copy feature * Checks rename feature * Checks glob expansion * Checks copy to output path """ def f(name): """Utility to avoid test clashes during cleanups.""" return self._testMethodName + name with directoryChangers.TemporaryDirectoryChanger(filesToRetrieve=[(f("file1.txt"), f("newfile1.txt"))]): Path(f("file1.txt")).touch() Path(f("file2.txt")).touch() self.assertTrue(os.path.exists(f("newfile1.txt"))) os.remove(f("newfile1.txt")) with directoryChangers.TemporaryDirectoryChanger( filesToRetrieve=[f("file*.txt")], outputPath="temp", ) as _: Path(f("file1.txt")).touch() Path(f("file2.txt")).touch() self.assertTrue(os.path.exists(f("file1.txt"))) self.assertTrue(os.path.exists(f("file2.txt"))) os.remove(f("file1.txt")) os.remove(f("file2.txt")) self.assertTrue(os.path.exists(os.path.join("temp", f("file1.txt")))) self.assertTrue(os.path.exists(os.path.join("temp", f("file2.txt")))) shutil.rmtree("temp") def test_file_retrieval_missing_file(self): """Tests that the directory changer still returns a subset of files even if all do not exist.""" def f(name): """Utility to avoid test clashes during cleanups.""" return self._testMethodName + name with directoryChangers.TemporaryDirectoryChanger(filesToRetrieve=[f("file1.txt"), f("file2.txt")]): Path(f("file1.txt")).touch() self.assertTrue(os.path.exists(f("file1.txt"))) self.assertFalse(os.path.exists(f("file2.txt"))) os.remove(f("file1.txt")) class TestDirectoryChangersEnvEdits(unittest.TestCase): """Tests that will use monkeypatch to alter an environment variable.""" def setUp(self): # We cannot import pytest at the top of the file right now. The ARMI unit tests are currently imported at # runtime, and until that is changed, we don't want pytest to be a runtime dependency. For now, hide the import # down here. Once the testing module is complete and ARMI's unit tests aren't all imported, the pytest import # can move up to where it belongs. 
import pytest self.monkeypatch = pytest.MonkeyPatch() def tearDown(self): self.monkeypatch.undo() def test_tempDirChangerNonDefault(self): """Make sure TemporaryDirectoryChanger uses an alternative root when user edits the appropriate environment variable. """ # Alter the root path to be in this directory altRoot = Path(__file__).parent / "altRoot" self.monkeypatch.setenv("ARMI_TEMP_ROOT_PATH", str(altRoot)) with directoryChangers.TemporaryDirectoryChanger() as td: self.assertEqual(Path(td.destination).parent, altRoot) # This test creates a path that isn't auto deleted with TempDirChanger, which deletes the temp dir, not the root if os.path.exists(altRoot): shutil.rmtree(altRoot) ================================================ FILE: armi/utils/tests/test_directoryChangersMpi.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test the MpiDirectoryChanger. These tests will be generally ignored by pytest if you are trying to run them in an environment without MPI installed. 
To run these tests from the command line, install MPI and mpi4py, and do: mpiexec -n 2 python -m pytest test_parallel.py or mpiexec.exe -n 2 python -m pytest test_parallel.py """ import os import shutil import unittest from armi import context, mpiActions from armi.utils.directoryChangersMpi import MpiDirectoryChanger # determine if this is a parallel run, and MPI is installed MPI_EXE = None if shutil.which("mpiexec.exe") is not None: MPI_EXE = "mpiexec.exe" elif shutil.which("mpiexec") is not None: MPI_EXE = "mpiexec" class RevealYourDirectory(mpiActions.MpiAction): def invokeHook(self): # make a dir with name corresponding to the rank, that way we can confirm # that all ranks actually executed this code os.mkdir(str(context.MPI_RANK)) return True class TestMPI(unittest.TestCase): def setUp(self): self.targetDir = "mpiDir" if context.MPI_RANK == 0: os.mkdir(self.targetDir) def tearDown(self): context.MPI_COMM.barrier() if context.MPI_RANK == 0: shutil.rmtree(self.targetDir) @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, "Parallel test only") def test_MpiDirectoryChanger(self): # make sure all workers start outside the targetDir self.assertNotIn(self.targetDir, os.getcwd()) # put the workers in a loop, waiting for command from the main process if context.MPI_RANK != 0: while True: cmd = context.MPI_COMM.bcast(None, root=0) print(cmd) if cmd == "quit": break cmd.invoke(None, None, None) # from main, send commands to the workers to move into the targetDir # and then create folders within there if context.MPI_RANK == 0: with MpiDirectoryChanger(self.targetDir): RevealYourDirectory.invokeAsMaster(None, None, None) # make the workers exit the waiting loop context.MPI_COMM.bcast("quit", root=0) context.MPI_COMM.barrier() if context.MPI_RANK == 0: # from main, confirm that subdirectories were created by all workers for i in range(context.MPI_SIZE): self.assertTrue(os.path.isdir(os.path.join(os.getcwd(), self.targetDir, str(i)))) # make sure all workers have 
moved back out from the targetDir self.assertNotIn(self.targetDir, os.getcwd()) context.MPI_COMM.barrier() ================================================ FILE: armi/utils/tests/test_flags.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing flags.py.""" import unittest from armi.reactor.composites import FlagSerializer from armi.utils.flags import Flag, auto class ExampleFlag(Flag): FOO = auto() BAR = auto() BAZ = auto() class TestFlag(unittest.TestCase): """Tests for the utility Flag class and cohorts.""" def test_auto(self): """ Make sure that auto() works right, and that mixing it with explicit values doesn't lead to collision. """ class F(Flag): foo = auto() bar = 1 baz = auto() f = F(F.bar) self.assertEqual(int(f), 1) # check that baz got a higher number than foo. Not a guaranteed behavior/bit of # an implementation detail, but nice to know we understand what's happening # under the hood. 
self.assertTrue(int(F.baz) > int(F.foo)) def test_extend(self): """Ensure the set of flags can be programmatically extended.""" class F(Flag): foo = auto() bar = 1 baz = auto() self.assertEqual(F.width(), 1) F.extend({"A": auto(), "B": 8, "C": auto(), "D": auto(), "E": auto()}) self.assertEqual(int(F.B), 8) self.assertEqual(F.width(), 1) F.extend({"LAST": auto()}) self.assertEqual(F.width(), 2) f = F.A | F.foo | F.C array = f.to_bytes() self.assertEqual(len(array), 2) f2 = F.from_bytes(array) self.assertEqual(f, f2) def test_collision_extension(self): """Ensure the set of flags cannot be programmatically extended if duplicate created. .. test:: Set of flags are extensible without loss of uniqueness. :id: T_ARMI_FLAG_EXTEND0 :tests: R_ARMI_FLAG_EXTEND """ class F(Flag): foo = auto() bar = 1 baz = auto() F.extend({"a": auto()}) F.extend({"b": 1}) def test_collision_creation(self): """Make sure that we catch value collisions upon creation. .. test:: No two flags have equivalence. :id: T_ARMI_FLAG_DEFINE :tests: R_ARMI_FLAG_DEFINE """ with self.assertRaises(AssertionError): class F(Flag): foo = 1 bar = 1 class D(Flag): foo = auto() bar = auto() baz = auto() self.assertEqual(D.foo._value, 1) self.assertEqual(D.bar._value, 2) self.assertEqual(D.baz._value, 4) def test_bool(self): f = ExampleFlag() self.assertFalse(f) def test_inclusion(self): f = ExampleFlag.FOO | ExampleFlag.BAZ self.assertIn(ExampleFlag.FOO, f) self.assertIn(ExampleFlag.BAZ, f) self.assertNotIn(ExampleFlag.BAR, f) def test_bitwise(self): """Make sure that bitwise operators work right.""" f = ExampleFlag.FOO | ExampleFlag.BAR self.assertTrue(f & ExampleFlag.FOO) self.assertTrue(f & ExampleFlag.BAR) self.assertFalse(f & ExampleFlag.BAZ) # mask off BAR f &= ExampleFlag.FOO self.assertEqual(f, ExampleFlag.FOO) # OR in BAZ f |= ExampleFlag.BAZ self.assertIn(ExampleFlag.BAZ, f) # XOR them. 
Should turn off FOO, since they both have it f2 = ExampleFlag.FOO | ExampleFlag.BAR self.assertEqual(f2 ^ f, ExampleFlag.BAR | ExampleFlag.BAZ) def test_iteration(self): """We want to be able to iterate over set flags.""" f = ExampleFlag.FOO | ExampleFlag.BAZ flagsOn = [val for val in f] self.assertIn(ExampleFlag.FOO, flagsOn) self.assertIn(ExampleFlag.BAZ, flagsOn) self.assertNotIn(ExampleFlag.BAR, flagsOn) def test_hashable(self): f1 = ExampleFlag.FOO f2 = ExampleFlag.BAR self.assertNotEqual(hash(f1), hash(f2)) def test_getitem(self): self.assertEqual(ExampleFlag["FOO"], ExampleFlag.FOO) def test_duplicateFlags(self): """Show that duplicate flags can be added and silently ignored.""" class F(Flag): @classmethod def len(cls): return len(cls._nameToValue) F.extend({"FLAG0": auto()}) for i in range(1, 12): F.extend({f"FLAG{i}": auto()}) num = F.len() F.extend({f"FLAG{i - 1}": auto()}) self.assertEqual(F.len(), num) # While the next two lines do not assert anything, these lines used to raise an error. # So these lines remain as proof against that error in the future. ff = getattr(F, f"FLAG{i}") FlagSerializer._packImpl( [ ff, ], F, ) self.assertEqual(F.len(), num) def test_soManyFlags(self): """Show that many flags can be added without issue.""" class F(Flag): @classmethod def len(cls): return len(cls._nameToValue) for i in range(1, 100): num = F.len() flagName = f"FLAG{i}" F.extend({flagName: auto()}) self.assertEqual(F.len(), num + 1) flag = getattr(F, flagName) flag.to_bytes() self.assertEqual(F.len(), num + 1) ================================================ FILE: armi/utils/tests/test_hexagon.py ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test hexagon tools.""" import math import random import unittest from armi.utils import hexagon class TestHexagon(unittest.TestCase): N_FUZZY_DRAWS: int = 10 """Number of random draws to use in some fuzzy testing""" def test_hexagon_area(self): """Area of a hexagon.""" # Calculate area given a pitch self.assertEqual(hexagon.area(1), math.sqrt(3.0) / 2) self.assertEqual(hexagon.area(2), 4 * math.sqrt(3.0) / 2) def test_numPositionsInRing(self): """Calculate number of positions in a ring of hexagons.""" self.assertEqual(hexagon.numPositionsInRing(1), 1) self.assertEqual(hexagon.numPositionsInRing(2), 6) self.assertEqual(hexagon.numPositionsInRing(3), 12) self.assertEqual(hexagon.numPositionsInRing(4), 18) def test_rotatedCellCenter(self): """Test that location of the center cell is invariant through rotation.""" for rot in range(6): self.assertTrue(hexagon.getIndexOfRotatedCell(1, rot), 1) def test_rotatedFirstRing(self): """Simple test for the corners of the first ring are maintained during rotation.""" # A 60 degree rotation is just incrementing the cell index by one here locations = list(range(2, 8)) for locIndex, initialPosition in enumerate(locations): for rot in range(6): actual = hexagon.getIndexOfRotatedCell(initialPosition, rot) newIndex = (locIndex + rot) % 6 expectedPosition = locations[newIndex] self.assertEqual(actual, expectedPosition, msg=f"{initialPosition=}, {rot=}") def test_rotateFuzzy(self): """Select some position number and rotation and check for consistency.""" N_DRAWS = 100 for _ in range(N_DRAWS): self._rotateFuzzyInner() def 
_rotateFuzzyInner(self): rot = random.randint(1, 5) initialCell = random.randint(2, 300) testInfoMsg = f"{rot=}, {initialCell=}" newCell = hexagon.getIndexOfRotatedCell(initialCell, rot) self.assertNotEqual(newCell, initialCell, msg=testInfoMsg) # should be in the same ring initialRing = hexagon.numRingsToHoldNumCells(initialCell) newRing = hexagon.numRingsToHoldNumCells(newCell) self.assertEqual(newRing, initialRing, msg=testInfoMsg) # If we un-rotate, we should get our initial cell reverseRot = (6 - rot) % 6 reverseCell = hexagon.getIndexOfRotatedCell(newCell, reverseRot) self.assertEqual(reverseCell, initialCell, msg=testInfoMsg) def test_positionsUpToRing(self): """Test totalPositionsUpToRing is consistent with numPositionsInRing.""" self.assertEqual(hexagon.totalPositionsUpToRing(1), 1) self.assertEqual(hexagon.totalPositionsUpToRing(2), 7) self.assertEqual(hexagon.totalPositionsUpToRing(3), 19) totalPositions = 19 for ring in range(4, 30): posInThisRing = hexagon.numPositionsInRing(ring) totalPositions += posInThisRing self.assertEqual(hexagon.totalPositionsUpToRing(ring), totalPositions, msg=f"{ring=}") def test_rotatedCellIndexErrors(self): """Test errors for non-positive initial cell indices during rotation.""" self._testNonPosRotIndex(0) for _ in range(self.N_FUZZY_DRAWS): index = random.randint(-100, -1) self._testNonPosRotIndex(index) def _testNonPosRotIndex(self, index: int): with self.assertRaisesRegex(ValueError, ".*must be positive", msg=f"{index=}"): hexagon.getIndexOfRotatedCell(index, 0) def test_rotatedCellOrientationErrors(self): """Test errors for invalid orientation numbers during rotation.""" for _ in range(self.N_FUZZY_DRAWS): upper = random.randint(6, 100) self._testRotOrientation(upper) lower = random.randint(-100, -1) self._testRotOrientation(lower) def _testRotOrientation(self, orientation: int): with self.assertRaisesRegex(ValueError, "Orientation number", msg=f"{orientation=}"): hexagon.getIndexOfRotatedCell(initialCellIndex=1, 
orientationNumber=orientation) def test_indexWithNoRotation(self): """Test that the initial cell location is returned if not rotated.""" for _ in range(self.N_FUZZY_DRAWS): ix = random.randint(1, 300) postRotation = hexagon.getIndexOfRotatedCell(ix, orientationNumber=0) self.assertEqual(postRotation, ix) ================================================ FILE: armi/utils/tests/test_iterables.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Unittests for iterables.py."""

import unittest

import numpy as np

from armi.utils import iterables

# CONSTANTS
# 4000 floats spanning negative and positive values; exercises the pack/unpack round trips
_TEST_DATA = {"turtle": [float(vv) for vv in range(-2000, 2000)]}


class TestIterables(unittest.TestCase):
    """Testing our custom Iterables."""

    def test_flatten(self):
        # flatten() should merge unevenly-sized sublists into one flat list
        self.assertEqual(
            iterables.flatten([[1, 2, 3], [4, 5, 6], [7, 8], [9, 10]]),
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        )
        self.assertEqual(
            iterables.flatten([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]),
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        )

    def test_chunk(self):
        # Last chunk may be shorter than the chunk size
        self.assertEqual(
            list(iterables.chunk([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4)),
            [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]],
        )

    def test_split(self):
        data = list(range(50))
        # split into 10 pieces; flatten() is the inverse operation
        chu = iterables.split(data, 10)
        self.assertEqual(len(chu), 10)
        unchu = iterables.flatten(chu)
        self.assertEqual(data, unchu)
        # degenerate split into a single piece
        chu = iterables.split(data, 1)
        self.assertEqual(len(chu), 1)
        unchu = iterables.flatten(chu)
        self.assertEqual(data, unchu)
        # more pieces than items: padWith fills the extra slots
        chu = iterables.split(data, 60, padWith=[None])
        self.assertEqual(len(chu), 60)
        unchu = iterables.flatten(chu)
        self.assertEqual(len(unchu), 60)
        chu = iterables.split(data, 60, padWith=[None])
        self.assertEqual(len(chu), 60)
        # single-item edge case
        data = [0]
        chu = iterables.split(data, 1)
        unchu = iterables.flatten(chu)
        self.assertEqual(unchu, data)

    def test_packingAndUnpackingBinaryStrings(self):
        # pack -> unpack must round-trip the float data exactly
        packed = iterables.packBinaryStrings(_TEST_DATA)
        unpacked = iterables.unpackBinaryStrings(packed["turtle"][0])
        self.assertEqual(_TEST_DATA["turtle"], unpacked)

    def test_packingAndUnpackingHexStrings(self):
        # same round-trip guarantee for the hex-string representation
        packed = iterables.packHexStrings(_TEST_DATA)
        unpacked = iterables.unpackHexStrings(packed["turtle"][0])
        self.assertEqual(_TEST_DATA["turtle"], unpacked)

    def test_sequenceInit(self):
        # init an empty sequence
        s = iterables.Sequence()
        for item in s:
            # an empty Sequence must yield nothing at all
            self.assertTrue(False, "This shouldn't happen.")
        # init a sequence with another sequence
        example = [1, 2, 3]
        s2 = iterables.Sequence(example)
        s3 = iterables.Sequence(s2)
        i = 0
        for item in s3:
            i += 1
        self.assertEqual(i, len(example))

    def test_sequence(self):
        # sequentially using methods in the usual way
        s = iterables.Sequence(range(1000000))
        s.drop(lambda i: i % 2 == 0)
        s.select(lambda i: i < 20)
        s.transform(lambda i: i * 10)
        result = tuple(s)
        self.assertEqual(result, (10, 30, 50, 70, 90, 110, 130, 150, 170, 190))
        # stringing together the methods in a more modern Python way
        s = iterables.Sequence(range(1000000))
        result = tuple(s.drop(lambda i: i % 2 == 0).select(lambda i: i < 20).transform(lambda i: i * 10))
        self.assertEqual(result, (10, 30, 50, 70, 90, 110, 130, 150, 170, 190))
        # call tuple() after a couple methods
        s = iterables.Sequence(range(1000000))
        s.drop(lambda i: i % 2 == 0)
        s.select(lambda i: i < 20)
        result = tuple(s)
        self.assertEqual(result, (1, 3, 5, 7, 9, 11, 13, 15, 17, 19))
        # you can't just call tuple() a second time, there is no data left
        s.transform(lambda i: i * 10)
        result = tuple(s)
        self.assertEqual(result, ())

    def test_copySequence(self):
        # a copy iterates independently of the original
        s = iterables.Sequence(range(4, 8))
        sCopy = s.copy()
        vals = [item for item in sCopy]
        self.assertEqual(vals[0], 4)
        self.assertEqual(vals[-1], 7)
        self.assertEqual(len(vals), 4)

    def test_extendSequence(self):
        s = iterables.Sequence(range(3))
        ex = range(3, 8)
        s.extend(ex)
        vals = [item for item in s]
        self.assertEqual(vals[0], 0)
        self.assertEqual(vals[-1], 7)
        self.assertEqual(len(vals), 8)

    def test_appendSequence(self):
        # NOTE(review): despite the name, this appends via extend([999]);
        # presumably Sequence has no append() — confirm against iterables.Sequence.
        s = iterables.Sequence(range(3))
        s.extend([999])
        vals = [item for item in s]
        self.assertEqual(vals[0], 0)
        self.assertEqual(vals[-1], 999)
        self.assertEqual(len(vals), 4)

    def test_addingSequences(self):
        # both + and += concatenate two Sequences
        s1 = iterables.Sequence(range(3))
        s2 = iterables.Sequence(range(3, 6))
        s3 = s1 + s2
        vals = [item for item in s3]
        self.assertEqual(vals[0], 0)
        self.assertEqual(vals[-1], 5)
        self.assertEqual(len(vals), 6)
        s1 += s2
        vals = [item for item in s1]
        self.assertEqual(vals[0], 0)
        self.assertEqual(vals[-1], 5)
        self.assertEqual(len(vals), 6)

    def test_listPivot(self):
        # pivot() rotates the list so element `loc` comes first
        data = list(range(10))
        loc = 4
        actual = iterables.pivot(data, loc)
        self.assertEqual(actual, data[loc:] + data[:loc])

    def test_arrayPivot(self):
        # pivot() should behave identically for numpy arrays, including negative pivots
        data = np.arange(10)
        loc = -7
        actual = iterables.pivot(data, loc)
        expected = np.array(iterables.pivot(data.tolist(), loc))
        self.assertTrue((actual == expected).all(), msg=f"{actual=} != {expected=}")
        # Catch a silent failure case where pivot doesn't change the iterable
        self.assertTrue(
            (actual != data).all(),
            msg=f"Pre-pivot {data=} should not equal post-pivot {actual=}",
        )


================================================
FILE: armi/utils/tests/test_mathematics.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing mathematics utilities."""

import unittest
from math import sqrt

import numpy as np

from armi.utils.mathematics import (
    average1DWithinTolerance,
    convertToSlice,
    efmt,
    expandRepeatedFloats,
    findClosest,
    findNearestValue,
    fixThreeDigitExp,
    getFloat,
    getStepsFromValues,
    isMonotonic,
    linearInterpolation,
    minimizeScalarFunc,
    newtonsMethod,
    parabolaFromPoints,
    parabolicInterpolation,
    relErr,
    resampleStepwise,
    rotateXY,
)


class TestMath(unittest.TestCase):
    """Tests for various math utilities."""

    def test_average1DWithinTolerance(self):
        # element-wise average of three rows: (1+4+7)/3, (2+5+8)/3, (3+6+9)/3
        vals = np.array([np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])])
        result = average1DWithinTolerance(vals, 0.1)
        self.assertEqual(len(result), 3)
        self.assertEqual(result[0], 4.0)
        self.assertEqual(result[1], 5.0)
        self.assertEqual(result[2], 6.0)

    def test_average1DWithinToleranceInvalid(self):
        # negative entries violate the function's input validation
        vals = np.array([np.array([1, -2, 3]), np.array([4, -5, 6]), np.array([7, -8, 9])])
        with self.assertRaises(ValueError):
            average1DWithinTolerance(vals, 0.1)

    def test_convertToSlice(self):
        slice1 = convertToSlice(2)
        self.assertEqual(slice1, slice(2, 3, None))
        slice1 = convertToSlice(2.0, increment=-1)
        self.assertEqual(slice1, slice(1, 2, None))
        # None means "take everything"
        slice1 = convertToSlice(None)
        self.assertEqual(slice1, slice(None, None, None))
        # list/array inputs pass through as index arrays rather than slices
        slice1 = convertToSlice([1, 2, 3])
        self.assertTrue(np.allclose(slice1, np.array([1, 2, 3])))
        slice1 = convertToSlice(slice(2, 3, None))
        self.assertEqual(slice1, slice(2, 3, None))
        slice1 = convertToSlice(np.array([1, 2, 3]))
        self.assertTrue(np.allclose(slice1, np.array([1, 2, 3])))
        with self.assertRaises(Exception):
            slice1 = convertToSlice("slice")

    def test_efmt(self):
        # NOTE(review): efmt returns strings, so assertAlmostEqual degenerates to
        # plain equality here; assertEqual would express the intent more clearly.
        self.assertAlmostEqual(efmt("1.0e+001"), "1.0E+01")
        self.assertAlmostEqual(efmt("1.0E+01"), "1.0E+01")

    def test_expandRepeatedFloats(self):
        # "2R" repeats the previous value 2 more times; "0R" repeats zero times
        repeatedFloats = ["150", "2R", 200.0, 175, "4r", 180.0, "0R"]
        expectedFloats = [150] * 3 + [200] + [175] * 5 + [180]
        self.assertEqual(expandRepeatedFloats(repeatedFloats), expectedFloats)

    def test_findClosest(self):
        l1 = range(10)
        self.assertEqual(findClosest(l1, 5.6), 6)
        # values outside the list clamp to the nearest endpoint
        self.assertEqual(findClosest(l1, 10.1), 9)
        self.assertEqual(findClosest(l1, -200), 0)
        # with index
        self.assertEqual(findClosest(l1, 5.6, indx=True), (6, 6))

    def test_findNearestValue(self):
        searchList = [0.1, 0.2, 0.25, 0.35, 0.4]
        searchValue = 0.225
        self.assertEqual(findNearestValue(searchList, searchValue), 0.2)
        searchValue = 0.226
        self.assertEqual(findNearestValue(searchList, searchValue), 0.25)
        searchValue = 0.0
        self.assertEqual(findNearestValue(searchList, searchValue), 0.1)
        searchValue = 10
        self.assertEqual(findNearestValue(searchList, searchValue), 0.4)

    def test_fixThreeDigitExp(self):
        # Some codes write 3-digit exponents without the 'E' (e.g. "1.0-101");
        # fixThreeDigitExp must recover the proper float
        fixed = fixThreeDigitExp("-9.03231714805651E+101")
        self.assertEqual(-9.03231714805651e101, fixed)
        fixed = fixThreeDigitExp("9.03231714805651-101")
        self.assertEqual(9.03231714805651e-101, fixed)
        fixed = fixThreeDigitExp("-2.4594981981654+101")
        self.assertEqual(-2.4594981981654e101, fixed)
        fixed = fixThreeDigitExp("-2.4594981981654-101")
        self.assertEqual(-2.4594981981654e-101, fixed)

    def test_getFloat(self):
        # non-numeric strings yield None rather than raising
        self.assertIsNone(getFloat("word"))
        for flt in [-9.123 + f * 0.734 for f in range(25)]:
            self.assertEqual(getFloat(flt), flt)
            self.assertEqual(getFloat(str(flt)), flt)

    def test_getStepsFromValues(self):
        steps = getStepsFromValues([1.0, 3.0, 6.0, 10.0], prevValue=0.0)
        self.assertListEqual(steps, [1.0, 2.0, 3.0, 4.0])

    def test_isMonotonic(self):
        # each relation string selects the comparison applied pairwise
        self.assertEqual(True, isMonotonic([1, 2, 2, 3], "<="))
        self.assertEqual(False, isMonotonic([1, 2, 2, 1], "<="))
        self.assertEqual(True, isMonotonic([1, 2, 3], "<"))
        self.assertEqual(False, isMonotonic([1, 2, 2], "<"))
        self.assertEqual(True, isMonotonic([3, 2, 1, 1], ">="))
        self.assertEqual(False, isMonotonic([3, 2, 1, 2], ">="))
        self.assertEqual(True, isMonotonic([3, 2, 1], ">"))
        self.assertEqual(False, isMonotonic([3, 2, 2], ">"))
        with self.assertRaises(ValueError):
            isMonotonic([1, 2, 3, 2], "invalidRelation")

    def test_linearInterpolation(self):
        # forward then inverse interpolation should round-trip
        y = linearInterpolation(1.0, 2.0, 3.0, 4.0, targetX=20.0)
        x = linearInterpolation(1.0, 2.0, 3.0, 4.0, targetY=y)
        x2 = linearInterpolation(1.0, 1.0, 2.0, 2.0, targetY=50)
        self.assertEqual(x, 20.0)
        self.assertEqual(x2, 50.0)
        # identical x values define a vertical line -> undefined slope
        with self.assertRaises(ZeroDivisionError):
            _ = linearInterpolation(1.0, 1.0, 1.0, 2.0)

    def test_minimizeScalarFunc(self):
        f = lambda x: (x + 1) ** 2
        minimum = minimizeScalarFunc(f, -3.0, 10.0, maxIterations=10)
        self.assertAlmostEqual(minimum, -1.0, places=3)
        # positiveGuesses clamps the search to x >= 0, moving the reported minimum
        minimum = minimizeScalarFunc(f, -3.0, 10.0, maxIterations=10, positiveGuesses=True)
        self.assertAlmostEqual(minimum, 0.0, places=3)

    def test_newtonsMethod(self):
        # roots of (x+2)(x-1) are -2 and 1; positiveGuesses selects the positive one
        f = lambda x: (x + 2) * (x - 1)
        root = newtonsMethod(f, 0.0, 5.0, maxIterations=10, positiveGuesses=True)
        self.assertAlmostEqual(root, 1.0, places=3)
        root = newtonsMethod(f, 0.0, -10.0, maxIterations=10)
        self.assertAlmostEqual(root, -2.0, places=3)

    def test_parabola(self):
        # test the parabola function
        # three points uniquely determine y = x^2 + 1 here
        a, b, c = parabolaFromPoints((0, 1), (1, 2), (-1, 2))
        self.assertEqual(a, 1.0)
        self.assertEqual(b, 0.0)
        self.assertEqual(c, 1.0)
        # duplicate x values make the system singular
        with self.assertRaises(Exception):
            a, b, c = parabolaFromPoints((0, 1), (0, 1), (-1, 2))

    def test_parabolicInterpolation(self):
        # two real roots case: returns (root, slope) pairs
        realRoots = parabolicInterpolation(2.0e-6, -5.0e-4, 1.02, 1.0)
        self.assertAlmostEqual(realRoots[0][0], 200.0)
        self.assertAlmostEqual(realRoots[0][1], 3.0e-4)
        self.assertAlmostEqual(realRoots[1][0], 50.0)
        self.assertAlmostEqual(realRoots[1][1], -3.0e-4)
        # repeated-root (vertex) case
        noRoots = parabolicInterpolation(2.0e-6, -4.0e-4, 1.03, 1.0)
        self.assertAlmostEqual(noRoots[0][0], -100.0)
        self.assertAlmostEqual(noRoots[0][1], 0.0)
        # 3. run time error
        with self.assertRaises(RuntimeError):
            _ = parabolicInterpolation(2.0e-6, 4.0e-4, 1.02, 1.0)

    def test_relErr(self):
        self.assertAlmostEqual(relErr(1.00, 1.01), 0.01)
        self.assertAlmostEqual(relErr(100.0, 97.0), -0.03)
        # -1e99 is the sentinel returned when the reference value is zero
        self.assertAlmostEqual(relErr(0.00, 1.00), -1e99)

    def test_resampleStepwiseAvg0(self):
        """Test resampleStepwise() averaging when in and out bins match."""
        xin = [0, 1, 2, 13.3]
        yin = [4.76, 9.99, -123.456]
        xout = [0, 1, 2, 13.3]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertAlmostEqual(yout[0], 4.76)
        self.assertAlmostEqual(yout[1], 9.99)
        self.assertAlmostEqual(yout[2], -123.456)

    def test_resampleStepwiseAvg1(self):
        """Test resampleStepwise() averaging for one arbitrary case."""
        xin = [0, 1, 2, 3, 4]
        yin = [3, 2, 5, 3]
        xout = [0, 2, 3.5, 4]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 2.5)
        self.assertAlmostEqual(yout[1], 4.333333333333333)
        self.assertEqual(yout[2], 3)

    def test_resampleStepwiseAvg2(self):
        """Test resampleStepwise() averaging for another arbitrary case."""
        xin = [0, 1, 2, 3, 4, 5]
        yin = [3, 2, 5, 3, 4]
        xout = [0, 2, 3.5, 5]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 2.5)
        self.assertAlmostEqual(yout[1], 4.333333333333333)
        self.assertAlmostEqual(yout[2], 3.6666666666666665)

    def test_resampleStepwiseAvg3(self):
        """Test resampleStepwise() averaging for another arbitrary case."""
        xin = [0, 1, 2, 3, 4, 6]
        yin = [3, 2, 5, 3, 4]
        xout = [0, 2, 3.5, 6]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 2.5)
        self.assertAlmostEqual(yout[1], 4.333333333333333)
        self.assertEqual(yout[2], 3.8)

    def test_resampleStepwiseAvg4(self):
        """Test resampleStepwise() averaging for matching, but uneven intervals."""
        xin = [0, 3, 5, 6.777, 9.123]
        yin = [3.1, 2.2, 5.3, 3.4]
        xout = [0, 3, 5, 6.777, 9.123]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 3.1)
        self.assertEqual(yout[1], 2.2)
        self.assertEqual(yout[2], 5.3)
        self.assertEqual(yout[3], 3.4)

    def test_resampleStepwiseAvg5(self):
        """Test resampleStepwise() averaging for almost matching intervals."""
        xin = [0, 3, 5, 6.777, 9.123]
        yin = [3.1, 2.2, 5.3, 3.4]
        xout = [0, 5, 9.123]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 2.74)
        self.assertAlmostEqual(yout[1], 4.21889400921659)

    def test_resampleStepwiseAvg6(self):
        """Test resampleStepwise() averaging when the intervals don't line up."""
        # out bins beyond the input range average to 0
        xin = [0, 1, 2, 3, 4]
        yin = [11, 22, 33, 44]
        xout = [2, 3, 4, 5, 6]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 33)
        self.assertEqual(yout[1], 44)
        self.assertEqual(yout[2], 0)
        self.assertEqual(yout[3], 0)

    def test_resampleStepwiseAvg7(self):
        """Test resampleStepwise() averaging when the intervals don't line up."""
        xin = [2, 4, 6, 8, 10]
        yin = [11, 22, 33, 44]
        xout = [-1, 0, 1, 2, 3, 4]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 0)
        self.assertEqual(yout[1], 0)
        self.assertEqual(yout[2], 0)
        self.assertEqual(yout[3], 11)
        self.assertEqual(yout[4], 11)

    def test_resampleStepwiseSum0(self):
        """Test resampleStepwise() summing when in and out bins match."""
        # avg=False conserves the total (sum) across rebinning
        xin = [0, 1, 2, 13.3]
        yin = [4.76, 9.99, -123.456]
        xout = [0, 1, 2, 13.3]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertAlmostEqual(yout[0], 4.76)
        self.assertAlmostEqual(yout[1], 9.99)
        self.assertAlmostEqual(yout[2], -123.456)
        self.assertAlmostEqual(sum(yin), sum(yout))

    def test_resampleStepwiseSum1(self):
        """Test resampleStepwise() summing for one arbitrary case."""
        xin = [0, 1, 2, 3, 4]
        yin = [3, 2, 5, 3]
        xout = [0, 2, 3.5, 4]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 5)
        self.assertEqual(yout[1], 6.5)
        self.assertEqual(yout[2], 1.5)
        self.assertEqual(sum(yin), sum(yout))

    def test_resampleStepwiseSum2(self):
        """Test resampleStepwise() summing for another arbitrary case."""
        xin = [0, 1, 2, 3, 4, 5]
        yin = [3, 2, 5, 3, 4]
        xout = [0, 2, 3.5, 5]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 5)
        self.assertEqual(yout[1], 6.5)
        self.assertEqual(yout[2], 5.5)
        self.assertEqual(sum(yin), sum(yout))

    def test_resampleStepwiseSum3(self):
        """Test resampleStepwise() summing for another arbitrary case."""
        xin = [0, 1, 2, 3, 4, 6]
        yin = [3, 2, 5, 3, 4]
        xout = [0, 2, 3.5, 6]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 5)
        self.assertEqual(yout[1], 6.5)
        self.assertEqual(yout[2], 5.5)
        self.assertEqual(sum(yin), sum(yout))

    def test_resampleStepwiseSum4(self):
        """Test resampleStepwise() summing for matching, but uneven intervals."""
        xin = [0, 3, 5, 6.777, 9.123]
        yin = [3.1, 2.2, 5.3, 3.4]
        xout = [0, 3, 5, 6.777, 9.123]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 3.1)
        self.assertEqual(yout[1], 2.2)
        self.assertEqual(yout[2], 5.3)
        self.assertEqual(yout[3], 3.4)
        self.assertEqual(sum(yin), sum(yout))

    def test_resampleStepwiseSum5(self):
        """Test resampleStepwise() summing for almost matching intervals."""
        xin = [0, 3, 5, 6.777, 9.123]
        yin = [3.1, 2.2, 5.3, 3.4]
        xout = [0, 5, 9.123]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertAlmostEqual(yout[0], 5.3)
        self.assertAlmostEqual(yout[1], 8.7)
        self.assertAlmostEqual(sum(yin), sum(yout))

    def test_resampleStepwiseSum6(self):
        """Test resampleStepwise() summing when the intervals don't line up."""
        xin = [0, 1, 2, 3, 4]
        yin = [11, 22, 33, 44]
        xout = [2, 3, 4, 5, 6]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 33)
        self.assertEqual(yout[1], 44)
        self.assertEqual(yout[2], 0)
        self.assertEqual(yout[3], 0)

    def test_resampleStepwiseSum7(self):
        """Test resampleStepwise() summing when the intervals don't line up."""
        # the input bin [2,4] straddles two output bins, so its sum splits in half
        xin = [2, 4, 6, 8, 10]
        yin = [11, 22, 33, 44]
        xout = [-1, 0, 1, 2, 3, 4]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 0)
        self.assertEqual(yout[1], 0)
        self.assertEqual(yout[2], 0)
        self.assertAlmostEqual(yout[3], 11 / 2)
        self.assertAlmostEqual(yout[4], 11 / 2)

    def test_resampleStepwiseAvgAllNones(self):
        """Test resampleStepwise() averaging when the inputs are all None."""
        xin = [0, 1, 2, 13.3]
        yin = [None, None, None]
        xout = [0, 1, 2, 13.3]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertIsNone(yout[0])
        self.assertIsNone(yout[1])
        self.assertIsNone(yout[2])

    def test_resampleStepwiseAvgOneNone(self):
        """Test resampleStepwise() averaging when one input is None."""
        # None propagates only to the bins it overlaps
        xin = [0, 1, 2, 13.3]
        yin = [None, 1, 2]
        xout = [0, 1, 2, 13.3]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertIsNone(yout[0])
        self.assertEqual(yout[1], 1)
        self.assertEqual(yout[2], 2)

    def test_resampleStepwiseSumAllNones(self):
        """Test resampleStepwise() summing when the inputs are all None."""
        xin = [0, 1, 2, 13.3]
        yin = [None, None, None]
        xout = [0, 1, 2, 13.3]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertIsNone(yout[0])
        self.assertIsNone(yout[1])
        self.assertIsNone(yout[2])

    def test_resampleStepwiseSumOneNone(self):
        """Test resampleStepwise() summing when one inputs is None."""
        xin = [0, 1, 2, 13.3]
        yin = [None, 1, 2]
        xout = [0, 1, 2, 13.3]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertIsNone(yout[0])
        self.assertEqual(yout[1], 1)
        self.assertEqual(yout[2], 2)

    def test_resampleStepwiseAvgComplicatedNone(self):
        """Test resampleStepwise() averaging with a None value, when the intervals don't line up."""
        xin = [2, 4, 6, 8, 10]
        yin = [11, None, 33, 44]
        xout = [-1, 0, 1, 2, 4, 7, 9]
        yout = resampleStepwise(xin, yin, xout)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertEqual(yout[0], 0)
        self.assertEqual(yout[1], 0)
        self.assertEqual(yout[2], 0)
        self.assertEqual(yout[3], 11)
        self.assertIsNone(yout[4])
        self.assertEqual(yout[5], 38.5)

    def test_resampleStepwiseAvgNpArray(self):
        """Test resampleStepwise() averaging when some of the values are arrays."""
        # mixed scalar/array y-values: the array bins combine element-wise
        xin = [0, 1, 2, 3, 4]
        yin = [11, np.array([1, 1]), np.array([2, 2]), 44]
        xout = [2, 4, 5, 6, 7]
        yout = resampleStepwise(xin, yin, xout, avg=True)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertTrue(isinstance(yout[0], type(yin[1])))
        self.assertEqual(yout[0][0], 23.0)
        self.assertEqual(yout[0][1], 23.0)
        self.assertEqual(yout[1], 0)
        self.assertEqual(yout[2], 0)
        self.assertEqual(yout[3], 0)

    def test_resampleStepwiseAvgNpArrayAverage(self):
        """Test resampleStepwise() summing when some of the values are arrays."""
        xin = [0, 1, 2, 3, 4]
        yin = [11, np.array([1, 1]), np.array([2, 2]), 44]
        xout = [2, 4, 5, 6, 7]
        yout = resampleStepwise(xin, yin, xout, avg=False)
        self.assertEqual(len(yout), len(xout) - 1)
        self.assertTrue(isinstance(yout[0], type(yin[1])))
        self.assertEqual(yout[0][0], 46.0)
        self.assertEqual(yout[0][1], 46.0)
        self.assertEqual(yout[1], 0)
        self.assertEqual(yout[2], 0)
        self.assertEqual(yout[3], 0)

    def test_rotateXY(self):
        x = [1.0, -1.0]
        y = [1.0, 1.0]
        # test operation on scalar
        # (1,1) rotated 45 degrees CCW lands on the y-axis at distance sqrt(2)
        xr, yr = rotateXY(x[0], y[0], 45.0)
        self.assertAlmostEqual(xr, 0.0)
        self.assertAlmostEqual(yr, sqrt(2))
        xr, yr = rotateXY(x[1], y[1], 45.0)
        self.assertAlmostEqual(xr, -sqrt(2))
        self.assertAlmostEqual(yr, 0.0)
        # test operation on list
        xr, yr = rotateXY(x, y, 45.0)
        self.assertAlmostEqual(xr[0], 0.0)
        self.assertAlmostEqual(yr[0], sqrt(2))
        self.assertAlmostEqual(xr[1], -sqrt(2))
        self.assertAlmostEqual(yr[1], 0.0)


================================================
FILE: armi/utils/tests/test_outputCache.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the output cache tools."""

import os
import time
import unittest

from armi.utils import directoryChangers, outputCache


class TestOutputCache(unittest.TestCase):
    """Tests for the output-cache helpers (hashing, manifests, cache retrieval)."""

    def _buildOutputCache(self, arbitraryString):
        """
        Helper method, to set up a semi-stupid output cache directory

        It will have one file and a manifest. It is expected this will be run from within a
        self-cleaning temp dir.
        """
        # create some temp file
        outFile = "something_{0}.txt".format(arbitraryString)
        with open(outFile, "w") as f:
            f.write("test")
        # create an output location
        os.mkdir(arbitraryString)
        # do the work: call the function that creates the manifest
        outputCache._makeOutputManifest([outFile], arbitraryString)

    def test_hashFiles(self):
        with directoryChangers.TemporaryDirectoryChanger() as _:
            files = ["test_hashFiles1.txt", "test_hashFiles2.txt"]
            for fileName in files:
                with open(fileName, "w") as f:
                    f.write("hi")
            # hash is over file contents, so identical "hi" files give a stable digest
            hashed = outputCache._hashFiles(files)
            self.assertEqual(hashed, "e9f5713dec55d727bb35392cec6190ce")

    def test_deleteCache(self):
        with directoryChangers.TemporaryDirectoryChanger() as _:
            outDir = "snapshotOutput_Cache"
            self.assertFalse(os.path.exists(outDir))
            os.mkdir(outDir)
            with open(os.path.join(outDir, "test_deleteCache2.txt"), "w") as f:
                f.write("hi there")
            self.assertTrue(os.path.exists(outDir))
            # NOTE(review): sleep presumably lets file mtimes settle before deletion
            # logic inspects them — confirm against outputCache.deleteCache.
            time.sleep(2)
            outputCache.deleteCache(outDir)
            self.assertFalse(os.path.exists(outDir))

    def test_getCachedFolder(self):
        with directoryChangers.TemporaryDirectoryChanger() as _:
            exePath = "/path/to/what.exe"
            inputPaths = ["/path/to/something.txt", "/path/what/some.ini"]
            cacheDir = "/tmp/thing/what/"
            # nonexistent executable -> error
            with self.assertRaises(FileNotFoundError):
                _ = outputCache._getCachedFolder(exePath, inputPaths, cacheDir)
            fakeExe = "what_getCachedFolder.exe"
            with open(fakeExe, "w") as f:
                f.write("hi")
            # executable exists but inputs still don't -> error
            with self.assertRaises(FileNotFoundError):
                _ = outputCache._getCachedFolder(fakeExe, inputPaths, cacheDir)
            fakeIni = "fake_getCachedFolder.ini"
            with open(fakeIni, "w") as f:
                f.write("hey")
            folder = outputCache._getCachedFolder(fakeExe, [fakeIni], cacheDir)
            self.assertTrue(folder.startswith("/tmp/thing/what/what_getCachedFolder"))

    def test_makeOutputManifest(self):
        with directoryChangers.TemporaryDirectoryChanger() as _:
            # validate manifest doesn't exist yet
            manifest = "test_makeOutputManifest/CRC-manifest.json"
            self.assertFalse(os.path.exists(manifest))
            # create outputCache dir and manifest
            self._buildOutputCache("test_makeOutputManifest")
            # validate manifest was created
            manifest = "test_makeOutputManifest/CRC-manifest.json"
            self.assertTrue(os.path.exists(manifest))

    def test_retrieveOutput(self):
        with directoryChangers.TemporaryDirectoryChanger() as _:
            # create outputCache dir and manifest
            cacheDir = "test_retrieveOutput_Output_Cache"
            self._buildOutputCache(cacheDir)
            # validate manifest was created
            manifest = "{0}/CRC-manifest.json".format(cacheDir)
            self.assertTrue(os.path.exists(manifest))
            # create a dummy file (not executable), to stand in for the executable
            fakeExe = "what_{0}.exe".format(cacheDir)
            with open(fakeExe, "w") as f:
                f.write("hi")
            # create folder to retrieve to
            inputPaths = ["something_{0}.txt".format(cacheDir)]
            newFolder = outputCache._getCachedFolder(fakeExe, inputPaths, cacheDir)
            os.makedirs(newFolder)
            # throw a new manifest into the new out cache
            with open(os.path.join(newFolder, "CRC-manifest.json"), "w") as f:
                f.write(open(manifest, "r").read())
            # attempt to retrieve some output from dummy caches
            result = outputCache.retrieveOutput(fakeExe, inputPaths, cacheDir, newFolder)
            self.assertFalse(result)


================================================
FILE: armi/utils/tests/test_parsing.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for parsing."""

import unittest

from armi.utils import parsing


class LiteralEvalTest(unittest.TestCase):
    """Tests for tryLiteralEval/parseValue string-to-Python-object conversion."""

    def test_tryLiteralEval(self):
        # strings are evaluated as Python literals; already-parsed values pass through
        self.assertEqual(parsing.tryLiteralEval("1"), 1)
        self.assertEqual(parsing.tryLiteralEval(1), 1)
        self.assertEqual(parsing.tryLiteralEval("1.0"), 1.0)
        self.assertEqual(parsing.tryLiteralEval(1.0), 1.0)
        self.assertEqual(parsing.tryLiteralEval(1), 1)
        self.assertEqual(
            parsing.tryLiteralEval("['apple','banana','mango']"),
            ["apple", "banana", "mango"],
        )
        self.assertEqual(
            parsing.tryLiteralEval(["apple", "banana", "mango"]),
            ["apple", "banana", "mango"],
        )
        self.assertEqual(
            parsing.tryLiteralEval("{'apple':1,'banana':2,'mango':3}"),
            {"apple": 1, "banana": 2, "mango": 3},
        )
        self.assertEqual(
            parsing.tryLiteralEval({"apple": 1, "banana": 2, "mango": 3}),
            {"apple": 1, "banana": 2, "mango": 3},
        )
        self.assertEqual(parsing.tryLiteralEval("(1,2)"), (1, 2))
        self.assertEqual(parsing.tryLiteralEval((1, 2)), (1, 2))
        # a py2-style unicode literal still evaluates to a plain string
        self.assertEqual(parsing.tryLiteralEval("u'apple'"), "apple")
        # non-literal strings fall through unchanged
        self.assertEqual(parsing.tryLiteralEval("apple"), "apple")
        # NOTE(review): duplicate of the previous assertion; likely unintentional.
        self.assertEqual(parsing.tryLiteralEval("apple"), "apple")
        # non-evaluable objects (e.g. a type) also pass through unchanged
        self.assertEqual(parsing.tryLiteralEval(tuple), tuple)

    def test_parseValue(self):
        # second argument is the required result type
        self.assertEqual(parsing.parseValue("5", int), 5)
        self.assertEqual(parsing.parseValue(5, int), 5)
        self.assertEqual(parsing.parseValue("5", float), 5.0)
        self.assertEqual(parsing.parseValue("True", bool), True)
        self.assertEqual(
            parsing.parseValue("['apple','banana','mango']", list),
            ["apple", "banana", "mango"],
        )
        self.assertEqual(
            parsing.parseValue({"apple": 1, "banana": 2, "mango": 3}, dict),
            {"apple": 1, "banana": 2, "mango": 3},
        )
        self.assertEqual(
            parsing.parseValue("{'apple':1,'banana':2,'mango':3}", dict),
            {"apple": 1, "banana": 2, "mango": 3},
        )
        self.assertEqual(parsing.parseValue("(1,2)", tuple), (1, 2))
        # third positional arg maps None to the type's default (0 / False);
        # fourth positional arg disables that mapping so None is returned as-is
        self.assertEqual(parsing.parseValue("None", int, True), 0)
        self.assertEqual(parsing.parseValue(None, int, True), 0)
        self.assertEqual(parsing.parseValue("None", bool, True), False)
        self.assertEqual(parsing.parseValue(None, bool, True), False)
        self.assertEqual(parsing.parseValue(None, bool, True, False), None)
        # wrong result type raises
        with self.assertRaises(TypeError):
            parsing.parseValue("5", str)
        with self.assertRaises(ValueError):
            parsing.parseValue("5", bool)


================================================
FILE: armi/utils/tests/test_pathTools.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for pathTools."""

import os
import time
import types
import unittest

from armi import context
from armi.tests import mockRunLogs
from armi.utils import pathTools
from armi.utils.directoryChangers import TemporaryDirectoryChanger

# Directory containing this test module; used to import this file back as a module
THIS_DIR = os.path.dirname(__file__)


class PathToolsTests(unittest.TestCase):
    """Tests for pathTools: copy helpers, module importing, and path cleanup."""

    def test_copyOrWarnFile(self):
        with TemporaryDirectoryChanger():
            # Test a successful copy
            path = "test.txt"
            pathCopy = "testcopy.txt"
            with open(path, "w") as f1:
                f1.write("test")
            pathTools.copyOrWarn("Test File", path, pathCopy)
            self.assertTrue(os.path.exists(pathCopy))
            # Test a non-existent file
            # copyOrWarn logs a warning instead of raising
            with mockRunLogs.BufferLog() as mock:
                pathTools.copyOrWarn("Test File", "FileDoesntExist.txt", pathCopy)
                self.assertIn("Could not copy", mock.getStdout())

    def test_copyOrWarnDir(self):
        with TemporaryDirectoryChanger():
            # Test a successful copy
            pathDir = "testDir"
            path = os.path.join(pathDir, "test.txt")
            pathDirCopy = "testcopy"
            os.mkdir(pathDir)
            with open(path, "w") as f1:
                f1.write("test")
            pathTools.copyOrWarn("Test File", pathDir, pathDirCopy)
            self.assertTrue(os.path.exists(pathDirCopy))
            self.assertTrue(os.path.exists(os.path.join(pathDirCopy, "test.txt")))
            # Test a non-existent file
            with mockRunLogs.BufferLog() as mock:
                pathTools.copyOrWarn("Test File", "DirDoesntExist", pathDirCopy)
                self.assertIn("Could not copy", mock.getStdout())

    def test_separateModuleAndAttribute(self):
        # a path with no ":attribute" suffix is invalid
        self.assertRaises(ValueError, pathTools.separateModuleAndAttribute, r"path/with/no/colon")
        self.assertEqual(
            (r"aPath/file.py", "MyClass"),
            pathTools.separateModuleAndAttribute(r"aPath/file.py:MyClass"),
        )
        # testing windows stuff mapped drives since they have more than 1 colon
        self.assertEqual(
            (r"c:/aPath/file.py", "MyClass"),
            pathTools.separateModuleAndAttribute(r"c:/aPath/file.py:MyClass"),
        )
        # not what we want but important to demonstrate what you get when no module
        # attribute is defined.
        self.assertEqual(
            ("c", r"/aPath/file.py"),
            pathTools.separateModuleAndAttribute(r"c:/aPath/file.py"),
        )

    def test_importCustomModule(self):
        """Test that importCustomPyModule is usable just like any other module."""
        # NOTE(review): if __file__ is absolute, os.path.join(THIS_DIR, __file__)
        # returns __file__ unchanged — presumably the join covers relative
        # invocations; confirm against how the test runner sets __file__.
        module = pathTools.importCustomPyModule(os.path.join(THIS_DIR, __file__))
        self.assertIsInstance(module, types.ModuleType)
        self.assertIn("THIS_DIR", module.__dict__)
        # test that this class is present in the import
        self.assertIn(self.__class__.__name__, module.__dict__)

    def test_moduleAndAttributeExist(self):
        """Test that determination of existence of module attribute works."""
        # test that no `:` doesn't raise an exception
        self.assertFalse(pathTools.moduleAndAttributeExist(r"path/that/not/exist.py"))
        # test that multiple `:` doesn't raise an exception
        self.assertFalse(pathTools.moduleAndAttributeExist(r"c:/path/that/not/exist.py:MyClass"))
        thisFile = os.path.join(THIS_DIR, __file__)
        # no module attribute specified
        self.assertFalse(pathTools.moduleAndAttributeExist(thisFile))
        self.assertFalse(pathTools.moduleAndAttributeExist(thisFile + ":doesntExist"))
        self.assertTrue(pathTools.moduleAndAttributeExist(thisFile + ":THIS_DIR"))
        self.assertTrue(pathTools.moduleAndAttributeExist(thisFile + ":PathToolsTests"))

    @unittest.skipUnless(context.MPI_RANK == 0, "test only on root node")
    def test_cleanPathNoMpi(self):
        """Simple tests of cleanPath(), in the no-MPI scenario."""
        with TemporaryDirectoryChanger():
            # TEST 0: File is not safe to delete, due not being a temp dir or under FAST_PATH
            filePath0 = "test0_cleanPathNoMpi"
            open(filePath0, "w").write("something")
            self.assertTrue(os.path.exists(filePath0))
            with self.assertRaises(Exception):
                pathTools.cleanPath(filePath0, mpiRank=0)
            # TEST 1: Delete a single file under FAST_PATH
            filePath1 = os.path.join(context.getFastPath(), "test1_cleanPathNoMpi")
            open(filePath1, "w").write("something")
            self.assertTrue(os.path.exists(filePath1))
            pathTools.cleanPath(filePath1, mpiRank=0)
            self.assertFalse(os.path.exists(filePath1))
            # TEST 2: Delete an empty directory under FAST_PATH
            dir2 = os.path.join(context.getFastPath(), "letitgo")
            os.mkdir(dir2)
            self.assertTrue(os.path.exists(dir2))
            pathTools.cleanPath(dir2, mpiRank=0)
            self.assertFalse(os.path.exists(dir2))
            # TEST 3: Delete an empty directory with forceClean=True
            dir3 = "noyoureadirectory"
            os.mkdir(dir3)
            self.assertTrue(os.path.exists(dir3))
            pathTools.cleanPath(dir3, mpiRank=0, forceClean=True)
            self.assertFalse(os.path.exists(dir3))
            # TEST 4: Delete a directory with two files inside with forceClean=True
            dir4 = "dirplease"
            os.mkdir(dir4)
            open(os.path.join(dir4, "file1.txt"), "w").write("something1")
            open(os.path.join(dir4, "file2.txt"), "w").write("something2")
            # delete the directory and test
            self.assertTrue(os.path.exists(dir4))
            self.assertTrue(os.path.exists(os.path.join(dir4, "file1.txt")))
            self.assertTrue(os.path.exists(os.path.join(dir4, "file2.txt")))
            pathTools.cleanPath(dir4, mpiRank=0, forceClean=True)
            self.assertFalse(os.path.exists(dir4))

    def test_isFilePathNewer(self):
        with TemporaryDirectoryChanger():
            path1 = "test_isFilePathNewer1.txt"
            with open(path1, "w") as f1:
                f1.write("test1")
            # ensure the two files get distinct mtimes
            time.sleep(1)
            path2 = "test_isFilePathNewer2.txt"
            with open(path2, "w") as f2:
                f2.write("test2")
            self.assertFalse(pathTools.isFilePathNewer(path1, path2))
            self.assertTrue(pathTools.isFilePathNewer(path2, path1))

    def test_isAccessible(self):
        with TemporaryDirectoryChanger():
            path1 = "test_isAccessible.txt"
            with open(path1, "w") as f1:
                f1.write("test")
            self.assertTrue(pathTools.isAccessible(path1))


================================================
FILE: armi/utils/tests/test_plotting.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for basic plotting tools.""" import os import shutil import unittest from glob import glob from unittest.mock import patch import matplotlib.pyplot as plt import numpy as np from armi import settings from armi.nuclearDataIO.cccc import isotxs from armi.reactor import blueprints, reactors from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors from armi.testing import TESTING_ROOT from armi.tests import ISOAA_PATH, TEST_ROOT, getEmptyHexReactor from armi.utils import plotting from armi.utils.directoryChangers import TemporaryDirectoryChanger class TestPlotting(unittest.TestCase): """ Test and demonstrate some plotting capabilities of ARMI. Notes ----- These tests don't do a great job of making sure the plot appears correctly, but they do check that the lines of code run, and that an image is produced, and demonstrate how they are meant to be called. 
""" @classmethod def setUpClass(cls): cls.o, cls.r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") def test_plotDepthMap(self): """Indirectly tests plot face map.""" with TemporaryDirectoryChanger(): # set some params to visualize for i, b in enumerate(self.o.r.core.iterBlocks()): b.p.percentBu = i / 100 fName = plotting.plotBlockDepthMap(self.r.core, param="percentBu", fName="depthMapPlot.png", depthIndex=2) self._checkFileExists(fName) # catch an edge case error (no matching assemblies) with self.assertRaises(ValueError): r = getEmptyHexReactor() plotting.plotBlockDepthMap(r.core) def test_plotFaceMap(self): """Indirectly tests plot face map.""" with TemporaryDirectoryChanger(): for i, b in enumerate(self.o.r.core.iterBlocks()): b.p.percentBu = i / 100 # make sure some of the plot files exist fName = plotting.plotFaceMap(self.r.core, param="percentBu", fName="faceMapPlot0.png", makeColorBar=True) self._checkFileExists(fName) fName = plotting.plotFaceMap(self.r.core, param="percentBu", fName="faceMapPlot1.png", vals="average") self._checkFileExists(fName) # catch an edge case error (bad val name) with self.assertRaises(ValueError): plotting.plotFaceMap(self.r.core, param="percentBu", fName="faceMapPlot2.png", vals="whoops") # this should not throw an error plotting.close() def test_plotAssemblyTypes(self): with TemporaryDirectoryChanger(): plotPath = "coreAssemblyTypes1.png" plotting.plotAssemblyTypes(list(self.r.core.parent.blueprints.assemblies.values()), plotPath) self._checkFileExists(plotPath) if os.path.exists(plotPath): os.remove(plotPath) plotPath = "coreAssemblyTypes2.png" fig = plotting.plotAssemblyTypes( list(self.r.core.parent.blueprints.assemblies.values()), plotPath, yAxisLabel="y axis", title="title", ) self.assertFalse(fig.subfigures(1, 1).subplots().has_data()) self.assertEqual(fig.axes[0]._children[0].xy, (0.5, 0)) self._checkFileExists(plotPath) for _ in range(3): if os.path.exists(plotPath): 
os.remove(plotPath) def test_plotRadialReactorLayouts(self): figs = plotting.plotRadialReactorLayouts(self.r) self.assertEqual(len(figs), 1) self.assertEqual(figs[0].axes[0]._children[0].xy, (0.5, 0)) plotPath = "coreAssemblyTypes1-rank0.png" for _ in range(3): if os.path.exists(plotPath): os.remove(plotPath) def test_plotScatterMatrix(self): plotPath = "test_plotScatterMatrix.png" lib = isotxs.readBinary(ISOAA_PATH) u235 = lib.getNuclide("U235", "AA") scatterMatrix = u235.micros.inelasticScatter img = plotting.plotScatterMatrix(scatterMatrix, fName=plotPath) self.assertGreater(len(img.axes.get_children()), 10) self.assertLess(len(img.axes.get_children()), 30) self.assertTrue(img.axes.has_data()) for _ in range(3): if os.path.exists(plotPath): os.remove(plotPath) def test_plotBlocksInAssembly(self): _fig, ax = plt.subplots(figsize=(15, 15), dpi=300) xBlockLoc, yBlockHeights, yBlockAxMesh = plotting._plotBlocksInAssembly( ax, self.r.core.getFirstAssembly(Flags.FUEL), True, [], set(), 0.5, 5.6, True, hot=True, ) self.assertEqual(xBlockLoc, 0.5) self.assertEqual(yBlockHeights[0], 25.0) yBlockAxMesh = list(yBlockAxMesh)[0] self.assertIn(10.0, yBlockAxMesh) self.assertIn(25.0, yBlockAxMesh) self.assertIn(1, yBlockAxMesh) def test_plotBlockFlux(self): with TemporaryDirectoryChanger(): xslib = isotxs.readBinary(ISOAA_PATH) self.r.core.lib = xslib blocks = self.r.core.getBlocks() for b in blocks: b.p.mgFlux = range(33) plotting.plotBlockFlux(self.r.core, fName="flux.png", bList=blocks) self.assertTrue(os.path.exists("flux.png")) plotting.plotBlockFlux(self.r.core, fName="peak.png", bList=blocks, peak=True) self._checkFileExists("peak.png") plotting.plotBlockFlux( self.r.core, fName="bList2.png", bList=blocks, bList2=blocks, ) self._checkFileExists("bList2.png") def test_plotHexBlock(self): with TemporaryDirectoryChanger(): first_fuel_block = self.r.core.getFirstBlock(Flags.FUEL) first_fuel_block.autoCreateSpatialGrids(self.r.core.spatialGrid) 
plotting.plotBlockDiagram(first_fuel_block, "blockDiagram23.svg", True) self._checkFileExists("blockDiagram23.svg") def test_plotCartesianBlock(self): with TemporaryDirectoryChanger(): cs = settings.Settings(os.path.join(TESTING_ROOT, "reactors", "c5g7", "c5g7-settings.yaml")) blueprint = blueprints.loadFromCs(cs) _ = reactors.factory(cs, blueprint) for name, bDesign in blueprint.blockDesigns.items(): b = bDesign.construct(cs, blueprint, 0, 1, 1, "AA", {}) plotting.plotBlockDiagram(b, "{}.svg".format(name), True) self._checkFileExists("uo2.svg") self._checkFileExists("mox.svg") def _checkFileExists(self, fName): self.assertTrue(os.path.exists(fName)) class TestPatches(unittest.TestCase): """Test the ability to correctly make patches.""" @classmethod def setUpClass(cls): # Prepare the input files. This is important so the unit tests run from wherever they need to run from. cls.td = TemporaryDirectoryChanger() cls.td.__enter__() @classmethod def tearDownClass(cls): cls.td.__exit__(None, None, None) @patch("armi.utils.plotting.plt.figure") @patch("armi.utils.plotting.plt.savefig") def test_makeAssemPatches(self, mockSavefig, mockFigure): # mock up a flats-up version of the smallest test reactor for fPath in glob(os.path.join(TEST_ROOT, "smallestTestReactor", "*.yaml")): fName = os.path.basename(fPath) shutil.copyfile(fPath, fName) txt = open("refSmallestReactor.yaml", "r").read() txt = txt.replace("geom: hex_corners_up", "geom: hex") with open("refSmallestReactor.yaml", "w") as f: f.write(txt) # this one is flats-up with many assemblies in the core _, rHexFlatsUp = test_reactors.loadTestReactor(inputFilePath=".", inputFileName="armiRunSmallest.yaml") nAssems = len(rHexFlatsUp.core) self.assertEqual(nAssems, 1) patches = plotting._makeAssemPatches(rHexFlatsUp.core) self.assertEqual(len(patches), nAssems) # find the patch corresponding to the center assembly for pat in patches: if np.allclose(pat.xy, (0, 0)): break vertices = pat.get_verts() # there should be 1 more 
than the number of points in the shape self.assertEqual(len(vertices), 7) # for flats-up, the first vertex should have a y position of ~zero self.assertAlmostEqual(vertices[0][1], 0) # this one is corners-up, with only a single assembly _, rHexCornersUp = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml") nAssems = len(rHexCornersUp.core) self.assertEqual(nAssems, 1) patches = plotting._makeAssemPatches(rHexCornersUp.core) self.assertEqual(len(patches), 1) vertices = patches[0].get_verts() self.assertEqual(len(vertices), 7) # for corners-up, the first vertex should have an x position of ~zero self.assertAlmostEqual(vertices[0][0], 0) # this one is cartestian, with many assemblies in the core _, rCartesian = test_reactors.loadTestReactor(inputFileName="refTestCartesian.yaml") nAssems = len(rCartesian.core) self.assertGreater(nAssems, 1) patches = plotting._makeAssemPatches(rCartesian.core) self.assertEqual(nAssems, len(patches)) # Just pick a given patch and ensure that it is square-like. Orientation is not important here. vertices = patches[0].get_verts() self.assertEqual(len(vertices), 5) ================================================ FILE: armi/utils/tests/test_properties.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests of the properties class.""" import unittest from armi.utils import properties class ImmutableClass: myNum = properties.createImmutableProperty("myNum", "You must invoke the initialize() method", "My random number") def initialize(self, val): properties.unlockImmutableProperties(self) try: self.myNum = val finally: properties.lockImmutableProperties(self) class ImmutablePropertyTests(unittest.TestCase): def test_retreivingUnassignedValue(self): """Attempting to retreive an unassigned value should raise an error.""" ic = ImmutableClass() with self.assertRaises(properties.ImmutablePropertyError): print(ic.myNum) def test_noAssignImmutableProperty(self): """Cannot assign a value to an immutable property.""" ic = ImmutableClass() ic.myNum = 4.0 with self.assertRaises(properties.ImmutablePropertyError): ic.myNum = 2.2 self.assertEqual(ic.myNum, 4.0) def test_unlockImmutableReassignment(self): """Unlock does not permit reassignment of an immutable property.""" ic = ImmutableClass() ic.myNum = 7.7 with self.assertRaises(properties.ImmutablePropertyError): ic.initialize(3.4) self.assertEqual(ic.myNum, 7.7) ================================================ FILE: armi/utils/tests/test_reportPlotting.py ================================================ # Copyright 2020 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test plotting.""" import copy import os import unittest import numpy as np from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors from armi.tests import TEST_ROOT from armi.utils.directoryChangers import TemporaryDirectoryChanger from armi.utils.reportPlotting import ( _getPhysicalVals, createPlotMetaData, keffVsTime, movesVsCycle, plotAxialProfile, plotCoreOverviewRadar, valueVsTime, ) class TestRadar(unittest.TestCase): def setUp(self): self.o, self.r = test_reactors.loadTestReactor( TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml" ) self.td = TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_radar(self): """Test execution of radar plot. Note this has no asserts and is therefore a smoke test.""" r2 = copy.deepcopy(self.r) plotCoreOverviewRadar([self.r, r2], ["Label1", "Label2"]) self.assertTrue(os.path.exists("reactor_comparison.png")) def test_getPhysicalVals(self): dims, labels, vals = _getPhysicalVals(self.r) self.assertEqual(dims, "Dimensions") self.assertEqual(labels[0], "Cold fuel height") self.assertEqual(labels[1], "Fuel assems") self.assertEqual(labels[2], "Assem weight") self.assertEqual(labels[3], "Core radius") self.assertEqual(labels[4], "Core aspect ratio") self.assertEqual(labels[5], "Fissile mass") self.assertEqual(len(labels), 6) self.assertEqual(vals[0], 25.0) self.assertEqual(vals[1], 1) self.assertAlmostEqual(vals[2], 52474.8927038, delta=1e-5) self.assertEqual(vals[3], 16.8) self.assertAlmostEqual(vals[5], 4290.60340961, delta=1e-5) self.assertEqual(len(vals), 6) # this test will use getInputHeight() instead of getHeight() radius = self.r.core.getCoreRadius() avgHeight = 0 fuelA = self.r.core.getAssemblies(Flags.FUEL) for a in fuelA: for b in a.getBlocks(Flags.FUEL): avgHeight += b.getInputHeight() avgHeight /= len(fuelA) coreAspectRatio = (2 * radius) / avgHeight self.assertEqual(vals[4], coreAspectRatio) def 
test_createPlotMetaData(self): title = "test_createPlotMetaData" xLabel = "xLabel" yLabel = "yLabel" xTicks = [1, 2] yTicks = [3, 4] labels = ["a", "b"] meta = createPlotMetaData(title, xLabel, yLabel, xTicks, yTicks, labels) self.assertEqual(len(meta), 6) self.assertEqual(meta["title"], title) self.assertEqual(meta["xlabel"], xLabel) self.assertEqual(meta["ylabel"], yLabel) def test_plotAxialProfile(self): vals = list(range(1, 10, 2)) fName = "test_plotAxialProfile" xLabel = "xLabel" yLabel = "yLabel" xTicks = [1, 2] yTicks = [3, 4] labels = ["a", "b"] meta = createPlotMetaData(fName, xLabel, yLabel, xTicks, yTicks, labels) plotAxialProfile(vals, np.ones((5, 2)), fName, meta, nPlot=2) self.assertTrue(os.path.exists(fName + ".png")) def test_keffVsTime(self): t = list(range(12)) ext = "png" # plot with no keff function keffVsTime(self.r.name, t, t, keffUnc=[], extension=ext) self.assertTrue(os.path.exists("R-armiRunSmallest.keff.png")) self.assertGreater(os.path.getsize("R-armiRunSmallest.keff.png"), 0) # plot with a keff function keffVsTime(self.r.name, t, t, t, extension=ext) self.assertTrue(os.path.exists("R-armiRunSmallest.keff.png")) self.assertGreater(os.path.getsize("R-armiRunSmallest.keff.png"), 0) def test_valueVsTime(self): t = list(range(12)) ext = "png" valueVsTime(self.r.name, t, t, "val", "yaxis", "title", extension=ext) self.assertTrue(os.path.exists("R-armiRunSmallest.val.png")) self.assertGreater(os.path.getsize("R-armiRunSmallest.val.png"), 0) def test_movesVsCycle(self): name = "movesVsCycle" scalars = { "cycle": [1, 2, 3, 4], "maxBuF": [6, 7, 8, 9], "maxBuI": [6, 7, 8, 9], "maxDPA": [6, 7, 8, 9], "numMoves": [2, 2, 2, 2], "time": [1, 2, 3, 4], } figName = name + ".moves.png" movesVsCycle(name, scalars, "png") self.assertTrue(os.path.exists(figName)) self.assertGreater(os.path.getsize(figName), 0) ================================================ FILE: armi/utils/tests/test_tabulate.py ================================================ # Copyright 
2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tabulate.

This file started out as the MIT-licensed "tabulate". Though we have made, and will continue to
make many arbitrary changes as we need. Thanks to the tabulate team.

https://github.com/astanin/python-tabulate
"""

import unittest
from collections import OrderedDict, UserDict, defaultdict, namedtuple
from dataclasses import dataclass
from datetime import datetime

import numpy as np

from armi.utils.tabulate import (
    SEPARATING_LINE,
    _alignCellVeritically,
    _alignColumn,
    _bool,
    _buildLine,
    _buildRow,
    _format,
    _isMultiline,
    _multilineWidth,
    _normalizeTabularData,
    _tableFormats,
    _type,
    _visibleWidth,
    _wrapTextToColWidths,
    tabulate,
    tabulateFormats,
)


class TestTabulateAPI(unittest.TestCase):
    def test_tabulateFormats(self):
        """API: tabulateFormats is a list of strings."""
        supported = tabulateFormats
        self.assertEqual(type(supported), list)
        for fmt in supported:
            self.assertEqual(type(fmt), str)


class TestTabulateInputs(unittest.TestCase):
    """Tests of the many tabular data shapes tabulate() accepts as input."""

    def test_iterableOfEmpties(self):
        """Input: test various empty inputs."""
        ii = iter(map(lambda x: iter(x), []))
        result = tabulate(ii, "firstrow")
        self.assertEqual("", result)

        ij = iter(map(lambda x: iter(x), ["abcde"]))
        expected = "\n".join(
            [
                "a b c d e",
                "--- --- --- --- ---",
            ]
        )
        result = tabulate(ij, "firstrow")
        self.assertEqual(expected, result)

        ik = iter([])
        expected = "\n".join(
            [
                "a b c",
                "--- --- ---",
            ]
        )
        result = tabulate(ik, "abc")
        self.assertEqual(expected, result)

    def test_iterableOfIterables(self):
        """Input: an iterable of iterables."""
        ii = iter(map(lambda x: iter(x), [range(5), range(5, 0, -1)]))
        expected = "\n".join(["- - - - -", "0 1 2 3 4", "5 4 3 2 1", "- - - - -"])
        result = tabulate(ii, headersAlign="center")
        self.assertEqual(expected, result)

    def test_iterableOfIterablesHeaders(self):
        """Input: an iterable of iterables with headers."""
        ii = iter(map(lambda x: iter(x), [range(5), range(5, 0, -1)]))
        expected = "\n".join(
            [
                " a b c d e",
                "--- --- --- --- ---",
                " 0 1 2 3 4",
                " 5 4 3 2 1",
            ]
        )
        result = tabulate(ii, "abcde")
        self.assertEqual(expected, result)

    def test_iterableOfIterablesFirstrow(self):
        """Input: an iterable of iterables with the first row as headers."""
        ii = iter(map(lambda x: iter(x), ["abcde", range(5), range(5, 0, -1)]))
        expected = "\n".join(
            [
                " a b c d e",
                "--- --- --- --- ---",
                " 0 1 2 3 4",
                " 5 4 3 2 1",
            ]
        )
        result = tabulate(ii, "firstrow")
        self.assertEqual(expected, result)

    def test_listOfLists(self):
        """Input: a list of lists with headers."""
        ll = [["a", "one", 1], ["b", "two", None]]
        expected = "\n".join(
            [
                " string number",
                "-- -------- --------",
                "a one 1",
                "b two",
            ]
        )
        result = tabulate(ll, headers=["string", "number"])
        self.assertEqual(expected, result)

    def test_listOfListsFirstrow(self):
        """Input: a list of lists with the first row as headers."""
        ll = [["string", "number"], ["a", "one", 1], ["b", "two", None]]
        expected = "\n".join(
            [
                " string number",
                "-- -------- --------",
                "a one 1",
                "b two",
            ]
        )
        result = tabulate(ll, headers="firstrow")
        self.assertEqual(expected, result)

    def test_listOfListsKeys(self):
        """Input: a list of lists with column indices as headers."""
        ll = [["a", "one", 1], ["b", "two", None]]
        expected = "\n".join(["0 1 2", "--- --- ---", "a one 1", "b two"])
        result = tabulate(ll, headers="keys")
        self.assertEqual(expected, result)

    def test_dictLike(self):
        """Input: a dict of iterables with keys as headers."""
        # columns should be padded with None, keys should be used as headers
        dd = {"a": range(3), "b": range(101, 105)}
        # keys' order (hence columns' order) is not deterministic in Python 3
        # => we have to consider both possible results as valid
        expected1 = "\n".join([" a b", "--- ---", " 0 101", " 1 102", " 2 103", " 104"])
        result = tabulate(dd, "keys")
        self.assertEqual(result, expected1)

    def test_numpy2d(self):
        """Input: a 2D NumPy array with headers."""
        na = (np.arange(1, 10, dtype=np.float32).reshape((3, 3)) ** 3) * 0.5
        expected = "\n".join(
            [
                " a b c",
                "----- ----- -----",
                " 0.5 4 13.5",
                " 32 62.5 108",
                "171.5 256 364.5",
            ]
        )
        result = tabulate(na, ["a", "b", "c"])
        self.assertEqual(expected, result)

    def test_numpy2dFirstrow(self):
        """Input: a 2D NumPy array with the first row as headers."""
        na = np.arange(1, 10, dtype=np.int32).reshape((3, 3)) ** 3
        expected = "\n".join([" 1 8 27", "--- --- ----", " 64 125 216", "343 512 729"])
        result = tabulate(na, headers="firstrow")
        self.assertEqual(expected, result)

    def test_numpy2dKeys(self):
        """Input: a 2D NumPy array with column indices as headers."""
        na = (np.arange(1, 10, dtype=np.float32).reshape((3, 3)) ** 3) * 0.5
        expected = "\n".join(
            [
                " 0 1 2",
                "----- ----- -----",
                " 0.5 4 13.5",
                " 32 62.5 108",
                "171.5 256 364.5",
            ]
        )
        result = tabulate(na, headers="keys")
        self.assertEqual(expected, result)

    def test_numpyRecordArray(self):
        """Input: a 2D NumPy record array without header."""
        na = np.asarray(
            [("Alice", 23, 169.5), ("Bob", 27, 175.0)],
            dtype={
                "names": ["name", "age", "height"],
                "formats": ["S32", "uint8", "float32"],
            },
        )
        expected = "\n".join(
            [
                "----- -- -----",
                "Alice 23 169.5",
                "Bob 27 175",
                "----- -- -----",
            ]
        )
        result = tabulate(na)
        self.assertEqual(expected, result)

    def test_numpyRecordArrayKeys(self):
        """Input: a 2D NumPy record array with column names as headers."""
        na = np.asarray(
            [("Alice", 23, 169.5), ("Bob", 27, 175.0)],
            dtype={
                "names": ["name", "age", "height"],
                "formats": ["S32", "uint8", "float32"],
            },
        )
        expected = "\n".join(
            [
                "name age height",
                "------ ----- --------",
                "Alice 23 169.5",
                "Bob 27 175",
            ]
        )
        result = tabulate(na, headers="keys")
        self.assertEqual(expected, result)

    def test_numpyRecordArrayHeaders(self):
        """Input: a 2D NumPy record array with user-supplied headers."""
        na = np.asarray(
            [("Alice", 23, 169.5), ("Bob", 27, 175.0)],
            dtype={
                "names": ["name", "age", "height"],
                "formats": ["S32", "uint8", "float32"],
            },
        )
        expected = "\n".join(
            [
                "person years cm",
                "-------- ------- -----",
                "Alice 23 169.5",
                "Bob 27 175",
            ]
        )
        result = tabulate(na, headers=["person", "years", "cm"])
        self.assertEqual(expected, result)

    def test_listOfNamedtuples(self):
        """Input: a list of named tuples with field names as headers."""
        NT = namedtuple("NT", ["foo", "bar"])
        lt = [NT(1, 2), NT(3, 4)]
        expected = "\n".join(["- -", "1 2", "3 4", "- -"])
        result = tabulate(lt)
        self.assertEqual(expected, result)

    def test_listOfNamedtuplesKeys(self):
        """Input: a list of named tuples with field names as headers."""
        NT = namedtuple("NT", ["foo", "bar"])
        lt = [NT(1, 2), NT(3, 4)]
        expected = "\n".join([" foo bar", "----- -----", " 1 2", " 3 4"])
        result = tabulate(lt, headers="keys")
        self.assertEqual(expected, result)

    def test_listOfDicts(self):
        """Input: a list of dictionaries."""
        lod = [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}]
        expected1 = "\n".join(["- -", "1 2", "3 4", "- -"])
        expected2 = "\n".join(["- -", "2 1", "4 3", "- -"])
        result = tabulate(lod)
        self.assertIn(result, [expected1, expected2])

    def test_listOfUserdicts(self):
        """Input: a list of UserDicts."""
        lod = [UserDict(foo=1, bar=2), UserDict(foo=3, bar=4)]
        expected1 = "\n".join(["- -", "1 2", "3 4", "- -"])
        expected2 = "\n".join(["- -", "2 1", "4 3", "- -"])
        result = tabulate(lod)
        self.assertIn(result, [expected1, expected2])

    def test_listOfDictsKeys(self):
        """Input: a list of dictionaries, with keys as headers."""
        lod = [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}]
        expected1 = "\n".join([" foo bar", "----- -----", " 1 2", " 3 4"])
        expected2 = "\n".join([" bar foo", "----- -----", " 2 1", " 4 3"])
        result = tabulate(lod, headers="keys")
        self.assertIn(result, [expected1, expected2])

    def test_listOfUserdictsKeys(self):
        """Input: a list of UserDicts."""
        lod = [UserDict(foo=1, bar=2), UserDict(foo=3, bar=4)]
        expected1 = "\n".join([" foo bar", "----- -----", " 1 2", " 3 4"])
        expected2 = "\n".join([" bar foo", "----- -----", " 2 1", " 4 3"])
        result = tabulate(lod, headers="keys")
        self.assertIn(result, [expected1, expected2])

    def test_listOfDictsMissingKeys(self):
        """Input: a list of dictionaries, with missing keys."""
        lod = [{"foo": 1}, {"bar": 2}, {"foo": 4, "baz": 3}]
        expected = "\n".join(
            [
                " foo bar baz",
                "----- ----- -----",
                " 1",
                " 2",
                " 4 3",
            ]
        )
        result = tabulate(lod, headers="keys")
        self.assertEqual(expected, result)

    def test_listOfDictsFirstrow(self):
        """Input: a list of dictionaries, with the first dict as headers."""
        lod = [{"foo": "FOO", "bar": "BAR"}, {"foo": 3, "bar": 4, "baz": 5}]
        # if some key is missing in the first dict, use the key name instead
        expected1 = "\n".join([" FOO BAR baz", "----- ----- -----", " 3 4 5"])
        expected2 = "\n".join([" BAR FOO baz", "----- ----- -----", " 4 3 5"])
        result = tabulate(lod, headers="firstrow")
        self.assertIn(result, [expected1, expected2])

    def test_listOfDictsDictOfHeaders(self):
        """Input: a dict of user headers for a list of dicts."""
        table = [{"letters": "ABCDE", "digits": 12345}]
        headers = {"digits": "DIGITS", "letters": "LETTERS"}
        expected1 = "\n".join([" DIGITS LETTERS", "-------- ---------", " 12345 ABCDE"])
        expected2 = "\n".join(["LETTERS DIGITS", "--------- --------", "ABCDE 12345"])
        result = tabulate(table, headers=headers)
        self.assertIn(result, [expected1, expected2])

    def test_listOfDictsListOfHeaders(self):
        """Input: ValueError on a list of headers with a list of dicts."""
        table = [{"letters": "ABCDE", "digits": 12345}]
        headers = ["DIGITS", "LETTERS"]
        with self.assertRaises(ValueError):
            tabulate(table, headers=headers)

    def test_listOfOrdereddicts(self):
        """Input: a list of OrderedDicts."""
        od = OrderedDict([("b", 1), ("a", 2)])
        lod = [od, od]
        expected = "\n".join([" b a", "--- ---", " 1 2", " 1 2"])
        result = tabulate(lod, headers="keys")
        self.assertEqual(expected, result)

    def test_listBytes(self):
        """Input: a list of bytes."""
        lb = [["你好".encode("utf-8")], ["你好"]]
        expected = "\n".join(
            [
                "bytes",
                "---------------------------",
                r"b'\xe4\xbd\xa0\xe5\xa5\xbd'",
                "你好",
            ]
        )
        result = tabulate(lb, headers=["bytes"])
        self.assertEqual(expected, result)

    def test_tightCouplingExample(self):
        """Input: Real world-ish example from tight coupling."""
        # the two examples below should both produce the same output:
        border = "-- ------------------------------ -------------- ----------------------------"
        expected = "\n".join(
            [
                border,
                " criticalCrIteration: keffUnc dif3d: power thInterface: THavgCladTemp",
                border,
                " 0 9.01234e-05 0.00876543 0.00123456",
                border,
            ]
        )

        # the data is a regular dictionary
        data = {
            "criticalCrIteration: keffUnc": [9.01234e-05],
            "dif3d: power": [0.00876543],
            "thInterface: THavgCladTemp": [0.00123456],
        }
        result = tabulate(data, headers="keys", showIndex=True, tableFmt="armi")
        self.assertEqual(expected, result)

        # the data is a defaultdict
        dataD = defaultdict(list)
        for key, vals in data.items():
            for val in vals:
                dataD[key].append(val)

        result2 = tabulate(dataD, headers="keys", showIndex=True, tableFmt="armi")
        self.assertEqual(expected, result2)


class TestTabulateInternal(unittest.TestCase):
    """Direct tests of tabulate's private helper functions."""

    def test_alignColumnDecimal(self):
        """Internal: _align_column(..., 'decimal')."""
        column = ["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"]
        result = _alignColumn(column, "decimal")
        expected = [
            " 12.345 ",
            "-1234.5 ",
            " 1.23 ",
            " 1234.5 ",
            " 1e+234 ",
            " 1.0e234",
        ]
        self.assertEqual(expected, result)

    def test_alignColDecThousandSeps(self):
        """Internal: _align_column(..., 'decimal')."""
        column = ["12.345", "-1234.5", "1.23", "1,234.5", "1e+234", "1.0e234"]
        output = _alignColumn(column, "decimal")
        expected = [
            " 12.345 ",
            "-1234.5 ",
            " 1.23 ",
            "1,234.5 ",
            " 1e+234 ",
            " 1.0e234",
        ]
        self.assertEqual(expected, output)

    def test_alignColDecIncorrectThousandSeps(self):
        """Internal: _align_column(..., 'decimal')."""
        # a malformed separator position means the token is not treated as a number
        column = ["12.345", "-1234.5", "1.23", "12,34.5", "1e+234", "1.0e234"]
        output = _alignColumn(column, "decimal")
        expected = [
            " 12.345 ",
            " -1234.5 ",
            " 1.23 ",
            "12,34.5 ",
            " 1e+234 ",
            " 1.0e234",
        ]
        self.assertEqual(expected, output)

    def test_alignColumnNone(self):
        """Internal: _align_column(..., None)."""
        column = ["123.4", "56.7890"]
        output = _alignColumn(column, None)
        expected = ["123.4", "56.7890"]
        self.assertEqual(expected, output)

    def test_alignColumnMultiline(self):
        """Internal: _align_column(..., is_multiline=True)."""
        column = ["1", "123", "12345\n6"]
        output = _alignColumn(column, "center", isMultiline=True)
        expected = [" 1 ", " 123 ", "12345" + "\n" + " 6 "]
        self.assertEqual(expected, output)

    def test_alignCellVeriticallyOneLineOnly(self):
        """Internal: Aligning a single height cell is same regardless of alignment value."""
        lines = ["one line"]
        column_width = 8

        top = _alignCellVeritically(lines, 1, column_width, "top")
        center = _alignCellVeritically(lines, 1, column_width, "center")
        bottom = _alignCellVeritically(lines, 1, column_width, "bottom")
        none = _alignCellVeritically(lines, 1, column_width, None)

        expected = ["one line"]
        assert top == center == bottom == none == expected

    def test_alignCellVertTopSingleTxtMultiPad(self):
        """Internal: Align single cell text to top."""
        result = _alignCellVeritically(["one line"], 3, 8, "top")
        expected = ["one line", " ", " "]
        self.assertEqual(expected, result)

    def test_alignCellVertCenterSingleTxtMultiPad(self):
        """Internal: Align single cell text to center."""
        result = _alignCellVeritically(["one line"], 3, 8, "center")
        expected = [" ", "one line", " "]
        self.assertEqual(expected, result)

    def test_alignCellVertBottomSingleTxtMultiPad(self):
        """Internal: Align single cell text to bottom."""
        result = _alignCellVeritically(["one line"], 3, 8, "bottom")
        expected = [" ", " ", "one line"]
        self.assertEqual(expected, result)

    def test_alignCellVertTopMultiTxtMultiPad(self):
        """Internal: Align multiline celltext text to top."""
        text = ["just", "one ", "cell"]
        result = _alignCellVeritically(text, 6, 4, "top")
        expected = ["just", "one ", "cell", " ", " ", " "]
        self.assertEqual(expected, result)

    def test_alignCellVertCenterMultiTxtMultiPad(self):
        """Internal: Align multiline celltext text to center."""
        text = ["just", "one ", "cell"]
        result = _alignCellVeritically(text, 6, 4, "center")

        # Even number of rows, can't perfectly center, but we pad less
        # at top when required to do make a judgement
        expected = [" ", "just", "one ", "cell", " ", " "]
        self.assertEqual(expected, result)

    def test_alignCellVertBottomMultiTxtMultiPad(self):
        """Internal: Align multiline celltext text to bottom."""
        text = ["just", "one ", "cell"]
        result = _alignCellVeritically(text, 6, 4, "bottom")
        expected = [" ", " ", " ", "just", "one ", "cell"]
        self.assertEqual(expected, result)

    def test_assortedRareEdgeCases(self):
        """Test some of the more rare edge cases in the purely internal functions."""
        from armi.utils.tabulate import (
            _alignHeader,
            _prependRowIndex,
            _removeSeparatingLines,
        )

        self.assertEqual(_alignHeader("123", False, 3, 3, False, None), "123")

        # non-list input is passed through with no separator-row info
        result = _removeSeparatingLines(123)
        self.assertEqual(result[0], 123)
        self.assertIsNone(result[1])

        self.assertEqual(_prependRowIndex([123], None), [123])

    def test_bool(self):
        # _bool mirrors truthiness, but never raises (e.g. for NumPy arrays)
        self.assertTrue(_bool("stuff"))
        self.assertFalse(_bool(""))
        self.assertTrue(_bool(123))
        self.assertFalse(_bool(np.array([1, 0, -1])))

    def test_buildLine(self):
        """Basic sanity test of internal _buildLine() function."""
        lineFormat = _tableFormats["armi"].lineabove
        self.assertEqual(_buildLine([2, 2], ["center", "center"], lineFormat), "-- --")

        # a callable line format is invoked directly
        formatter = lambda a, b: "xyz"
        self.assertEqual(_buildLine([2, 2], ["center", "center"], formatter), "xyz")

        self.assertIsNone(_buildLine([2, 2], ["center", "center"], None))

    def test_buildRow(self):
        """Basic sanity test of internal _buildRow() function."""
        rowFormat = _tableFormats["armi"].datarow
        self.assertEqual(_buildRow("", [2, 2], ["center", "center"], rowFormat), "")

        # a callable row format is invoked directly
        formatter = lambda a, b, c: "xyz"
        d = {"a": 1, "b": 2}
        self.assertEqual(_buildRow(d, [2, 2], ["center", "center"], formatter), "xyz")

        lst = ["ab", "cd"]
        self.assertEqual(_buildRow(lst, [2, 2], ["center", "center"], rowFormat), "ab cd")

        self.assertIsNone(_buildRow("ab", [2, 2], ["center", "center"], ""))

    def test_format(self):
        """Basic sanity test of internal _format() function."""
        self.assertEqual(_format(None, str, "8", "", "X", True), "X")
        self.assertEqual(_format(123, str, "8", "", "X", True), "123")
        self.assertEqual(_format("123", int, "8", "", "X", True), "123")
        self.assertEqual(_format(bytes("abc", "utf-8"), bytes, "8", "", "X", True), "abc")
        self.assertEqual(_format("3.14", float, "4", "", "X", True), "3.14")

        # ANSI color escapes must survive float formatting untouched
        colorNum = "\x1b[31m3.14\x1b[0m"
        self.assertEqual(_format(colorNum, float, "4", "", "X", True), colorNum)

        self.assertEqual(_format(None, None, "8", "", "X", True), "X")

    def test_isMultiline(self):
        """Basic sanity test of internal _isMultiline() function."""
        self.assertFalse(_isMultiline("world"))
        self.assertTrue(_isMultiline("hello\nworld"))
        self.assertFalse(_isMultiline(bytes("world", "utf-8")))
        self.assertTrue(_isMultiline(bytes("hello\nworld", "utf-8")))

    def test_multilineWidth(self):
        """Internal: _multilineWidth()."""
        multilineString = "\n".join(["foo", "barbaz", "spam"])
        self.assertEqual(_multilineWidth(multilineString), 6)

        onelineString = "12345"
        self.assertEqual(_multilineWidth(onelineString), len(onelineString))

    def test_normalizeTabularData(self):
        """Basic sanity test of internal _normalizeTabularData() function."""
        res = _normalizeTabularData([[1, 2], [3, 4]], np.array(["a", "b"]), "default")
        self.assertEqual(res[0], [[1, 2], [3, 4]])
        self.assertEqual(res[1], ["a", "b"])
        self.assertEqual(res[2], 0)

        res = _normalizeTabularData([], "keys", "default")
self.assertEqual(len(res[0]), 0) self.assertEqual(len(res[1]), 0) self.assertEqual(res[2], 0) res = _normalizeTabularData([], "firstrow", "default") self.assertEqual(len(res[0]), 0) self.assertEqual(len(res[1]), 0) self.assertEqual(res[2], 0) @dataclass class row: a: int b: int rows = [row(1, 2), row(3, 4)] res = _normalizeTabularData(rows, "keys", "default") self.assertEqual(res[0], [[1, 2], [3, 4]]) self.assertEqual(res[1], ["a", "b"]) self.assertEqual(res[2], 0) res = _normalizeTabularData(rows, ["x", "y"], "default") self.assertEqual(res[0], [[1, 2], [3, 4]]) self.assertEqual(res[1], ["x", "y"]) self.assertEqual(res[2], 0) def test_type(self): """Basic sanity test of internal _type() function.""" self.assertEqual(_type(None), type(None)) self.assertEqual(_type("foo"), type("")) self.assertEqual(_type("1"), type(1)) self.assertEqual(_type("\x1b[31m42\x1b[0m"), type(42)) self.assertEqual(_type("\x1b[31m42\x1b[0m"), type(42)) self.assertEqual(_type(datetime.now()), type("2024-12-31")) def test_visibleWidth(self): """Basic sanity test of internal _visibleWidth() function.""" self.assertEqual(_visibleWidth("world"), 5) self.assertEqual(_visibleWidth("\x1b[31mhello\x1b[0m"), 5) self.assertEqual(_visibleWidth(np.ones(3)), 10) def test_wrapTextToColWidths(self): """Basic sanity test of internal _wrapTextToColWidths() function.""" res = _wrapTextToColWidths([], [2, 2], True) self.assertEqual(len(res), 0) res = _wrapTextToColWidths([[1], [2]], [2, 2], True) self.assertEqual(res[0][0], 1) self.assertEqual(res[1][0], 2) res = _wrapTextToColWidths([["1"], ["2"]], [2, 2], False) self.assertEqual(res[0][0], "1") self.assertEqual(res[1][0], "2") class TestTabulateOutput(unittest.TestCase): @classmethod def setUpClass(cls): cls.testTable = [["spam", 41.9999], ["eggs", "451.0"]] cls.testTableWithSepLine = [ ["spam", 41.9999], SEPARATING_LINE, ["eggs", "451.0"], ] cls.testTableHeaders = ["strings", "numbers"] def test_plain(self): """Output: plain with headers.""" expected = 
"\n".join(["strings numbers", "spam 41.9999", "eggs 451"]) result = tabulate(self.testTable, self.testTableHeaders, tableFmt="plain") self.assertEqual(expected, result) def test_plainNoHeader(self): """Output: plain without headers.""" expected = "\n".join(["spam 41.9999", "eggs 451"]) result = tabulate(self.testTable, tableFmt="plain") self.assertEqual(expected, result) def test_plainMultilineNoHeader(self): """Output: plain with multiline cells without headers.""" table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]] expected = "\n".join( [ "foo bar hello", " baz", " bau", " multiline", " world", ] ) result = tabulate(table, strAlign="center", tableFmt="plain") self.assertEqual(expected, result) def test_plainMultiline(self): """Output: plain with multiline cells with headers.""" table = [[2, "foo\nbar"]] headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs") expected = "\n".join( [ " more more spam", " spam \x1b[31meggs\x1b[0m & eggs", " 2 foo", " bar", ] ) result = tabulate(table, headers, tableFmt="plain") self.assertEqual(expected, result) def test_plainMultilineLinks(self): """Output: plain with multiline cells with links and headers.""" table = [[2, "foo\nbar"]] headers = ( "more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\", "more spam\n& eggs", ) expected = "\n".join( [ " more more spam", " spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ & eggs", " 2 foo", " bar", ] ) result = tabulate(table, headers, tableFmt="plain") self.assertEqual(expected, result) def test_plainMultilineEmptyCells(self): """Output: plain with multiline cells and empty cells with headers.""" table = [ ["hdr", "data", "fold"], ["1", "", ""], ["2", "very long data", "fold\nthis"], ] expected = "\n".join( [ " hdr data fold", " 1", " 2 very long data fold", " this", ] ) result = tabulate(table, headers="firstrow", tableFmt="plain") self.assertEqual(expected, result) def test_plainMultilineEmptyCellsNoHeader(self): """Output: plain with multiline cells and empty cells 
without headers.""" table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]] expected = "\n".join(["0", "1", "2 very long data fold", " this"]) result = tabulate(table, tableFmt="plain") self.assertEqual(expected, result) def test_plainMaxcolwidthAutowraps(self): """Output: maxcolwidth will result in autowrapping longer cells.""" table = [["hdr", "fold"], ["1", "very long data"]] expected = "\n".join([" hdr fold", " 1 very long", " data"]) result = tabulate(table, headers="firstrow", tableFmt="plain", maxColWidths=[10, 10]) self.assertEqual(expected, result) def test_plainMaxcolwidthAutowrapsSep(self): """Output: maxcolwidth will result in autowrapping longer cells and separating line.""" table = [ ["hdr", "fold"], ["1", "very long data"], SEPARATING_LINE, ["2", "last line"], ] expected = "\n".join([" hdr fold", " 1 very long", " data", "", " 2 last line"]) result = tabulate(table, headers="firstrow", tableFmt="plain", maxColWidths=[10, 10]) self.assertEqual(expected, result) def test_maxColWidthsingleValue(self): """Output: maxcolwidth can be specified as a single number that works for each column.""" table = [ ["hdr", "fold1", "fold2"], ["mini", "this is short", "this is a bit longer"], ] expected = "\n".join( [ "hdr fold1 fold2", "mini this this", " is is a", " short bit", " longer", ] ) result = tabulate(table, headers="firstrow", tableFmt="plain", maxColWidths=6) self.assertEqual(expected, result) def test_maxcolwidthPadTailingWidths(self): """Output: maxcolwidth, if only partly specified, pads tailing cols with None.""" table = [ ["hdr", "fold1", "fold2"], ["mini", "this is short", "this is a bit longer"], ] expected = "\n".join( [ "hdr fold1 fold2", "mini this this is a bit longer", " is", " short", ] ) result = tabulate(table, headers="firstrow", tableFmt="plain", maxColWidths=[None, 6]) self.assertEqual(expected, result) def test_maxcolwidthHonorDisableParsenum(self): """Output: Using maxcolwidth in conjunction with disable_parsenum is 
honored.""" table = [ ["first number", 123.456789, "123.456789"], ["second number", "987654321.123", "987654321.123"], ] expected = "\n".join( [ "+--------+---------------+--------+", "| first | 123.457 | 123.45 |", "| number | | 6789 |", "+--------+---------------+--------+", "| second | 9.87654e+08 | 987654 |", "| number | | 321.12 |", "| | | 3 |", "+--------+---------------+--------+", ] ) # Grid makes showing the alignment difference a little easier result = tabulate(table, tableFmt="grid", maxColWidths=6, disableNumParse=[2]) self.assertEqual(expected, result) def test_plainmaxHeaderColWidthsAutowraps(self): """Output: maxHeaderColWidths will result in autowrapping header cell.""" table = [["hdr", "fold"], ["1", "very long data"]] expected = "\n".join([" hdr fo", " ld", " 1 very long", " data"]) result = tabulate( table, headers="firstrow", tableFmt="plain", maxColWidths=[10, 10], maxHeaderColWidths=[None, 2], ) self.assertEqual(expected, result) def test_simple(self): """Output: simple with headers.""" expected = "\n".join( [ "strings numbers", "--------- ---------", "spam 41.9999", "eggs 451", ] ) result = tabulate(self.testTable, self.testTableHeaders, tableFmt="simple") self.assertEqual(expected, result) def test_simpleSepLine(self): """Output: simple with headers and separating line.""" expected = "\n".join( [ "strings numbers", "--------- ---------", "spam 41.9999", "--------- ---------", "eggs 451", ] ) result = tabulate(self.testTableWithSepLine, self.testTableHeaders, tableFmt="simple") self.assertEqual(expected, result) def test_readmeExampleSep(self): table = [["Earth", 6371], ["Mars", 3390], SEPARATING_LINE, ["Moon", 1737]] expected = "\n".join( [ "----- ----", "Earth 6371", "Mars 3390", "----- ----", "Moon 1737", "----- ----", ] ) result = tabulate(table, tableFmt="simple") self.assertEqual(expected, result) def test_simpleMultiline2(self): """Output: simple with multiline cells.""" expected = "\n".join( [ " key value", "----- ---------", " foo 
bar", "spam multiline", " world", ] ) table = [["key", "value"], ["foo", "bar"], ["spam", "multiline\nworld"]] result = tabulate(table, headers="firstrow", strAlign="center", tableFmt="simple") self.assertEqual(expected, result) def test_simpleMultiline2SepLine(self): """Output: simple with multiline cells.""" expected = "\n".join( [ " key value", "----- ---------", " foo bar", "----- ---------", "spam multiline", " world", ] ) table = [ ["key", "value"], ["foo", "bar"], SEPARATING_LINE, ["spam", "multiline\nworld"], ] result = tabulate(table, headers="firstrow", strAlign="center", tableFmt="simple") self.assertEqual(expected, result) def test_simpleNoHeader(self): """Output: simple without headers.""" expected = "\n".join(["---- --------", "spam 41.9999", "eggs 451", "---- --------"]) result = tabulate(self.testTable, tableFmt="simple") self.assertEqual(expected, result) def test_simpleNoHeaderSepLine(self): """Output: simple without headers.""" expected = "\n".join( [ "---- --------", "spam 41.9999", "---- --------", "eggs 451", "---- --------", ] ) result = tabulate(self.testTableWithSepLine, tableFmt="simple") self.assertEqual(expected, result) def test_simpleMultilineNoHeader(self): """Output: simple with multiline cells without headers.""" table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]] expected = "\n".join( [ "------- ---------", "foo bar hello", " baz", " bau", " multiline", " world", "------- ---------", ] ) result = tabulate(table, strAlign="center", tableFmt="simple") self.assertEqual(expected, result) def test_simpleMultiline(self): """Output: simple with multiline cells with headers.""" table = [[2, "foo\nbar"]] headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs") expected = "\n".join( [ " more more spam", " spam \x1b[31meggs\x1b[0m & eggs", "----------- -----------", " 2 foo", " bar", ] ) result = tabulate(table, headers, tableFmt="simple") self.assertEqual(expected, result) def test_simpleMultilineLinks(self): 
"""Output: simple with multiline cells with links and headers.""" table = [[2, "foo\nbar"]] headers = ( "more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\", "more spam\n& eggs", ) expected = "\n".join( [ " more more spam", " spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ & eggs", "----------- -----------", " 2 foo", " bar", ] ) result = tabulate(table, headers, tableFmt="simple") self.assertEqual(expected, result) def test_simpleMultilineEmptyCells(self): """Output: simple with multiline cells and empty cells with headers.""" table = [ ["hdr", "data", "fold"], ["1", "", ""], ["2", "very long data", "fold\nthis"], ] expected = "\n".join( [ " hdr data fold", "----- -------------- ------", " 1", " 2 very long data fold", " this", ] ) result = tabulate(table, headers="firstrow", tableFmt="simple") self.assertEqual(expected, result) def test_simpleMultilineEmptyCellsNoHeader(self): """Output: simple with multiline cells and empty cells without headers.""" table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]] expected = "\n".join( [ "- -------------- ----", "0", "1", "2 very long data fold", " this", "- -------------- ----", ] ) result = tabulate(table, tableFmt="simple") self.assertEqual(expected, result) def test_github(self): """Output: github with headers.""" expected = "\n".join( [ "| strings | numbers |", "|-----------|-----------|", "| spam | 41.9999 |", "| eggs | 451 |", ] ) result = tabulate(self.testTable, self.testTableHeaders, tableFmt="github") self.assertEqual(expected, result) def test_grid(self): """Output: grid with headers.""" expected = "\n".join( [ "+-----------+-----------+", "| strings | numbers |", "+===========+===========+", "| spam | 41.9999 |", "+-----------+-----------+", "| eggs | 451 |", "+-----------+-----------+", ] ) result = tabulate(self.testTable, self.testTableHeaders, tableFmt="grid") self.assertEqual(expected, result) def test_gridNoHeader(self): """Output: grid without headers.""" expected = "\n".join( [ 
"+------+----------+", "| spam | 41.9999 |", "+------+----------+", "| eggs | 451 |", "+------+----------+", ] ) result = tabulate(self.testTable, tableFmt="grid") self.assertEqual(expected, result) def test_gridMultilineNoHeader(self): """Output: grid with multiline cells without headers.""" table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]] expected = "\n".join( [ "+---------+-----------+", "| foo bar | hello |", "| baz | |", "| bau | |", "+---------+-----------+", "| | multiline |", "| | world |", "+---------+-----------+", ] ) result = tabulate(table, strAlign="center", tableFmt="grid") self.assertEqual(expected, result) def test_gridMultiline(self): """Output: grid with multiline cells with headers.""" table = [[2, "foo\nbar"]] headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs") expected = "\n".join( [ "+-------------+-------------+", "| more | more spam |", "| spam \x1b[31meggs\x1b[0m | & eggs |", "+=============+=============+", "| 2 | foo |", "| | bar |", "+-------------+-------------+", ] ) result = tabulate(table, headers, tableFmt="grid") self.assertEqual(expected, result) def test_gridMultilineEmptyCells(self): """Output: grid with multiline cells and empty cells with headers.""" table = [ ["hdr", "data", "fold"], ["1", "", ""], ["2", "very long data", "fold\nthis"], ] expected = "\n".join( [ "+-------+----------------+--------+", "| hdr | data | fold |", "+=======+================+========+", "| 1 | | |", "+-------+----------------+--------+", "| 2 | very long data | fold |", "| | | this |", "+-------+----------------+--------+", ] ) result = tabulate(table, headers="firstrow", tableFmt="grid") self.assertEqual(expected, result) def test_gridMultilineEmptyCellsNoHeader(self): """Output: grid with multiline cells and empty cells without headers.""" table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]] expected = "\n".join( [ "+---+----------------+------+", "| 0 | | |", 
"+---+----------------+------+", "| 1 | | |", "+---+----------------+------+", "| 2 | very long data | fold |", "| | | this |", "+---+----------------+------+", ] ) result = tabulate(table, tableFmt="grid") self.assertEqual(expected, result) def test_pretty(self): """Output: pretty with headers.""" expected = "\n".join( [ "+---------+---------+", "| strings | numbers |", "+---------+---------+", "| spam | 41.9999 |", "| eggs | 451.0 |", "+---------+---------+", ] ) result = tabulate(self.testTable, self.testTableHeaders, tableFmt="pretty") self.assertEqual(expected, result) def test_prettyNoHeader(self): """Output: pretty without headers.""" expected = "\n".join( [ "+------+---------+", "| spam | 41.9999 |", "| eggs | 451.0 |", "+------+---------+", ] ) result = tabulate(self.testTable, tableFmt="pretty") self.assertEqual(expected, result) def test_prettyMultilineNoHeader(self): """Output: pretty with multiline cells without headers.""" table = [["foo bar\nbaz\nbau", "hello"], ["", "multiline\nworld"]] expected = "\n".join( [ "+---------+-----------+", "| foo bar | hello |", "| baz | |", "| bau | |", "| | multiline |", "| | world |", "+---------+-----------+", ] ) result = tabulate(table, tableFmt="pretty") self.assertEqual(expected, result) def test_prettyMultiline(self): """Output: pretty with multiline cells with headers.""" table = [[2, "foo\nbar"]] headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs") expected = "\n".join( [ "+-----------+-----------+", "| more | more spam |", "| spam \x1b[31meggs\x1b[0m | & eggs |", "+-----------+-----------+", "| 2 | foo |", "| | bar |", "+-----------+-----------+", ] ) result = tabulate(table, headers, tableFmt="pretty") self.assertEqual(expected, result) def test_prettyMultilineLinks(self): """Output: pretty with multiline cells with headers.""" table = [[2, "foo\nbar"]] headers = ( "more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\", "more spam\n& eggs", ) expected = "\n".join( [ "+-----------+-----------+", 
"| more | more spam |", "| spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ | & eggs |", "+-----------+-----------+", "| 2 | foo |", "| | bar |", "+-----------+-----------+", ] ) result = tabulate(table, headers, tableFmt="pretty") self.assertEqual(expected, result) def test_prettyMultilineEmptyCells(self): """Output: pretty with multiline cells and empty cells with headers.""" table = [ ["hdr", "data", "fold"], ["1", "", ""], ["2", "very long data", "fold\nthis"], ] expected = "\n".join( [ "+-----+----------------+------+", "| hdr | data | fold |", "+-----+----------------+------+", "| 1 | | |", "| 2 | very long data | fold |", "| | | this |", "+-----+----------------+------+", ] ) result = tabulate(table, headers="firstrow", tableFmt="pretty") self.assertEqual(expected, result) def test_prettyMultilineEmptyCellsNoHeader(self): """Output: pretty with multiline cells and empty cells without headers.""" table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]] expected = "\n".join( [ "+---+----------------+------+", "| 0 | | |", "| 1 | | |", "| 2 | very long data | fold |", "| | | this |", "+---+----------------+------+", ] ) result = tabulate(table, tableFmt="pretty") self.assertEqual(expected, result) def test_rst(self): """Output: rst with headers.""" expected = "\n".join( [ "========= =========", "strings numbers", "========= =========", "spam 41.9999", "eggs 451", "========= =========", ] ) result = tabulate(self.testTable, self.testTableHeaders, tableFmt="rst") self.assertEqual(expected, result) def test_rstEmptyValuesInFirstColumn(self): """Output: rst with dots in first column.""" test_headers = ["", "what"] test_data = [("", "spam"), ("", "eggs")] expected = "\n".join( [ "==== ======", ".. what", "==== ======", ".. spam", ".. 
eggs", "==== ======", ] ) result = tabulate(test_data, test_headers, tableFmt="rst") self.assertEqual(expected, result) def test_rstNoHeader(self): """Output: rst without headers.""" expected = "\n".join(["==== ========", "spam 41.9999", "eggs 451", "==== ========"]) result = tabulate(self.testTable, tableFmt="rst") self.assertEqual(expected, result) def test_rstMultiline(self): """Output: rst with multiline cells with headers.""" table = [[2, "foo\nbar"]] headers = ("more\nspam \x1b[31meggs\x1b[0m", "more spam\n& eggs") expected = "\n".join( [ "=========== ===========", " more more spam", " spam \x1b[31meggs\x1b[0m & eggs", "=========== ===========", " 2 foo", " bar", "=========== ===========", ] ) result = tabulate(table, headers, tableFmt="rst") self.assertEqual(expected, result) def test_rstMultilineLinks(self): """Output: rst with multiline cells with headers.""" table = [[2, "foo\nbar"]] headers = ( "more\nspam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\", "more spam\n& eggs", ) expected = "\n".join( [ "=========== ===========", " more more spam", " spam \x1b]8;;target\x1b\\eggs\x1b]8;;\x1b\\ & eggs", "=========== ===========", " 2 foo", " bar", "=========== ===========", ] ) result = tabulate(table, headers, tableFmt="rst") self.assertEqual(expected, result) def test_rstMultilineEmptyCells(self): """Output: rst with multiline cells and empty cells with headers.""" table = [ ["hdr", "data", "fold"], ["1", "", ""], ["2", "very long data", "fold\nthis"], ] expected = "\n".join( [ "===== ============== ======", " hdr data fold", "===== ============== ======", " 1", " 2 very long data fold", " this", "===== ============== ======", ] ) result = tabulate(table, headers="firstrow", tableFmt="rst") self.assertEqual(expected, result) def test_rstMultilineEmptyCellsNoHeader(self): """Output: rst with multiline cells and empty cells without headers.""" table = [["0", "", ""], ["1", "", ""], ["2", "very long data", "fold\nthis"]] expected = "\n".join( [ "= ============== 
====", "0", "1", "2 very long data fold", " this", "= ============== ====", ] ) result = tabulate(table, tableFmt="rst") self.assertEqual(expected, result) def test_noData(self): """Output: table with no data.""" expected = "\n".join(["strings numbers", "--------- ---------"]) result = tabulate(None, self.testTableHeaders, tableFmt="simple") self.assertEqual(expected, result) def test_emptyData(self): """Output: table with empty data.""" expected = "\n".join(["strings numbers", "--------- ---------"]) result = tabulate([], self.testTableHeaders, tableFmt="simple") self.assertEqual(expected, result) def test_noDataNoHeader(self): """Output: table with no data and no headers.""" expected = "" result = tabulate(None, tableFmt="simple") self.assertEqual(expected, result) def test_emptyDataNoHeaders(self): """Output: table with empty data and no headers.""" expected = "" result = tabulate([], tableFmt="simple") self.assertEqual(expected, result) def test_intFmt(self): """Output: integer format.""" result = tabulate([[10000], [10]], intFmt=",", tableFmt="plain") expected = "10,000\n 10" self.assertEqual(expected, result) def test_emptyDataHeader(self): """Output: table with empty data and headers as firstrow.""" expected = "" result = tabulate([], headers="firstrow") self.assertEqual(expected, result) def test_floatFmt(self): """Output: floating point format.""" result = tabulate([["1.23456789"], [1.0]], floatFmt=".3f", tableFmt="plain") expected = "1.235\n1.000" self.assertEqual(expected, result) def test_floatFmtMulti(self): """Output: floating point format different for each column.""" result = tabulate([[0.12345, 0.12345, 0.12345]], floatFmt=(".1f", ".3f"), tableFmt="plain") expected = "0.1 0.123 0.12345" self.assertEqual(expected, result) def test_colAlignMulti(self): """Output: string columns with custom colAlign.""" result = tabulate([["one", "two"], ["three", "four"]], colAlign=("right",), tableFmt="plain") expected = " one two\nthree four" 
        self.assertEqual(expected, result)

    def test_colAlignMultiSepLine(self):
        """Output: string columns with custom colAlign."""
        result = tabulate(
            [["one", "two"], SEPARATING_LINE, ["three", "four"]],
            colAlign=("right",),
            tableFmt="plain",
        )
        expected = " one two\n\nthree four"
        self.assertEqual(expected, result)

    def test_columnGlobalAndSpecificAlignment(self):
        """Test `colGlobalAlign` and `"global"` parameter for `colAlign`."""
        table = [[1, 2, 3, 4], [111, 222, 333, 444]]
        colGlobalAlign = "center"
        # "global" in a per-column spec defers to colGlobalAlign for that column.
        colAlign = ("global", "left", "right")
        result = tabulate(table, colGlobalAlign=colGlobalAlign, colAlign=colAlign)
        expected = "\n".join(
            [
                "--- --- --- ---",
                " 1 2 3 4",
                "111 222 333 444",
                "--- --- --- ---",
            ]
        )
        self.assertEqual(expected, result)

    def test_headersGlobalAndSpecificAlignment(self):
        """Test `headersGlobalAlign` and `headersAlign`."""
        table = [[1, 2, 3, 4, 5, 6], [111, 222, 333, 444, 555, 666]]
        colGlobalAlign = "center"
        colAlign = ("left",)
        headers = ["h", "e", "a", "d", "e", "r"]
        headersGlobalAlign = "right"
        # "same" aligns a header like its column; "global" uses headersGlobalAlign.
        headersAlign = ("same", "same", "left", "global", "center")
        result = tabulate(
            table,
            headers=headers,
            colGlobalAlign=colGlobalAlign,
            colAlign=colAlign,
            headersGlobalAlign=headersGlobalAlign,
            headersAlign=headersAlign,
        )
        expected = "\n".join(
            [
                "h e a d e r",
                "--- --- --- --- --- ---",
                "1 2 3 4 5 6",
                "111 222 333 444 555 666",
            ]
        )
        self.assertEqual(expected, result)

    def test_colAlignOrheadersAlignTooLong(self):
        """Test `colAlign` and `headersAlign` too long."""
        # Extra alignment entries beyond the column count are ignored.
        table = [[1, 2], [111, 222]]
        colAlign = ("global", "left", "center")
        headers = ["h"]
        headersAlign = ("center", "right", "same")
        result = tabulate(table, headers=headers, colAlign=colAlign, headersAlign=headersAlign)
        expected = "\n".join([" h", "--- ---", " 1 2", "111 222"])
        self.assertEqual(expected, result)

    def test_floatConversions(self):
        """Output: float format parsed."""
        test_headers = [
            "str",
            "bad_float",
            "just_float",
            "with_inf",
            "with_nan",
            "neg_inf",
        ]
        testTable = [
            ["spam", 41.9999, "123.345", "12.2", "nan", "0.123123"],
            ["eggs", "451.0", 66.2222, "inf", 123.1234, "-inf"],
            ["asd", "437e6548", 1.234e2, float("inf"), float("nan"), 0.22e23],
        ]
        result = tabulate(testTable, test_headers, tableFmt="grid")
        expected = "\n".join(
            [
                "+-------+-------------+--------------+------------+------------+-------------+",
                "| str | bad_float | just_float | with_inf | with_nan | neg_inf |",
                "+=======+=============+==============+============+============+=============+",
                "| spam | 41.9999 | 123.345 | 12.2 | nan | 0.123123 |",
                "+-------+-------------+--------------+------------+------------+-------------+",
                "| eggs | 451.0 | 66.2222 | inf | 123.123 | -inf |",
                "+-------+-------------+--------------+------------+------------+-------------+",
                "| asd | 437e6548 | 123.4 | inf | nan | 2.2e+22 |",
                "+-------+-------------+--------------+------------+------------+-------------+",
            ]
        )
        self.assertEqual(expected, result)

    def test_missingVal(self):
        """Output: substitution of missing values."""
        result = tabulate([["Alice", 10], ["Bob", None]], missingVal="n/a", tableFmt="plain")
        expected = "Alice 10\nBob n/a"
        self.assertEqual(expected, result)

    def test_missingValMulti(self):
        """Output: substitution of missing values with different values per column."""
        # Columns beyond the missingVal tuple get the default (empty) placeholder.
        result = tabulate(
            [["Alice", "Bob", "Charlie"], [None, None, None]],
            missingVal=("n/a", "?"),
            tableFmt="plain",
        )
        expected = "Alice Bob Charlie\nn/a ?"
        self.assertEqual(expected, result)

    def test_columnAlignment(self):
        """Output: custom alignment for text and numbers."""
        expected = "\n".join(["----- ---", "Alice 1", " Bob 333", "----- ---"])
        result = tabulate([["Alice", 1], ["Bob", 333]], strAlign="right", numAlign="center")
        self.assertEqual(expected, result)

    def test_dictLikeIndex(self):
        """Output: a table with a running index."""
        dd = {"b": range(101, 104)}
        expected = "\n".join([" b", "-- ---", " 0 101", " 1 102", " 2 103"])
        result = tabulate(dd, "keys", showIndex=True)
        self.assertEqual(expected, result)

    def test_listOfListsIndex(self):
        """Output: a table with a running index."""
        dd = zip(*[range(3), range(101, 104)])
        # keys' order (hence columns' order) is not deterministic in Python 3
        # => we have to consider both possible results as valid
        expected = "\n".join(
            [
                " a b",
                "-- --- ---",
                " 0 0 101",
                " 1 1 102",
                " 2 2 103",
            ]
        )
        result = tabulate(dd, headers=["a", "b"], showIndex=True)
        self.assertEqual(expected, result)

    def test_listOfListsIndexSepLine(self):
        """Output: a table with a running index."""
        dd = [(0, 101), SEPARATING_LINE, (1, 102), (2, 103)]
        # keys' order (hence columns' order) is not deterministic in Python 3
        # => we have to consider both possible results as valid
        expected = "\n".join(
            [
                " a b",
                "-- --- ---",
                " 0 0 101",
                "-- --- ---",
                " 1 1 102",
                " 2 2 103",
            ]
        )
        result = tabulate(dd, headers=["a", "b"], showIndex=True)
        self.assertEqual(expected, result)

    def test_listOfListsSuppliedIndex(self):
        """Output: a table with a supplied index."""
        dd = zip(*[list(range(3)), list(range(101, 104))])
        expected = "\n".join(
            [
                " a b",
                "-- --- ---",
                " 1 0 101",
                " 2 1 102",
                " 3 2 103",
            ]
        )
        result = tabulate(dd, headers=["a", "b"], showIndex=[1, 2, 3])
        self.assertEqual(expected, result)
        # the index must be as long as the number of rows
        with self.assertRaises(ValueError):
            tabulate(dd, headers=["a", "b"], showIndex=[1, 2])

    def test_listOfListsIndexFirstrow(self):
        """Output: a table with a running index and header='firstrow'."""
        dd = zip(*[["a"] + list(range(3)), ["b"] + list(range(101, 104))])
        expected = "\n".join(
            [
                " a b",
                "-- --- ---",
                " 0 0 101",
                " 1 1 102",
                " 2 2 103",
            ]
        )
        result = tabulate(dd, headers="firstrow", showIndex=True)
        self.assertEqual(expected, result)
        # the index must be as long as the number of rows
        with self.assertRaises(ValueError):
            tabulate(dd, headers="firstrow", showIndex=[1, 2])

    def test_disableNumParseDefault(self):
        """Output: Default table output with number parsing and alignment."""
        expected = "\n".join(
            [
                "strings numbers",
                "--------- ---------",
                "spam 41.9999",
                "eggs 451",
            ]
        )
        result = tabulate(self.testTable, self.testTableHeaders)
        self.assertEqual(expected, result)
        # disableNumParse=False is the default behavior.
        result = tabulate(self.testTable, self.testTableHeaders, disableNumParse=False)
        self.assertEqual(expected, result)

    def test_disableNumParseTrue(self):
        """Output: Default table output, but without number parsing and alignment."""
        # "451.0" stays a string instead of being rendered as the number 451.
        expected = "\n".join(
            [
                "strings numbers",
                "--------- ---------",
                "spam 41.9999",
                "eggs 451.0",
            ]
        )
        result = tabulate(self.testTable, self.testTableHeaders, disableNumParse=True)
        self.assertEqual(expected, result)

    def test_disableNumParseList(self):
        """Output: Default table output, but with number parsing selectively disabled."""
        tableHeaders = ["h1", "h2", "h3"]
        testTable = [["foo", "bar", "42992e1"]]
        # Column 2 excluded from parsing: "42992e1" is kept verbatim.
        expected = "\n".join(["h1 h2 h3", "---- ---- -------", "foo bar 42992e1"])
        result = tabulate(testTable, tableHeaders, disableNumParse=[2])
        self.assertEqual(expected, result)

        # Column 2 parsed: "42992e1" is rendered as the number 429920.
        expected = "\n".join(["h1 h2 h3", "---- ---- ------", "foo bar 429920"])
        result = tabulate(testTable, tableHeaders, disableNumParse=[0, 1])
        self.assertEqual(expected, result)


================================================
FILE: armi/utils/tests/test_textProcessors.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for functions in textProcessors.py."""

import logging
import os
import pathlib
import unittest
from io import StringIO

import ruamel

from armi import runLog
from armi.testing import TESTING_ROOT
from armi.tests import mockRunLogs
from armi.utils import textProcessors
from armi.utils.directoryChangers import TemporaryDirectoryChanger

THIS_DIR = os.path.dirname(__file__)
# Directory holding the YAML fixtures (root.yaml + its !included files) used below.
RES_DIR = os.path.join(THIS_DIR, "resources")


class TestTextProcessor(unittest.TestCase):
    """Test Text processor."""

    def setUp(self):
        godivaSettings = os.path.join(TESTING_ROOT, "reactors", "godiva", "godiva.armi.unittest.yaml")
        self.tp = textProcessors.TextProcessor(godivaSettings)

    def test_fsearch(self):
        """Test fsearch in re mode."""
        line = self.tp.fsearch("nTasks")
        self.assertIn("36", line)
        # the processor reads sequentially, so a second search past the only match finds nothing
        self.assertEqual(self.tp.fsearch("nTasks"), "")

    def test_fsearchText(self):
        """Test fsearch in text mode."""
        line = self.tp.fsearch("nTasks", textFlag=True)
        self.assertIn("36", line)
        self.assertEqual(self.tp.fsearch("nTasks"), "")


class YamlIncludeTest(unittest.TestCase):
    """Tests for resolving ``!include`` tags in YAML inputs."""

    def test_resolveIncludes(self):
        with open(os.path.join(RES_DIR, "root.yaml")) as f:
            resolved = textProcessors.resolveMarkupInclusions(f, root=pathlib.Path(RES_DIR))

        # Make sure that there aren't any !include tags left in the converted stream
        anyIncludes = False
        for l in resolved:
            if "!include" in l:
                anyIncludes = True
        self.assertFalse(anyIncludes)

        # Re-parse the resolved stream, make sure that we included the stuff that we want
        resolved.seek(0)
        data = ruamel.yaml.YAML().load(resolved)
        self.assertEqual(data["billy"]["children"][1]["full_name"], "Jennifer Person")
        self.assertEqual(data["billy"]["children"][1]["children"][0]["full_name"], "Elizabeth Person")

        # Check that we preserved other round-trip data
        resolved.seek(0)
        commentFound = False
        anchorFound = False
        for l in resolved:
            if l.strip() == "# some comment in includeA":
                commentFound = True
            if "*bobby" in l:
                anchorFound = True
        self.assertTrue(commentFound)
        self.assertTrue(anchorFound)

    def test_resolveIncludes_StringIO(self):
        """Tests that resolveMarkupInclusions handles StringIO input."""
        yaml = ruamel.yaml.YAML()
        with open(os.path.join(RES_DIR, "root.yaml")) as f:
            loadedYaml = yaml.load(f)

        stringIO = StringIO()
        yaml.dump(loadedYaml, stringIO)
        resolved = textProcessors.resolveMarkupInclusions(src=stringIO, root=pathlib.Path(RES_DIR))

        with open(os.path.join(RES_DIR, "root.yaml")) as f:
            expected = textProcessors.resolveMarkupInclusions(f, root=pathlib.Path(RES_DIR))

        # strip it because one method gives an extra newline we don't care about
        self.assertEqual(resolved.getvalue().strip(), expected.getvalue().strip())

    def test_findIncludes(self):
        includes = textProcessors.findYamlInclusions(pathlib.Path(RES_DIR) / "root.yaml")
        for i, _mark in includes:
            self.assertTrue((RES_DIR / i).exists())
        self.assertEqual(len(includes), 2)


class SequentialReaderTests(unittest.TestCase):
    # NOTE(review): the exact line breaks of this fixture string are not recoverable from
    # the collapsed source; reconstructed so the "X Y <float>" rows each sit on one line,
    # which is what test_readFileWithPattern's split()[2] == 3.5 assertion requires.
    textStream = """This is an example test stream.
This has multiple lines in it and below it contains
a set of data that can be found using a regular expression
pattern.

FILE DATA
X Y 3.5
X Y 4.2
X Y 0.0"""

    _DUMMY_FILE_NAME = "DUMMY.txt"

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()
        with open(self._DUMMY_FILE_NAME, "w") as f:
            f.write(self.textStream)

    def tearDown(self):
        if os.path.exists(self._DUMMY_FILE_NAME):
            try:
                os.remove(self._DUMMY_FILE_NAME)
            except OSError:
                pass
        self.td.__exit__(None, None, None)

    def test_readFile(self):
        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:
            self.assertTrue(sr.searchForText("FILE DATA"))
            self.assertFalse(sr.searchForText("This text isn't here."))

    def test_readFileWithPattern(self):
        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:
            self.assertTrue(sr.searchForPattern(r"(X\s+Y\s+\d+\.\d+)"))
            self.assertEqual(float(sr.line.split()[2]), 3.5)

    def test_issueWarningOnFindingText(self):
        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:
            warningMsg = "Oh no"
            sr.issueWarningOnFindingText("example test stream", warningMsg)
            with mockRunLogs.BufferLog() as mock:
                runLog.LOG.startLog("test_issueWarningOnFindingText")
                runLog.LOG.setVerbosity(logging.WARNING)
                self.assertEqual("", mock.getStdout())
                # hitting the registered text emits the warning to the log
                self.assertTrue(sr.searchForPattern("example test stream"))
                self.assertIn(warningMsg, mock.getStdout())
                self.assertFalse(sr.searchForPattern("Killer Tomatoes"))

    def test_raiseErrorOnFindingText(self):
        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:
            sr.raiseErrorOnFindingText("example test stream", IOError)
            with self.assertRaises(IOError):
                self.assertTrue(sr.searchForPattern("example test stream"))

    def test_consumeLine(self):
        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:
            sr.line = "hi"
            sr.match = 1
            sr.consumeLine()
            # consumeLine resets both the line buffer and the last match
            self.assertEqual(len(sr.line), 0)
            self.assertIsNone(sr.match)


# ================================================
# FILE: armi/utils/tests/test_triangle.py
# ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the
"License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test the basic triangle math.""" import math import unittest from armi.utils import triangle class TestTriangle(unittest.TestCase): def test_getTriangleArea(self): """Test that getTriangleArea correctly calculates the area of a right triangle.""" x1 = 0.0 y1 = 0.0 x2 = 1.0 y2 = 0.0 x3 = 0.0 y3 = 1.0 refArea = 1.0 / 2.0 * (y3 - y1) * (x2 - x1) Area = triangle.getTriangleArea(x1, y1, x2, y2, x3, y3) self.assertAlmostEqual(refArea, Area, 6) def test_getTriangleCentroid(self): # Right triangle x, y = triangle.getTriangleCentroid(0, 0, 0, 1, 1, 0) self.assertAlmostEqual(x, 1 / 3, delta=1e-10) self.assertAlmostEqual(y, 1 / 3, delta=1e-10) # Right triangle, but all in the negative part of the coordinate plane x, y = triangle.getTriangleCentroid(-10, -10, -10, -9, -9, -10) self.assertAlmostEqual(x, -10 + 1 / 3, delta=1e-10) self.assertAlmostEqual(y, -10 + 1 / 3, delta=1e-10) # Isosceles triangle x, y = triangle.getTriangleCentroid(-2, 0, 2, 0, 0, 8) self.assertAlmostEqual(x, 0.0, delta=1e-10) self.assertAlmostEqual(y, 2 + 2 / 3, delta=1e-10) # Equilateral triangle x, y = triangle.getTriangleCentroid(0, 0, 2, 0, 1, math.sqrt(3)) self.assertAlmostEqual(x, 1.0, delta=1e-10) self.assertAlmostEqual(y, 1 / math.sqrt(3), delta=1e-10) def test_checkIfPointIsInTriangle(self): """Test that checkIfPointIsInTrinagle can correctly identify if a point is inside or outside of a triangle.""" # First check the right triangle case xT1 = 0.0 yT1 = 0.0 xT2 = 1.0 yT2 = 0.0 xT3 = 0.0 yT3 = 1.0 xP = 0.0 yP = 0.0 
rightTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertTrue(rightTriangleInOrOut) # now create a case that should evaluate False xP = 2.0 yP = 0.5 rightTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertFalse(rightTriangleInOrOut) # Now check non right triangle xT1 = 26.0 yT1 = 10.0 xT2 = 100.0 yT2 = 0.0 xT3 = 0.0 yT3 = 100.0 xP = 50.0 yP = 50.0 generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertTrue(generalTriangleInOrOut) # now check false case xP = 1.0 yP = 60.0 generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertFalse(generalTriangleInOrOut) # Check a case that should cause failure since only two triangle can be drawn xP = 0.0 yP = 0.17 generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertFalse(generalTriangleInOrOut) def test_checkIfPointIsInTriangle2(self): """Test that barycentricCheckIfPointIsInTriangle can identify if a point is inside or outside of a triangle.""" # First check the right triangle case xT1 = 0.0 yT1 = 0.0 xT2 = 1.0 yT2 = 0.0 xT3 = 0.0 yT3 = 1.0 xP = 0.5 yP = 0.5 rightTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertTrue(rightTriangleInOrOut) # Check a case that should cause failure for checkIfPointIsInTriangle since only two triangle can be drawn x1 = 0.15 x2 = 0.0 x3 = 0.0 y1 = 0.17 y2 = 0.054 y3 = 0.376 xP = 0.0 yP = 0.17 generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(x1, y1, x2, y2, x3, y3, xP, yP) self.assertTrue(generalTriangleInOrOut) # now create a case that should evaluate False xP = 2.0 yP = 0.5 rightTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertFalse(rightTriangleInOrOut) # Now check non right triangle xT1 = 26.0 yT1 = 10.0 xT2 = 100.0 yT2 = 0.0 xT3 
= 0.0 yT3 = 100.0 xP = 50.0 yP = 50.0 generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertTrue(generalTriangleInOrOut) # now check false case xP = 1.0 yP = 60.0 generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP) self.assertFalse(generalTriangleInOrOut) ================================================ FILE: armi/utils/tests/test_units.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Test armi.utils.units.py.""" import unittest from armi.utils import units class TestUnits(unittest.TestCase): def test_getTc(self): self.assertAlmostEqual(units.getTc(Tc=200), 200.0) self.assertAlmostEqual(units.getTc(Tk=300), 26.85) ## error if no argument provided with self.assertRaisesRegex(ValueError, "Tc=None and Tk=None"): units.getTc() ## error if two arguments provided even if those arguments are "falsy" with self.assertRaisesRegex(ValueError, "Tc=0 and Tk=0"): units.getTc(Tc=0, Tk=0) with self.assertRaisesRegex(ValueError, "Tc=0 and Tk=200"): units.getTc(Tc=0, Tk=200) def test_getTk(self): self.assertAlmostEqual(units.getTk(Tc=200), 473.15) self.assertAlmostEqual(units.getTk(Tk=300), 300.00) ## error if no argument provided with self.assertRaisesRegex(ValueError, "Tc=None and Tk=None"): units.getTk() ## error if two arguments provided even if those arguments are "falsy" with self.assertRaisesRegex(ValueError, "Tc=0 and Tk=0"): units.getTk(Tc=0, Tk=0) with self.assertRaisesRegex(ValueError, "Tc=0 and Tk=200"): units.getTk(Tc=0, Tk=200) def test_getTf(self): # 0 C = 32 F self.assertAlmostEqual(units.getTf(Tc=0), 32.0) self.assertAlmostEqual(units.getTf(Tk=273.15), 32.0) # 100 C = 212 F self.assertAlmostEqual(units.getTf(Tc=100), 212.0) self.assertAlmostEqual(units.getTf(Tk=373.15), 212.0) # -40 C = -40 F self.assertAlmostEqual(units.getTf(Tc=-40), -40) ## error if no argument provided with self.assertRaisesRegex(ValueError, "Tc=None and Tk=None"): units.getTf() ## error if two arguments provided even if those arguments are "falsy" with self.assertRaisesRegex(ValueError, "Tc=0 and Tk=0"): units.getTf(Tc=0, Tk=0) with self.assertRaisesRegex(ValueError, "Tc=0 and Tk=200"): units.getTf(Tc=0, Tk=200) def test_pressure_converter(self): """Converter Pascals to Pascals should just be a pass-through.""" for val in [0.0, -99.141, 123, 3.14159, -2.51212e-12]: self.assertEqual(val, units.PRESSURE_CONVERTERS["Pa"](val)) def test_getTmev(self): val = 
units.getTmev(Tc=45.0) self.assertAlmostEqual(val, 2.74160430306e-08) val = units.getTmev(Tc=145.0) self.assertAlmostEqual(val, 3.60333754306e-08) val = units.getTmev(Tk=445.0) self.assertAlmostEqual(val, 3.8347129180000004e-08) def test_getTemperature(self): val = units.getTemperature(Tc=42, tempUnits="Tc") self.assertEqual(val, 42) val = units.getTemperature(Tk=42, tempUnits="Tk") self.assertEqual(val, 42) val = units.getTemperature(Tc=42, tempUnits="Tk") self.assertAlmostEqual(val, 315.15) val = units.getTemperature(Tk=42, tempUnits="Tc") self.assertAlmostEqual(val, -231.15) with self.assertRaises(ValueError): units.getTemperature(Tc=42) def test_convertXtoPascal(self): val = units.convertMmhgToPascal(11.1) self.assertAlmostEqual(val, 1479.8782894736883) val = units.convertBarToPascal(2.2) self.assertAlmostEqual(val, 220000) val = units.convertAtmToPascal(3.1) self.assertAlmostEqual(val, 314107.5) def test_sanitizeAngle(self): val = units.sanitizeAngle(0) self.assertEqual(val, 0) val = units.sanitizeAngle(1.01) self.assertEqual(val, 1.01) val = units.sanitizeAngle(-6) self.assertAlmostEqual(val, 0.28318530717958623) val = units.sanitizeAngle(9) self.assertAlmostEqual(val, 2.7168146928204138) def test_getXYLineParameters(self): a, b, c, d = units.getXYLineParameters(0) self.assertEqual(a, 0.0) self.assertEqual(b, 1.0) self.assertEqual(c, 0.0) self.assertEqual(d, 0.0) a, b, c, d = units.getXYLineParameters(1, 0.1, 0.2) self.assertEqual(a, 1) self.assertEqual(b, 0) self.assertEqual(c, 0) self.assertEqual(d, 0.1) ================================================ FILE: armi/utils/tests/test_utils.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing some utility functions."""

import os
import unittest
from collections import defaultdict

import numpy as np

from armi import utils
from armi.settings.caseSettings import Settings
from armi.testing import loadTestReactor
from armi.tests import mockRunLogs
from armi.utils import (
    codeTiming,
    directoryChangers,
    getAvailabilityFactors,
    getBurnSteps,
    getCumulativeNodeNum,
    getCycleLengths,
    getCycleNames,
    getCycleNodeFromCumulativeNode,
    getCycleNodeFromCumulativeStep,
    getFileSHA1Hash,
    getMaxBurnSteps,
    getNodesPerCycle,
    getPowerFractions,
    getPreviousTimeNode,
    getStepLengths,
    hasBurnup,
    safeCopy,
    safeMove,
)


class TestGeneralUtils(unittest.TestCase):
    def test_getFileSHA1Hash(self):
        with directoryChangers.TemporaryDirectoryChanger():
            path = "test.txt"
            with open(path, "w") as f1:
                f1.write("test")
            sha = getFileSHA1Hash(path)
            # SHA-1 of the string "test" begins with "a94a8"
            self.assertIn("a94a8", sha)

    def test_getFileSHA1HashDir(self):
        # hashing a directory folds in all the files it contains
        with directoryChangers.TemporaryDirectoryChanger():
            pathDir = "testDir"
            path1 = os.path.join(pathDir, "test1.txt")
            path2 = os.path.join(pathDir, "test2.txt")
            os.mkdir(pathDir)
            for i, path in enumerate([path1, path2]):
                with open(path, "w") as f1:
                    f1.write(f"test{i}")
            sha = getFileSHA1Hash(pathDir)
            self.assertIn("ccd13", sha)

    def test_mergeableDictionary(self):
        mergeableDict = utils.MergeableDict()
        normalDict = {"luna": "thehusky", "isbegging": "fortreats", "right": "now"}
        # merge() accepts multiple dicts and folds them all in
        mergeableDict.merge({"luna": "thehusky"}, {"isbegging": "fortreats"}, {"right": "now"})
        self.assertEqual(mergeableDict, normalDict)

    def test_createFormattedStrWithDelimiter(self):
        # Test with a random list of strings
        dataList = ["hello", "world", "1", "2", "3", "4", "5"]
        maxNumberOfValuesBeforeDelimiter = 3
        delimiter = "\n"
        outputStr = utils.createFormattedStrWithDelimiter(
            dataList=dataList,
            maxNumberOfValuesBeforeDelimiter=maxNumberOfValuesBeforeDelimiter,
            delimiter=delimiter,
        )
        self.assertEqual(outputStr, "hello, world, 1,\n2, 3,\n4, 5\n")

        # a limit of 0 puts everything on a single line
        outputStr = utils.createFormattedStrWithDelimiter(
            dataList=dataList,
            maxNumberOfValuesBeforeDelimiter=0,
            delimiter=delimiter,
        )
        self.assertEqual(outputStr, "hello, world, 1, 2, 3, 4, 5\n")

        # test with an empty list
        dataList = []
        outputStr = utils.createFormattedStrWithDelimiter(
            dataList=dataList,
            maxNumberOfValuesBeforeDelimiter=maxNumberOfValuesBeforeDelimiter,
            delimiter=delimiter,
        )
        self.assertEqual(outputStr, "")

    def test_capStrLen(self):
        # Test with strings; strings longer than the cap get "..." appended after truncation
        str1 = utils.capStrLen("sodium", 5)
        self.assertEqual("so...", str1)
        str1 = utils.capStrLen("potassium", 6)
        self.assertEqual("pot...", str1)
        str1 = utils.capStrLen("rubidium", 7)
        self.assertEqual("rubi...", str1)
        # a cap too small to hold even the ellipsis is an error
        with self.assertRaises(Exception):
            str1 = utils.capStrLen("sodium", 2)

    def test_list2str(self):
        # Test with list of strings
        list1 = ["One", "Two"]
        list2 = ["Three", "Four"]
        str1 = "OneTwo"
        str2 = utils.list2str(list1, 4, None, None)
        self.assertEqual(str1, str2)
        str1 = "One Two "
        str2 = utils.list2str(list1, None, None, 5)
        self.assertEqual(str1, str2)
        str1 = "OneTwoThreeFour"
        str2 = utils.list2str(list2, None, list1, None)
        self.assertEqual(str1, str2)
        str1 = "OneTwoThreeFourT...Four"
        str2 = utils.list2str(list2, 4, list1, None)
        self.assertEqual(str1, str2)
        str1 = "OneTwoThreeFourT...FourThreeFour "
        str2 = utils.list2str(list2, None, list1, 5)
        self.assertEqual(str1, str2)
        str1 = "OneTwoThreeFourT...FourThreeFour T... Four "
        str2 = utils.list2str(list2, 4, list1, 5)
        self.assertEqual(str1, str2)

    def test_slantSplit(self):
        x1 = utils.slantSplit(10.0, 4.0, 4)
        x2 = utils.slantSplit(10.0, 4.0, 4, order="high first")
        # "high first" reverses the ramp direction; both sum to the same total
        self.assertListEqual(x1, [1.0, 2.0, 3.0, 4.0])
        self.assertListEqual(x2, [4.0, 3.0, 2.0, 1.0])

    def test_prependToList(self):
        a = ["hello", "world"]
        b = [1, 2, 3]
        utils.prependToList(a, b)
        # `a` is mutated in place, with b's items placed at the front
        self.assertListEqual(a, [1, 2, 3, "hello", "world"])

    def test_plotMatrix(self):
        # smoke test: exercise the various optional plotting arguments
        matrix = np.zeros([2, 2], dtype=float)
        matrix[0, 0] = 1
        matrix[0, 1] = 2
        matrix[1, 0] = 3
        matrix[1, 1] = 4
        xtick = ([0, 1], ["1", "2"])
        ytick = ([0, 1], ["1", "2"])
        fname = "test_plotMatrix_testfile"
        with directoryChangers.TemporaryDirectoryChanger():
            utils.plotMatrix(matrix, fname, show=False, title="plot")
            utils.plotMatrix(matrix, fname, show=False, minV=0, maxV=5, figsize=[3, 4])
            utils.plotMatrix(matrix, fname, show=False, xticks=xtick, yticks=ytick)

    def test_classesInHierarchy(self):
        """Tests the classesInHierarchy utility."""
        # load the test reactor
        _o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")

        # call the `classesInHierarchy` function
        classCounts = defaultdict(lambda: 0)
        utils.classesInHierarchy(r, classCounts, None)

        # validate the `classesInHierarchy` function
        self.assertGreater(len(classCounts), 30)
        self.assertEqual(classCounts[type(r)], 1)
        self.assertEqual(classCounts[type(r.core)], 1)

        # further validate the Reactor hierarchy is in place
        self.assertEqual(len(r.core.getAssemblies()), 1)
        self.assertEqual(len(r.core.getBlocks()), 1)

    def test_codeTiming(self):
        """Test that codeTiming preserves function attributes when it wraps a function."""

        @codeTiming.timed
        def testFunc():
            """Test function docstring."""
            pass

        self.assertEqual(getattr(testFunc, "__doc__"), "Test function docstring.")
        self.assertEqual(getattr(testFunc, "__name__"), "testFunc")

    def test_safeCopy(self):
        with directoryChangers.TemporaryDirectoryChanger():
            os.mkdir("dir1")
            os.mkdir("dir2")
            file1 = "dir1/file1.txt"
            with open(file1, "w") as f:
                f.write("Hello")
            file2 = "dir1\\file2.txt"
            with open(file2, "w") as f:
                f.write("Hello2")

            with mockRunLogs.BufferLog() as mock:
                # Test Linuxy file path
                self.assertEqual("", mock.getStdout())
                safeCopy(file1, "dir2")
                self.assertIn("Copied", mock.getStdout())
                self.assertIn("file1", mock.getStdout())
                self.assertIn("->", mock.getStdout())
                # Clean up for next safeCopy
                mock.emptyStdout()
                # Test Windowsy file path
                self.assertEqual("", mock.getStdout())
                safeCopy(file2, "dir2")
                self.assertIn("Copied", mock.getStdout())
                self.assertIn("file2", mock.getStdout())
                self.assertIn("->", mock.getStdout())
                self.assertTrue(os.path.exists(os.path.join("dir2", "file1.txt")))

    def test_safeMove(self):
        with directoryChangers.TemporaryDirectoryChanger():
            os.mkdir("dir1")
            os.mkdir("dir2")
            file1 = "dir1/file1.txt"
            with open(file1, "w") as f:
                f.write("Hello")
            file2 = "dir1\\file2.txt"
            with open(file2, "w") as f:
                f.write("Hello2")

            with mockRunLogs.BufferLog() as mock:
                # Test Linuxy file path
                self.assertEqual("", mock.getStdout())
                safeMove(file1, "dir2")
                self.assertIn("Moved", mock.getStdout())
                self.assertIn("file1", mock.getStdout())
                self.assertIn("->", mock.getStdout())
                # Clean up for next safeCopy
                mock.emptyStdout()
                # Test Windowsy file path
                self.assertEqual("", mock.getStdout())
                safeMove(file2, "dir2")
                self.assertIn("Moved", mock.getStdout())
                self.assertIn("file2", mock.getStdout())
                self.assertIn("->", mock.getStdout())
                self.assertTrue(os.path.exists(os.path.join("dir2", "file1.txt")))

    def test_safeMoveDir(self):
        # moving an entire directory, rather than individual files
        with directoryChangers.TemporaryDirectoryChanger():
            os.mkdir("dir1")
            file1 = "dir1/file1.txt"
            with open(file1, "w") as f:
                f.write("Hello")
            file2 = "dir1\\file2.txt"
            with open(file2, "w") as f:
                f.write("Hello2")

            with mockRunLogs.BufferLog() as mock:
                self.assertEqual("", mock.getStdout())
                safeMove("dir1", "dir2")
                self.assertIn("Moved", mock.getStdout())
                self.assertIn("dir1", mock.getStdout())
                self.assertIn("dir2", mock.getStdout())
                self.assertTrue(os.path.exists(os.path.join("dir2", "file1.txt")))


class CyclesSettingsTests(unittest.TestCase):
    """
    Check reading of the various cycle history settings for both the detailed
    and simple input options.
    """

    # NOTE(review): the indentation of these YAML strings is not recoverable from the
    # collapsed source; reconstructed to be valid YAML for Settings.loadFromString.
    detailedCyclesSettings = """
metadata:
  version: uncontrolled
settings:
  power: 1000000000.0
  nCycles: 3
  cycles:
    - name: dog
      cumulative days: [1, 2, 3]
      power fractions: [0.1, 0.2, 0.3]
      availability factor: 0.1
    - cycle length: 10
      burn steps: 5
      power fractions: [0.2, 0.2, 0.2, 0.2, 0]
      availability factor: 0.5
    - name: ferret
      step days: [3, R4]
      power fractions: [0.3, R4]
  runType: Standard
"""

    simpleCyclesSettings = """
metadata:
  version: uncontrolled
settings:
  power: 1000000000.0
  nCycles: 3
  availabilityFactors: [0.1, R2]
  cycleLengths: [1, 2, 3]
  powerFractions: [0.1, 0.2, R1]
  burnSteps: 3
  runType: Standard
"""

    # "R<n>" in the inputs means "repeat the previous value n more times"; the
    # expanded expectations below reflect that.
    powerFractionsDetailedSolution = [
        [0.1, 0.2, 0.3],
        [0.2, 0.2, 0.2, 0.2, 0],
        [0.3, 0.3, 0.3, 0.3, 0.3],
    ]
    powerFractionsSimpleSolution = [[0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [0.2, 0.2, 0.2]]
    cycleNamesDetailedSolution = ["dog", None, "ferret"]
    cycleNamesSimpleSolution = [None, None, None]
    availabilityFactorsDetailedSolution = [0.1, 0.5, 1]
    availabilityFactorsSimpleSolution = [0.1, 0.1, 0.1]
    stepLengthsDetailedSolution = [
        [1, 1, 1],
        [10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5],
        [3, 3, 3, 3, 3],
    ]
    stepLengthsSimpleSolution = [
        [1 * 0.1 / 3, 1 * 0.1 / 3, 1 * 0.1 / 3],
        [2 * 0.1 / 3, 2 * 0.1 / 3, 2 * 0.1 / 3],
        [3 * 0.1 / 3, 3 * 0.1 / 3, 3 * 0.1 / 3],
    ]
    cycleLengthsDetailedSolution = [30, 10, 15]
    cycleLengthsSimpleSolution = [1, 2, 3]
    burnStepsDetailedSolution = [3, 5, 5]
    burnStepsSimpleSolution = [3, 3, 3]
    nodesPerCycleDetailedSolution = [4, 6, 6]
    nodesPerCycleSimpleSolution = [4, 4, 4]
    maxBurnStepsDetailedSolution = 5
    maxBurnStepsSimpleSolution = 3

    def setUp(self):
        self.standaloneDetailedCS = Settings()
        self.standaloneDetailedCS.loadFromString(self.detailedCyclesSettings)
        self.standaloneSimpleCS = Settings()
        self.standaloneSimpleCS.loadFromString(self.simpleCyclesSettings)

    def test_getPowerFractions(self):
        self.assertEqual(
            getPowerFractions(self.standaloneDetailedCS),
            self.powerFractionsDetailedSolution,
        )
        self.assertEqual(
            getPowerFractions(self.standaloneSimpleCS),
            self.powerFractionsSimpleSolution,
        )

    def test_getCycleNames(self):
        self.assertEqual(getCycleNames(self.standaloneDetailedCS), self.cycleNamesDetailedSolution)
        self.assertEqual(getCycleNames(self.standaloneSimpleCS), self.cycleNamesSimpleSolution)

    def test_getAvailabilityFactors(self):
        self.assertEqual(
            getAvailabilityFactors(self.standaloneDetailedCS),
            self.availabilityFactorsDetailedSolution,
        )
        self.assertEqual(
            getAvailabilityFactors(self.standaloneSimpleCS),
            self.availabilityFactorsSimpleSolution,
        )

    def test_getStepLengths(self):
        self.assertEqual(
            getStepLengths(self.standaloneDetailedCS),
            self.stepLengthsDetailedSolution,
        )
        self.assertEqual(
            getStepLengths(self.standaloneSimpleCS),
            self.stepLengthsSimpleSolution,
        )

    def test_getCycleLengths(self):
        self.assertEqual(
            getCycleLengths(self.standaloneDetailedCS),
            self.cycleLengthsDetailedSolution,
        )
        self.assertEqual(getCycleLengths(self.standaloneSimpleCS), self.cycleLengthsSimpleSolution)

    def test_getBurnSteps(self):
        self.assertEqual(getBurnSteps(self.standaloneDetailedCS), self.burnStepsDetailedSolution)
        self.assertEqual(getBurnSteps(self.standaloneSimpleCS), self.burnStepsSimpleSolution)

    def test_hasBurnup(self):
        self.assertTrue(hasBurnup(self.standaloneDetailedCS))

    def test_getMaxBurnSteps(self):
        self.assertEqual(
            getMaxBurnSteps(self.standaloneDetailedCS),
            self.maxBurnStepsDetailedSolution,
        )
        self.assertEqual(getMaxBurnSteps(self.standaloneSimpleCS), self.maxBurnStepsSimpleSolution)

    def test_getNodesPerCycle(self):
        self.assertEqual(
            getNodesPerCycle(self.standaloneDetailedCS),
            self.nodesPerCycleDetailedSolution,
        )
        self.assertEqual(getNodesPerCycle(self.standaloneSimpleCS), self.nodesPerCycleSimpleSolution)

    def test_getCycleNodeFromCumulativeStep(self):
        self.assertEqual(getCycleNodeFromCumulativeStep(8, self.standaloneDetailedCS), (1, 4))
        self.assertEqual(getCycleNodeFromCumulativeStep(12, self.standaloneDetailedCS), (2, 3))
        self.assertEqual(getCycleNodeFromCumulativeStep(4, self.standaloneSimpleCS), (1, 0))
        self.assertEqual(getCycleNodeFromCumulativeStep(8, self.standaloneSimpleCS), (2, 1))

    def test_getCycleNodeFromCumulativeNode(self):
        self.assertEqual(getCycleNodeFromCumulativeNode(8, self.standaloneDetailedCS), (1, 4))
        self.assertEqual(getCycleNodeFromCumulativeNode(12, self.standaloneDetailedCS), (2, 2))
        self.assertEqual(getCycleNodeFromCumulativeNode(3, self.standaloneSimpleCS), (0, 3))
        self.assertEqual(getCycleNodeFromCumulativeNode(8, self.standaloneSimpleCS), (2, 0))
        # negative cumulative node numbers are rejected
        with self.assertRaises(ValueError):
            getCycleNodeFromCumulativeNode(-1, self.standaloneSimpleCS)

    def test_getPreviousTimeNode(self):
        with self.assertRaises(ValueError):
            getPreviousTimeNode(0, 0, "foo")
        self.assertEqual(getPreviousTimeNode(1, 1, self.standaloneSimpleCS), (1, 0))
        self.assertEqual(getPreviousTimeNode(1, 0, self.standaloneSimpleCS), (0, 3))
        self.assertEqual(getPreviousTimeNode(1, 0, self.standaloneDetailedCS), (0, 3))
        self.assertEqual(getPreviousTimeNode(2, 4, self.standaloneDetailedCS), (2, 3))

    def test_getCumulativeNodeNum(self):
        self.assertEqual(getCumulativeNodeNum(2, 0, self.standaloneSimpleCS), 8)
        self.assertEqual(getCumulativeNodeNum(1, 2, self.standaloneSimpleCS), 6)
        self.assertEqual(getCumulativeNodeNum(2, 0, self.standaloneDetailedCS), 10)
        self.assertEqual(getCumulativeNodeNum(1, 0, self.standaloneDetailedCS), 4)


# ================================================
# FILE: armi/utils/textProcessors.py
# ================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes and functions for manipulating text files."""

import io
import os
import pathlib
import re
from typing import List, Optional, TextIO, Tuple, Union

from armi import runLog

_INCLUDE_CTOR = False
# Matches a line containing an `!include <path>` directive. Group 1 is any
# non-comment text preceding the directive; group 2 is the included path.
_INCLUDE_RE = re.compile(r"^([^#]*\s+)?!include\s+(.*)\n?$")
# Matches leading whitespace/block-collection characters ("-", "?", ":");
# group 1 starts at the first "meaningful" YAML content character, if any.
_INDENT_RE = re.compile(r"^[\s\-\?:]*([^\s\-\?:].*)?$")

# String constants
SCIENTIFIC_PATTERN = r"[+-]?\d*\.\d+[eEdD][+-]\d+"
"""
Matches:

* code:` 1.23e10`
* code:`-1.23Ee10`
* code:`+1.23d10`
* code:` .23D10`
* code:` 1.23e-10`
* code:` 1.23e+1`
"""

FLOATING_PATTERN = r"[+-]?\d+\.*\d*"
"""Matches 1, 100, 1.0, -1.2, +12.234"""

DECIMAL_PATTERN = r"[+-]?\d*\.\d+"
"""Matches .1, 1.213423, -23.2342, +.023"""


class FileMark:
    """Records a location (path, line, column) where an ``!include`` was found."""

    def __init__(self, fName, line, column, relativeTo):
        self.path = fName
        self.line = line
        self.column = column
        # if the path is relative, where is it relative to? We need this to be able to
        # normalize relative paths to a root file.
        self.relativeTo = relativeTo

    def __str__(self):
        return "{}, line {}, column {}".format(self.path, self.line, self.column)


def _processIncludes(
    src: Union[TextIO, pathlib.Path],
    out,
    includes: List[Tuple[pathlib.Path, FileMark]],
    root: pathlib.Path,
    indentation=0,
    currentFile="<stream>",
):
    """
    This is the workhorse of ``resolveMarkupInclusions`` and friends.

    Recursively inserts the contents of !included YAML files into the output
    stream, keeping track of indentation and a list of included files along
    the way.
    """

    def _beginningOfContent(line: str) -> int:
        """
        Return the position of the first "content" character.

        This follows the YAML spec at https://yaml.org/spec/current.html#id2519916
        In short, it will return the position of the first character that is not
        whitespace or one of the special "block collection" markers ("-", "?", and ":")
        """
        m = _INDENT_RE.match(line)
        if m and m.group(1) is not None:
            return m.start(1)
        else:
            # line is all whitespace/markers; treat content as starting at column 0
            return 0

    indentSpace = " " * indentation
    if hasattr(src, "getvalue"):
        # assume stringIO
        lines = [ln + "\n" for ln in src.getvalue().split("\n")]
    else:
        # assume file stream or TextIOBase, and it has a readlines attr
        lines = src.readlines()

    for i, line in enumerate(lines):
        # the first line of an included file lands at the !include location itself,
        # so it does not get the extra indentation
        leadingSpace = indentSpace if i > 0 else ""
        m = _INCLUDE_RE.match(line)
        if m:
            # this line has an !include on it
            if m.group(1) is not None:
                out.write(leadingSpace + m.group(1))

            fName = pathlib.Path(os.path.expandvars(m.group(2)))
            path = root / fName
            if not path.exists():
                raise ValueError("The !included file, `{}` does not exist from {}!".format(fName, root))

            includes.append((fName, FileMark(currentFile, i, m.start(2), root)))

            with open(path, "r") as includedFile:
                # included content is indented to the first meaningful YAML column
                # of the including line
                firstCharacterPos = _beginningOfContent(line)
                newIndent = indentation + firstCharacterPos
                # recurse; relative paths inside the included file resolve against
                # that file's own directory
                _processIncludes(
                    includedFile,
                    out,
                    includes,
                    path.parent,
                    indentation=newIndent,
                    currentFile=path,
                )
        else:
            out.write(leadingSpace + line)


def resolveMarkupInclusions(src: Union[TextIO, pathlib.Path], root: Optional[pathlib.Path] = None) -> io.StringIO:
    r"""
    Process a text stream, appropriately handling ``!include`` tags.

    This will take the passed IO stream or file path, replacing any instances of
    ``!include [path]`` with the appropriate contents of the ``!include`` file.
    What is returned is a new text stream, containing the contents of all of the
    files stitched together.

    Parameters
    ----------
    src : StringIO or TextIOBase/Path
        If a Path is provided, read text from there. If is stream is provided,
        consume text from the stream. If a stream is provided, ``root`` must also
        be provided.
    root : Optional Path
        The root directory to use for resolving relative paths in !include tags.
        If a stream is provided for ``src``, ``root`` must be provided.
        Otherwise, the directory containing the ``src`` path will be used by
        default.

    Notes
    -----
    While the use of ``!include`` appears as though it would invoke some sort of
    special custom YAML constructor code, this does not do that. Processing
    these inclusions as part of the document parsing/composition that comes with
    ruamel.yaml could work, but has a number of prohibitive drawbacks (or at
    least reasons why it might not be worth doing). Using a custom constructor
    is more-or-less supported by ruamel.yaml (which we do use, as it is what
    underpins the yamlize package), but it carries limitations about how anchors
    and aliases can cross included-file boundaries. Getting around this requires
    either monkey-patching ruamel.yaml, or subclassing it, which in turn would
    require monkey-patching yamlize.

    Instead, we treat the ``!include``\ s as a sort of pre-processor directive,
    which essentially pastes the contents of the ``!include``\ d file into the
    location of the ``!include``. The result is a text stream containing the
    entire contents, with all ``!include``\ s resolved. The only degree of
    sophistication lies in how indentation is handled; since YAML cares about
    indentation to keep track of object hierarchy, care must be taken that the
    included file contents are indented appropriately.

    To precisely describe how the indentation works, it helps to have some
    definitions:

    - Included file: The file specified in the ``!include [Included file]``
    - Including line: The line that actually contains the
      ``!include [Included file]``
    - Meaningful YAML content: Text in a YAML file that is not either
      indentation or a special character like "-", ":" or "?".

    The contents of the included file will be indented such that the first
    character of each line in the included file will be found at the first
    column in the including line that contains meaningful YAML content. The only
    exception is the first line of the included file, which starts at the
    location of the ``!include`` itself and is not deliberately indented.

    In the future, we may wish to do the more sophisticated processing of the
    ``!include``\ s as part of the YAML parse. For future reference, there is
    some pure gold on that topic here:
    https://stackoverflow.com/questions/44910886/pyyaml-include-file-and-yaml-aliases-anchors-references
    """
    return _resolveMarkupInclusions(src, root)[0]


def _getRootFromSrc(src: Union[TextIO, pathlib.Path], root: Optional[pathlib.Path]) -> pathlib.Path:
    # Determine the effective root directory for resolving relative !include paths:
    # an explicit root wins; a Path src falls back to its own parent directory;
    # a stream src *requires* an explicit root.
    if isinstance(src, pathlib.Path):
        root = root or src.parent.absolute()
    elif isinstance(src, io.TextIOBase):
        if root is None:
            raise ValueError("A stream was provided without a root directory.")
    else:
        raise TypeError("Unsupported source type: `{}`!".format(type(src)))
    return root


def findYamlInclusions(
    src: Union[TextIO, pathlib.Path], root: Optional[pathlib.Path] = None
) -> List[Tuple[pathlib.Path, FileMark]]:
    """
    Return a list containing all of the !included YAML files from a root file.

    This will attempt to "normalize" relative paths to the passed root. If that
    is not possible, then an absolute path will be used instead. For example, if
    a file (A) !includes another file (B) by an absolute path, which in turn
    !includes more files relative to (B), all of (B)'s relative includes will be
    turned into absolute paths from the perspective of the root file (A).
    """
    includes = _resolveMarkupInclusions(src, root)[1]
    root = _getRootFromSrc(src, root)

    normalizedIncludes = []
    for path, mark in includes:
        if not path.is_absolute():
            try:
                path = (mark.relativeTo / path).relative_to(root or os.getcwd())
            except ValueError:
                # Can't make a relative path.
IMO, pathlib gives up a little too early, # but we still probably want to decay to absolute paths if the files # aren't in the same tree. path = (mark.relativeTo / path).absolute() normalizedIncludes.append((path, mark)) return normalizedIncludes def _resolveMarkupInclusions( src: Union[TextIO, pathlib.Path], root: Optional[pathlib.Path] = None ) -> Tuple[io.StringIO, List[Tuple[pathlib.Path, FileMark]]]: root = _getRootFromSrc(src, root) if isinstance(src, pathlib.Path): # this is inefficient, but avoids having to play with io buffers with open(src, "r") as rootFile: src = io.StringIO(rootFile.read()) out = io.StringIO() includes = [] _processIncludes(src, out, includes, root) out.seek(0) # be kind; rewind src.seek(0) return out, includes class SequentialReader: r""" Fast sequential reader that must be used within a with statement. Attributes ---------- line : str value of the current line match : re.match value of the current match Notes ----- This reader will sequentially search a file for a regular expression pattern or string depending on the method used. When the pattern/string is matched/found, the reader will stop, return :code:`True`, and set the attributes :code:`line` and :code:`match`. This pattern makes it easy to cycle through repetitive output in a very fast manner. For example, if you had a text file with consistent chunks of information that always started with the same text followed by information, you could do something like this: >>> with SequentialReader("somefile") as sr: ... data = [] ... while sr.searchForText("start of data chunk"): ... # this needs to repeat for as many chunks as there are. ... if sr.searchForPatternOnNextLine("some-(?P<data>\w+)-pattern"): ... 
    def __init__(self, filePath):
        # The stream is opened in __enter__, not here; this class must be
        # used as a context manager.
        self._filePath = filePath
        self._stream = None
        self.line = ""  # most recently read line ("" means nothing read yet / EOF)
        self.match = None  # most recent re.Match object, if any
        self._textErrors = []  # (text, Exception) pairs checked against every line read
        self._textWarnings = []  # (text, warning message) pairs checked against every line read
        self._patternErrors = []  # (compiled regex, Exception) pairs checked against every line read
        self.ignoreAllErrors = False  # when True, _readLine skips all registered checks

    def issueWarningOnFindingText(self, text, warning):
        """Add a text search for every line of the file, if the text is found the specified warning will be
        issued.

        This is important for determining if issues occurred while searching for text.

        Parameters
        ----------
        text : str
            text to find within the file
        warning : str
            A warning message to issue.

        See Also
        --------
        raiseErrorOnFindingText
        raiseErrorOnFindingPattern
        """
        self._textWarnings.append((text, warning))

    def raiseErrorOnFindingText(self, text, error):
        """Add a text search for every line of the file, if the text is found the specified error will be
        raised.

        This is important for determining if errors occurred while searching for text.

        Parameters
        ----------
        text : str
            text to find within the file
        error : Exception
            An exception to raise.

        See Also
        --------
        raiseErrorOnFindingPattern
        """
        self._textErrors.append((text, error))

    def raiseErrorOnFindingPattern(self, pattern, error):
        """Add a pattern search for every line of the file, if the pattern is found the specified error will
        be raised.

        This is important for determining if errors occurred while searching for text.

        Parameters
        ----------
        pattern : str
            regular expression pattern
        error : Exception
            An exception to raise.

        See Also
        --------
        raiseErrorOnFindingText
        """
        # Compile once up front; the pattern is applied to every line read.
        self._patternErrors.append((re.compile(pattern), error))

    def __repr__(self):
        return "<{} {} {}>".format(
            self.__class__.__name__,
            self._filePath,
            "open" if self._stream is not None else "closed",
        )

    def __enter__(self):
        if not os.path.exists(self._filePath):
            raise OSError("Cannot open non-existing file {}".format(self._filePath))
        self._stream = open(self._filePath, "r")
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # if checking for errors, we need to keep reading: the registered
        # text/pattern checks only fire inside _readLine, so drain the rest of
        # the file before closing so late errors are not silently missed.
        if exc_type is not None and not self.ignoreAllErrors and (self._patternErrors or self._textErrors):
            while self._readLine():  # all lines have '\n' terminators, so only EOF yields a falsy ""
                pass
        if self._stream is not None:
            try:
                self._stream.close()
            except Exception:
                # We really don't care if anything fails here, plus an exception in exit is ignored anyway
                pass
            self._stream = None

    def searchForText(self, text):
        """Search the file for the next occurrence of :code:`text`, and set the :code:`self.line` attribute
        to that line's value if it matched.

        Notes
        -----
        This will search the file line by line until it finds the text. This sets the attribute
        :code:`self.line`. If the previous :code:`_searchFor*` method did not match, the last line it did
        not match will be searched first.

        Returns
        -------
        matched : bool
            Boolean indicating whether or not the pattern matched
        """
        self.match = None
        while True:
            if text in self.line:
                return True
            self.line = self._readLine()
            if self.line == "":
                # "" only occurs at EOF; blank lines still carry their "\n"
                break
        return False

    def searchForPattern(self, pattern):
        """Search the file for the next occurrence of :code:`pattern` and set the :code:`self.line`
        attribute to that line's value if it matched.

        Notes
        -----
        This will search the file line by line until it finds the pattern. This sets the attribute
        :code:`self.line`. If the previous :code:`_searchFor*` method did not match, the last line it did
        not match will be searched first.

        Returns
        -------
        matched : bool
            Boolean indicating whether or not the pattern matched
        """
        while True:
            self.match = re.search(pattern, self.line)
            if self.match is not None:
                return True
            self.line = self._readLine()
            if self.line == "":
                break
        return False
    def searchForPatternOnNextLine(self, pattern):
        """Search the next line for a given pattern, and set the :code:`self.line` attribute to that line's
        value if it matched.

        Notes
        -----
        This sets the attribute :code:`self.line`. If the previous :code:`_searchFor*` method did not match,
        the last line it did not match will be searched first.

        Returns
        -------
        matched : bool
            Boolean indicating whether or not the pattern matched
        """
        self.match = re.search(pattern, self.line)
        if self.match is None:
            # current line did not match; advance exactly one line and retry
            self.line = self._readLine()
            self.match = re.search(pattern, self.line)
        return self.match is not None

    def _readLine(self):
        # Read one raw line and apply every registered warning/error check
        # unless ignoreAllErrors is set. Returns "" only at EOF.
        line = self._stream.readline()
        if not self.ignoreAllErrors:
            for text, error in self._textErrors:
                if text in line:
                    raise error
            for text, warning in self._textWarnings:
                if text in line:
                    runLog.warning(warning)
            for regex, error in self._patternErrors:
                if regex.match(line):
                    raise error
        return line

    def consumeLine(self):
        """Consumes the line.

        This is necessary when searching for the same pattern repetitively, because otherwise
        searchForPatternOnNextLine would not work.
        """
        self.line = ""
        self.match = None


class SequentialStringIOReader(SequentialReader):
    r"""
    Fast sequential reader that must be used within a with statement.

    Attributes
    ----------
    line : str
        value of the current line
    match : re.match
        value of the current match

    Notes
    -----
    This reader will sequentially search a file for a regular expression pattern or string depending on the
    method used. When the pattern/string is matched/found, the reader will stop, return :code:`True`, and
    set the attributes :code:`line` and :code:`match`. This pattern makes it easy to cycle through
    repetitive output in a very fast manner.

    For example, if you had a text file with consistent chunks of information that always started with the
    same text followed by information, you could do something like this:

    >>> with SequentialReader("somefile") as sr:
    ...     data = []
    ...     while sr.searchForText("start of data chunk"):
    ...         # this needs to repeat for as many chunks as there are.
    ...         if sr.searchForPatternOnNextLine("some-(?P<data>\\w+)-pattern"):
    ...             data.append(sr.match["data"])
    """

    def __init__(self, stringIO):
        # Reuse base-class bookkeeping; "StringIO" is just a placeholder path
        # since the stream is supplied directly rather than opened from disk.
        SequentialReader.__init__(self, "StringIO")
        self._stream = stringIO

    def __enter__(self):
        """
        Override to prevent trying to open/reopen a StringIO object.

        We don't need to override :code:`__exit__`, because it doesn't care if closing the object fails.
        """
        return self


class TextProcessor:
    """
    A general text processing object that extends python's abilities to scan through huge files.

    Use this instead of a raw file object to read data out of output files, etc.
    """

    # Pre-built regex patterns shared by all instances (defined elsewhere in this module).
    scipat = SCIENTIFIC_PATTERN
    number = FLOATING_PATTERN
    decimal = DECIMAL_PATTERN

    def __init__(self, fname, highMem=False):
        # When True, self.checkErrors is invoked on every scanned line.
        self.eChecking = False
        # Preserve python 2-like behavior for unit tests that pass None and provide
        # their own text data (in py2, passing None to abspath yields cwd; py3 raises)
        self.fpath = os.path.dirname(os.path.abspath(fname or os.getcwd()))
        f = None
        # NOTE: self.f remains None when fname is None; callers are expected to
        # supply their own stream in that case.
        if fname is not None:
            if os.path.exists(fname):
                f = open(fname)
            else:
                # need this not to fail for detecting when RXSUM doesn't exist, etc.
                # Note: Could make it check before instantiating...
                raise FileNotFoundError(f"{fname} does not exist.")
        self.f = f

    def reset(self):
        """Rewinds the file so you can search through it again."""
        self.f.seek(0)

    def __repr__(self):
        return "<Text file at {0}>".format(self.f.name)

    def errorChecking(self, checkForErrors):
        # Toggle per-line error checking; see fsearch.
        self.eChecking = checkForErrors

    def checkErrors(self, line):
        # Hook for subclasses: inspect a line for error text. No-op here.
        pass

    def fsearch(self, pattern, msg=None, killOn=None, textFlag=False):
        """
        Searches file f for pattern and displays msg when found. Returns line in which pattern is found or
        FALSE if no pattern is found. Stops searching if finds killOn first.

        If you specify textFlag=True, the search won't use a regular expression (and can't). The basic
        result is you get less powerful matching capabilities at a huge speedup (10x or so probably, but
        that's just a guess.) pattern and killOn must be pure text if you do this.
        """
        current = 0
        result = ""
        if textFlag:
            # fast, text-only mode
            for line in self.f:
                if self.eChecking:
                    self.checkErrors(line)
                if pattern in line:
                    result = line
                    break
                elif killOn and killOn in line:
                    result = ""
                    break
            else:
                # for/else: loop exhausted the file without breaking
                result = ""
        else:
            # slower regular expression mode
            cpat = re.compile(pattern)
            if killOn:
                kpat = re.compile(killOn)
            for line in self.f:
                if self.eChecking:
                    self.checkErrors(line)
                if killOn:
                    kill = re.search(kpat, line)
                    if kill:
                        # the kill phrase was found first, so die.
                        result = ""
                        break
                current = re.search(cpat, line)
                if current:
                    if msg:
                        print(msg)
                    result = line
                    break
            if not current:
                result = ""
        return result
def getTriangleArea(x1: float, y1: float, x2: float, y2: float, x3: float, y3: float) -> float:
    """
    Get the area of a triangle given the vertices of a triangle using Heron's formula.

    Parameters
    ----------
    x1, y1 : float
        coordinates of the first vertex
    x2, y2 : float
        coordinates of the second vertex
    x3, y3 : float
        coordinates of the third vertex

    Returns
    -------
    float
        Area of the triangle.

    Notes
    -----
    Uses the numerically stable form of Heron's formula. See
    `https://en.wikipedia.org/wiki/Heron%27s_formula` for more information.
    """
    # Side lengths between each pair of vertices.
    a = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
    b = math.sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2)
    c = math.sqrt((x1 - x3) ** 2 + (y1 - y3) ** 2)
    return 1.0 / 4.0 * math.sqrt((a + (b + c)) * (c - (a - b)) * (c + (a - b)) * (a + (b - c)))


def getTriangleCentroid(x1, y1, x2, y2, x3, y3):
    """
    Return the x and y coordinates of a triangle's centroid.

    Parameters
    ----------
    x1, y1 : float
        coordinates of the first vertex
    x2, y2 : float
        coordinates of the second vertex
    x3, y3 : float
        coordinates of the third vertex

    Returns
    -------
    x : float
        x coordinate of triangle's centroid
    y : float
        y coordinate of a triangle's centroid
    """
    # The centroid is simply the arithmetic mean of the vertices.
    centroidX = (x1 + x2 + x3) / 3.0
    centroidY = (y1 + y2 + y3) / 3.0
    return centroidX, centroidY


def checkIfPointIsInTriangle(
    x1: float, y1: float, x2: float, y2: float, x3: float, y3: float, x: float, y: float
) -> bool:
    """
    Test if a point defined by x,y coordinates is within a triangle defined by vertices with x,y coordinates.

    Parameters
    ----------
    x1, y1 : float
        coordinates of the first vertex of the bounding triangle
    x2, y2 : float
        coordinates of the second vertex of the bounding triangle
    x3, y3 : float
        coordinates of the third vertex of the bounding triangle
    x, y : float
        coordinates of the point being tested

    Returns
    -------
    bool
        True if the point lies inside (or within a small tolerance of) the triangle.

    Notes
    -----
    This method uses the barycentric method. See
    `http://totologic.blogspot.com/2014/01/accurate-point-in-triangle-test.html`
    """
    # Barycentric weights; the denominator is twice the (signed) triangle area.
    denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3)
    a = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom
    b = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom
    c = 1.0 - a - b

    # need to have some tolerance in case the point lies on the edge of the triangle
    epsilon = 1e-10
    return all(weight + epsilon >= 0.0 and weight - epsilon <= 1.0 for weight in (a, b, c))
"""The units module contains unit conversion functions and constants.""" import math import scipy.constants # Units (misc) DPA = "dpa" FIMA = "FIMA" PERCENT_FIMA = r"%FIMA" MB = "MB" # megabytes MOLES = "mole" MWD = "MWd" PASCALS = "Pa" PERCENT = "%" UNITLESS = "" USD = "USD" # US currency (the dollar) # Units (angles) DEGREES = "degrees" RADIANS = "radians" # Units (energy) EV = "eV" MEV = "MeV" MW = "MW" WATTS = "W" # Units (length) CM = "cm" METERS = "m" MICRONS = chr(181) + "m" # Units (mass) GRAMS = "g" KG = "kg" MT = "MT" # Units (reactivity) CENTS = "cents" # 1/100th of a dollar DOLLARS = "$" # (dk/k/k') / beta PCM = "pcm" REACTIVITY = chr(916) + "k/k/k'" # Units (temperature) DEGC = chr(176) + "C" DEGK = "K" # Units (time) DAYS = "days" MINUTES = "min" SECONDS = "s" YEARS = "yr" # Unit conversions C_TO_K = 273.15 BOLTZMAN_CONSTANT = 8.6173324e-11 # boltzmann constant in MeV/K AVOGADROS_NUMBER = 6.0221415e23 CM2_PER_BARN = 1.0e-24 MOLES_PER_CC_TO_ATOMS_PER_BARN_CM = AVOGADROS_NUMBER * CM2_PER_BARN JOULES_PER_MeV = 1.60217646e-13 JOULES_PER_eV = JOULES_PER_MeV * 1.0e-6 SECONDS_PER_MINUTE = 60.0 MINUTES_PER_HOUR = 60.0 HOURS_PER_DAY = 24.0 SECONDS_PER_HOUR = SECONDS_PER_MINUTE * MINUTES_PER_HOUR SECONDS_PER_DAY = HOURS_PER_DAY * SECONDS_PER_HOUR DAYS_PER_YEAR = 365.24219 # mean tropical year SECONDS_PER_YEAR = 31556926.0 GAS_CONSTANT = 8.3144621 # J/mol-K # Cut-off is taken to be any element/nuclide with an atomic number # that is greater than Actinium (i.e., the first classified Actinide). 
HEAVY_METAL_CUTOFF_Z = 89 MICRONS_PER_METER = 1.0e6 CM2_PER_M2 = 1.0e4 CM3_PER_M3 = 1.0e6 METERS_PER_CM = 0.01 WATTS_PER_MW = 1.0e6 EV_PER_MEV = 1.0e6 MM_PER_CM = 10.0 G_PER_KG = 1000.0 LITERS_PER_CUBIC_METER = 1000 CC_PER_LITER = CM3_PER_M3 / LITERS_PER_CUBIC_METER DEG_TO_RAD = 1.0 / 180.0 * math.pi # Degrees to Radians RAD_TO_REV = 1.0 / (2 * math.pi) # Radians to Revolutions ATOMIC_MASS_CONSTANT_MEV = scipy.constants.physical_constants["atomic mass constant energy equivalent in MeV"][0] ABS_REACTIVITY_TO_PCM = 1.0e5 PA_PER_ATM = scipy.constants.atm PA_PER_MMHG = 133.322368421053 PA_PER_BAR = 100000.0 CURIE_PER_BECQUEREL = 1.0 / 3.7e10 MICROCURIES_PER_BECQUEREL = CURIE_PER_BECQUEREL * 1e-6 G_PER_CM3_TO_KG_PER_M3 = 1000.0 # constants ASCII_MIN_CHAR = 44 # First char allowed in various FORTRAN inputs ASCII_LETTER_A = 65 ASCII_LETTER_Z = 90 ASCII_LETTER_a = 97 ASCII_ZERO = 48 TRACE_NUMBER_DENSITY = 1e-50 MIN_FUEL_HM_MOLES_PER_CC = 1e-10 # More than 10 decimals can create floating point comparison problems in MCNP and DIF3D FLOAT_DIMENSION_DECIMALS = 8 EFFECTIVELY_ZERO = 10.0 ** (-1 * FLOAT_DIMENSION_DECIMALS) # STEFAN_BOLTZMANN_CONSTANT is for constant for radiation heat transfer [W m^-2 K^-4] STEFAN_BOLTZMANN_CONSTANT = 5.67e-8 # W/m^2-K^4 # GRAVITY is the acceleration due to gravity at the Earths surface in [m s^-2]. GRAVITY = 9.80665 # :code:`REYNOLDS_TURBULENT` is the Reynolds number below which a duct flow will exhibit "laminar" # conditions. Reyonlds numbers greater than :code:`REYNOLDS_TURBULENT` will involve flows that are # "transitional" or "turbulent". REYNOLDS_LAMINAR = 2100.0 # :code:`REYNOLDS_TURBULENT` is the Reynolds number above which a duct flow will exhibit "turbulent" # conditions. Reynolds numbers lower than :code:`REYNOLDS_TURBULENT` will involve flows that are # "transitional" or "laminar". REYNOLDS_TURBULENT = 4000.0 def getTk(Tc=None, Tk=None): """ Return a temperature in Kelvin, given a temperature in Celsius or Kelvin. 
Returns ------- T : float temperature in Kelvin Raises ------ TypeError The temperature was not provided as an int or float. """ if not ((Tc is not None) ^ (Tk is not None)): raise ValueError(f"Cannot produce T in K from Tc={Tc} and Tk={Tk}. Please supply a single temperature.") return float(Tk) if Tk is not None else Tc + C_TO_K def getTc(Tc=None, Tk=None): """ Return a temperature in Celsius, given a temperature in Celsius or Kelvin. Returns ------- T : float temperature in Celsius Raises ------ TypeError The temperature was not provided as an int or float. """ if not ((Tc is not None) ^ (Tk is not None)): raise ValueError(f"Cannot produce T in C from Tc={Tc} and Tk={Tk}. Please supply a single temperature.") return float(Tc) if Tc is not None else Tk - C_TO_K def getTf(Tc=None, Tk=None): """ Return a temperature in Fahrenheit, given a temperature in Celsius or Kelvin. Returns ------- T : float temperature in Fahrenheit Raises ------ TypeError The temperature was not provided as an int or float. """ return 1.8 * getTc(Tc, Tk) + 32.0 def getTemperature(Tc=None, Tk=None, tempUnits=None): """ Returns the temperature in the prescribed temperature units. Parameters ---------- Tc : float temperature in Celsius Tk : float temperature in Kelvin tempUnits : str a flag for the temperature units of the correlation 'Tk', 'K', 'Kelvin', 'Tc', 'C', or 'Celsius' are acceptable. Returns ------- T : float temperature in units defined by the tempUnits flag Raises ------ ValueError When an invalid tempUnits input is provided. """ if tempUnits in ["Tk", "K", "Kelvin"]: return getTk(Tc=Tc, Tk=Tk) if tempUnits in ["Tc", "C", "Celsius"]: return getTc(Tc=Tc, Tk=Tk) raise ValueError("Invalid inputs provided. Check docstring.") def getTmev(Tc=None, Tk=None): Tk = getTk(Tc, Tk) return BOLTZMAN_CONSTANT * Tk def convertMmhgToPascal(mmhg): """Converts pressure from mmhg to pascal. 
def convertMmhgToPascal(mmhg):
    """Converts pressure from mmHg to pascal.

    Parameters
    ----------
    mmhg : float
        pressure in mmHg

    Returns
    -------
    pascal : float
        pressure in pascal
    """
    return mmhg * PA_PER_MMHG


def convertBarToPascal(pBar):
    """Converts pressure from bar to pascal.

    Parameters
    ----------
    pBar : float
        pressure in bar

    Returns
    -------
    pascal : float
        pressure in pascal
    """
    return pBar * PA_PER_BAR


def convertAtmToPascal(pAtm):
    """Converts pressure from atmospheres to pascal.

    Parameters
    ----------
    pAtm : float
        pressure in atmospheres

    Returns
    -------
    pascal : float
        pressure in pascal
    """
    return pAtm * PA_PER_ATM


# Dispatch table: maps a pressure-unit label to a converter yielding pascals.
PRESSURE_CONVERTERS = {
    "Pa": lambda pa: pa,
    "bar": convertBarToPascal,
    "mmHg": convertMmhgToPascal,
    "atm": convertAtmToPascal,
}


def sanitizeAngle(theta):
    """
    Returns an angle between 0 and 2pi.

    Parameters
    ----------
    theta : float
        an angle

    Returns
    -------
    theta : float
        an angle between 0 and 2*pi
    """
    if theta < 0:
        # add enough full revolutions to land in [0, 2*pi)
        theta = theta + (1 + -1 * int(theta / (math.pi * 2.0))) * math.pi * 2.0

    if theta > 2.0 * math.pi:
        # strip off the whole revolutions
        theta = theta - int(theta / (math.pi * 2.0)) * math.pi * 2.0

    return theta


def getXYLineParameters(theta, x=0, y=0):
    """
    Returns parameters A B C D for a plane in the XY direction.

    Parameters
    ----------
    theta : float
        angle above x-axis in radians
    x : float
        x coordinate
    y : float
        y coordinate

    Returns
    -------
    A : float
        line coefficient
    B : float
        line coefficient
    C : float
        line coefficient
    D : float
        line coefficient

    Notes
    -----
    the line is in the form of A*x + B*y + C*z - D = 0 -- this corresponds to a MCNP arbitrary line equation
    """
    theta = sanitizeAngle(theta)

    if math.fabs(theta) < 1e-10 or math.fabs(theta - math.pi) < 1e-10 or math.fabs(theta - 2.0 * math.pi) < 1e-10:
        # this is a py plane so y is always y
        return 0.0, 1.0, 0.0, y

    # BUGFIX: this test previously used `>`, which is true for essentially every
    # angle (no angle is within 1e-10 of both pi/2 AND 3*pi/2), so nearly all
    # inputs returned a px plane and the general branch below was unreachable.
    # The intent is clearly "theta points along the y axis".
    if math.fabs(theta - math.pi / 2.0) < 1e-10 or math.fabs(theta - 3 * math.pi / 2.0) < 1e-10:
        # this is a px plane so x is always x
        return 1.0, 0.0, 0.0, x

    # General oblique line through (x, y): note A*cos(theta) + B*sin(theta) = 0,
    # so every point (x + t*cos(theta), y + t*sin(theta)) satisfies A*x' + B*y' = D.
    A = -1.0 / math.cos(theta)
    B = 1.0 / math.sin(theta)
    C = 0.0
    D = A * x + B * y

    return A, B, C, D
"""
Tool to build SCR lists to be added to the RST docs.

This script is meant to be called by the docs build process, to help automate the process of generating
lists of SCRs.
"""

import argparse
import os
import subprocess

import requests

# Maps the PR "Change Type" token (from the PR template) to a human-readable SCR category title.
PR_TYPES = {
    "docs": "Documentation-Only Changes",
    "features": "Code Changes, Features",
    "fixes": "Code Changes, Bugs and Fixes",
    "trivial": "Code Changes, Maintenance, or Trivial",
}


def main():
    """NOTE: This is not used during CI, but exists only for testing and dev purposes."""
    # Instantiate the parser
    parser = argparse.ArgumentParser(description="An ARMI custom doc tool to build the SCR for this release.")

    # Required positional argument
    parser.add_argument("pastCommit", help="The commit hash of the last release.")
    parser.add_argument(
        "prNum", nargs="?", type=int, default=-1, help="The current PR number (use -1 if there is no PR)."
    )

    # Parse the command line
    args = parser.parse_args()
    pastCommit = args.pastCommit
    prNum = int(args.prNum)

    buildScrListing(pastCommit, prNum)


def _findOneLineData(lines: list, prNum: str, key: str):
    """Helper method to find a single line in a GH CLI PR dump.

    Parameters
    ----------
    lines : list
        The GH CLI dump of a PR, split into lines for convenience.
    prNum : str
        The GitHub PR number in question.
    key : str
        The substring that the line in questions starts with.

    Returns
    -------
    str
        Data pulled for the key in question, or "TBD" if the key is not found.
    """
    for line in lines:
        if line.startswith(key):
            # everything after the key, whitespace-trimmed
            return line.split(key)[1].strip()

    print(f"WARNING: SCR: Could not find {key} in PR#{prNum}.")
    return "TBD"


def _buildScrLine(prNum: str, ghUsers: dict):
    """Helper method to build a single RST list item in an SCR.

    Relies on the ``gh`` CLI being installed and authenticated; parses the plain-text
    output of ``gh pr view``.

    Parameters
    ----------
    prNum : str
        The GitHub PR number in question.
    ghUsers : dict
        A mapping from GitHub user names to real names, where possible.

    Returns
    -------
    tuple of (str, str)
        RST-formatted list item, and the SCR category (a key of PR_TYPES).
    """
    txt = subprocess.check_output(["gh", "pr", "view", prNum]).decode("utf-8")
    lines = [ln.strip() for ln in txt.split("\n") if ln.strip()]

    # grab title
    title = _findOneLineData(lines, prNum, "title:")

    # grab author
    author = _findOneLineData(lines, prNum, "author:")
    author = ghUsers.get(author, author)

    # grab reviewer(s) - gh lists them as "name (state), name (state), ..."
    reviewers = _findOneLineData(lines, prNum, "reviewers:")
    reviewers = [rr.split("(")[0].strip() for rr in reviewers.split(",")]
    reviewers = [ghUsers.get(rr, rr) for rr in reviewers]
    reviewerHeader = "Reviewer(s)" if len(reviewers) > 1 else "Reviewer"
    reviewers = ", ".join(reviewers)

    # grab the change type; fall back to "trivial" if the PR template was not filled out
    scrType = _findOneLineData(lines, prNum, "Change Type:")
    if scrType not in PR_TYPES:
        print(f"WARNING: SCR: Invalid change type '{scrType}' for PR#{prNum}")
        scrType = "trivial"

    # grab one-line description
    desc = _findOneLineData(lines, prNum, "One-Sentence Rationale:")

    # grab impact on requirements
    impact = _findOneLineData(lines, prNum, "One-line Impact on Requirements:")

    # build RST list item, representing this data
    tab = "    "
    content = f"* PR #{prNum}: {title}\n\n"
    content += f"{tab}* Rationale: {desc}\n"
    content += f"{tab}* Impact on Requirements: {impact}\n"
    content += f"{tab}* Author: {author}\n"
    content += f"{tab}* {reviewerHeader}: {reviewers}\n\n"

    return content, scrType
def _buildHeader(scrType: str):
    """Build a RST list header for an SCR listing.

    Parameters
    ----------
    scrType : str
        This has to be one of the defined SCR types: features, fixes, trivial, docs

    Returns
    -------
    str
        RST-formatted header title.
    """
    return f"\nList of SCRs of type: {PR_TYPES[scrType]}\n\n"


def isMainPR(prNum: int):
    """Determine if this PR is into the ARMI main branch.

    Scrapes the public GitHub PR page; on any failure (network, rate limit),
    optimistically assumes the PR targets main.

    Parameters
    ----------
    prNum : int
        The number of this PR.

    Returns
    -------
    bool
        True if this PR is merging INTO the ARMI main branch. Default is True.
    """
    try:
        url = f"https://github.com/terrapower/armi/pull/{prNum}"
        r = requests.get(url)
        return "terrapower/armi:main" in r.text
    except Exception as e:
        print(f"WARNING: SCR: Failed to determine if PR#{prNum} merged into the main branch: {e}")
        return True


def parseAuthorsFile():
    """
    Parse the ARMI "AUTHORS" file to get a mapping from GitHub usernames to human names.

    This is a custom "data format" where each line looks like::

        Bob Ross (the-painter-bob-ross-987)
        Emmy Noether (crazy-smart-emmy-noether-987, super-smart-dr-nother-123)

    Returns
    -------
    dict
        Mapping from GitHub usernames to real / human author names.
    """
    ghUsers = {}

    # loop through the lines in the ARMI AUTHORS file (two dirs up from this script)
    filePath = os.path.join(os.path.dirname(__file__), "..", "..", "AUTHORS")
    with open(filePath, "r") as f:
        for ln in f.readlines():
            line = ln.strip()
            if line.startswith("#") or not len(line):
                # ignore comments and blank lines
                continue
            elif "(" not in line:
                # ignore authors that don't list a GitHub username
                continue

            # finally, map one or multiple GH usernames to the author name
            author, usernames = line.split("(")
            for user in usernames.rstrip(")").split(","):
                ghUsers[user.strip()] = author.strip()

    return ghUsers


def buildScrListing(pastCommit: str, thisPrNum: int = -1):
    """Helper method to build an RST-formatted lists of all SCRs, by category.

    Parameters
    ----------
    pastCommit : str
        The shortened commit hash for a past reference commit.
        (This is the last commit of the last release. It will not be included.)
    thisPrNum : int
        The number of this PR. If this is not a PR, the default is -1.

    Returns
    -------
    str
        RST-formatted list content.
    """
    # 1. Get a list of all the commits between this one and the reference,
    #    widening the search window until the reference commit appears.
    txt = ""
    for num in range(100, 2001, 100):
        print(f"Looking back {num} commits...")
        gitCmd = f"git log -n {num} --pretty=oneline --all".split(" ")
        txt = subprocess.check_output(gitCmd).decode("utf-8")
        if pastCommit in txt:
            break

    if not txt or pastCommit not in txt:
        return f"Could not find commit in git log: {pastCommit}"

    # 2. Parse commit history to get the PR numbers (squash-merge titles end in "(#1234)")
    prNums = set()
    if thisPrNum > 0:
        # in case the docs are not being built from a PR
        prNums.add(thisPrNum)
    for ln in txt.split("\n"):
        line = ln.strip()
        if pastCommit in line:
            # do not include the reference commit
            break
        elif line.endswith(")") and "(#" in line:
            # get the PR number
            try:
                prNums.add(int(line.split("(#")[-1].split(")")[0]))
            except ValueError:
                # This is not a PR. Someone unwisely put some trash in the commit message.
                pass

    # 3. Build a list of GitHub Users
    ghUsers = parseAuthorsFile()

    # 4. Build a list for each SCR category
    data = {"docs": [], "features": [], "fixes": [], "trivial": []}
    for prNum in sorted(prNums):
        if not isMainPR(prNum):
            continue
        row, scrType = _buildScrLine(str(prNum), ghUsers)
        data[scrType].append(row)

    # 5. Build final RST for all four lists, to return to the docs
    content = ""
    for typ in ["features", "fixes", "trivial", "docs"]:
        if len(data[typ]):
            print(f"Found {len(data[typ])} SCRs in the {typ} category")
            content += _buildHeader(typ)
            for line in data[typ]:
                content += line
            content += "\n\n"

    content += "\n\n"
    print(content)
    return content


if __name__ == "__main__":
    main()
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Docs build helper script, used to clean up the test-results file so it is easier to read in HTML and PDF.""" from sys import argv CLASS_NAME = 'classname="' SKIPPED = "</skipped>" def main(): assert len(argv) == 2, "No input file provided" filePath = argv[1] cleanup_test_results(filePath) def cleanup_test_results(filePath: str): """Clean up the test-results file so it is easier to read in HTML and PDF. Parameters ---------- filePath : str Path to junit pytest test results XML file. """ txt = open(filePath, "r").read() bits = txt.split(CLASS_NAME) newTxt = bits[0] for i in range(1, len(bits)): # split the line up into bits, using quotes assert '"' in bits[i], f"Something is wrong with the file: {bits[i]}" row = bits[i].split('"') # just grab the test class name, not the whole import path row[0] = row[0].split(".")[-1] # skipped tests include a long file path we want to remove if row[-1].startswith(">/home/runner/") and SKIPPED in row[-1]: row[-1] = ">" + SKIPPED + row[-1].split(SKIPPED)[-1] # Add the classname we split on back into this line newTxt += CLASS_NAME + '"'.join(row) with open(filePath, "w") as f: f.write(newTxt) if __name__ == "__main__": main() ================================================ FILE: doc/.static/css/theme_fixes.css ================================================ @import 'theme.css'; /* override table width restrictions */ @media screen and (min-width: 767px) { .wy-table-responsive table td { white-space: normal !important; } .wy-table-responsive { overflow: visible !important; } } img { max-width: 100% !important; } /* sphinx-needs */ 
table.need.need.need > tbody > tr > td { padding: 0.5em .5em !important; } .rst-content .line-block { line-height: 1em !important; } /* log files and raw data dumps */ .rst-content .linenodiv pre, .rst-content div[class^=highlight] pre, .rst-content pre.literal-block { line-height: 12px !important; font-size: 10pt !important; } /* long tables */ .rst-content table.docutils { margin: 0; padding: 0; font-size: 10pt !important; } .rst-content table.docutils td, .rst-content table.docutils th, .rst-content table.field-list td, .rst-content table.field-list th, .wy-table td, .wy-table th { margin: 0; padding: 1px; } html.writer-html5 .rst-content table.docutils td>p, html.writer-html5 .rst-content table.docutils th>p { font-size: 10px !important; } /* code snippets */ .rst-content .linenodiv pre, .rst-content div[class^=highlight] pre, .rst-content pre.literal-block { ssp-tiny !important; } /* move equation numbers to right side of the equation */ span.eqno { float: right; } /* Style test needs by their result */ tr.needs_passed td { background-color: rgba(0,250,0,0.2) !important; } tr.needs_failure td { background-color: rgba(250,0,0,0.2) !important; } tr.needs_skipped td { background-color: rgba(0,0,0,0.1) !important; } ================================================ FILE: doc/.static/dochelpers.py ================================================ # Copyright 2024 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Helpers for Sphinx documentation.""" def escapeSpecialCharacters(s): """Escapes RST special characters in inputted string. Special characters include: ``*|_``. More to be added when found troublesome. Parameters ---------- s : str String with characters to be escaped. Returns ------- str Input string with special characters escaped. """ news = s[:] for char in ["*", "|", "_"]: news = news.replace(char, "\\" + char) return news def createTable(rst_table, caption=None, label=None, align=None, widths=None, width=None): """ This method is available within ``.. exec::``. It allows someone to create a table with a caption. The ``rst_table`` """ rst = [".. table:: {}".format(caption or "")] if label: rst += [" :name: {}".format(label)] if align: rst += [" :align: {}".format(align)] if width: rst += [" :width: {}".format(width)] if widths: rst += [" :widths: {}".format(widths)] rst += [""] rst += [" " + line for line in rst_table.split("\n")] return "\n".join(rst) def createListTable(rows, caption=None, align=None, widths=None, width=None, klass=None): """Take a list of data, and produce an RST-type string for a list-table. Parameters ---------- rows: list List of input data (first row is the header). align: str "left", "center", or "right" widths: str "auto", "grid", or a list of integers width: str length or percentage of the line, surrounded by backticks klass: str Should be "class", but that is a reserved keyword. "longtable", "special", or something custom Returns ------- str RST list-table string """ # we need valid input data assert len(rows) > 1, "Not enough input data." len0 = len(rows[0]) for row in rows[1:]: assert len(row) == len0, "Rows aren't all the same length." # build the list-table header block rst = [".. 
list-table:: {}".format(caption or "")] rst += [" :header-rows: 1"] if klass: rst += [" :class: {}".format(klass)] if align: rst += [" :align: {}".format(align)] if width: rst += [" :width: {}".format(width)] if widths: rst += [" :widths: " + " ".join([str(w) for w in widths])] rst += [""] # build the list-table data for row in rows: rst += [f" * - {row[0]}"] rst += [f" - {word}" for word in row[1:]] return "\n".join(rst) def generateParamTable(klass, fwParams, app=None): """ Return a string containing one or more restructured text list tables containing parameter descriptions for the passed ArmiObject class. Parameters ---------- klass : ArmiObject subclass The Class for which parameter tables should be generated fwParams : ParameterDefinitionCollection A parameter definition collection containing the parameters that are always defined for the passed ``klass``. The rest of the parameters come from the plugins registered with the passed ``app`` app : App, optional The ARMI-based application to draw plugins from. Returns ------- str RST-formatted string table """ from armi import apps if app is None: app = apps.App() defs = {None: fwParams} app = apps.App() for plugin in app.pluginManager.get_plugins(): plugParams = plugin.defineParameters() if plugParams is not None: pDefs = plugParams.get(klass, None) if pDefs is not None: defs[plugin] = pDefs headerContent = """ .. container:: break_before ssp-landscape .. list-table:: {} Parameters from {{}} :class: ssp-tiny :widths: 30 40 30 :header-rows: 1 * - Name - Description - Units """.format(klass.__name__) content = [] for plugin, pdefs in defs.items(): srcName = plugin.__name__ if plugin is not None else "Framework" content.append(f".. 
_{srcName}-{klass.__name__}-param-table:") pluginContent = headerContent.format(srcName) for pd in pdefs: pluginContent += f""" * - {pd.name} - {escapeSpecialCharacters(str(pd.description))} - {escapeSpecialCharacters(pd.units)} """ content.append(pluginContent + "\n") return "\n".join(content) ================================================ FILE: doc/.static/looseCouplingIllustration.dot ================================================ digraph looseCoupling { label="Loose Coupling" layout="dot"; rankdir=TB; a [label="Temp.", shape="Rec", style="rounded,filled", color="white"] a1 [label="Temp.", shape="Rec", style="rounded,filled", color="white"] a2 [label="Temp.", shape="Rec", style="rounded,filled", color="white"] b [label="Power", shape="Rec", style="rounded,filled", color="white"] b1 [label="Power", shape="Rec", style="rounded,filled", color="white"] b2 [label="Power", shape="Rec", style="rounded,filled", color="white"] c [label="Cross Sections", shape="Rec", style="rounded,filled", color="white"] c1 [label="Cross Sections", shape="Rec", style="rounded,filled", color="white"] c2 [label="Cross Sections", shape="Rec", style="rounded,filled", color="white"] d [label="...", shape="plaintext"] subgraph cluster_c00n00{ label="Cycle 0, Node 0"; style="rounded,filled"; color=lightblue; c -> b b -> a [constraint=false] } a -> c1 //[constraint=false] subgraph cluster_c00n01{ label="Cycle 0, Node 1" style="rounded,filled"; color=lightblue; c1 -> b1 b1 -> a1 [constraint=false] } a1 -> c2 //[constraint=false] subgraph cluster_c00n02{ label="Cycle 0, Node 2" style="rounded,filled"; color=lightblue; c2 -> b2 b2 -> a2 [constraint=false] } a2 -> d //[constraint=false] } ================================================ FILE: doc/.static/tightCouplingIllustration.dot ================================================ digraph tightCoupling { label="Tight Coupling" layout="dot"; rankdir=TB; e [label="Converged?", shape="diamond", style="filled", color="white"] e1 
[label="Converged?", shape="diamond", style="filled", color="white"] e2 [label="Converged?", shape="diamond", style="filled", color="white"] a [label="Temp.", shape="Rectangle", style="rounded,filled", color="white"] a1 [label="Temp.", shape="Rectangle", style="rounded,filled", color="white"] a2 [label="Temp.", shape="Rectangle", style="rounded,filled", color="white"] b [label="Power", shape="Rectangle", style="rounded,filled", color="white"] b1 [label="Power", shape="Rectangle", style="rounded,filled", color="white"] b2 [label="Power", shape="Rectangle", style="rounded,filled", color="white"] c [label="Cross Sections", shape="Rectangle", style="rounded,filled", color="white"] c1 [label="Cross Sections", shape="Rectangle", style="rounded,filled", color="white"] c2 [label="Cross Sections", shape="Rectangle", style="rounded,filled", color="white"] d [label="...", shape="plaintext"] subgraph cluster_c00n00{ label="Cycle 0, Node 0"; style="rounded,filled"; color=lightblue; c -> b b -> a a -> e [constraint=false] e -> c [constraint=false, label="no"] } e -> c1 [label="yes"] subgraph cluster_c00n01{ label="Cycle 0, Node 1" style="rounded,filled"; color=lightblue; c1 -> b1 b1 -> a1 a1 -> e1 [constraint=false] e1 -> c1 [constraint=false, label="no"] } e1 -> c2 [label="yes"] subgraph cluster_c00n02{ label="Cycle 0, Node 2" style="rounded,filled"; color=lightblue; c2 -> b2 b2 -> a2 a2 -> e2 [constraint=false] e2 -> c2 [constraint=false, label="no"] } e2 -> d [label="yes"] } ================================================ FILE: doc/Makefile ================================================ # Minimal makefile for Sphinx documentation for BASH Linux # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". 
help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new "make mode" option. # $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: doc/__init__.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This is the documentation package: not part of the codebase.""" ================================================ FILE: doc/conf.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ARMI documentation build configuration file. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. 
All configuration values have a default; values that are commented out serve to show the default.
"""

# ruff: noqa: E402
import datetime
import inspect
import os
import pathlib
import re
import shutil
import subprocess
import sys
import warnings

import sphinx_rtd_theme  # noqa: F401
from docutils import nodes, statemachine
from docutils.parsers.rst import Directive, directives
from sphinx.domains.python import PythonDomain
from sphinx_gallery.sorting import ExplicitOrder, FileNameSortKey
from sphinx_needs.api import add_dynamic_function

from doc.getTestResults import getTestResult

# handle python import locations for this execution
PYTHONPATH = os.path.abspath("..")
sys.path.insert(0, PYTHONPATH)
# Also add to os.environ which will be used by the nbsphinx extension environment
os.environ["PYTHONPATH"] = PYTHONPATH

# Add dochelpers.py and automateScr.py from doc/.static/ directory
sys.path.insert(0, ".static")

# NOTE: armi must be imported only after the sys.path manipulation above.
from armi import apps, context, disableFutureConfigures, meta
from armi import configure as armi_configure
from armi.bookkeeping import tests as bookkeepingTests
from armi.utils import safeCopy

context.Mode.setMode(context.Mode.BATCH)

# Configure the baseline framework "App" for framework doc building
armi_configure(apps.App())
disableFutureConfigures()

APIDOC_REL = ".apidocs"
SOURCE_DIR = os.path.join("..", "armi")
STATIC_DIR = ".static"

# Tutorial inputs copied into the build; notebooks are excluded here (nbsphinx runs those itself).
_TUTORIAL_FILES = [fName for fName in bookkeepingTests.TUTORIAL_FILES if "ipynb" not in fName]


class PatchedPythonDomain(PythonDomain):
    """Python domain that drops the ``refspecific`` flag so cross-references resolve more leniently."""

    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        if "refspecific" in node:
            del node["refspecific"]
        return super(PatchedPythonDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode)


class ExecDirective(Directive):
    """
    Execute the specified python code and insert the output into the document.

    The code is used as the body of a method, and must return a ``str``. The string result is
    interpreted as reStructuredText.

    Error handling informed by https://docutils.sourceforge.io/docs/howto/rst-directives.html#error-handling
    The self.error function should both inform the documentation builder of the error and also
    insert an error into the built documentation.

    Warning
    -------
    This only works on a single node in the doctree, so the rendered code may not contain any new
    section names or labels. They will result in ``WARNING: Unexpected section title`` warnings.
    """

    has_content = True

    def run(self):
        try:
            # clean the content, then put back into a list
            cleancode = inspect.cleandoc("\n".join(self.content)).split("\n")
            # Wrap the directive body in a function so a `return` statement yields the RST output.
            code = "def usermethod():\n " + "\n ".join(cleancode)
            globals = {}
            # Doc-build-time exec of trusted in-repo snippets only; never for untrusted input.
            exec(code, globals)
            result = globals["usermethod"]()
            if result is None:
                raise self.error(
                    "Return value needed! The body of your `.. exec::` is a function call that must return a value."
                )
            # Parse the returned RST string into real doctree nodes under a container.
            para = nodes.container()
            lines = statemachine.StringList(result.split("\n"))
            self.state.nested_parse(lines, self.content_offset, para)
            return [para]
        except Exception as e:
            docname = self.state.document.settings.env.docname
            raise self.error(f"Unable to execute embedded doc code at {docname}:{self.lineno}\n{str(e)}")


class PyReverse(Directive):
    """Runs pyreverse to generate UML for specified module name and options.

    The directive accepts the same arguments as pyreverse, except you should not specify
    ``--project`` or ``-o`` (output format). These are automatically specified.

    If you pass ``-c`` to this, the figure generated is forced to be the className.png like. For
    .gitignore purposes, this is a pain. Thus, we auto-prefix ALL images generated by this
    directive with ``pyr_``.
    """

    has_content = True
    required_arguments = 1
    optional_arguments = 50
    option_spec = {
        "alt": directives.unchanged,
        "height": directives.length_or_percentage_or_unitless,
        "width": directives.length_or_percentage_or_unitless,
        "align": lambda arg: directives.choice(arg, ("left", "right", "center")),
        "filename": directives.unchanged,
    }

    def run(self):
        try:
            args = list(self.arguments)
            args.append("--project")
            args.append(f"{args[0]}")
            args.append("-opng")
            # NOTE: cannot use "pylint.pyreverse.main.Run" because it calls `sys.exit`.
            fig_name = self.options.get("filename", "classes_{}.png".format(args[0]))
            command = [sys.executable, "-m", "pylint.pyreverse.main"]
            print("Running {}".format(command + args))
            env = dict(os.environ)
            # apply any runtime path mods to the pythonpath env variable (e.g. sys.path mods made during doc confs)
            env["PYTHONPATH"] = os.pathsep.join(sys.path)
            subprocess.check_call(command + args, env=env)
            # Remove any stale copy before moving the fresh figure into the apidocs dir.
            try:
                os.remove(os.path.join(APIDOC_REL, fig_name))
            except OSError:
                pass
            shutil.move(fig_name, APIDOC_REL)
            # add .gitignore helper prefix
            shutil.move(
                os.path.join(APIDOC_REL, fig_name),
                os.path.join(APIDOC_REL, f"pyr_{fig_name}"),
            )
            new_content = [f".. figure:: /{APIDOC_REL}/pyr_{fig_name}"]
            # assume we don't need the packages, and delete.
            try:
                os.remove("packages_{}.png".format(args[0]))
            except OSError:
                pass
            # pass the other args through (figure args like align)
            for opt, val in self.options.items():
                if opt in ("filename",):
                    continue
                new_content.append(" :{}: {}\n".format(opt, val))
            new_content.append("\n")
            for line in self.content:
                new_content.append(" " + line)
            para = nodes.container()
            lines = statemachine.StringList(new_content)
            self.state.nested_parse(lines, self.content_offset, para)
            return [para]
        except Exception as e:
            docname = self.state.document.settings.env.docname
            # add the error message directly to the built documentation and also tell the builder
            raise self.error(
                "Unable to execute embedded doc code at {}:{} ... {}\n{}".format(
                    docname, self.lineno, datetime.datetime.now(), str(e)
                )
            )


def autodoc_skip_member_handler(app, what, name, obj, skip, options):
    """Manually exclude certain methods/functions from docs.

    Returns True to skip a member: private names, unittest fixtures, and yamlize Attribute
    descriptors (whose inherited docstrings are misleading).
    """
    # exclude special methods from unittest
    excludes = ["setUp", "setUpClass", "tearDown", "tearDownClass"]
    try:
        # special logic to fix inherited docstrings from yamlize.Attribute
        s = str(obj).strip()
        if s.startswith("<Attribute") and "_yamlized_" in s:
            return True
    except Exception:
        # str(obj) can fail for exotic objects; fall through to the name-based rules below
        pass
    return name.startswith("_") or name in excludes


def setup(app):
    """Method to make `make html` generate api documentation."""
    app.connect("autodoc-skip-member", autodoc_skip_member_handler)
    app.add_domain(PatchedPythonDomain, override=True)
    app.add_directive("exec", ExecDirective)
    app.add_directive("pyreverse", PyReverse)
    # sphinx-needs dynamic function used by needs_global_options to tag test results
    add_dynamic_function(app, getTestResult, "get_test_result")

    # making tutorial data dir
    dataDir = pathlib.Path("user") / ".." / "anl-afci-177"
    if not os.path.exists(dataDir):
        os.mkdir(dataDir)

    # Copy resources needed to build the tutorial notebooks. nbsphinx_link needs the working
    # directory for running the notebooks to be the directory of the link itself.
    for path in _TUTORIAL_FILES:
        safeCopy(path, dataDir)


# If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here.
# If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx
# (named 'sphinx.ext.*') or your custom ones.
extensions = [ "nbsphinx", "nbsphinx_link", "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.doctest", "sphinx.ext.extlinks", "sphinx.ext.ifconfig", "sphinx.ext.imgconverter", # to convert GH Actions badge SVGs to PNG for LaTeX "sphinx.ext.inheritance_diagram", "sphinx.ext.intersphinx", "sphinx.ext.mathjax", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", "sphinx_gallery.gen_gallery", "sphinx_needs", # needed for requirements tracking "sphinx_rtd_theme", # needed here for loading jquery in sphinx 6 "sphinxcontrib.apidoc", "sphinxcontrib.jquery", # see https://github.com/readthedocs/sphinx_rtd_theme/issues/1452 "sphinxcontrib.plantuml", "sphinxcontrib.test_reports", "sphinxext.opengraph", ] # Our API should make sense without documenting private/special members. autodoc_default_options = { "members": True, "private-members": False, "undoc-members": True, "ignore-module-all": True, } autodoc_member_order = "bysource" # this line removes huge numbers of false and misleading, inherited docstrings autodoc_inherit_docstrings = False autoclass_content = "both" autodoc_mock_imports = ["wx"] apidoc_module_dir = SOURCE_DIR apidoc_module_first = True apidoc_output_dir = APIDOC_REL apidoc_separate_modules = True # Napoleon settings listed here so we know what's configurable and can track changes (for numpy docstrings) napoleon_google_docstring = False napoleon_include_init_with_doc = False napoleon_include_private_with_doc = False napoleon_include_special_with_doc = False napoleon_numpy_docstring = True napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = True napoleon_use_admonition_for_references = False napoleon_use_ivar = True napoleon_use_param = True napoleon_use_rtype = True nbsphinx_kernel_name = "python3" ogp_site_url = "https://terrapower.github.io/armi/" ogp_image = "https://terrapower.github.io/armi/_static/armiSchematicView.png" ogp_site_name = "Advanced Reactor Modeling Interface" # Add any paths that 
contain templates here, relative to this directory. templates_path = [".templates"] # The suffix of source filenames. source_suffix = ".rst" # The top-level toctree document. root_doc = "index" # General information about the project. copyright = f"2009-{datetime.datetime.now().year}, TerraPower, LLC" project = "ARMI" # Use the pre-existing version definition. release = meta.__version__ version = meta.__version__ # List of patterns, relative to doc directory, that match files and directories to ignore when looking for source files. exclude_patterns = [ "**.ipynb_checkpoints", "**_reqs.rst", # needed so included reqs files render ".DS_Store", "_build", "gallery/**/*.ipynb", # prevent sphinx-gallery from causing duplicate source file errors "gallery/**/*.json", "gallery/**/*.md5", "gallery/**/*.zip", "gallery/analysis/index.html", "gallery/framework/index.html", "logs", "Thumbs.db", ] rst_epilog = r""" .. |keff| replace:: k\ :sub:`eff`\ """ wiki = { "GitHub Discussions": ( "https://github.com/terrapower/armi/discussions" + "%s", None, ) } # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. modindex_common_prefix = ["armi."] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. html_theme = "sphinx_rtd_theme" # (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24). Path should be relative to the ``.static`` # files directory. html_logo = os.path.join(STATIC_DIR, "armiicon_24x24.ico") # Theme options are theme-specific and customize the look and feel of a theme further. 
html_theme_options = { "display_version": True, "logo_only": False, "prev_next_buttons_location": "bottom", "style_external_links": True, "style_nav_header_background": "#233C5B", # TP blue looks better than green "vcs_pageview_mode": "", # Toc options "collapse_navigation": True, "includehidden": True, "navigation_depth": 4, "sticky_navigation": True, "titles_only": False, } # as long as this file @import's the theme's main css it won't break anything html_style = "css/theme_fixes.css" # The name of an image file (within the static path) to use as favicon of the docs. This file should be a icon file # (.ico) being 16x16 or 32x32 pixels large. html_favicon = os.path.join(STATIC_DIR, "armiicon_16x16.ico") # Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are # copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [STATIC_DIR] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format. html_last_updated_fmt = "%Y-%m-%d" # Output file base name for HTML help builder. htmlhelp_basename = "ARMIdoc" html_context = { "conf_py_path": "/doc/", # Path in the checkout to the docs root "display_github": True, # Integrate GitHub "github_repo": "armi", # Repo name "github_user": "terrapower", # Username "github_version": "main", # Version } # -- Options for LaTeX output -------------------------------------------------- latex_engine = "xelatex" # Additional stuff for the LaTeX preamble. latex_elements = { "papersize": "letterpaper", "pointsize": "10pt", "preamble": r"""\usepackage{amsmath} \usepackage{wasysym} """, } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual], toctree_only). 
latex_documents = [ ( "index", "ARMI.tex", "Advanced Reactor Modeling Interface (ARMI) Manual", "TerraPower, LLC", "manual", False, ) ] # The name of an image file (relative to this directory) to place at the top of the title page. latex_logo = os.path.join(STATIC_DIR, "armi-logo.png") # For "manual" documents, if this is true, then toplevel headings are parts, not chapters. latex_toplevel_sectioning = "part" # If true, show page references after internal links. latex_show_pagerefs = True # If true, show URL addresses after external links. latex_show_urls = "inline" # Documents to append as an appendix to all manuals. latex_appendices = [] # If false, no module index is generated. latex_domain_indices = ["py-modindex"] # Configuration for the sphinx-gallery sphinx_gallery_conf = { "examples_dirs": ["gallery-src"], "filename_pattern": re.escape(os.sep) + "run_", "gallery_dirs": ["gallery"], "line_numbers": False, "download_all_examples": False, "nested_sections": False, "subsection_order": ExplicitOrder( [ os.path.join("gallery-src", "framework"), os.path.join("gallery-src", "analysis"), ] ), "within_subsection_order": FileNameSortKey, "default_thumb_file": os.path.join(STATIC_DIR, "TerraPowerLogo.png"), } suppress_warnings = ["autoapi.python_import_resolution", "config.cache"] # Filter out this warning which shows up in sphinx-gallery builds. This is suggested in the sphinx-gallery example but # doesn't actually work? warnings.filterwarnings( "ignore", category=UserWarning, message="Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.", ) intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # These are defaults in Windows in more recent versions of the imgconverter plugin and can be removed if/when we # upgrade Sphinx beyond 2.2. Otherwise, 'convert' from system32 folder is used. 
if sys.platform.startswith("win"): image_converter = "magick" image_converter_args = ["convert"] # sphinx-needs settings needs_statuses = [ dict(name=None, description="No status yet; not in any reviews"), dict( name="preliminary", description="Requirement that will have its wording reviewed and/or does not have implementation/testing yet.", ), dict( name="accepted", description="Requirement that either has completed or will undergo TP-ENG-PROC-0013 Appendix D Part 1 review.", ), ] needs_extra_options = [ "acceptance_criteria", "basis", "subtype", ] needs_extra_links = [ dict(option="tests", incoming="testing", outgoing="requirements"), dict(option="implements", incoming="implementations", outgoing="requirements"), ] needs_layouts = { "test_layout": { "grid": "simple", "layout": { "head": [ '<<meta("type_name")>>: **<<meta("title")>>** <<meta_id()>> <<collapse_button("meta", ' 'collapsed="icon:arrow-down-circle", visible="icon:arrow-right-circle", initial=False)>> ' ], "meta": [ "signature: <<meta('signature')>>", "<<meta_links_all()>>", ], }, }, "req_hide_links": { "grid": "simple", "layout": { "head": [ '<<meta("type_name")>>: **<<meta("title")>>** <<meta_id()>> <<collapse_button("meta", ' 'collapsed="icon:arrow-down-circle", visible="icon:arrow-right-circle", initial=False)>> ' ], "meta": [ "<<meta_all(no_links=True, exclude=['layout'])>>", ], }, }, } needs_global_options = { # Defaults for test tags "layout": ("test_layout", "type=='test'"), "result": ("[[get_test_result()]]", "type=='test'"), } # Formats need roles (reference to a req in text) as just the req ID needs_role_need_template = "{id}" ================================================ FILE: doc/developer/documenting.rst ================================================ .. 
_armi-docing: **************** Documenting ARMI **************** ARMI uses the `Sphinx <https://www.sphinx-doc.org/en/master/>`_ documentation system to compile the ARMI documentation into HTML and PDF from in-code docstrings and hand-created `ReStructedText files <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_. This provides several benefits: * We can revise and track the documentation in lock-step with the code itself, in the same source code repository * We can make use of hyperlinked cross-references that stay up to date as the code is expanded or refactored. * We can run specific code tests during documentation building to ensure the documentation examples remain valid * We can auto-generate class diagrams based on the latest status of the code * Every Pull Request (PR) generates HTML and PDF versions of the documentation for the PR Author and Reviewer We use some special Sphinx plugins that run the tutorial jupyter notebooks during documentation build with the most up-to-date code. Building the Documentation ========================== Before building documentation, ensure that you have installed the documentation requirements into your ARMI virtual environment with: .. code-block:: bash pip install -e .[docs] You also need to have the following utilities available in your PATH: * `Graphviz <https://graphviz.org/>`_ * `Pandoc <https://pandoc.org/>`_ If you want to build the documentation into a PDF using the Sphinx LaTeX builder, you also need: * LaTeX (`MikTeX <https://miktex.org/>`_ on Windows) * `ImageMagick <https://imagemagick.org/>`_ The documentation depends on at least one submodule as well, so you must be sure it is available in your source tree with: .. code-block:: bash git submodule update --init To build the ARMI documentation as HTML. The ARMI docs expect a bunch of custom unit test outputs to be present. You can either run these test commands: .. 
code-block:: bash pytest --junit-xml=test_results.xml -v -n 4 armi > pytest_verbose.log mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi1.xml armi/tests/test_mpiFeatures.py > pytest_verbose_mpi1.log mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi2.xml armi/tests/test_mpiParameters.py > pytest_verbose_mpi2.log mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi3.xml armi/utils/tests/test_directoryChangersMpi.py > pytest_verbose_mpi3.log python doc/.static/cleanup_test_results.py test_results.xml Or, if you just want to build the docs locally and aren't interested in building a full test report, you can just do this to inject placeholder test results files instead: .. code-block:: bash python doc/skip_str.py Either way, you eventually go to the ``doc`` folder and type this to build the docs: .. code-block:: bash make html This will invoke Sphinx and generate a series of html files in the ``_build/html`` folder. Open up ``index.html`` to see the documentation from there. A copy of the documentation is hosted online at https://terrapower.github.io/armi/. You can suggest a change to the documentation by opening an ARMI PR. Documentation for ARMI plugins ============================== The following subsections apply to documentation for ARMI plugins. Linking to ARMI documentation from plugins ------------------------------------------ ARMI plugin documentation can feature rich hyperlinks to the ARMI API documentation with the help of the `intersphinx Sphinx plugin <http://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html>`_. 
The ARMI plugin documentation config file should add ``"sphinx.ext.intersphinx",`` to its active Sphinx plugin list, and change the default config to read:: intersphinx_mapping = { "python": ("https://docs.python.org/3", None), "armi": ("https://terrapower.github.io/armi/", None), } Now you can link to the ARMI documentation with links like:: :doc:`armi:developer/documenting` :py:mod:`armi.physics.executers` Automatically building apidocs of namespace packages ---------------------------------------------------- Activating the ``"sphinxcontrib.apidoc",`` `Sphinx plugin <https://github.com/sphinx-contrib/apidoc>`_ enables plugin API documentation to be built with the standard ``make html`` Sphinx workflow. If your ARMI plugin is a namespace package, the following extra config is required:: apidoc_extra_args = ["--implicit-namespaces"] Updating the Gallery ==================== The `ARMI example gallery <https://terrapower.github.io/armi/gallery/index.html>`_ is a great way to quickly highlight neat features and uses of ARMI. To add a new item to the gallery, add your example code (including the required docstring) to the ``doc/gallery-src`` folder in the ARMI source tree. The example will be added to the gallery during the next documentation build. Using Jupyter Notebooks ======================= For interactive tutorials, it's convenient to build actual Jupyter notebooks and commit them to the documentation to be rendered by Sphinx using the nbsphinx plugin. When this is done, notebooks without any output should be committed to the repository so that Sphinx actually executes the notebooks with the up-to-date code when the documentation is built. To do this, you can clean the output with: .. code-block:: bash jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace mynotebook.ipynb This should clear the output and overwrite the file. If this doesn't work, you can clear all output cells in the notebook web interface itself before committing the file. 
================================================ FILE: doc/developer/entrypoints.rst ================================================ ************ Entry Points ************ **Entry Points** are like the verbs that your App can *do*. The :py:mod:`built-in entry points <armi.cli>` offer basic functionality, like :py:class:`running a case <armi.cli.run.RunEntryPoint>` or :py:class:`opening up the GUI <armi.cli.gridGui.GridGuiEntryPoint>`, but the real joy of an application comes when you add your own project-specific entry points that do the actions that you commonly need done. To make a new EntryPoint, first make a new module and subclass :py:class:`~armi.cli.entryPoint.EntryPoint`. Set the class attributes as follows: ``name`` What the user types on the CLI to invoke this entry point. ``settingsArgument`` * ``"required"`` if a settings input file must be provided, * ``"optional"`` if it may be provided but not required, * ``None`` if no settings input is allowed .. tip:: ARMI apps often collect EntryPoints in a ``cli/`` directory (Command Line Interface) Next, implement the :py:meth:`~armi.cli.entryPoint.EntryPoint.addOptions` method. Here you can both: * turn various Settings into command-line arguments with :py:meth:`~armi.cli.entryPoint.EntryPoint.createOptionFromSetting` * add arbitrary command-line arguments using the standard :py:mod:`python:argparse` library. The values of the non-setting arguments will become attributes in ``self.args`` for later use. Finally, implement the :py:meth:`~armi.cli.entryPoint.EntryPoint.invoke` method with the code you'd like to run upon invocation of this entry point. .. code-block:: python :caption: Example entry point from armi import cases from armi.cli import entryPoint class SampleEntryPoint(entryPoint.EntryPoint): """ Entry point title here. Long description of entry point here. This will get picked up and used as the help text on the command line! 
""" name = "do-my-thing" settingsArgument = "required" def addOptions(self): self.createOptionFromSetting(CONF_CYCLE_LENGTH) self.createOptionFromSetting(CONF_BURN_STEPS) self.parser.add_argument( "--post-process", "-p", action="store_true", default=False, help="Just post-process an existing suite; don't run", ) def invoke(self): inputCase = cases.Case(cs=self.cs) print(f"The case is {inputCase}") if self.args.post_process: print("Post processing...") When you run your app, you will have this as an option, and you can invoke it with:: python -m myapp do-my-thing --post-process settingsFile.yaml or (if ``myapp`` is not in your ``PYTHONPATH``):: python path/to/myapp do-my-thing --post-process settingsFile.yaml .. tip:: The settings file will be read into a ``Settings`` object. This ``Settings`` object will be passed widely around the code. Please do not edit these settings during a run. The idea of "run settings" is a lot simpler to understand when they don't change. And such changes tend to hide data from other developers. To add entry points, ``ArmiPlugin``s can subclass the ``defineEntryPoints`` method. ARMI has an extensive :py:class:`~armi.cli.EntryPointsPlugin` that comes with several CLI entry points. It is important to note that if you are building your own ARMI ``Application``, the ``EntryPointsPlugin`` must be registered to access these entry points either by registration in the application or subclassing :py:class:`~armi.apps.App`. If you do not want them or if you only want some of them you can build your own list in a custom ``defineEntryPoints`` method. ================================================ FILE: doc/developer/first_time_contributors.rst ================================================ ***************************** First Time Contributors Guide ***************************** The ARMI team strongly encourages developers to contribute to the codebase. The ARMI framework code is open source, and your contributions will become open source. 
Although fewer laws apply to open source materials because they are publicly-available, you still must comply with all applicable laws and regulations. Help Wanted =========== There are a lot of places you can get started to help the ARMI project and team: * Better :ref:`armi-docing` * Better test coverage * Many more type annotations are desired. Type issues cause lots of bugs. * Targeted speedups (e.g. informed by a profiler) * Additional relevance to thermal reactors Naturally, you can also look at the open `ARMI issues <https://github.com/terrapower/armi/issues>`_ to see what work needs to be done. In particular, check out the `help wanted tickets <https://github.com/terrapower/armi/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22>`_ and `good first issue tickets <https://github.com/terrapower/armi/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22>`_. Testing ======= Any contribution must pass all included unit tests. You will frequently have to fix tests your code changes break. And you should definitely add tests to cover anything new your code does. The ARMI tests are meant to be run using `pytest <https://docs.pytest.org/en/8.0.x/>`_ locally :: $ pip install -e .[test] $ pytest -n 4 armi Submitting Changes ================== To submit a change to ARMI, you will have to open a Pull Request (PR) on GitHub.com. The process for opening a PR against ARMI goes something like this: 1. `Fork the ARMI repo <https://docs.github.com/en/get-started/quickstart/fork-a-repo>`_ 2. `Create a new branch <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository>`_ in your repo 3. Make your code changes to your new branch 4. Submit a Pull Request against `ARMI's main branch <https://github.com/terrapower/armi/pull/new/main>`_ a. 
See `GitHub's general guidance on Pull Requests <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request>`_ b. See ARMI's specific guidance on what makes a "good" Pull Request: :ref:`armi-tooling`. 5. Actively engage with your PR reviewer's questions and comments. > Note that a bot will require that you sign our `Contributor License Agreement <https://github.com/terrapower/armi/blob/main/CONTRIBUTING.md>`_ before we can accept a pull request from you. See our published documentation for a complete guide to our coding standards and practices: :ref:`armi-stds`. Also, please check out our (quick) synopsis on good commit messages: :ref:`armi-tooling`. Licensing of Tools ================== Be careful when including any dependency in ARMI (say in the ``pyproject.toml`` file) not to include anything with a license that supersedes our Apache license. For instance, any third-party Python library included in ARMI with a GPL license will make the whole project fall under the GPL license. But a lot of potential users of ARMI will want to keep some of their work private, so we can't allow any GPL tools. For that reason, it is generally considered best-practice in the ARMI ecosystem to only use third-party Python libraries that have MIT or BSD licenses. ================================================ FILE: doc/developer/guide.rst ================================================ ********************** Framework Architecture ********************** What follows is a discussion of the high-level elements of the ARMI framework. Throughout, links to the API docs will be provided for additional details. The Reactor Data Model ====================== The ARMI framework represents a nuclear reactor via a reactor data model, which is defined in the :py:mod:`~armi.reactor` package. 
Each physical piece of the nuclear reactor is defined by a Python object, called an :py:class:`ArmiObject <armi.reactor.composites.ArmiObject>`. Each ``ArmiObject`` has associated data like: shape, material, or other physical values. The physical values can be nearly anything, and are attached to the data model via ARMI's :py:mod:`Parameter <armi.reactor.parameters>` system. Example parameters might be: ``keff``, ``flow rates``, ``power``, ``flux``, etc. The reactor data model is a hierarchical model, following the `Composite Design Pattern <http://en.wikipedia.org/wiki/Composite_pattern>`_. The top of the data model is the :py:class:`Reactor <armi.reactor.reactors.Reactor>`, which contains one :py:class:`Core <armi.reactor.cores.Core>` object and a collection of zero or more :py:class:`ExcoreStructures <armi.reactor.excoreStructure.ExcoreStructure>`. An example ``ExcoreStructure`` might be a :py:class:`SpentFuelPool <armi.reactor.spentFuelPool.SpentFuelPool>`. For now, the ``Core`` object in ARMI assumes it contains **Assembly** objects, which are in turn made up as a collection of **Block** objects. The leaves of the Composite Model in the ARMI framework are called :py:class:`Component <armi.reactor.components.component.Component>`. .. figure:: /.static/armi_reactor_objects.png :align: center The primary data containers in ARMI Time-evolving the parameters on the reactor composite hierarchy is what most modelers and analysts will want from the ARMI framework. Review the data model :ref:`armi-tutorials` section for examples exploring a populated instance of the ``Reactor`` data model. Finding objects in a model -------------------------- Under most circumstances a :py:class:`armi.reactor.reactors.Reactor` instance will have a ``.core`` attribute, which is an instance of :py:class:`armi.reactor.reactors.Core`. 
While the Composite pattern discussed above can be used very generally, the ``Core`` class enforces a couple of constraints that can be very useful: * A ``Core`` is a 2-D arrangement of :py:class:`armi.reactor.assemblies.Assembly` objects. * Each ``Assembly`` is a 1-D arrangement of :py:class:`armi.reactor.blocks.Block` objects. * Blocks are :py:class:`armi.reactor.composites.Composite` objects with some extra parameter bindings, utility functions, and other implementation details that let them play nicely with their containing ``Assembly``. In many scenarios, one wants to access specific assemblies or blocks from a core. There are a few ways to get the objects that you're interested in: * The `r.core.childrenByLocator` dictionary maps :py:class:`armi.reactor.grids.IndexLocation` objects to whichever assembly is at that location. For example :: >>> loc = r.core.spatialGrid[i, j, 0] >>> a = r.core.childrenByLocator[loc] To access the ``k`` -th block in an assembly, try:: >>> b = a[k] * `r.core.getAssemblies()` loops through all assemblies in the core for when you need to do something to all assemblies. Parameters ---------- One of the main benefits to ARMI is that it enables simple interfaces to extract data from the reactor, do something with it, and add new results to the reactor. This enables specialized developers to write code that uses ARMI as input and output. Most data is stored in ARMI as :py:mod:`~armi.reactor.parameters`. Most parameters will become persistent, meaning they will be saved to the database during database interactions, and therefore it will also be loaded when a database is loaded. Details of the use and design can be found at :py:mod:`~armi.reactor.parameters`. Converters ---------- The :py:mod:`~armi.reactor.converters` subpackage contains a variety of utilities that can convert a reactor model in various ways. Some converters change designs at the block level, adjusting pin dimensions or fuel composition. 
Others adjust the reactor geometry at large, changing a 1/3-symmetric model to a full core, or changing a hexagonal geometry to a R-Z geometry. Converters are used for parameter sweeps as well as during various physics operations. For example, some lattice physics routines convert the full core to a 2D R-Z model and compute flux with thousands of energy groups to properly capture the spectral-spatial coupling in a core/reflector interface. The converters are used heavily in these operations. Blueprints ---------- As seen in the User Guide, :py:mod:`~armi.reactor.blueprints` are how reactor models are defined. During a run, they can be used to create new instances of reactor model pieces, such as when a new assembly is fabricated during a fuel management operation in a later cycle. Operators ========= Operators conduct the execution sequence of an ARMI run. They basically contain the main loop. When any operator is instantiated, several actions occur: 1. Some environmental detail is printed out, 2. A Reactor object is instantiated 3. Loading and geometry input files are processed and the reactor object is populated with assemblies, 4. The **interfaces** are instantiated and placed in the **Interface Stack** during the :py:meth:`createInterfaces method<armi.operators.Operator.createInterfaces>` call, 5. The ``interactInit`` method is called on all interfaces, and 6. Restart information is processed (if this is a restart run). After that, depending on the type of Operator at hand, one of several operational loops will begin via ``operate()``. Operator types are chosen by the ``runType`` setting, which is featured on the first tab of the ARMI GUI. The Standard Operator --------------------- The two primary types of operators are the Standard Operator (along with its parallel version, the :py:class:`OperatorMPI <armi.operators.OperatorMPI>`), and the :py:class:`OperatorSnapshots <armi.operators.OperatorSnapshots>`. 
The former runs a typical operational loop, which calls all the interfaces through their interaction hooks in a sequential manner, marching from beginning-of-life through the number of cycles requested. This is how most quasistatic fuel cycle calculations are performed, which inform much of the analysis done during reactor design. The main code for this loop is found in the :py:meth:`mainOperate method <armi.operators.Operator.mainOperate>`. This operator supports restart/continuation of past runs from an arbitrary time step. The Snapshots Operator ---------------------- Alternatively, OperatorSnapshots is designed to allow for additional analyses at specific time steps. It simply loops through all snapshots that have been requested via the Snapshot Request functionality (Lists -> Edit snapshot requests in the GUI). At each snapshot request, the state is loaded from a previous case, as determined by the ``reloadDBName`` setting and then the BOC, EveryNode, and EOC interaction hooks are executed from all the interfaces. Snapshots are intended to analyze an exact reactor configuration. Therefore, interfaces which would significantly change the reactor configuration (such as Fuel management, and depletion) are disabled. The Interface Stack ------------------- *Interfaces* (:py:class:`armi.interfaces.Interface`) operate upon the Reactor Model to do analysis. They're designed to allow expansion of the code in a natural and well-organized manner. Interfaces are useful to link external codes to ARMI as well for adding new internal physics into the rest of the system. As a result, very many aspects of ARMI are contained within interfaces. The flow of any ARMI calculation depends on the order of the interfaces, which is set at initialization according to the user settings and the corresponding ``ORDER`` attributes in interface modules. 
The collection of the interfaces is known as the **Interface Stack** and is prominently featured at the beginning of the standard output of each run, like this:: [R 0] ---------------------------------------------------------- [R 0] *** Interface Stack Report *** [R 0] NUM TYPE NAME ENABLED BOL EOL ORDER [R 0] ---------------------------------------------------------- [R 0] 00 Main main Yes No Reversed [R 0] 01 Software Testing tests Yes No Reversed [R 0] 02 ReportInterface report Yes No Reversed [R 0] 03 FuelHandler fuelHandler Yes No Normal [R 0] 04 Depletion depletion Yes Yes Normal [R 0] 05 MC2-2 mc2 Yes No Normal [R 0] 06 DIF3D dif3d Yes No Normal [R 0] 07 Thermo thermo Yes No Normal [R 0] 08 OrificedOptimized orificer Yes Yes Normal [R 0] 09 AlchemyLite alchemyLite Yes No Normal [R 0] 10 Alchemy alchemy Yes No Normal [R 0] 11 Economics economics Yes No Normal [R 0] 12 History history Yes No Normal [R 0] 13 Database database Yes Yes Normal [R 0] ---------------------------------------------------------- Any interface that exists on the interface stack is accessible from the ``operator`` or from any other interface object through the :py:meth:`getInterface method <armi.operators.Operator.getInterface>`. Interface Interaction Hooks --------------------------- Various interfaces need to interact with ARMI at various times. The point at which routines are called during a run is set by developers in interface *hooks*, as seen below. At each point in the flow chart, interfaces are interacted with one-by-one as the interface stack is traversed in order. .. figure:: /.static/armi_general_flowchart.png :align: center The computational flow of the interface hooks For example, input checking routines would run at beginning-of-life (BOL), calculation modules might run at every time node, etc. To accommodate these various needs, interface hooks include: * :py:meth:`interactInit <armi.interfaces.Interface.interactInit>` occurs right after all interfaces are initialized. 
* :py:meth:`interactBOL <armi.interfaces.Interface.interactBOL>` -- Beginning of life. Happens once as the run is starting up. * :py:meth:`interactBOC <armi.interfaces.Interface.interactBOC>` -- Beginning of cycle. Happens once per cycle. * :py:meth:`interactEveryNode <armi.interfaces.Interface.interactEveryNode>` -- Happens after every node step/flux calculation. * :py:meth:`interactEOC <armi.interfaces.Interface.interactEOC>` -- End of cycle. * :py:meth:`interactEOL <armi.interfaces.Interface.interactEOL>` -- End of life. * :py:meth:`interactError <armi.interfaces.Interface.interactError>` -- When an error occurs, this can run to clean up or print debugging info. * :py:meth:`interactCoupled <armi.interfaces.Interface.interactCoupled>` -- Happens after every node step/flux calculation, if tight physics coupling is active. * :meth:`~armi.interfaces.Interface.interactRestart` -- Happens when restarting from a previous run. Called prior to :meth:`~armi.interfaces.Interface.interactBOL` These interaction points are optional in every interface, and you may override one or more of them to suit your needs. You should not change the arguments to the hooks. Each interface has an ``enabled`` flag. If this is set to ``False``, then the interface's hook code will not be called even though the interface exists in the problem. This is useful for interfaces that use code from other interfaces. For example, if ``subchan`` is activated, it still uses some code in the ``thermo`` module to compute the fuel temperatures, so the ``thermo`` interface must be available in a ``getInterface`` call. Adding a new interface ---------------------- When using the Operators that come with ARMI, Interfaces are discovered using the :py:mod:`Plugin API <armi.plugins>` and inserted into the interface stack during the :py:meth:`createInterfaces <armi.operators.operator.Operator.createInterfaces>` method. 
How interfaces get called ------------------------- The hooks of interfaces are called during the main loop in :py:meth:`armi.operators.Operator.mainOperate`. There are a few special operator calls in there to methods like :py:meth:`armi.operators.Operator.interactAllBOL` that loop through the interface stack and call each enabled interface's ``interactBOL()`` method. If you override ``mainOperate`` in a custom operator, you will need to add these calls as deemed necessary to have the interfaces work properly. To use interfaces in parallel, please refer to :py:mod:`armi.mpiActions`. Plugins ======= Plugins are higher-level objects that can add things to the simulations like Interfaces, settings definitions, parameters, validations, etc. They are documented in :ref:`armi-app-making` and :py:mod:`armi.plugins`. Entry Points ------------ ARMI has a set of :py:mod:`Entry Points <armi.cli.entryPoint.EntryPoint>` that can run cases, launch the GUI, and perform various testing and utility operations. When you invoke ARMI with ``python -m armi run``, the ``__main__.py`` file is loaded and all valid Entry Points are dynamically loaded. The proper entry point (in this case, :py:class:`armi.cli.run.RunEntryPoint`) is invoked. As ARMI initializes itself, settings are loaded into a :py:class:`Settings <armi.settings.caseSettings.Settings>` object. From those settings, an :py:class:`Operator <armi.operators.operator.Operator>` subclass is built by a factory and its ``operate`` method is called. This fires up the main ARMI analysis loop and its interface stack is looped over as indicated by user input. ================================================ FILE: doc/developer/index.rst ================================================ ############## Developer Docs ############## This guide will get you started as an ARMI developer. It will teach you how to develop within ARMI and also guide you through some of the structure of the ARMI code. 
The intended audience for this section is reactor design engineers and computer scientists who want to integrate ARMI into their workflow and/or enhance ARMI for the community. ------------- .. toctree:: :maxdepth: 2 :numbered: :glob: guide making_armi_based_apps entrypoints parallel_coding testing documenting profiling * ================================================ FILE: doc/developer/making_armi_based_apps.rst ================================================ .. _armi-app-making: ********************** Making ARMI-based Apps ********************** Loading a reactor into the ARMI Framework is just the first step in pushing the envelope of reactor design and analysis. Activating a powerful collection of plugins and interfaces to automate your work is the next step to unlocking ARMI's potential. .. admonition:: Heads up A full tutorial on :ref:`armi-make-first-app` is here. To really make ARMI your own, you will need to understand a couple of concepts that enable developers to adapt and extend ARMI to their liking: * **Plugins**: An ARMI plugin is a collection of code that registers new functionality with the ARMI Framework. This can include new Interfaces, Settings, Parameter definitions, custom Components, Materials, Operators, and others. For a more complete reference, see the :py:mod:`Plugin API <armi.plugins>` documentation. It is typical for a plugin to provide related components to some specific type of physics or a specific external physics code or the like. Keeping the scope of a plugin limited helps users to understand where all of their settings and interfaces and parameters are coming from. * **ARMI-Based Applications**: A collection of plugins, along with application-specific customizations, working together with the ARMI Framework constitutes an "ARMI-Based Application". As an example, the TerraPower proprietary tool for modeling and analyzing sodium-cooled fast reactors is just such an application. 
It is from an Application that ARMI gets its collection of active plugins, which in turn dictate much of the ARMI Framework's behavior. Both of these concepts are discussed in depth below. ARMI Plugins ============ An ARMI Plugin is the primary means by which a developer or qualified analyst can go about building specific capability on top of the ARMI Framework. Even some of the functionalities that ship with the Framework are implemented internally using the Plugin system! The :py:mod:`armi.plugins` module contains all of the plugin "hook" definitions and their associated documentation. It is recommended to peruse those docs before getting started to get an idea of what is available. Some implementation details --------------------------- Plugins are designed to make it easy to build a plugin by copy/pasting from an existing plugin. However, having a deeper understanding of what is going on may be useful. Feel free to skip this section. The plugin system is built on top of a Python library called `pluggy <https://github.com/pytest-dev/pluggy>`_. Unless you plan on doing development within the ARMI Framework itself, it is unlikely that you will need to be overly familiar with it, but understanding how it works may be beneficial. Looking at the code in :py:class:`armi.plugins.ArmiPlugin`, you might notice that all of the methods are decorated with ``@HOOKSPEC`` (short for "hook specification"); this is how the Framework itself defines the interfaces that a plugin implementation can provide. This is a feature of ``pluggy``. You might also notice that all of the methods are **static methods**. This is because we do not actually expect an instance of an ``ArmiPlugin``; rather, we currently only use the class as a namespace to collect whatever hook implementations a Plugin provides. While ``pluggy`` is happy with any Python namespace containing hook implementations (e.g. 
module, class, object, function, etc.), we chose to make a base ``ArmiPlugin`` class for a couple of reasons: - Wrapping the specifications in a class allows you to implement them in a subclass, which enables tools like ``ruff`` to check your work and complain early if you do certain things wrong. - While we assume all plugins are stateless (hence all ``@staticmethods``), we may introduce stateful/configurable plugins later on. Starting out with a base class will make this transition easier. Making your own Plugin ---------------------- To get started on your own plugin you will want to subclass the :py:class:`armi.plugins.ArmiPlugin` class, and implement whichever Plugin APIs that you want your Plugin to provide. Mark each of your implementations with an ``@armi.plugins.HOOKIMPL`` decorator. Take a look at :py:class:`armi.physics.neutronics.NeutronicsPlugin` for an example. Make sure that in your implementation, you follow any rules or guidelines that are provided in the docstring for that Plugin API method. Failure to do so will lead to bugs and crashes in any ARMI-based Application that might use your plugin. .. important:: We do not actually instantiate Plugin classes. Plugins are currently assumed to be stateless (notice that all of the ``@staticmethods`` on all of the hook specifications). See the above section for why. It is likely that your Plugin class itself is only the tip of the iceberg that is the functionality provided by it. All of the various Interfaces, Settings, Parameters, etc. that your Plugin exposes to the Framework will likely live in other modules, which are imported and returned through your hook implementations. Again, see the Neutronics Plugin as an example. All of the other code will need to accompany your Plugin class somehow in a cohesive package. Packaging Python projects is beyond the scope of this document, but see `this page <https://docs.python-guide.org/writing/structure/>`_ for some guidance. 
Once you have a plugin together, continue reading to see how to plug it into the ARMI Framework as part of an Application. ARMI-Based Applications ======================= On its own, ARMI doesn't *do* much. Plugins provide more functionality, but even they aren't particularly useful on their own either. The magic really happens when you collect a handful of Plugins and plug them into the ARMI Framework. Such a collection is called an **ARMI-Based Application**. Once you have a collection of Plugins that you want to use, creating an ARMI-based Application is very easy. Start by creating a subclass of the :py:class:`armi.apps.App` class, and write its ``__init__()`` function to register whichever plugins you need with the app's ``_pm`` ``PluginManager`` object. Calling the base :py:class:`armi.apps.App` will start you out with the default Framework Plugins, but you are free to discard any of these that you wish. Optionally, you can implement the :py:meth:`armi.apps.App.splashText` property to render a custom header to be printed whenever your application is used. Example: :: >>> class MyApp(armi.apps.App): ... def __init__(self): ... # Adopt the base Framework Plugins. After calling ... # __init__(), they are in self._pm. ... armi.apps.App.__init__(self) ... ... # Register our own plugins ... from myapp.pluginA import PluginA ... from myapp.pluginB import PluginB ... ... self._pm.register(PluginA) ... self._pm.register(PluginB) ... ... @property ... def splashText(self): ... return """ ... =============================== ... == My First ARMI Application == ... =============================== ... """ Once you have defined your ``App`` class, you need to configure the ARMI Framework to use it. To do this, call the :py:func:`armi.configure()` function, passing an instance of your ``App`` class as the only argument. It is usually best to do this in your application's ``__init__.py`` or ``__main__.py``. 
Notice that in :py:mod:`armi.__main__`, ARMI configures `itself` with the base :py:class:`armi.apps.App` class! Example: :: >>> import armi >>> armi.configure(MyApp()) ================================================ FILE: doc/developer/parallel_coding.rst ================================================ ********************* Parallel Code in ARMI ********************* ARMI simulations can be parallelized using the `mpi4py <https://mpi4py.readthedocs.io/en/stable/mpi4py.html>`_ module. You should go there and read about collective and point-to-point communication if you want to understand everything in-depth. The OS-level ``mpiexec`` command is used to run ARMI on, say, 10 parallel processors. This fires up 10 identical and independent runs of ARMI; they do not share memory. If you change the reactor on one process, the reactors don't change on the others. Never fear. You can communicate between these processes using the Message Passing Interface (MPI) driver via the Python ``mpi4py`` module. In fact, ARMI is set up to do a lot of the MPI work for you, so if you follow these instructions, you can have your code working in parallel in no time. In ARMI, there's the primary processor (which is the one that does most of the organization) and then there are the worker processors, which do whatever you need them to in parallel. MPI communication crash course ============================== First, let's do a crash course in MPI communications. We'll only discuss a few important ideas, you can read about more on the ``mpi4py`` web page. The first method of communication is called the ``broadcast``, which happens when the primary processor sends information to all others. An example of this would be when you want to sync up the settings object (``self.cs``) among all processors. An even more common example is when you want to send a simple string command to all other processors. This is used all the time to inform the workers what they are expected to do next. 
Here is an example:: from armi import context cmd = f"val{context.MPI_RANK}" if context.MPI_RANK == 0: # The primary node will send the string 'bob' to all others cmd = "bob" context.MPI_COMM.bcast(cmd, root=0) else: # These are the workers. # They receive a value and set it to the variable cmd cmd = context.MPI_COMM.bcast(None, root=0) Note that the ``MPI_COMM`` object is from the ``mpi4py`` module that deals with the MPI drivers. The value of ``cmd`` on each processor before and after the ``bcast`` command is shown in the table. +--------------+-------+--------+--------+--------+ | | Proc0 | Proc1 | Proc2 | Proc3 | +--------------+-------+--------+--------+--------+ | Before bcast | "bob" | "val1" | "val2" | "val3" | +--------------+-------+--------+--------+--------+ | After bcast | "bob" | "bob" | "bob" | "bob" | +--------------+-------+--------+--------+--------+ The second important type of communication is the ``scatter``/``gather`` combo. These are used when you have a big list of work you'd like to get done in parallel and you want to farm it off to a bunch of processors. To do this, set up a big list of work to get done on the primary. Some real examples are that the list contains things like run control parameters, assemblies, or blocks. For a trivial example, let's add a bunch of values in parallel. First, let's create 1000 random numbers to add:: import random workList = [(random.random(), random.random()) for _i in range(1000)] Now we want to distribute this work to each of the worker processors (and take one for the primary too, so it's not just sitting around waiting). This is what ``scatter`` will do. But ``scatter`` requires a list that has length exactly equal to the number of processors available. You have some options here. 
Assuming there are 10 CPUs, you can either pass the first 10 values out of the list and keep sending groups of 10 values until they are all sent (multiple sets of transmissions) or you can split the data up into 10 evenly-populated groups (single transmission to each CPU). This is called *load balancing*. ARMI has utilities that can help called :py:func:`armi.utils.iterables.chunk` and :py:func:`armi.utils.iterables.flatten`. Given an arbitrary list, ``chunk`` breaks it up into a certain number of chunks and ``flatten`` does the opposite to reassemble the original list after processing. Let's look at an example script:: """mpi_example.py""" from random import random from armi import context from armi.utils import iterables # Generate a list of random number pairs: [[(v1,v2),(v3,v4),...]] workList = [(random(), random()) for _i in range(1000)] if context.MPI_RANK == 0: # Primary Process: Split the data and send it to the workers balanced = iterables.split(workList, context.MPI_SIZE) myValsToAdd = context.MPI_COMM.scatter(balanced, root=0) else: # Worker Process: Receive data, pass a dummy value to scatter myValsToAdd = context.MPI_COMM.scatter(None, root=0) # All processes do their bit of this work (adding) results = [] for num1, num2 in myValsToAdd: results.append(num1 + num2) # All processes call gather to send their results back to the # root process. (The result lists above are simply added to make # one list with MPI_SIZE sub-lists.) allResultsLoadBalanced = context.MPI_COMM.gather(results, root=0) # Primary Process: Flatten the multiple lists # (from each process), and sum them. if context.MPI_RANK == 0: # Flatten the MPI_SIZE number of sub lists into one list allResults = iterables.flatten(allResultsLoadBalanced) # Sum the final list, and print the result print("The total sum is: {0:10.5f}".format(sum(allResults))) Remember that this code is running on all processors. 
So it's just the ``if rank == 0`` statements that differentiate between the primary and the workers. To really understand what this script is doing, try to run it in parallel and see what it prints out:: mpiexec -n 4 python mpi_example.py MPI Communication within ARMI ============================= Now that you understand the basics, here's how you should get your :py:class:`armi.interfaces.Interface` to run things in parallel in ARMI. You don't have to worry too much about the ranks, etc. because ARMI will set that up for you. Basically, the interfaces are executed by the primary node unless you say otherwise. All workers are stalled in an ``MPI.bcast`` waiting for your command! The best coding practice is to create an :py:class:`~armi.mpiActions.MpiAction` subclass and override the :py:meth:`~armi.mpiActions.MpiAction.invokeHook` method. `MpiActions` can be broadcast, gathered, etc. and within the :py:meth:`~armi.mpiActions.MpiAction.invokeHook` method have ``o``, ``r``, and ``cs`` attributes. .. warning:: When communicating raw Blocks or Assemblies all references to parents are lost. If a whole reactor is needed use ``DistributeStateAction`` and ``syncMpiState`` (shown in last example). Additionally, note that if a ``self.r`` exists on the ``MpiAction`` prior to transmission it will be removed when ``invoke()`` is called. If you have a bunch of blocks that you need independent work done on, always remember that unless you explicitly MPI transmit the results, they will not survive on the primary node. For instance, if each CPU computes and sets a block parameter (e.g. ``b.p.paramName = 10.0``), these **will not** be set on the primary! There are a few mechanisms that can help you get the data back to the primary reactor. .. note:: If you want similar capabilities for objects that are not blocks, take another look at :py:func:`armi.utils.iterables.chunk`. 
Example using ``bcast`` ----------------------- Some actions that perform the same task are best distributed through a broadcast. This makes sense if you are parallelizing code that is a function of an individual assembly or block. In the following example, the interface simply creates an ``Action`` and broadcasts it as appropriate:: from armi import context class SomeInterface(interfaces.Interface): def interactEveryNode(self, cycle, node): action = BcastAction() context.MPI_COMM.bcast(action) results = action.invoke(self.o, self.r, self.cs) # results is a list of len(self.r) for a, aResult in zip(self.r, results): a.p.someParam = aResult class BcastAction(mpiActions.MpiAction): def invokeHook(self): # do something with the local self.r, self.o, and self.cs. # in this example... do stuff for assemblies. results = [] for a in self.mpiIter(self.r): results.append(someFunction(a)) # in this usage, it makes sense to gather the results allResults = self.gather(results) # Only primary node has allResults if allResults: # Flatten results returns the original order after # having made lists of mpiIter results. return self.mpiFlatten(allResults) .. warning:: Currently, there is no guarantee that the reactor state is the same across all nodes. Consequently, the above code should really contain a ``mpiActions.DistributeStateAction.invokeAsMaster`` call prior to broadcasting the ``action``. See example below. Example using ``scatter`` ------------------------- When running two independent actions at the same time, you can use ``scatter`` to distribute the work. 
The following example shows how different operations can be performed in parallel:: class SomeInterface(interfaces.Interface): def interactEveryNode(self, cycle, node): actions = [] # pseudo code for getting a bunch of different actions for opt in self.cs['someSetting']: actions.append(factory(opt)) distrib = mpiActions.DistributeStateAction() distrib.broadcast() # this line replaces any existing reactor on workers to ensure consistency distrib.invoke(self.o, self.r, self.cs) # the 3 lines above are equivalent to: # mpiActions.DistributeStateAction.invokeAsMaster(self.o, self.r, self.cs) results = mpiActions.runActions(self.o, self.r, self.cs, actions) # do something to apply the results. for bi, b in enumerate(self.r.getBlocks()): b.p.what = extractBlockResult(results, bi) def factory(opt): if opt == 'WHAT': return WhatAction() class WhatAction(mpiActions.MpiAction): def invokeHook(self): # does something # somehow gathers results. return self.gather(results) A simplified approach --------------------- Transferring state to and from a Reactor can be complicated and add a lot of code. An alternative approach is to ensure that the reactor state is synchronized across all nodes, and then use the reactor instead of raw data:: class SomeInterface(interfaces.Interface): def interactEveryNode(self, cycle, node): actions = [] # pseudo code for getting a bunch of different actions for opt in self.cs['someSetting']: actions.append(factory(opt)) mpiActions.DistributeStateAction.invokeAsMaster(self.o, self.r, self.cs) results = mpiActions.runActions(self.o, self.r, self.cs, actions) class WhatAction(mpiActions.MpiAction): def invokeHook(self): # do something for a in self.generateMyObjects(self.r): a.p.someParam = func(a) for b in a: b.p.someParam = func(b) # notice we don't return a value, but instead just sync # the state, which updates the primary node with the # params that the workers changed. self.r.syncMpiState() .. 
warning:: Only parameters that are set are synchronized to the primary node. Consequently if a mutable parameter (e.g. ``b.p.depletionMatrix`` which is of type ``BurnMatrix``) is changed, it will not natively be synced. To flag it to be synced, ``b.p.paramName`` must be set, even if it is to the same object. For this reason, setting parameters to mutable objects should be avoided. Further, if the mutable object has a reference to a large object, such as a composite or cross section library, it can be very computationally expensive to pass all this data to the primary node. See also: :py:mod:`armi.reactor.parameters` ================================================ FILE: doc/developer/profiling.rst ================================================ ************** Profiling ARMI ************** Python is slow, so it's important to profile code to keep it running reasonably quickly. Using the basic `Python profiler <https://docs.python.org/3/library/profile.html>`_ is the best way to get started. Once you have a ``.stats`` file, however, we highly recommend using a visualizer. The profiler visualizer `gprof2dot <http://code.google.com/p/jrfonseca/wiki/Gprof2Dot#Windows_users>`_ is an invaluable tool for taking a look at the profiler traces. You have to install Graphviz also, which contains the ``dot`` program. The basic commands to run are:: python -m gprof2dot -f pstats <mystatsfile>.stats | dot -Tpng -o <mydesiredimagename>.png This produces images like this: .. figure:: /.static/buildMacros.png :align: center An example of the profiler output rendered to a png. ================================================ FILE: doc/developer/standards_and_practices.rst ================================================ .. _armi-stds: ********************************** Standards and Practices for Coding ********************************** The ARMI coding standards are a set of guidelines for helping to create a more consistent and clear code base. 
Subpart 2.7 402 of `NQA-1 <http://nqa-1.com/files/NQA-1%20Nuclear%20Quality%20Manual.pdf>`_ states, "Software design verification shall evaluate... the design approach and ensure internal completeness, consistency, clarity and correctness." While these are required by NQA-1, the idea is that an ARMI developer, who is familiar with these coding standards, should be able to jump from one module to another without changing their coding style. .. tip :: :class: warning The overall theme is: **Balance clarity with conciseness.** Just try to be as clear as possible, while using as few words as possible. .. important :: Most of the guidelines can be broken, but all deviations need to be justified. It is up to the code reviewers to determine whether the justification was adequate. Developers and reviewers should consult the standards/guidelines while writing and reviewing code to ensure consistency. Code reviewers should make sure to be familiar with the standards, so that their comments are consistent with other reviewers. Code formatting with ruff ========================= ARMI uses the Python code formatter `ruff <https://docs.astral.sh/ruff/>`_. So while developing code in ARMI it is important to remember to use the ``ruff`` formatter before pushing any code to the repo. All changes pushed to ARMI on github.com will be automatically checked to see if they conform to the ``ruff`` code formatter standards. The ``ruff`` formatter provides 100% consistency in ARMI for: whitespace, line length, trailing commas, and string formatting. And it is easy to run on the command line: .. code-block:: bash ruff format . Code linting with ruff ====================== ARMI also uses the amazing Python linter `ruff <https://docs.astral.sh/ruff/>`_. Again, any new code you add must have zero ``ruff`` warnings or errors. This is very easy to run on the command line: .. code-block:: bash ruff check . 
--fix Remove commented-out code ========================= If you were testing code and you commented out a block, delete it before sending it in for code review/production. If you want to see the old code later, it will still be in the Git history. Avoid hard-coding run parameters ================================ Use the global settings object ``self.cs`` for most user-settable parameters that determine the run environment, etc. This will help keep the amount of repeated code down. Also, do not **ever** code the following things into the code: user names, passwords, or file paths on your computer. Use environmental variables where possible and user-configurable settings elsewhere. You can also use the ``armi.ROOT`` variable (for the active code directory) or ``armi.RES``, and some other useful root-level variables. Avoid the global keyword ======================== At all costs, avoid use of the ``global`` keyword in your code. Using this keyword can, and usually does, create extremely fragile code that is nigh-impossible to use a debugger on. Especially as part of object-oriented programming, this is extremely lazy design. A careful reader might notice that there are several files in ARMI that are currently using the ``global`` keyword. These are all scheduled for a refactor to remove the use of ``global``. But, for now, changing the code would cause more annoyance for the ARMI ecosystem userbase than fixing it would. Still, all of those instances in ARMI will be fixed soon. No new uses of ``global`` will make it through the ARMI pull request process. Naming conventions ================== .. note:: There is a good argument to make that ARMI's use of ``camelCase`` makes the code less readable than if ARMI used ``snake_case``. Unfortunately, making the switch now would affect such a large percentage of the API that it would be more hassle for our user base than it is worth to change. 
Use meaningful names -------------------- Use descriptive names for variables, functions, methods, classes, and files. This might mean using a longer name like ``correlationMatrix`` instead of a shorter one like ``cm``. General conventions ------------------- Here are some general naming guidelines that are always applicable, but particularly applicable to public classes, functions, and methods and their signatures (the signature includes the parameters): * Variables that you designate as unused should be prefaced with an underscore (``_``). * Do not use Python `reserved keywords <https://realpython.com/lessons/reserved-keywords/>`_ as variable names. * Try to use names that are pronounceable. (Well-established variable names from equations are acceptable.) * Keep names concise and expressive. (An exception is test method names, which may be longer and more descriptive.) * Avoid abbreviations and acronyms, unless they are well understood by subject-matter experts (e.g. DB for database, XS for cross-sections, BU for burn up). When using acronyms or abbreviations with ``camelCase`` or ``PascalCase``: * Use the same case for two-letter acronyms/abbreviations (e.g. ``diskIO``, ``ioOperation``) * Use different case for acronyms/abbreviations with more than two characters (e.g. ``renderHtml()``, ``path``) For consistency, use the following naming conventions: package names Python packages, i.e. folders with an ``__init__.py``, **shall** use ``camelCase``. module names Python modules, i.e. python files, **shall** use ``camelCase``. **Caveat:** Test modules are prefixed with ``test_``. module constants Module-level "constants" **shall** be all capitals with an underscore separating words. function names Functions **shall** use ``camelCase``. If the function is only intended to be used within that module, prefix it with a single leading underscore to indicate it is "module protected." variable names Use ``camelCase``. In the odd scenario that the variable is not used (e.g. 
a method returns a tuple and you only want the first item), prefix it with a single leading underscore to indicate it is "module protected." class names Classes **shall** use ``PascalCase``. If the class is only intended to be inherited by other classes within the module, prefix the class name with an underscore to indicate it is "module protected." class attribute, instance attribute and method names Use ``camelCase``. If the method is only intended to be used within that module, prefix it with a single leading underscore to indicate it is "class protected." Naming quick-reference ---------------------- .. list-table:: :widths: 40 30 30 :header-rows: 1 * - Item to be named - Public - Private * - package (folder with an ``__init__.py``) - ``packageName`` - N/A * - module (a ``.py`` file) - ``moduleName`` - N/A * - module constant - ``SPEED_OF_LIGHT_IN_METERS_PER_SECOND`` - ``_ONE_OVER_PI`` * - method or function - ``doSomeAction()`` - ``_doSomeAction()`` * - class or instance attribute - ``assemblies`` - ``_assemblies`` * - variable names - ``linearHeatGenerationRate`` - ``_unusedDescription`` There are no "private" variables; use this convention for an unused variable. Common naming conventions within ARMI ------------------------------------- Single character variable names are not usually "clear" or "concise"; however, the following variables are a well-established convention within ARMI and should be used by developers: * ``r`` when referring to a reactor, and * ``o`` when referring to an operator Other names are also consistently used throughout ARMI for specific objects: * ``cs`` when referring to a :py:class:`armi.settings.Settings` class; this should not be confused with the ``.settings`` attribute of ``ArmiObject``. * ``lib`` when referring to a cross section library (would have been better as ``xsLib``) Prefer shorter methods ====================== A method should have one clear purpose. 
If you are writing a method that does one thing after the other, break it up into multiple methods and have a primary method call them in order. If your method is longer than 100 lines, see if you can't break it up. This does a few things: 1. It makes the code easier to read. 2. It makes the code chunks more reusable. 3. It makes the code easier to test. 4. It makes the code easier to profile, for performance. Avoid repeating code ==================== In other words, don't repeat yourself. (`D. R. Y. <https://en.wikipedia.org/wiki/Don't_repeat_yourself>`_). Repetitious code is harder to read, and harder for others to update. If you ever find yourself copying and pasting code, consider pulling the repeated code out into its own function, or using a loop. Public methods should have docstrings ===================================== Always create the `proper docstrings <https://numpydoc.readthedocs.io/en/latest/example.html>`_ for all public functions and public classes. Unit tests ========== All ARMI developers are required to write unit tests. .. important :: If you add a new function to the code base, you are required to add unit tests to cover that function. ARMI uses the ``pytest`` library to drive tests, therefore tests need to be runnable from the commandline by ``python -m pytest armi``. Furthermore, for consistency: * Each individual unit test should take under 10 seconds, on a modern laptop. * All unit tests should be placed into a separate module from production code that is prefixed with ``test_``. * All unit tests should be written in object-oriented fashion, inheriting from ``unittest.TestCase``. * All test method names should start with ``test_``. * All test method names should be descriptive. If the test method is not descriptive enough, add a docstring. * Unit tests should have at least one assertion. Import statements ================= Python allows many variations on the import statement, including relative imports, renaming and others. We prefer: #. 
one import per line, #. no relative imports #. no periods #. explicit module/namespace usage Import ordering --------------- For consistency, import packages in this order: 1. Python built-in packages 2. External third-party packages 3. ARMI modules Place a single line between each of these groups, for example: .. code-block:: python :linenos: import os import math import numpy as np from matplotlib import pyplot from armi import runLog Don't create naked exceptions. ============================== When creating ``try``/``except`` blocks, a naked exception is when the ``except`` command is not followed by a specific exception type. Naked exceptions hide a lot of sins, particularly unexpected bugs. `This article <http://www.wilfred.me.uk/blog/2013/11/03/no-naked-excepts/>`_ explains the concept well, as well as a few exceptions to this general rule. Examples: Bad :: >>> try: >>> stuff() >>> except: >>> runLog.warning('Some error occurred in stuff().') Good (for one exception type) :: >>> try: >>> stuff() >>> except AttributeError: >>> runLog.warning('Some error occurred in stuff().') Good (for multiple exception types) :: >>> try: >>> stuff() >>> except (ZeroDivisionError, FloatingPointError): >>> runLog.warning('Some error occurred in stuff().') Data model ========== Any reactor state information that is created by an ``Interface`` should be stored in the ARMI data model. The goal is that given minimal information (i.e. case settings and blueprints) ARMI should be able to load an entire reactor simulation from a given database. If you add state data to your modeling that isn't stored in the reactor, or add new input files, you will break this paradigm and make everyone's life just a little bit harder. Input files =========== ARMI developers **shall** use one of the following well-defined, Python-supported, input file formats. .json JSON files are used for a variety of data-object representations. 
There are some limitations of JSON, in that it does not easily support comments. JSON is also very strict. .yaml YAML files are like JSON files but can have comments in them. General do's and don'ts ======================= Do not use ``print`` ARMI code should not use the ``print`` function; use one of the methods within ``armi.runLog``. Do not add new ``TODO`` statements to the repo. If your new ``TODO`` statement is important, it should be a GitHub Issue. Similarly, never mark the code with ``FIXME`` or ``XXX``; open a ticket. Do not link GitHub tickets or PRs in code. The idea in ARMI is that either something is worth documenting well in a docstring, or the docs, or it is not. And just linking a ticket or PR in a docstring is not helpful. ================================================ FILE: doc/developer/testing.rst ================================================ .. _armi-testing: ****************** ARMI Testing Tools ****************** ARMI has many useful tools to streamline tests in the plugins. Included here are some popular ones. If you are trying to write a new unit test, chances are something like it has been done before and you do not need to design it from scratch. Look around ARMI and other plugins for examples of tests. The ``armi.testing`` module is always a good place to start. Testing with runLog =================== Use Case: Test code that prints to stdout While there are some other mocking examples in ARMI, none are as heavily used as ``mockRunLogs``. ``mockRunLogs.BufferLog()`` is used to capture the ``runLog`` output instead of printing it. In `test_comparedb3.py <https://github.com/terrapower/armi/blob/49f357b2a92aaffaf883642f7b86fbe21b0e0272/armi/bookkeeping/db/tests/test_comparedb3.py>`_, there is a (simplified here) use case. A portion of the test for ``_diffSpecialData`` wants to confirm the below printout has happened, so it uses the ``getStdout()`` method to check that the expected printout exists. Example of ``mockRunLogs``: .. 
code-block:: python from armi.tests import mockRunLogs class TestCompareDB3(unittest.TestCase): # ... def test_diffSpecialData(self): dr = DiffResults(0.01) fileName = "test.txt" with OutputWriter(fileName) as out: with mockRunLogs.BufferLog() as mock: #... skip for clarity: create refData & srcData _diffSpecialData(refData, srcData, out, dr) self.assertEqual(dr.nDiffs(), 0) self.assertIn("Special formatting parameters for", mock.getStdout()) There are examples of this throughout ARMI. Search for ``BufferLog`` or ``getStdout`` in the code to find examples. Self-Cleaning Directories ========================= Use Case: Automatically cleans up tests that create files: .. code-block:: python from armi.utils.directoryChangers import TemporaryDirectoryChanger Two main uses of this class in testing: 1. Standalone test that calls code that creates something (`test_operators.py <https://github.com/terrapower/armi/blob/2bcb03689954ae39f3044f18a9a77c1fb7a0e63b/armi/operators/tests/test_operators.py#L237-L242>`_): .. code-block:: python def test_snapshotRequest(self, fakeDirList, fakeCopy): fakeDirList.return_value = ["mccAA.inp"] with TemporaryDirectoryChanger(): with mockRunLogs.BufferLog() as mock: self.o.snapshotRequest(0, 1) self.assertIn("ISOTXS-c0", mock.getStdout()) 2. Setup and teardown of a testing class, where all/most of the tests create something (`test_comparedb3.py <https://github.com/terrapower/armi/blob/2bcb03689954ae39f3044f18a9a77c1fb7a0e63b/armi/bookkeeping/db/tests/test_comparedb3.py#L36-L52>`_): .. 
code-block:: python class TestCompareDB3(unittest.TestCase): """Tests for the compareDB3 module.""" def setUp(self): self.td = TemporaryDirectoryChanger() self.td.__enter__() def tearDown(self): self.td.__exit__(None, None, None) def test_outputWriter(self): fileName = "test_outputWriter.txt" with OutputWriter(fileName) as out: out.writeln("Rubber Baby Buggy Bumpers") txt = open(fileName, "r").read() self.assertIn("Rubber", txt) Note that sometimes it is necessary to give the temporary directory change object a non-default root path: .. code-block:: python Include root argument THIS_DIR = os.path.dirname(__file__) # ... def test_something(): with TemporaryDirectoryChanger(root=THIS_DIR): # test something Load a Test Reactor =================== Use Case: You need a full reactor for a unit test .. warning:: This is computationally expensive, and historically over-used for unit tests. Consider whether mocking or BYO components (below) can be used instead. To get the standard ARMI test reactor, import this: .. code-block:: python from armi.reactor.tests.test_reactors import loadTestReactor This function will return a reactor object. And it takes various input arguments to allow you to customize that reactor: .. code-block:: python def loadTestReactor( inputFilePath=TEST_ROOT, customSettings=None, inputFileName="armiRun.yaml", ): So many interfaces and methods require an operator or a reactor, and ``loadTestReactor`` returns both. From there you can use the whole reactor or just grab a single ARMI object, like a `fuel block <https://github.com/terrapower/armi/blob/58b0e8198d2f8a217c1db84e97127adfe7e91c09/armi/reactor/tests/test_blocks.py#L3030-L3036>`_: .. code-block:: python _o, r = loadTestReactor( os.path.join(TEST_ROOT, "smallestTestReactor"), inputFileName="armiRunSmallest.yaml", ) # grab a pinned fuel block b = r.core.getFirstBlock(Flags.FUEL) If you need a full reactor for a unit test, always try to start with the ``smallestTestReactor.yaml`` shown above first. 
Your tests will run faster if you pick the smallest possible reactor that meets your needs. Less is more. Sidebar: Speed up Test Reactor Tests ------------------------------------ Maybe you do need an entire reactor for your unit test, but you don't need a very large one. In that case, ARMI comes with a few standard tools: #. ``from armi.testing import reduceTestReactorRings`` - Reduce the size of the test reactor you are using. #. ``from armi.testing import getEmptyCartesianReactor`` - Provides a test cartesian reactor with no assemblies or blocks inside. #. ``from armi.testing import getEmptyHexReactor`` - Provides a test hex reactor with no assemblies or blocks inside. Test Blocks and Assemblies ========================== Use Case: Your unit test needs some ARMI objects, but not a full test reactor. ARMI provides several helpful tools for generating simple blocks and assemblies for unit tests: * ``from armi.reactor.tests.test_assemblies import buildTestAssemblies`` - Two hex blocks. * ``from armi.reactor.tests.test_blocks import buildSimpleFuelBlock`` - A simple hex block containing fuel, clad, duct, and coolant. * ``from armi.reactor.tests.test_blocks import loadTestBlock`` - An annular test block. ================================================ FILE: doc/developer/tooling.rst ================================================ .. _armi-tooling: ************************** Tooling and Infrastructure ************************** Good Commit Messages ==================== The ARMI project follows a few basic rules for "good" commit messages: * The purpose of the message is to explain to the changes you made to a stranger 5 years from now. * Keep your writing short and to the point. * The first line of each commit must be shorter than 50 characters. * Commit messages should be active voice, present tense. * Multi-line comments are allowed, but make sure the second line of the commit is blank: .. code-block:: Adding this commit for REASONS. 
Here is some super important extra info. Oh, there is so much extra info. This section * is * totally * optional. Good Pull Requests ================== A good commit is like a sentence; it expresses one complete thought. In that context, a good Pull Request (PR) is like a paragraph; it contains a few sentences that contain one larger thought. A good PR is *not* a chapter or an entire book! It should not contain multiple independent ideas. One Idea = One PR ----------------- .. important :: If you *can* break a PR into smaller PRs, containing unrelated changes, please do. It is a discourtesy to your reviewers to make them review a PR with multiple, unrelated changes. It forces them to look at every line of diff in your PR and figure out which change it belongs to. They are busy people, and it will save them time and effort if your PR only has one main idea. If your PRs are smaller, you will notice a great increase in the quality of the reviews you get. Don't open until it is ready ---------------------------- .. important :: Wait until your PR is complete to open it. Your PR isn't complete when the code works, it is complete when the code is polished and all the tests are written and working. The idea here is: as soon as you open a PR, people will start spending their time looking at it. And their time is valuable. Even though GitHub allows you to `open a Draft PR <https://github.blog/2019-02-14-introducing-draft-pull-requests/>`_, this is not the default option in ARMI. It should not be your workflow to open a Draft PR by default. We prefer to keep the PR list as short as possible. A good rule of thumb is: don't open a PR until you think it is ready for final review. Test It ------- .. important :: If a PR doesn't have any changes to testing, it probably isn't complete. Unless a PR is just documentation or linting, it almost certainly needs testing to be complete. For example: * If a PR adds new code, that code needs new tests to prove it is working. 
* If a PR changes existing code, there needs to be test changes to prove the code still works. * If a PR fixes a bug, there needs to be a test to prove the bug is fixed. If the changes in the PR are worth the time to make, they are worth the time to test. Help your reviewer by proving your code works. Document It ----------- .. important :: If it isn't documented, it doesn't exist. We auto-document the API, so don't worry about that. But when it comes to documentation, write it for somebody who is new to the code base 3 years from now, who needs to understand it in nitty- gritty detail to fix a bug without you. Think about variable names, comments, and docstrings. Also consider (if you are making a major change) that you might be making something in the docs out-of- date. Watch for Requirements ---------------------- When you are touching code in ARMI, watch out for the docstrings in the methods, classes, or modules you are editing. These docstrings might have bread crumbs that link back to requirements. Such breadcrumbs will look like: .. code-block:: """ .. test: This is a requirement test breadcrumb. .. impl: This is an requirement implementation breadcrumb. """ If you touch any code that has such a docstring, even at the top of the file, you are going to be responsible for not breaking that code/functionality. And you will be required to explicitly call out that you touch such a code in your PR. Your PR reviewer will take an extra look at any PR that touches a requirement test or implementation. And you will need to add a special note in your PR description, under a field called "One-line Impact on Requirements". This note can be as long as it needs to be, but can only be on one line. Packaging and dependency management =================================== There are many ways to manage and package a Python project. We try to centralize as much of this as possible in a ``pyproject.toml``, following existing conventions. 
In particular, we follow `the official Python packaging guidance <https://packaging.python.org/en/latest/>`_. pyproject.toml -------------- As much as possible, the ARMI team will try to centralize our installation and build systems through the top-level ``pyproject.toml`` file. The only exception will be our documentation, which has much customization done through the Sphinx ``doc/conf.py`` file. The packages listed in the ``install_requires`` argument to ``setup()`` are meant to express, as abstractly as possible, the packages that need to be installed **somehow** for the package to work. In addition, ``extras_require`` are used to specify other packages that are not strictly required, but if installed enable extra functionality, like unit testing or building documentation. Third-Party Licensing --------------------- Be careful when including any dependency in ARMI (say in the ``pyproject.toml`` file) not to include anything with a license that supersedes our Apache license. For instance, any third-party Python library included in ARMI with a GPL license will make the whole project fall under the GPL license. But a lot of potential users of ARMI will want to keep some of their work private, so we can't allow any GPL tools. For that reason, it is generally considered best-practice in the ARMI ecosystem to only use third- party Python libraries that have MIT or BSD licenses. Releasing a New Version of ARMI =============================== We use the common ``major.minor.bump`` version scheme where a version string might look like ``0.1.7``, ``1.0.0``, or ``12.3.123``. Each number has a specific meaning: * ``major`` - Revved for major milestones of the ARMI project. * ``minor`` - Revved for the usual release, real feature work completed. * ``bump`` - Revved for very small releases, but still well-tested with a stable API. **NOTE**: Changes to documentation or testing probably do not deserve a version bump. 
**Any change to a major or minor version is considered a release.** Only a core member of the ARMI team may release a new version, or add a tag of any kind to the repository. The rule is *the only tags in the ARMI repo are for official versions*. If you want to release a version of ARMI, you will need admin privileges to multiple TerraPower repos on GitHub. Every release should follow this process: 1. Ensure all unit tests pass and the documentation is building correctly. 2. Create a release PR: - Bump the ``version`` string in ``pyproject.toml``. - Now that the release is done, hard-copy the SCR information into the last releases RST file, so we don't keep regenerating it: ``doc/qa_docs/scr/x.y.rst``. - Update the commit in ``doc/qa_docs/scr/latest_scr.rst`` to the release commit. 3. Tag the commit after it goes into the repo: - From this commit: ``git tag -a 1.0.0 -m "Release v1.0.0"`` - Or from another commit: ``git tag -a 1.0.0 <commit-hash> -m "Release v1.0.0"`` - Pushing to the repo: ``git push origin 1.0.0`` - **NOTE** - The ONLY tags in the ARMI repo are for official version releases. 4. Also add the release notes on `the GitHub UI <https://github.com/terrapower/armi/releases>`__. 5. Follow the instructions `here <https://github.com/terrapower/terrapower.github.io>`_ to archive the new documentation. 6. Tell everyone! Logging with runLog =================== ARMI provides a logging tool, ``runLog``, to be used in place of ``print`` for all logging during a simulation. It is very easy to use: .. code-block:: python from armi import runLog runLog.debug("This will only be seen if you run in debug mode.") runLog.info("Default log level.") runLog.error("The run will die, or the results are invalid.") .. note:: Calling ``runLog.error()`` is not the same as calling Python's ``raise error``; a log statement does not kill a run, or raise an error, it just puts some text in the log. When an ARMI simulation is run, it will be run at a particular log level. 
All log messages that are at or above that log level will be seen during the simulation and in the final log files. To control the log level of an ARMI run, you use the setting ``verbosity`` in your settings file. You will probably be running ARMI in a parallel mode, and if you want the child processes to have a different log level than the main process, you can set ``branchVerbosity`` to the desired verbosity of all the child processes. For reference, here are the log levels that ARMI supports: .. list-table:: :widths: 20 20 60 :header-rows: 1 * - Level - Value - When to Use * - debug - 10 - This will only be seen if the simulation is run in debug mode. * - extra - 15 - More detailed than will normally be seen in a usual simulation. * - info - 20 - Use only for things that important enough to be visible during every normal simulation. * - important - 25 - More important than the default log level, but not a problem or issue. * - prompt - 27 - RESERVED for the ARMI CLI. * - warning - 30 - Use ONLY for issues that may or may not invalidate the simulation results. * - error - 40 - Use ONLY for problems that halt the program or invalidate the simulation results. * - header - 100 - Use ONLY to define major sections in the log files. Blocking Duplicate Logs ----------------------- Sometimes you want to add a log message, but based on program logic it might pop up in the final log file multiple times, even thousands of times. And probably you do not want that. Happily, the ``runLog`` tool provides a simple argument that will stop a single log line from being logged more than once. Here is a (silly) example of a heavily duplicate log message: .. code-block:: python for _i in range(1000): runLog.warning("Something wicked this way comes.") That log message gets printed 1,000 times, but we can ensure it is only printed once: .. 
code-block:: python for _i in range(1000): runLog.warning("Something wicked this way comes.", single=True) Obviously, this will not be useful in every scenario. But it is a handy tool to clean up your log files. Module-Level Logging -------------------- The ``runLog`` tool also allows for you to log one module differently from the rest of the code base. For instance, you could set the log level to "debug" in just one Python file, to help testing during development. That functionality is provided by what might look like a bare Python logging import, but is actually calling the same underlying ``armi`` logging tooling: .. code-block:: python import logging runLog = logging.getLogger(__name__) In either case, you can then log using the same, easy interface: .. code-block:: python runLog.info('Normal stuff.') runLog.error('Oh no!') Finally, you can change the logging level in the above scenario by doing: .. code-block:: python runLog.setVerbosity(logging.DEBUG) # or runLog.setVerbosity('debug') ================================================ FILE: doc/gallery-src/README.rst ================================================ ####### Gallery ####### This section demonstrates some capabilities and offer quick reference for common use cases. Tutorials with more explanatory narratives are available in :doc:`/tutorials/index`. .. tip:: Many of the examples build ARMI objects from test cases to be concise. You are expected to define your own objects for your reactors in inputs and then use these examples on things relevant to you. ================================================ FILE: doc/gallery-src/analysis/README.rst ================================================ Analysis -------- This section contains various examples for performing analyses using the ARMI framework's data model. 
================================================ FILE: doc/gallery-src/analysis/run_blockMcnpMaterialCard.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Write MCNP Material Cards ========================= Here we load a test reactor and write each component of one fuel block out as MCNP material cards. Normally, code-specific utility code would belong in a code-specific ARMI plugin. But in this case, the need for MCNP materials cards is so pervasive that it made it into the framework """ from armi import configure from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors from armi.utils.densityTools import formatMaterialCard # configure ARMI configure(permissive=True) _o, r = test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0] for ci, component in enumerate(bFuel, start=1): ndens = component.getNumberDensities() # convert nucName (str) keys to nuclideBase keys ndensByBase = {r.nuclideBases.byName[nucName]: dens for nucName, dens in ndens.items()} print("".join(formatMaterialCard(ndensByBase, matNum=ci))) ================================================ FILE: doc/gallery-src/analysis/run_hexBlockToRZConversion.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Hex block to RZ geometry conversion =================================== Often, parts of a reactor model must be transformed to a different geometry in order to perform a certain type of physics calculation. For example, in some fast reactor lattice physics calculations, detailed descriptions of control assemblies must be mapped to equivalent 1-D cylindrical models. This example shows how a control assembly defined in full hex-pin detail can be automatically converted to an equivalent 1-D RZ case, including an outer ring of fuel to drive the case. This conversion includes rings for control material, gap, cladding (on both sides of each ring of control material), coolant, duct, and fuel. The color of the plot is proportional to the mass density. Given this transformation, a 1-D lattice physics solver can be executed to compute accurate cross sections. By automating these kinds of geometry conversions, ARMI allows core designers to maintain the design in real geometry while still performing appropriate approximations for efficient analysis. .. warning:: This uses :py:mod:`armi.reactor.converters.blockConverters`, which currently only works on a constrained set of hex-based geometries. For your systems, consider these an example and starting point and build your own converters as appropriate. 
""" from armi import configure from armi.reactor.converters import blockConverters from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors # configure ARMI configure(permissive=True) _o, r = test_reactors.loadTestReactor() # fully heterogeneous bFuel = r.core.getBlocks(Flags.FUEL)[0] bControl = r.core.getBlocks(Flags.CONTROL)[0] converter = blockConverters.HexComponentsToCylConverter(sourceBlock=bControl, driverFuelBlock=bFuel, numExternalRings=1) converter.convert() converter.plotConvertedBlock() # partially heterogeneous converter = blockConverters.HexComponentsToCylConverter(sourceBlock=bFuel, ductHeterogeneous=True) converter.convert() converter.plotConvertedBlock() ================================================ FILE: doc/gallery-src/analysis/run_hexReactorToRZ.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Hex reactor to RZ geometry conversion ===================================== This shows how an entire reactor specified in full hex detail can be automatically converted to a 2-D or 3-D RZ case with conserved mass. .. warning:: This uses :py:mod:`armi.reactor.converters.geometryConverters`, which will only work on a constrained set of hex-based geometries. For your systems, consider these an example and starting point and build your own converters as appropriate. 
""" import math import matplotlib.pyplot as plt from armi import configure from armi.reactor.converters import geometryConverters from armi.reactor.tests import test_reactors from armi.utils import plotting # configure ARMI configure(permissive=True) o, r = test_reactors.loadTestReactor() kgFis = [a.getHMMass() for a in r.core] plotting.plotFaceMap(r.core, data=kgFis, labelFmt="{:.1e}") converterSettings = { "radialConversionType": "Ring Compositions", "axialConversionType": "Axial Coordinates", "uniformThetaMesh": True, "thetaBins": 1, "axialMesh": [50, 100, 150, 175], "thetaMesh": [2 * math.pi], } converter = geometryConverters.HexToRZConverter(o.cs, converterSettings) # makes new reactor in converter.convReactor converter.convert(r) figs = converter.plotConvertedReactor() plt.show() ================================================ FILE: doc/gallery-src/framework/README.rst ================================================ Framework --------- This section provides a range of examples for utilizing the ARMI framework and its data model to explore the state of a reactor. ================================================ FILE: doc/gallery-src/framework/run_blockVolumeFractions.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# -*- coding: utf-8 -*- """ Computing Component Volume Fractions on a Block with Automatic Thermal Expansion ================================================================================ Given an :py:mod:`Block <armi.reactor.blocks.Block>`, compute the component volume fractions. Assess the change in volume of these components within the block as the temperatures of the fuel and structure components are uniformly increased. Note: Thermal expansion is automatically considered with material data defined within :py:mod:`materials <armi.materials>`. """ # ruff: noqa: E402 import collections import matplotlib.pyplot as plt from armi import configure configure(permissive=True) from armi.reactor.flags import Flags from armi.reactor.tests.test_blocks import buildSimpleFuelBlock from armi.utils import tabulate def writeInitialVolumeFractions(b): """Write out the initial temperatures and component volume fractions.""" headers = ["Component", "Temperature, °C", "Volume Fraction"] data = [(c, c.temperatureInC, volFrac) for c, volFrac in b.getVolumeFractions()] print(tabulate.tabulate(data=data, headers=headers) + "\n") def plotVolFracsWithComponentTemps(b, uniformTemps): """Plot the percent change in vol. fractions as fuel/structure temperatures are uniformly increased.""" # Perform uniform temperature modifications of the fuel and structural components. 
componentsToModify = b.getComponents([Flags.FUEL, Flags.CLAD, Flags.DUCT]) initialVols = {} relativeVols = collections.defaultdict(list) for tempInC in uniformTemps: print(f"Updating fuel/structure components to {tempInC} °C") # Modify the fuel/structure components to the same uniform temperature for c in componentsToModify: c.setTemperature(tempInC) writeInitialVolumeFractions(b) # Iterate over all components and calculate the mass and volume fractions for c in b: # Set the initial volume fractions at the first uniform temperature if tempInC == uniformTempsInC[0]: initialVols[c] = c.getVolume() relativeVols[c].append((c.getVolume() - initialVols[c]) / initialVols[c] * 100.0) fig, ax = plt.subplots() for c in b.getComponents(): ax.plot(uniformTempsInC, relativeVols[c], label=c.name) ax.set_title("Component Volume Fractions with Automatic Thermal Expansion") ax.set_ylabel(f"% Change in Volume from {uniformTempsInC[0]} °C") ax.set_xlabel("Uniform Fuel/Structure Temperature, °C") ax.legend() ax.grid() plt.show() uniformTempsInC = [300.0, 400.0, 500.0, 600.0, 700.0] b = buildSimpleFuelBlock() writeInitialVolumeFractions(b) plotVolFracsWithComponentTemps(b, uniformTempsInC) ================================================ FILE: doc/gallery-src/framework/run_chartOfNuclides.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Plot a chart of the nuclides ============================ Use the nuclide directory of ARMI to plot a chart of the nuclides coloring the squares with the natural abundance. """ import matplotlib.pyplot as plt from armi import configure from armi.nucDirectory.nuclideBases import NuclideBases configure(permissive=True) xyc = [] for name, base in NuclideBases().byName.items(): if not base.a: continue xyc.append((base.a - base.z, base.z, base.abundance or 0.5)) x, y, c = zip(*xyc) plt.figure(figsize=(12, 8)) plt.scatter(x, y, c=c, marker="s", s=6) plt.title("Chart of the nuclides") plt.xlabel("Number of neutrons (N)") plt.ylabel("Number of protons (Z)") plt.show() ================================================ FILE: doc/gallery-src/framework/run_computeReactionRates.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Computing Reaction Rates on a Block. ==================================== In this example, a set of 1-group reaction rates (in #/s) are evaluated for a dummy fuel block containing UZr fuel, HT9 structure, and sodium coolant. A dummy multigroup flux is applied. This example also demonstrates how to build a reactor model from code alone rather than relying upon input files. 
""" import matplotlib.pyplot as plt import numpy as np from armi import configure, settings from armi.materials import ht9, sodium, uZr from armi.nuclearDataIO.cccc import isotxs from armi.reactor import assemblies, blocks, geometry, grids, reactors from armi.reactor.components import Circle, DerivedShape, Hexagon from armi.reactor.flags import Flags from armi.tests import ISOAA_PATH configure(permissive=True) def _addFlux(b): """Add dummy 33-group flux to the block.""" # fmt: off b.p.mgFlux = [ 1.6e+11, 2.3e+12, 1.1e+13, 2.6e+13, 4.6e+13, 7.9e+13, 1.4e+14, 2.2e+14, 2.3e+14, 2.7e+14, 2.2e+14, 1.7e+14, 1.3e+14, 1.4e+14, 7.5e+13, 3.2e+13, 2.2e+13, 6.3e+12, 2.2e+13, 1.2e+13, 5.2e+12, 1.5e+12, 1.4e+12, 2.9e+11, 7.4e+10, 5.5e+10, 1.9e+10, 5.0e+09, 3.6e+09, 8.8e+08, 4.3e+09, 1.3e+09, 6.0e+08 ] # fmt: on def createDummyReactor(): """ Create a dummy reactor with a single fuel assembly and a single fuel block. Often, a reactor model like this is built directly from input files rather than from code as done here. """ from armi.reactor.blueprints import Blueprints bp = Blueprints() cs = settings.Settings() r = reactors.Reactor("Reactor", bp) r.add(reactors.Core("Core")) r.core.spatialGrid = grids.HexGrid.fromPitch(1.0) r.core.spatialGrid.symmetry = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC) r.core.spatialGrid.geomType = geometry.GeomType.HEX r.core.spatialGrid.armiObject = r.core r.core.setOptionsFromCs(cs) # Create a single fuel assembly a = assemblies.HexAssembly("fuel assembly") a.spatialGrid = grids.AxialGrid.fromNCells(1) a.spatialLocator = r.core.spatialGrid[1, 0, 0] # Create a single fuel block b = blocks.HexBlock("fuel block") b.setType("fuel") # Create a single fuel component with UZr fuel. dims = {"Tinput": 20, "Thot": 900, "id": 0.0, "od": 2.9, "mult": 7} c = Circle("fuel", uZr.UZr(), **dims) b.add(c) # Create a single structure component with HT9. 
dims = {"Tinput": 20, "Thot": 600, "op": 16.0, "ip": 15.0, "mult": 1} c = Hexagon("structure", ht9.HT9(), **dims) b.add(c) # Fill in the rest of the block with sodium coolant. dims = {"Tinput": 600, "Thot": 600} c = DerivedShape("coolant", sodium.Sodium(), **dims) b.add(c) a.add(b) r.core.add(a) _addFlux(b) return r # Create a dummy reactor with the function defined above. r = createDummyReactor() # Add an example cross section library to the reactor core r.core.lib = isotxs.readBinary(ISOAA_PATH) b = r.core.getFirstBlock(Flags.FUEL) b.expandElementalToIsotopics(r.nuclideBases.byName["NA"]) # Iterate over a few nuclides/elements in the XS library # and collect the total reaction rates in #/s. allRates = [] nucNames = ["U235", "U238", "FE", "NA23"] for nucName in nucNames: rateData = b.getReactionRates(nucName) rateLabels = sorted(rateData.keys()) # will be constant allRates.append([rateData[k] for k in rateLabels]) # plot the reaction rates as a bar graph fig, ax = plt.subplots() width = 1.0 / len(rateLabels) offset = 0.0 for nucName, nucRates in zip(nucNames, allRates): ax.bar( np.arange(len(rateLabels)) + width + offset, nucRates, width=width, label=nucName, ) offset += width ax.set_xticks(np.arange(len(rateLabels)) + 0.5) ax.set_xticklabels(rateLabels) # Add little divider lines between reactions for clarity for border in np.arange(len(rateLabels) - 1): ax.axvline(border + 1, ls="--", alpha=0.4, color="k") ax.set_xlim([0, len(rateLabels)]) plt.yscale("log") plt.legend() plt.title("Reaction rates") plt.xlabel("Reaction type") plt.ylabel("Reaction rate (1/s)") plt.show() ================================================ FILE: doc/gallery-src/framework/run_fuelManagement.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fuel management in a LWR. ========================= Demo of locating and swapping assemblies in a core with Cartesian geometry. Given a burnup distribution, this swaps high burnup assemblies with low ones. Assembly selection for moving and swapping is very flexible using the ARMI API and the high-level language features of Python. This allows highly complex fuel management algorithms to be expressed and parameterized. Because the ARMI framework does not come with a LWR global flux/depletion solver, actual flux/depletion results would need to be provided by a physics plugin before actually using ARMI to do fuel management. Thus, this example applies a dummy burnup distribution for demonstration purposes. 
""" import math from armi import configure from armi.physics.fuelCycle import fuelHandlers from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors from armi.utils import plotting # configure ARMI configure(permissive=True) o, reactor = test_reactors.loadTestReactor(inputFileName="refTestCartesian.yaml") # Apply a dummy burnup distribution roughly in a cosine for b in reactor.core.getBlocks(Flags.FUEL): x, y, z = b.spatialLocator.getGlobalCoordinates() d = math.sqrt(x**2 + y**2) b.p.percentBu = 5 * math.cos(d * math.pi / 2 / 90) # show the initial burnup distribution plotting.plotFaceMap(reactor.core, param="percentBu") fuelHandler = fuelHandlers.FuelHandler(o) candidateAssems = reactor.core.getAssemblies(Flags.FUEL) criterion = lambda a: a.getMaxParam("percentBu") candidateAssems.sort(key=criterion) for num in range(12): # swap the 12 highest burnup assemblies with the 12 lowest burnup ones high = candidateAssems.pop() low = candidateAssems.pop(0) fuelHandler.swapAssemblies(high, low) # re-filter the remaining candidates for more complex selections candidateAssems = [a for a in candidateAssems if a.getMaxParam("percentBu") < 4.0] for num in range(8): high = candidateAssems.pop() low = candidateAssems.pop(0) fuelHandler.swapAssemblies(high, low) # show final burnup distribution plotting.plotFaceMap(reactor.core, param="percentBu") ================================================ FILE: doc/gallery-src/framework/run_grids1_hex.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Make a hex grid. ================ This uses a grid factory method to build an infinite 2-D grid of hexagons with pitch equal to 1.0 cm. Learn more about :py:mod:`grids <armi.reactor.grids>`. """ import math import matplotlib.patches as mpatches import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from armi import configure from armi.reactor import grids configure(permissive=True) hexes = grids.HexGrid.fromPitch(1.0) polys = [] fig, ax = plt.subplots() ax.set_aspect("equal") ax.set_axis_off() for hex_i in hexes.generateSortedHexLocationList(127): x, y, z = hex_i.getGlobalCoordinates() ax.text(x, y, f"{hex_i.i},{hex_i.j}", ha="center", va="center", fontsize=8) polys.append(mpatches.RegularPolygon((x, y), numVertices=6, radius=1 / math.sqrt(3), orientation=math.pi / 2)) patches = PatchCollection(polys, fc="white", ec="k") ax.add_collection(patches) # create a bounding box around patches with a small margin (2%) bbox = patches.get_datalim(ax.transData) bbox = bbox.expanded(1.02, 1.02) ax.set_xlim(bbox.xmin, bbox.xmax) ax.set_ylim(bbox.ymin, bbox.ymax) ax.set_title("(i, j) indices for a hex grid") plt.show() ================================================ FILE: doc/gallery-src/framework/run_grids2_cartesian.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Make a Cartesian grid. ====================== This builds a Cartesian grid with squares 1 cm square, with the z-coordinates provided explicitly. It is also offset in 3D space to X, Y, Z = 10, 5, 5 cm. Learn more about :py:mod:`grids <armi.reactor.grids>`. """ import itertools import matplotlib.pyplot as plt from armi import configure from armi.reactor import grids configure(permissive=True) fig = plt.figure() zCoords = [1, 4, 8] cartesian_grid = grids.CartesianGrid( unitSteps=((1, 0), (0, 1)), bounds=(None, None, zCoords), offset=(10, 5, 5), ) xyz = [] # the grid is infinite in i and j so we will just plot the first 10 items for i, j, k in itertools.product(range(10), range(10), range(len(zCoords) - 1)): xyz.append(cartesian_grid[i, j, k].getGlobalCoordinates()) ax = fig.add_subplot(1, 1, 1, projection="3d") x, y, z = zip(*xyz) ax.scatter(x, y, z) plt.show() ================================================ FILE: doc/gallery-src/framework/run_grids3_rzt.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Make a Theta-R-Z grid. ====================== This builds a 3-D grid in Theta-R-Z geometry by specifying the theta, r, and z dimension bounds explicitly. Learn more about :py:mod:`grids <armi.reactor.grids>`. 
""" import itertools import matplotlib.pyplot as plt import numpy as np from armi import configure from armi.reactor import grids configure(permissive=True) fig = plt.figure() theta = np.linspace(0, 2 * np.pi, 10) rad = np.linspace(0, 10, 10) z = np.linspace(5, 25, 6) rz_grid = grids.ThetaRZGrid(bounds=(theta, rad, z)) xyz = [] for i, j, k in itertools.product(range(len(theta) - 1), range(len(rad) - 1), range(len(z) - 1)): xyz.append(rz_grid[i, j, k].getGlobalCoordinates()) ax = fig.add_subplot(1, 1, 1, projection="3d") x, y, z = zip(*xyz) ax.scatter(x, y, z) plt.show() ================================================ FILE: doc/gallery-src/framework/run_isotxs.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Plotting Multi-group XS from ISOTXS. ==================================== In this example, several cross sections are plotted from an existing binary cross section library file in :py:mod:`ISOTXS <armi.nuclearDataIO.isotxs>` format. 
""" import matplotlib.pyplot as plt from armi import configure from armi.nuclearDataIO.cccc import isotxs from armi.physics.neutronics import energyGroups from armi.tests import ISOAA_PATH configure(permissive=True) gs = energyGroups.getGroupStructure("ANL33") lib = isotxs.readBinary(ISOAA_PATH) fe56 = lib.getNuclide("FE", "AA") u235 = lib.getNuclide("U235", "AA") u238 = lib.getNuclide("U238", "AA") b10 = lib.getNuclide("B10", "AA") plt.step(gs, fe56.micros.nGamma, label=r"Fe (n, $\gamma$)") plt.step(gs, u235.micros.fission, label="U-235 (n, fission)") plt.step(gs, u238.micros.nGamma, label=r"U-238 (n, $\gamma$)") plt.step(gs, b10.micros.nalph, label=r"B-10 (n, $\alpha$)") plt.xscale("log") plt.yscale("log") plt.xlabel("Neutron Energy, eV") plt.ylabel("Cross Section, barns") plt.grid(alpha=0.2) plt.legend() plt.show() ================================================ FILE: doc/gallery-src/framework/run_isotxs2_matrix.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Plotting a multi-group scatter matrix. ====================================== Here we plot scatter matrices from an ISOTXS microscopic cross section library. We plot the inelastic scatter cross section of U235 as well as the (n,2n) source matrix. See Also: :py:mod:`ISOTXS <armi.nuclearDataIO.isotxs>` format. 
""" import matplotlib.pyplot as plt from armi import configure from armi.nuclearDataIO import xsNuclides from armi.nuclearDataIO.cccc import isotxs from armi.tests import ISOAA_PATH configure(permissive=True) lib = isotxs.readBinary(ISOAA_PATH) u235 = lib.getNuclide("U235", "AA") xsNuclides.plotScatterMatrix(u235.micros.inelasticScatter, "U-235 inelastic") plt.figure() xsNuclides.plotScatterMatrix(u235.micros.n2nScatter, "U-235 n,2n src") ================================================ FILE: doc/gallery-src/framework/run_materials.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Listing of Material Library. ============================ This is a listing of all the elements in all the materials that are included in the ARMI material library. Many of the materials in this library are academic in quality and contents. Some have temperature dependent properties, but some don't. You can provide your own proprietary material properties via a plugin. More info about the materials here: :py:mod:`armi.materials`. 
""" import matplotlib.pyplot as plt import numpy as np from armi import configure, materials from armi.nucDirectory.nuclideBases import NuclideBases MAX_Z = 98 # stop at Californium configure(permissive=True) materialNames = [] mats = list(materials.iterAllMaterialClassesInNamespace(materials)) numMats = len(mats) zVals = np.zeros((numMats, MAX_Z)) nuclideBases = NuclideBases() for mi, matCls in enumerate(mats): m = matCls() materialNames.append(m.name) for nucName, frac in m.massFrac.items(): nb = nuclideBases.byName[nucName] idx = mi, nb.z - 1 try: zVals[idx] += frac except IndexError: # respect the MAX_Z bounds pass fig, ax = plt.subplots(figsize=(16, 12)) im = ax.imshow(zVals, cmap="YlGn") ax.set_xticks(np.arange(MAX_Z)) ax.set_yticks(np.arange(numMats)) ax.set_xticklabels(np.arange(MAX_Z) + 1, fontsize=6) ax.set_yticklabels(materialNames) ax.set_xlabel("Proton number (Z)") ax.grid(alpha=0.2, ls="--") ax.set_title("Mass fractions in the ARMI material library") plt.show() ================================================ FILE: doc/gallery-src/framework/run_programmaticReactorDefinition.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Build Reactor Inputs Programmatically. ====================================== Sometimes it's desirable to build input definitions for ARMI using code rather than by writing the textual input files directly. 
In ARMI you can either make the ARMI reactor objects directly, or you can define Blueprints objects. The benefit of making Blueprints objects is that they can in turn be used to create both ARMI reactor objects as well as textual input itself. This is nice when you want to have traceable input files associated with a run that was developed programmatically (e.g. for parameter sweeps). This example shows how to make Blueprints objects programmatically completely from scratch. """ # ruff: noqa: E402 import matplotlib.pyplot as plt from armi import configure # configure ARMI configure(permissive=True) from armi import cases from armi.reactor import blueprints from armi.reactor.blueprints import ( assemblyBlueprint, blockBlueprint, componentBlueprint, gridBlueprint, isotopicOptions, reactorBlueprint, ) from armi.settings import caseSettings from armi.utils import plotting def buildCase(): """Build input components and a case.""" bp = blueprints.Blueprints() bp.customIsotopics = isotopicOptions.CustomIsotopics() bp.nuclideFlags = isotopicOptions.genDefaultNucFlags() components = buildComponents() bp.blockDesigns = buildBlocks(components) bp.assemDesigns = buildAssemblies(bp.blockDesigns) bp.gridDesigns = buildGrids() bp.systemDesigns = buildSystems() cs = caseSettings.Settings() cs.path = None cs.caseTitle = "scripted-case" case = cases.Case(cs=cs, bp=bp) return case def buildComponents(): ISOTHERMAL_TEMPERATURE_IN_C = 450.0 fuel = componentBlueprint.ComponentBlueprint() fuel.name = "fuel" fuel.shape = "Circle" fuel.mult = 217 fuel.material = "UZr" fuel.Tinput = ISOTHERMAL_TEMPERATURE_IN_C fuel.Thot = ISOTHERMAL_TEMPERATURE_IN_C fuel.id = 0.0 fuel.od = 0.4 clad = componentBlueprint.ComponentBlueprint() clad.name = "clad" clad.mult = "fuel.mult" clad.shape = "Circle" clad.material = "HT9" clad.Tinput = ISOTHERMAL_TEMPERATURE_IN_C clad.Thot = ISOTHERMAL_TEMPERATURE_IN_C clad.id = 0.508 clad.od = 0.5842 gap = componentBlueprint.ComponentBlueprint() gap.name = "gap" 
gap.shape = "Circle" gap.mult = "fuel.mult" gap.material = "Void" gap.Tinput = ISOTHERMAL_TEMPERATURE_IN_C gap.Thot = ISOTHERMAL_TEMPERATURE_IN_C gap.id = "fuel.od" gap.od = "clad.id" wire = componentBlueprint.ComponentBlueprint() wire.name = "wire" wire.mult = "fuel.mult" wire.shape = "Helix" wire.material = "HT9" wire.Tinput = ISOTHERMAL_TEMPERATURE_IN_C wire.Thot = ISOTHERMAL_TEMPERATURE_IN_C wire.id = 0.0 wire.od = 0.14224 wire.axialPitch = 30.48 wire.helixDiameter = 0.72644 duct = componentBlueprint.ComponentBlueprint() duct.name = "duct" duct.mult = 1 duct.shape = "Hexagon" duct.material = "HT9" duct.Tinput = ISOTHERMAL_TEMPERATURE_IN_C duct.Thot = ISOTHERMAL_TEMPERATURE_IN_C duct.ip = 11.0109 duct.op = 11.6205 intercoolant = componentBlueprint.ComponentBlueprint() intercoolant.name = "intercoolant" intercoolant.mult = 1 intercoolant.shape = "Hexagon" intercoolant.material = "Sodium" intercoolant.Tinput = ISOTHERMAL_TEMPERATURE_IN_C intercoolant.Thot = ISOTHERMAL_TEMPERATURE_IN_C intercoolant.ip = "duct.op" intercoolant.op = 12.01420 coolant = componentBlueprint.ComponentBlueprint() coolant.name = "coolant" coolant.shape = "DerivedShape" coolant.material = "Sodium" coolant.Tinput = ISOTHERMAL_TEMPERATURE_IN_C coolant.Thot = ISOTHERMAL_TEMPERATURE_IN_C componentBlueprints = {c.name: c for c in [fuel, gap, clad, wire, duct, intercoolant, coolant]} return componentBlueprints def buildBlocks(components): """Build block blueprints.""" blocks = blockBlueprint.BlockKeyedList() fuel = blockBlueprint.BlockBlueprint() fuel.name = "fuel" for cname, c in components.items(): fuel[cname] = c blocks[fuel.name] = fuel reflector = blockBlueprint.BlockBlueprint() reflector.name = "reflector" reflector["coolant"] = components["coolant"] reflector["duct"] = components["duct"] blocks[reflector.name] = reflector return blocks def buildAssemblies(blockDesigns): """Build assembly blueprints.""" fuelBock, reflectorBlock = blockDesigns["fuel"], blockDesigns["reflector"] assemblies = 
assemblyBlueprint.AssemblyKeyedList() fuelAssem = assemblyBlueprint.AssemblyBlueprint() fuelAssem.name = "Fuel" fuelAssem.specifier = "IC" fuelAssem.blocks = blockBlueprint.BlockList() fuelAssem.blocks.extend([reflectorBlock, fuelBock, fuelBock, fuelBock, reflectorBlock]) fuelAssem.height = [10, 20, 20, 20, 10] fuelAssem.xsTypes = ["A"] * 5 fuelAssem.axialMeshPoints = [1] * 5 assemblies[fuelAssem.name] = fuelAssem reflectorAssem = assemblyBlueprint.AssemblyBlueprint() reflectorAssem.name = "Reflector" reflectorAssem.specifier = "RR" reflectorAssem.blocks = blockBlueprint.BlockList() reflectorAssem.blocks.extend([reflectorBlock] * 5) reflectorAssem.height = [10, 20, 20, 20, 10] reflectorAssem.xsTypes = ["A"] * 5 reflectorAssem.axialMeshPoints = [1] * 5 assemblies[reflectorAssem.name] = reflectorAssem return assemblies def buildGrids(): """Build the core map grid.""" coreGrid = gridBlueprint.GridBlueprint("core") coreGrid.geom = "hex" coreGrid.symmetry = "third periodic" coreGrid.origin = gridBlueprint.Triplet() coreGrid.latticeMap = """ RR RR IC RR IC IC RR""" grids = gridBlueprint.Grids() grids["core"] = coreGrid return grids def buildSystems(): """Build the core system.""" systems = reactorBlueprint.Systems() core = reactorBlueprint.SystemBlueprint("core", "core", gridBlueprint.Triplet()) systems["core"] = core return systems if __name__ == "__main__": case = buildCase() # build ARMI objects o = case.initializeOperator() fig = plotting.plotAssemblyTypes( list(case.bp.assemblies.values()), None, showBlockAxMesh=True, ) plt.show() # also write input files case.writeInputs() ================================================ FILE: doc/gallery-src/framework/run_reactorFacemap.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Plot a reactor facemap. ======================= Load a test reactor from the test suite and plot a dummy power distribution from it. You can plot any block parameter. """ from armi import configure from armi.reactor.tests import test_reactors from armi.utils import plotting # configure ARMI configure(permissive=True) operator, reactor = test_reactors.loadTestReactor() reactor.core.growToFullCore(None) # set dummy power for b in reactor.core.getBlocks(): x, y, z = b.spatialLocator.getGlobalCoordinates() b.p.pdens = x**2 + y**2 + z**2 plotting.plotFaceMap(reactor.core, param="pdens", labelFmt="{0:.1e}") ================================================ FILE: doc/gallery-src/framework/run_transmutationMatrix.py ================================================ # Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Transmutation and decay reactions. ================================== This plots some of the transmutation and decay pathways for the actinides and some light nuclides using the burn chain definition that is included with ARMI. 
Note that many of these reactions are shortcut for reactor analysis. For example, a U-238 capture goes directly to NP-239 rather than first going to U-239. Some (n,2n) reactions quickly beta decay, so the transmutation goes right to the product. For the decays, the arrow has been adjusted in width based on the branching ratio. The transmutations are all constant since their rates would depend on the neutron spectrum being modeled. This is mostly a demo of more features of the :py:mod:`armi.nucDirectory` subpackage. Users can input their own transmutation matrix or use this one. A Bateman equation/matrix exponential solver is required to actually *solve* transmutation and decay problems, which can be provided via a plugin. """ import math import os import matplotlib.patches as mpatch import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from armi.context import RES from armi.nucDirectory.nuclideBases import NuclideBases def plotNuc(nb, ax): """Make a square patch for a single nuclide base.""" patch = mpatch.Rectangle((nb.a - nb.z - 0.5, nb.z - 0.5), 1.0, 1.0) rx, ry = patch.get_xy() cx = rx + patch.get_width() / 2.0 # bump label down for metastable nuclides cy = ry + (3 - 2 * nb.state) * patch.get_height() / 4.0 ax.annotate( nb.name, (cx, cy), color="k", weight="normal", fontsize=10, ha="center", va="center", ) return patch def plotAll(xlim, ylim): """Plot all nuclides and transformations.""" # load the burn chain input that comes with ARMI nuclideBases = NuclideBases() with open(os.path.join(RES, "burn-chain.yaml")) as burnChainStream: nuclideBases.imposeBurnChain(burnChainStream) nbs = nuclideBases.instances fig, ax = plt.subplots(figsize=(15, 10)) patches = [] for nb in nbs: if not nb.trans and not nb.decays: # skip nuclides without any transmutations defined pass patch = plotNuc(nb, ax) patches.append(patch) # loop over all possible transmutations and decays and draw arrows for ti, trans in enumerate(nb.trans + nb.decays): product = 
nuclideBases.fromName(trans.productNuclides[0]) if product.z == 0: # skip lumped fission products and DUMP nuclides continue # add index-based y-offset to minimize overlaps x, y, xp, yp = ( nb.a - nb.z, nb.z + ti * 0.05, product.a - product.z, product.z + ti * 0.05, ) if trans in nb.trans: color = "deeppink" else: color = "orangered" ax.annotate( "", (xp, yp), (x, y), arrowprops=dict(width=2 * trans.branch, shrink=0.1, alpha=0.4, color=color), ) # add reaction label towards the middle of the arrow xlabel = xp - (xp - x) * 0.5 ylabel = yp - (yp - y) * 0.5 # pretty up the labels a bit with some LaTeX and rotations rxnType = ( trans.type.replace("nGamma", r"n,$\gamma$") .replace("nalph", r"n,$\alpha$") .replace("ad", r"$\alpha$") .replace("bmd", r"$\beta^-$") .replace("bpd", r"$\beta^+$") ) if xp != x: # rotate the nuclide type label to sit right on the arrow rotation = math.atan((yp - y) / (xp - x)) * 180 / math.pi else: rotation = 0 ax.text(xlabel, ylabel, rxnType, color="grey", ha="center", rotation=rotation) pc = PatchCollection(patches, facecolor="mistyrose", alpha=0.2, edgecolor="black") ax.add_collection(pc) ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_aspect("equal") ax.set_xlabel("Neutrons (N)") ax.set_ylabel("Protons (Z)") ax.set_title("Transmutations and Decays (with branching)") plt.show() ================================================ FILE: doc/getTestResults.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import glob import os import xml.etree.ElementTree as ET THIS_DIR = os.path.dirname(os.path.abspath(__file__)) RESULTS_DIR = os.path.join(THIS_DIR, "..") TEST_RESULTS = [] def parseTestXML(file): """Parse the test result XML file to gather results in a list of dictionaries. Parameters ---------- file : path Path of XML file to be parsed Returns ------- list Dictionaries containing: - File location of the test: 'file' - Class signature of test: 'class' - Method signature of test: 'method' - Runtime of test: 'time' - The result of the test: 'result' (passed, skipped, failure) - Console message when skipped or failed: 'info' """ tree = ET.parse(file) results = [] for testcase in tree.getroot().iter("testcase"): cn = testcase.attrib.get("classname", "unknown") tc_dict = { "file": "/".join(cn.split(".")[:-1]) + ".py", "class": cn.split(".")[-1], "method": testcase.attrib.get("name", "unknown"), "time": float(testcase.attrib.get("time", -1)), "result": "passed", "info": None, } if testcase.find("skipped") is not None: tc_dict["result"] = "skipped" tc_dict["info"] = testcase.find("skipped").attrib["message"] elif testcase.find("failure") is not None: tc_dict["result"] = "failure" tc_dict["info"] = testcase.find("failure").text results.append(tc_dict) return results def getTestResult(app, need, needs): """Dynamic function used by sphinx-needs to gather the result of a test tag.""" if not need["signature"]: return "none" # Get all the tests that match the method signature results = [test_case["result"] for test_case in TEST_RESULTS if need["signature"] == test_case["method"]] # Logic is as follows if there are multiple matches: # - If one is a "failure", then return "failure" # - If all are "skipped", then return "skipped" # - Otherwise, return "passed" if results: if "failure" in results: return "failure" elif "passed" in results: return "passed" else: return "skipped" # 
Things get a little more complicated when the test tag has a class-level signature. # Basically we have to determine if all the methods in the class passed or if any of skipped/failed. # First, gather all the results related to the class signature from the tag and categorize by method results = {} for test_case in TEST_RESULTS: if need["signature"] == test_case["class"]: if test_case["method"] in results: results[test_case["method"]].append(test_case["result"]) else: results[test_case["method"]] = [test_case["result"]] # If we haven't found the test by now, we never will if not results: return "none" # Apply logic from before for each method in the class for m, r in results.items(): if "failure" in r: results[m] = "failure" elif "passed" in r: results[m] = "passed" else: results[m] = "skipped" # Now for the class logic # - If any of the methods failed, return "failure" # - If any of the methods skipped, return "skipped" # - If all of the methods passed, return "passed" if "failure" in results.values(): return "failure" elif "skipped" in results.values(): return "skipped" else: return "passed" # Here is where we fill out all the test results, so it is only done once for file in glob.glob(os.path.join(RESULTS_DIR, "*.xml")): TEST_RESULTS.extend(parseTestXML(file)) if __name__ == "__main__": # Prints results of all the tests found in the repo in a pytest-like way colors = { "passed": "\033[92m", "skipped": "\033[93m", "failure": "\033[91m", "end": "\033[0m", } for testcase in TEST_RESULTS: print( "{} {} {} {}::{}::{}".format( colors[testcase["result"]], testcase["result"].upper(), colors["end"], testcase["file"], testcase["class"], testcase["method"], ) ) ================================================ FILE: doc/glossary.rst ================================================ Glossary ======== Here we define a few specialized terms used in this documentation. .. 
glossary:: ANL Argonne National Laboratory ARMI The Advanced Reactor Modeling Interface is a software system for nuclear reactor design and analysis. assembly A basic structural unit in the reactor core that is stacked together by a list of blocks. Assemblies typically move together in fuel management. block a vertical segment of the assembly consisting of components. burnup Amount of energy that has been extracted from fuel. Can be measured in megawatt-days per kilogram of heavy metal (MWd/kgHM) or in percent of fissionable atoms that have fissioned. BOC Beginning-of-cycle; the state of the core after an outage BOL Beginning-of-life; the fresh-core state of the reactor cladding Material that surrounds nuclear fuel in pins, keeping radionuclides contained. CLI Command Line Interface. The method of interacting with software from a command line. component the basic primitive geometrical body, such as a circle, hex, helix, etc. These have dimensions, temperatures, material properties, and isotopic composition. FIMA Fissions per initial metal atom. This is a unit of measuring burnup as a fraction of the fissionable nuclides that have fissioned. grid plate A reactor structure in a sodium-cooled fast reactor that all the fuel assemblies sit on. GUI Graphical User Interface. The method of interacting with software through a visual display. interface Also named *code interface*; linked to an external program or an internal ARMI module to perform a specific calculation function. An example is the DIF3D interface that makes use of DIF3D diffusion code for core physics calculation. Interfaces are building blocks of ARMI calculations In-Use Tests Automated software test that shows many modules working together in a way that a user would typically use them. Liner A thin layer of material between fuel and cladding intended to impede chemical corrosion and wastage. LWR Light Water Reactor. The predominant kind of commercial nuclear plant in operation today. 
material an object that contains isotopic mass fractions and intrinsic material properties MPI Message passing interface. This is a protocol for exchanging data around a network to run a code in parallel. node A specific point in time in a ARMI case. operator An object that controls the calculation sequence for a specific purpose e.g. a multi-cycle quasi-static depletion calculation. Operators trigger interfaces. parameter A state variable on a reactor, assembly, block, or component object. plenum An empty space inside the cladding tube above the fuel that holds fission gasses and other things that are produced during irradiation. reactor an object consisting of a core full of assemblies and possibly other structures reactor state An instantaneous representation of the physical condition of all components of a reactor, including dimensions, temperatures, composition, material, shape, flux, dose, stress, strain, arrangement, orientation, and so on. smear density A term used to characterize how much room exists inside the cladding for the fuel to expand into. It is defined as the fraction of fuel area divided by total space inside the cladding. TWR Traveling wave reactor: a reactor that uses a breed-and-burn process to achieve most fast reactor advantages without requiring a reprocessing plant. Unit Tests Software tests that check small units of software. V&V Validation and Verification. Validation is showing that code results match physical reality (comparisons with known answers or experiments), and verification is demonstrating that software is built in a way that satisfies its requirements. XTVIEW A TerraPower-developed visualization tool that graphically shows ARMI results that have been added to a database. ================================================ FILE: doc/index.rst ================================================ ==== ARMI ==== .. image:: .static/armi-logo.png .. 
toctree:: :hidden: :maxdepth: 2 readme installation user/index developer/index gallery/index tutorials/index release/index qa_docs/index glossary API Docs <.apidocs/modules> * :doc:`glossary` * :ref:`genindex` * :ref:`modindex` * :ref:`search` ================================================ FILE: doc/installation.rst ================================================ ############ Installation ############ .. include:: user/user_install.rst :start-line: 4 ================================================ FILE: doc/make.bat ================================================ @ECHO OFF pushd %~dp0 REM Windows command file for Sphinx documentation for ARMI REM This can be run locally with make html. if "%PYTHON%" == "" ( set PYTHON=python ) set SOURCEDIR=. if "%BUILDDIR%" == "" ( set BUILDDIR=_build ) if "%PYTHONPATH%" == "" ( set PYTHONPATH=.. ) REM Graphviz and Pandoc binaries are required for auto-generating figures and running notebooks REM during doc building if NOT "%GRAPHVIZ%" == "" ( set PATH="%PATH%";%GRAPHVIZ% ) if NOT "%PANDOC%" == "" ( set PATH="%PATH%";%PANDOC% ) if "%1" == "" goto help %PYTHON% -m sphinx >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx' package was not found. Make sure you have Sphinx installed, then set the echo.SPHINXBUILD environment variable to point to the full path of the 'sphinx-build' echo.executable. Alternatively you may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from: http://sphinx-doc.org/ exit /b 1 ) @ECHO ON %PYTHON% -m sphinx -b %1 %SOURCEDIR% %BUILDDIR%\%1 %SPHINXOPTS% @ECHO OFF goto end :help %PYTHON% -m sphinx -h :end popd ================================================ FILE: doc/qa_docs/index.rst ================================================ ################ QA Documentation ################ This is the Quality Assurance (QA) documentation for the Advanced Reactor Modeling Interface (ARMI) framework. 
This document includes the Software Requirements Specification Document (SRSD), the Software Design and Implementation Document (SDID), and the Software Test Report (STR). ------------- .. toctree:: :maxdepth: 3 :numbered: srsd sdid str scr/index ================================================ FILE: doc/qa_docs/scr/0.1.rst ================================================ Release Notes for ARMI 0.1 ========================== These are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record. ARMI v0.1.7 ----------- Release Date: 2021-08-09 API changes ^^^^^^^^^^^ #. ``geomType`` arguments in most places has been changed to accept a ``GeomType`` enumeration, instead of a string. Some places will still attempt to implicitly convert strings into enum values, but this will eventually be deprecated. #. The ``SystemLayoutInput`` class has been moved into its own module. Instances of the ``SystemLayoutInput`` (usually named ``geom``) often participate in various function signatures. These will be removed soon, as grids now serve this purpose, and ``geom`` objects are largely vestigial. ``SystemLayoutInput`` will be retained to facilitate input migrations. #. Changed block default names so that they are no longer constrained by axial characters. They now are named ``B{assemNum}-{axialIndex}`` to allow arbitrary numbers of blocks. This will invalidate any user setting that includes a block name (e.g. detail assemblies) #. Changed location string labels to be numerical (``001-002-005``) rather than alphanumeric to eliminate a limitation on how many i-indices and k-indices were allowed. This will invalidate any user setting value that includes a location label (e.g. in ``detailAssemsByBOLLocation``). A migration script may be used to assist in migration. #. Removed the ``localization`` module, and shifted most of that exception handling to less custom exception types. 
Though, there were also some functions moved to: ``armi.utils.customExceptions.py``. #. ``Settings`` are now immutable (or nearly so). Bug fixes ^^^^^^^^^ #. Fix bug in loading from databases when multi-index locations are used. ARMI v0.1.6 ----------- Release Date: 2020-12-17 #. Add capability to map flags to current meaning when loading from database. Previously, loading would fail if the meanings of written and current flags did not match exactly. #. Numerous documentation improvements. #. Add support for XDMF visualization file output. #. Add optional flag to ``armi.configure()`` to permit repeated configuration. This aids in certain testing and demonstration contexts. #. Allow for fully-qualified material names in blueprints. Materials take the form of ``module.import.path:MaterialClassName``. #. Disable the use of the fast path in interactive sessions. #. Define ``ARMITESTBASE`` environment variable when configuring ``pytest``. This allows tests to spawn new processes and still find the ARMI test suite. #. Enable full-core expansion of core grid blueprints. Deprecations ^^^^^^^^^^^^ #. Removed ``dumpLocationSnapshot`` setting and related functionality. This is replaced by features of Database, version 3. Database 3 supports history tracking from the database file, and whole reactor models can be loaded for any stored time step, obviating the need for special logic in snapshots. #. Removed ``None`` option to XS ``"geometry"`` setting. #. Removed ``Location`` classes. These were made redundant with grids/spatial locators. #. Removed ``Block.isAnnular()``. #. Remove old "XTView" database format support. Migrating older databases will require checking out an older version of the code. Bugfixes ^^^^^^^^ #. Apply YAML ``!input`` resolution before writing blueprints to database. #. Change default App ``name`` to "armi" instead of "ARMI". This allows ARMI to re-invoke itself, and produce accurate help messages. #. 
Conform R-Z-Theta grid ring/position indices to be 1-based like other grid types. #. Add a check that an ISOTXS library exists before attempting to calculate flux-based reaction rates on mesh conversions. Prior to this, performing mesh conversions without an ISOTXS would lead to a crash. #. Hide ``FAST_PATH`` behind ``context.getFastPath()`` function, allowing it to change. The avoids bugs where code is sensitive to changes to the fast path at runtime. ARMI v0.1.5 ----------- Release Date: 2020-10-15 User-facing enhancements ^^^^^^^^^^^^^^^^^^^^^^^^ #. Add location-based history tracking to Database3. #. Add grid-editor GUI (``grids`` entry point). #. Add support for converting Database files to general-purpose visualization formats (currently supported are VTK and XDMF). #. Add generic fuel-performance plugin. #. Update Hastelloy N and Incoloy 800 materials. #. Add holed rectangle, square component types. #. Add ``syncDbAfterWrite`` setting. #. Add support for explicit Flags input in Blueprints. #. Add glob option to directory changer file retrieval. #. Add Cartesian plotting capabilities. #. Add support for importing unstable nuclides from the RIPL-3 database. #. Numerous documentation and tutorial enhancements. #. Add ``run-suite`` entry point. #. Improve/generalize and make extensible from Plugins the setting rename capability. #. Improve merging behavior of GAMISO and PMATRX files. #. Add ``doTH`` setting. #. Add ``mpiActionRequiresReset`` plugin hook. #. Remove unused entry points (``back-up-db``, ``copy-db``). #. Add thermal-scattering metadata to materials. #. Improve ASCII map capabilities. #. Add the ability to define ex-core Core-like structures in blueprints. This is good for things like spent-fuel pools. #. Minor improvements to ``SuiteBuilder``. Bugfixes ^^^^^^^^ #. Fix issues in uniform mesh conversion. #. Fix order-of-operations issues with string-to-Flags conversions. #. Fix issues with circular ring hex-to-RZ mesh conversion. #. 
Fix bug in HT9 material, which was not properly converting between C and K. Backend changes ^^^^^^^^^^^^^^^ #. Better-formalize Executer classes. #. Improve consistency global flux code. #. Various performance enhancements. #. Add packing/unpacking of Multi-Index Locations in the database. #. Remove deprecated old Settings. #. Remove armiAbsDirFromName. #. Reduce numerical diffusion in mesh mapping operations. #. No longer auto-apply DEPLETABLE Flag when flags explicitly specified. #. Improve behavior of delated neutron fraction settings and parameter values. #. Change assembly ordering to be based on (i, j) indices rather than (ring, pos). #. Remove ``Block.getEnrichment()``, since it is redundant with the ``Composite`` version. #. Remove old block-homogenized number density params. These are still whipped up on the fly when writing to DB. #. Add explicit ``CartesianGrid`` class. #. Remove some unused or design- and physics-related functions from ``Block``. #. Merge ``addComponent()`` with base ``add()``. #. Fix issues with Be material properties. #. Allow setting ``Block`` heights to zero. #. Add a Setting class for handling lists of Flags. #. Greatly improve support for CCCC file reading/writing. ARMI v0.1.4 ----------- Release Date: 2020-02-27 Bugfixes ^^^^^^^^ #. Fix minor output date/time bug. #. Copy Interface inputs in a manner consistent with standard inputs when cloning Cases. ARMI v0.1.3 ----------- Release Date: 2020-02-25 User-facing enhancements ^^^^^^^^^^^^^^^^^^^^^^^^ #. Improved flexibility of nuclide flags input by adding an ``expandTo`` section so users can control precisely which isotopes the elements get expanded into. #. Improved migration system, accessible with ``python -m armi migrate-inputs`` #. Added new material modifications for inputting fuels made of mixtures of two custom isotopic vectors. #. Add YAML ``!include`` support to blueprints files. #. Remove ``latticeFile`` section to grid blueprints. #. 
Allow modification of linked dimensions in ``SuiteBuilder``. Bugfixes ^^^^^^^^ #. SuiteBuilder handles smear density dimension changes for parameter sweeps again #. Fixed broken documentation printout of Flags. #. Ensure that Cases do not think of themselves as their own dependencies. Backend changes ^^^^^^^^^^^^^^^ #. Number fractions are now maintained across elemental expansion subsets. This slightly changes the isotopic composition when, for example, Tungsten is expanded to 4 out of the 5 natural isotopes. #. Add BOL HM mass block parameter. #. Add support for custom parameter serializers for database interaction. #. Formalize Flag reading and writing from/to the database. #. Improve handling of large HDF5 attributes in Database3. ARMI v0.1.2 ----------- Release Date: 2019-11-16 Hotfixes ^^^^^^^^ #. Fixed dependency issue with pympler ARMI v0.1.1 ----------- Release Date: 2019-11-15 User-facing enhancements ^^^^^^^^^^^^^^^^^^^^^^^^ #. Added C5G7 sample LWR inputs #. Slightly improved installation documentation #. Improved ability to input subassembly geometric details by adding grid definitions to blueprints files #. Demoted ``mpi4py`` to an optional requirement to ease installation process. Backend changes ^^^^^^^^^^^^^^^ #. Removed need for geometry object for Reactor construction #. Pushed symmetry and geomType metadata onto spatialGrids #. Turned off auto-conversion of HDF5 DBs to a previous format ARMI v0.1.0 ----------- Release Date: 2019-10-31 Initial public release. ================================================ FILE: doc/qa_docs/scr/0.2.rst ================================================ Release Notes for ARMI 0.2 ========================== These are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record. ARMI v0.2.9 ----------- Release Date: 2023-09-27 What's new in ARMI ^^^^^^^^^^^^^^^^^^ #. Moved the ``Reactor`` assembly number from the global scope to a ``Parameter``. 
(`PR#1383 <https://github.com/terrapower/armi/pull/1383>`_) #. Removed the global ``Settings`` object, ``getMasterCs()``, and ``setMasterCs()``. (`PR#1399 <https://github.com/terrapower/armi/pull/1399>`_) #. Moved the Spent Fuel Pool (``sfp``) from the ``Core`` to the ``Reactor``. (`PR#1336 <https://github.com/terrapower/armi/pull/1336>`_) #. Made the ``sfp`` a child of the ``Reactor`` so it is stored in the database. (`PR#1349 <https://github.com/terrapower/armi/pull/1349>`_) #. Broad cleanup of ``Parameters``: filled in all empty units and descriptions, removed unused params. (`PR#1345 <https://github.com/terrapower/armi/pull/1345>`_) #. Updated some parameter definitions and defaults. (`PR#1355 <https://github.com/terrapower/armi/pull/1355>`_) #. Removed redundant ``Material.name`` variable. (`PR#1335 <https://github.com/terrapower/armi/pull/1335>`_) #. Added ``powerDensity`` as a high-level alternative to ``power`` to configure a ``Reactor``. (`PR#1395 <https://github.com/terrapower/armi/pull/1395>`_) #. Added SHA1 hashes of XS control files to the welcome text. (`PR#1334 <https://github.com/terrapower/armi/pull/1334>`_) Build changes ^^^^^^^^^^^^^ #. Moved from ``setup.py`` to ``pyproject.toml``. (`PR#1409 <https://github.com/terrapower/armi/pull/1409>`_) #. Add python 3.11 to ARMI's CI testing GH actions. (`PR#1341 <https://github.com/terrapower/armi/pull/1341>`_) #. Put back ``avgFuelTemp`` block parameter. (`PR#1362 <https://github.com/terrapower/armi/pull/1362>`_) #. Make cylindrical component block collection less strict about pre-homogenization checks. (`PR#1347 <https://github.com/terrapower/armi/pull/1347>`_) #. Updated some parameter definitions and defaults. (`PR#1355 <https://github.com/terrapower/armi/pull/1355>`_) #. Make the SFP a child of the reactor so it is stored in database. (`PR#1349 <https://github.com/terrapower/armi/pull/1349>`_) #. Update black to version 22.6. (`PR#1396 <https://github.com/terrapower/armi/pull/1396>`_) #. 
Added Python 3.11 to ARMI's CI on GH actions. (`PR#1341 <https://github.com/terrapower/armi/pull/1341>`_) #. Updated ``black`` to version 22.6. (`PR#1396 <https://github.com/terrapower/armi/pull/1396>`_) #. Add a _getNucTempHelper method for CylindricalComponentsAverageBlockCollection. (`PR#1363 <https://github.com/terrapower/armi/pull/1363>`_) Bug fixes ^^^^^^^^^ #. Fixed ``_processIncludes()`` to handle ``StringIO`` input. (`PR#1333 <https://github.com/terrapower/armi/pull/1333>`_) #. Fixed logic for computing thermal expansion factors for axial expansion. (`PR#1342 <https://github.com/terrapower/armi/pull/1342>`_) ARMI v0.2.8 ----------- Release Date: 2023-06-21 What's new in ARMI ^^^^^^^^^^^^^^^^^^ #. Added ``Composite.sort()`` to allow the user to recursively sort any part of the ``Reactor``. (`PR#1280 <https://github.com/terrapower/armi/pull/1280>`_) #. Switching from ``pylint`` to the ``ruff`` linter. (`PR#1296 <https://github.com/terrapower/armi/pull/1296>`_) #. Move cross section group manager Interface stack position to be just before lattice physics. (`PR#1288 <https://github.com/terrapower/armi/pull/1288>`_) #. Add ``interactCoupled`` method for ``SnapshotInterface``. (`PR#1294 <https://github.com/terrapower/armi/pull/1294>`_) #. Calculate weighted-average percent burnup of ``BlockCollections``. (`PR#1265 <https://github.com/terrapower/armi/pull/1265>`_) #. Add method ``sortAssemsByRing`` to sort ``Reactor`` assemblies by spatial location (interior first). (`PR#1320 <https://github.com/terrapower/armi/pull/1320>`_) Bug fixes ^^^^^^^^^ #. Changed ``units.FLOAT_DIMENSION_DECIMALS`` from 10 to 8. (`PR#1183 <https://github.com/terrapower/armi/pull/1183>`_) #. Improved ``HexBlock.getWettedPerimeter()`` to include wire. (`PR#1299 <https://github.com/terrapower/armi/pull/1299>`_) #. Fixed a bug in the ISOTXS file name used for snapshots. (`PR#1277 <https://github.com/terrapower/armi/pull/1277>`_) #. 
Fix a bug in uniform mesh decusping when assemblies of same type have drastically different height. (`PR#1282 <https://github.com/terrapower/armi/pull/1282>`_) #. Sort ``Components`` on ``representativeBlock`` for consistency check. (`PR#1275 <https://github.com/terrapower/armi/pull/1275>`_) ARMI v0.2.7 ----------- Release Date: 2023-05-24 What's new in ARMI ^^^^^^^^^^^^^^^^^^ #. The method ``Material.density3`` is now called ``density``, and the old ``density`` is now called ``pseudoDensity``. (`PR#1163 <https://github.com/terrapower/armi/pull/1163>`_) #. Removed ``metadata`` setting section, and created ``versions``. (`PR#1274 <https://github.com/terrapower/armi/pull/1274>`_) #. Use ``minimumNuclideDensity`` setting when generating macroscopic XS. (`PR#1248 <https://github.com/terrapower/armi/pull/1248>`_) #. Introduce new ``LatticePhysicsFrequency`` setting to control lattice physics calculation. (`PR#1239 <https://github.com/terrapower/armi/pull/1239>`_) #. Added new setting ``assemFlagsToSkipAxialExpansion`` to enable users to list flags of assemblies to skip axial expansion. (`PR#1235 <https://github.com/terrapower/armi/pull/1235>`_) #. Added documentation for the thermal expansion approach used in ARMI. (`PR#1204 <https://github.com/terrapower/armi/pull/1204>`_) #. Use ``TemporaryDirectoryChanger`` for ``executer.run()`` so dirs are cleaned up during run. (`PR#1219 <https://github.com/terrapower/armi/pull/1219>`_) #. New option ``copyOutput`` for globalFluxInterface to not copy output back to working directory. (`PR#1218 <https://github.com/terrapower/armi/pull/1218>`_, `PR#1227 <https://github.com/terrapower/armi/pull/1227>`_) #. `Executer` class has a ``dcType`` attribute to define the type of ``DirectoryChanger`` it will use. (`PR#1228 <https://github.com/terrapower/armi/pull/1228>`_) #. Enabling one-way (upwards) axial expansion of control assemblies. (`PR#1226 <https://github.com/terrapower/armi/pull/1226>`_) #. 
Implement control rod decusping option for uniform mesh converter. (`PR#1229 <https://github.com/terrapower/armi/pull/1229>`_) #. ``createRepresentativeBlocksFromExistingBlocks`` now returns the mapping of original to new XS IDs. (`PR#1217 <https://github.com/terrapower/armi/pull/1217>`_) #. Added a capability to prioritize ``MpiAction`` execution and exclusivity. (`PR#1237 <https://github.com/terrapower/armi/pull/1237>`_) #. Improve support for single component axial expansion and general cleanup of axial expansion unit tests. (`PR#1230 <https://github.com/terrapower/armi/pull/1230>`_) #. New cross section group representative block type for 1D cylindrical models. (`PR#1238 <https://github.com/terrapower/armi/pull/1238>`_) #. Store the axial expansion target component name as a block parameter. (`PR#1256 <https://github.com/terrapower/armi/pull/1256>`_) #. When using non-uniform mesh, detailed fission/activation products have cross sections generated to avoid blocks without xs data. (`PR#1257 <https://github.com/terrapower/armi/pull/1257>`_) #. Fix a bug in database comparison. (`PR#1258 <https://github.com/terrapower/armi/pull/1258>`_) #. Introduce new LatticePhysicsFrequency setting to control lattice physics calculation. (`PR#1239 <https://github.com/terrapower/armi/pull/1239>`_) #. Made sure all material classes could be resolved via name. (`PR#1270 <https://github.com/terrapower/armi/pull/1270>`_) #. Read flux directly from output into Gamma uniform mesh instead of mapping it in from block params. (`PR#1213 <https://github.com/terrapower/armi/pull/1213>`_) #. Forced GAMISO/PMATRX file path extensions to be lower case for linux support. (`PR#1216 <https://github.com/terrapower/armi/pull/1216>`_) Bug fixes ^^^^^^^^^ #. Fixed a bug in database comparison. (`PR#1258 <https://github.com/terrapower/armi/pull/1258>`_) #. Fixed an invalid assumption on the lattice physics and cross section manager interfaces when using tight coupling for snapshot runs. 
(`PR#1206 <https://github.com/terrapower/armi/pull/1206>`_) #. Fixed a bug where the precision used to determine the axial submesh was too small. (`PR#1225 <https://github.com/terrapower/armi/pull/1225>`_) ARMI v0.2.6 ----------- Release Date: 2023-02-09 What's new in ARMI ^^^^^^^^^^^^^^^^^^ #. The ``Material`` class no longer subclasses ``Composite``. (`PR#1062 <https://github.com/terrapower/armi/pull/1062>`_) #. Froze the NumPy version to <= 1.23.5. (`PR#1035 <https://github.com/terrapower/armi/pull/1035>`_) to continue to support NumPy jagged arrays in the DatabaseInterface. #. Split 3 classes in ``database3.py`` into 3 files. (`PR#955 <https://github.com/terrapower/armi/pull/955>`_) #. Split algorithms specific to hex assemblies out of ``FuelHandler``. (`PR#962 <https://github.com/terrapower/armi/pull/962>`_) #. Added 4614 nuclides to decouple the loading of RIPL-3 data from the standard framework run. (`PR#998 <https://github.com/terrapower/armi/pull/998>`_) #. Overhaul of the tight coupling routine in ARMI, and removal of ``looseCoupling`` setting. (`PR #1033 <https://github.com/terrapower/armi/pull/1033>`_) #. Added ``savePhysicsFiles`` setting to copy physics kernel I/O to directories organized by cycle and time step (e.g., c2n1). (`PR#952 <https://github.com/terrapower/armi/pull/952>`_) #. Add ``pinQuantities`` parameter category for block params that have spatial distribution. #. Use ``r.core.p.axialMesh`` instead of ``r.core.refAssem.getAxialMesh()`` for the uniform mesh converter. (`PR#959 <https://github.com/terrapower/armi/pull/959>`_) #. Add group structures for 21- and 94-groups used in photon transport. #. Add block parameter, ``fuelCladLocked``, to track whether or not the fuel and clad are locked. (`PR#1038 <https://github.com/terrapower/armi/pull/1038>`_) #. An explicit fission product modeling option was added. (`PR#1022 <https://github.com/terrapower/armi/pull/1022>`_) #. 
Axially expand from cold to hot before deepcopy of assemblies into reactor; improving speed. (`PR#1047 <https://github.com/terrapower/armi/pull/1047>`_) #. Add a how-to on restart calculations in the docs. #. General improvements to efficiency in uniform mesh conversion. (`PR#1042 <https://github.com/terrapower/armi/pull/1042>`_) #. Allow MCNP material card number to be defined after the card is written. (`PR#1086 <https://github.com/terrapower/armi/pull/1086>`_) #. Refine logic for ``Block.getNumPins()`` to only count components that are actually pins. (`PR#1098 <https://github.com/terrapower/armi/pull/1098>`_) #. Improve handling of peak/max parameters by the ``UniformMeshConverter`` parameter mapper. (`PR#1108 <https://github.com/terrapower/armi/pull/1108>`_) #. Calculate block kgHM and kgFis on core loading and after shuffling. (`PR#1136 <https://github.com/terrapower/armi/pull/1136>`_) #. Calculate block ``PuFrac`` on core loading and after shuffling. (`PR#1165 <https://github.com/terrapower/armi/pull/1165>`_) #. Add setting ``cyclesSkipTightCouplingInteraction`` to skip coupling interaction on specified cycles. (`PR#1173 <https://github.com/terrapower/armi/pull/1173>`_) #. Remove unused ``HCFcoretype`` setting. (`PR#1179 <https://github.com/terrapower/armi/pull/1179>`_) Bug fixes ^^^^^^^^^ #. Fixed ``referenceBlockAxialMesh`` and ``axialMesh`` during process loading. (`PR#980 <https://github.com/terrapower/armi/pull/980>`_) #. Fixed deadelines in MPI cases due to barriers in temp directory changers. #. Fixed the material namespace order for ``test_axialExpansionChanger.py`` persisting after tests. (`PR#1046 <https://github.com/terrapower/armi/pull/1046>`_) #. Fixed the gaseous fission products not being removed from the core directly, but instead the fission yields within the lumped fission products were being adjusted. (`PR#1022 <https://github.com/terrapower/armi/pull/1022>`_) #. 
Fixed non-fuel depletable components not being initialized with all nuclides with the ``explicitFissionProducts`` model. (`PR#1067 <https://github.com/terrapower/armi/pull/1067>`_) #. Fixed consistency between cross section group manager and lattice physics interface for tight coupling. (`PR#1118 <https://github.com/terrapower/armi/pull/1118>`_) #. Fixed numerical diffusion in uniform mesh converter that affects number densities and cumulative parameters like DPA. (`PR#992 <https://github.com/terrapower/armi/pull/992>`_) #. Fix the formula to calculate ``b.p.puFrac``. (`PR#1168 <https://github.com/terrapower/armi/pull/1168>`_) #. Fixed ``Material.densityTimesHeatCapacity()``, moving from pseudo-density to physical density. (`PR#1129 <https://github.com/terrapower/armi/pull/1129>`_) #. Fixed ``TD_frac`` modification on UraniumOxide and MOX was not being applied correctly. #. Fixed Magnessium density curve. (`PR#1126 <https://github.com/terrapower/armi/pull/1126>`_) #. Fixed Potassium density curve. (`PR#1128 <https://github.com/terrapower/armi/pull/1128>`_) #. Fixed Concrete density curve. (`PR#1131 <https://github.com/terrapower/armi/pull/1131>`_) #. Fixed Copper density curve. (`PR#1150 <https://github.com/terrapower/armi/pull/1150>`_) #. Fixed ``Component.density``. (`PR#1149 <https://github.com/terrapower/armi/pull/1149>`_) #. Fixed error where a non-float value could be assigned to a material's mass fraction dictionary. (`PR#1199 <https://github.com/terrapower/armi/pull/1199>`_) #. Fixed interface/event ``runLog.header`` for tight coupling. (`PR#1178 <https://github.com/terrapower/armi/pull/1178>`_) #. Fixed circular import bug in ``reactors.py`` caused by importing settings constants. (`PR#1185 <https://github.com/terrapower/armi/pull/1185>`_) ARMI v0.2.5 ----------- Release Date: 2022-10-24 What's new in ARMI ^^^^^^^^^^^^^^^^^^ #. Cleanup of stale ``coveragerc`` file. (`PR#923 <https://github.com/terrapower/armi/pull/923>`_) #. 
Added `medium` writer style option to ``SettingsWriter``. Added it as arg to modify CLI. (`PR#924 <https://github.com/terrapower/armi/pull/924>`_), and to clone CLI (`PR#932 <https://github.com/terrapower/armi/pull/932>`_). #. Update the EntryPoint class to provide user feedback on required positional arguments. (`PR#922 <https://github.com/terrapower/armi/pull/922>`_) #. Overhaul ``reactor.zones`` tooling and remove application-specific zoning logic. (`PR#943 <https://github.com/terrapower/armi/pull/943>`_) Bug fixes ^^^^^^^^^ #. Adjusted ``density3`` in ``armi/materials/b4C.py`` to include the theoretical density. (`PR#942 <https://github.com/terrapower/armi/pull/942>`_) #. Fixed bug in ``fastFlux`` block parameter mapping in the ``UniformMeshConverter`` by applying it to the ``detailedAxialExpansion`` category. #. Fixed issue where shuffles might duplicate in restart runs. ARMI v0.2.4 ----------- Release Date: 2022-10-03 What's new in ARMI ^^^^^^^^^^^^^^^^^^ #. Added new ``UserPlugin`` functionality. #. Introduced ``axial expansion changer``. #. Greatly improved the ``UniformMeshGeometryConverter``. #. Made the min/max temperatures of ``Material`` curves discoverable. #. Removed the ``PyYaml`` dependency. #. Changed the default Git branch name to ``main``. #. Moved math utilities into their own module. #. Moved ``newReports`` into their final location in ``armi/bookkeeping/report/``. #. Removed ``_swapFluxParam`` method. (`PR#665 <https://github.com/terrapower/armi/pull/665#discussion_r893348409>`__) #. Removed the last usage of ``settingsRules``; now only use ``settingsValidation``. #. Removed separate blueprints in snapshot runs, they must come from the database. (`PR#872 https://github.com/terrapower/armi/pull/872`) #. Added reporting of neutron and gamma energy groups in the XS library ``__repr__``. #. Updated NHFLUX reader to store VARIANT data that was being discarded. #. 
Store thermally expanded block heights at BOL in ``armi/reactor/reactors.py::Core::processLoading``. #. Added neutronics settings: ``inners`` and ``outers`` for downstream support. #. Removed unused Thermal Hydraulics settings. #. Replaced setting ``stationaryBlocks`` with ``stationaryBlockFlags`` setting. (`PR#665 <https://github.com/terrapower/armi/pull/665>`__)) #. Changed the default value of the ``trackAssems`` setting to ``False``. #. Add setting ``inputHeightsConsideredHot`` to enable thermal expansion of assemblies at BOL. Bug fixes ^^^^^^^^^ #. Fixed issues finding ``ISOXX`` files cross-platform. #. Fixed issues in ``growToFullCore``. #. Fixed issue in the ARMI memory profiler. #. Fixed issue in linear expansion in ``Alloy200``. #. Fixed issue in ``armi/reactor/components/complexShapes.py::Helix::getCircleInnerDiameter`` #. Fixed issue with axial expansion changer in ``armi/reactor/reactors.py::Core::processLoading``. #. Fixed issue in how number densities are initialized for components. #. Fixed issue in ``armi/cases/case.py::copyInterfaceInputs`` #. Fixed issue in ``armi/reactor/components/component.py::getReac`` #. Fixed issue in ``armi/reactor/converters/uniformMesh.py`` was clearing out unchanged param data. #. Fixed issue where components were different if initialized through blueprints vs init. #. Fixed issue where component mass was conserved in axial expansion instead of density. (`PR#846 <https://github.com/terrapower/armi/pull/846>`_) #. Fixed issue in ``HexBlock::rotatePins`` failed to modify ``pinLocation`` param. (`#855 <https://github.com/terrapower/armi/pull/855>`_) #. Fixed issue in ``Core::_applyThermalExpansion`` failed to call ``block.completeInitiaLoading``. (`#885 <https://github.com/terrapower/armi/pull/885>`_) #. Fixed issue where a validator would complain both simple and detailed cycles settings were used. #. Fixed issue where ``getReactionRates()`` was not accounting for burnup-dependent cross-sections. 
ARMI v0.2.3 ----------- Release Date: 2022-02-08 What's new in ARMI ^^^^^^^^^^^^^^^^^^ #. Upgrading the version of NumPy for a security alert. (`PR#530 <https://github.com/terrapower/armi/pull/530>`_) #. Upgraded ThoriumOxide material. (`PR#548 <https://github.com/terrapower/armi/pull/548>`_) #. Upgraded Lithium material. (`PR#546 <https://github.com/terrapower/armi/pull/546>`_) #. Improved ``Helix`` class. (`PR#558 <https://github.com/terrapower/armi/pull/558>`_) Bug fixes ^^^^^^^^- #. Fixed issue where UML diagrams weren't being generated in docs. (`#550 <https://github.com/terrapower/armi/issues/550>`_) #. Fixed issue with Inconel Alloy 617. (`PR#557 <https://github.com/terrapower/armi/pull/557>`_) ARMI v0.2.2 ----------- Release Date: 2022-01-19 What's new in ARMI v0.2.2 ^^^^^^^^^^^^^^^^^^^^^^^^- #. Improved type hinting. #. Flushed out the ability to build the docs as PDF. #. Material modifications can now be made per-component. #. The ``loadOperator`` method now has the optional ``allowMissing`` argument. Bug fixes ^^^^^^^^^ #. Fixed issue where copying a ``Setting`` with a defined list of options would throw an error. (`PR#540 <https://github.com/terrapower/armi/pull/540>`_) ARMI v0.2.1 ----------- Release Date: 2022-01-13 What's new in ARMI v0.2.1 ^^^^^^^^^^^^^^^^^^^^^^^^^ #. Added new reference data for lumped fission products. (`#507 <https://github.com/terrapower/armi/issues/507>`_) Bug fixes ^^^^^^^^^ #. Fixed issue where grid GUI was not saving lattice maps. (`#490 <https://github.com/terrapower/armi/issues/490>`_) #. Fixed issue where SettingsModifier was using old Settings API. (`#500 <https://github.com/terrapower/armi/issues/500>`_) #. Fixed issue where copying a Setting only copied the default value. (`PR#534 <https://github.com/terrapower/armi/pull/534>`_) ARMI v0.2.0 ----------- Release Date: 2021-11-19 The API has started to solidify, and the number of external-facing changes have started to slow down. 
This release is a stake in the ground on a stable API. What's new in ARMI v0.2.0 ^^^^^^^^^^^^^^^^^^^^^^^^^ #. Made user settings immutable to avoid confusing runtime behavior. #. Removed the concept of 'facemaps' (now replaced with more general grids). #. Added ability to use module-level logging for more precise debugging. #. Added ability to write full tips-up hex asciimaps. #. Fixed ability to serialize grid blueprints. #. Improved code coverage and linting. #. Added a latin hypercube suite builder for parameter sweeps. #. Added several clarifications, fixes, and updates to documentation. #. Updated units labels on several parameters. #. Added protections against deleting directories. #. Updated spontaneous fission data. #. Removed confusing Charge Fuel Pool from core. #. Sped up YAML reading. #. Removed localization module. #. Added ANL116 energy group structure. #. Added setting to control auto-creation of within-block grids. #. Added new plot/summarizing capabilities. #. Added ability for GUI to save map as image. #. Added C5G7 compositions and dimensions to LWR tutorial. #. Added 1d/2d mesh reading/writing to GEODST. Backwards incompatible changes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There may be some new errors based on updated input checking. Bug fixes ^^^^^^^^^ #. Fixed centering of full-symmetry Cartesian lattice maps. #. Fixed issues with grids that had multi-index locations. #. Removed test files from coverage check. #. Fixed order of operations issue in ``rotatePins``. #. Fixed incorrect multiplicity for non-grid block components. #. Many additional bugfixes and cleanups (see PR list). ================================================ FILE: doc/qa_docs/scr/0.3.rst ================================================ Release Notes for ARMI 0.3 ========================== These are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record. 
ARMI v0.3.0 ----------- Release Date: 2024-02-02 What's new in ARMI? ^^^^^^^^^^^^^^^^^^^ #. The ``_copyInputsHelper()`` gives relative path and not absolute after copy. (`PR#1416 <https://github.com/terrapower/armi/pull/1416>`_) #. Attempt to set representative block number densities by component if possible. (`PR#1412 <https://github.com/terrapower/armi/pull/1412>`_) #. Use ``functools`` to preserve function attributes when wrapping with ``codeTiming.timed``. (`PR#1466 <https://github.com/terrapower/armi/pull/1466>`_) #. Remove a number of deprecated block, assembly, and core parameters related to a defunct internal plugin. Bug Fixes ^^^^^^^^^ #. ``StructuredGrid.getNeighboringCellIndices()`` was incorrectly implemented for the second neighbor. (`PR#1614 <https://github.com/terrapower/armi/pull/1614>`_) Quality Work ^^^^^^^^^^^^ #. ARMI now mandates ``ruff`` linting. (`PR#1419 <https://github.com/terrapower/armi/pull/1419>`_) #. Many new references to requirement tests and implementations were added to docstrings. #. Removed all old ARMI requirements, to start the work fresh. (`PR#1438 <https://github.com/terrapower/armi/pull/1438>`_) #. Downgrading Draft PRs as policy. (`PR#1444 <https://github.com/terrapower/armi/pull/1444>`_) ================================================ FILE: doc/qa_docs/scr/0.4.rst ================================================ Release Notes for ARMI 0.4 ========================== These are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record. ARMI v0.4.0 ----------- Release Date: 2024-07-29 New Features ^^^^^^^^^^^^ #. Conserve mass by component in ``assembly.setBlockMesh()``. (`PR#1665 <https://github.com/terrapower/armi/pull/1665>`_) #. Removal of the ``Block.reactor`` property. (`PR#1425 <https://github.com/terrapower/armi/pull/1425>`_) #. System information is now also logged on Linux. (`PR#1689 <https://github.com/terrapower/armi/pull/1689>`_) #. 
Reset ``Reactor`` data on worker processors after every interaction to free memory from state distribution. (`PR#1729 <https://github.com/terrapower/armi/pull/1729>`_ and `PR#1750 <https://github.com/terrapower/armi/pull/1750>`_) #. Density can be specified for components via ``custom isotopics`` in the blueprints. (`PR#1745 <https://github.com/terrapower/armi/pull/1745>`_) #. Implement a new ``JaggedArray`` class that handles HDF5 interface for jagged data. (`PR#1726 <https://github.com/terrapower/armi/pull/1726>`_) #. Adding temperature dependent representative blocks to cross section group manager. (`PR#1987 <https://github.com/terrapower/armi/pull/1987>`_) API Changes ^^^^^^^^^^^ #. Replacing the concrete material with a better reference. (`PR#1717 <https://github.com/terrapower/armi/pull/1717>`_) #. Adding more detailed time information to logging. (`PR#1796 <https://github.com/terrapower/armi/pull/1796>`_) #. Renaming ``structuredgrid.py`` to camelCase. (`PR#1650 <https://github.com/terrapower/armi/pull/1650>`_) #. Removing unused argument from ``Block.coords()``. (`PR#1651 <https://github.com/terrapower/armi/pull/1651>`_) #. Removing unused method ``HexGrid.allPositionsInThird()``. (`PR#1655 <https://github.com/terrapower/armi/pull/1655>`_) #. Removed unused methods: ``Reactor.getAllNuclidesIn()``, ``plotTriangleFlux()``. (`PR#1656 <https://github.com/terrapower/armi/pull/1656>`_) #. Removed ``armi.utils.dochelpers``; not relevant to nuclear modeling. (`PR#1662 <https://github.com/terrapower/armi/pull/1662>`_) #. Removing old tools created to help people convert to the current database format: ``armi.bookkeeping.db.convertDatabase()`` and ``ConvertDB``. (`PR#1658 <https://github.com/terrapower/armi/pull/1658>`_) #. Removing the unused method ``Case.buildCommand()``. (`PR#1773 <https://github.com/terrapower/armi/pull/1773>`_) #. Removed the variable ``armi.physics.neutronics.isotopicDepletion.ORDER``. 
(`PR#1671 <https://github.com/terrapower/armi/pull/1671>`_) #. Removing extraneous ``ArmiOjbect`` methods. (`PR#1667 <https://github.com/terrapower/armi/pull/1667>`_) * Moving ``ArmiObject.getBoronMassEnrich()`` to ``Block``. * Moving ``ArmiObject.getPuMoles()`` to ``Block``. * Moving ``ArmiObject.getUraniumMassEnrich()`` to ``Block``. * Removing ``ArmiObject.getMaxUraniumMassEnrich.()``. * Removing ``ArmiObject.getMaxVolume()`` & ``Block.getMaxVolume()``. * Removing ``ArmiObject.getPuFrac()``. * Removing ``ArmiObject.getPuMass()``. * Removing ``ArmiObject.getPuN()``. * Removing ``ArmiObject.getZrFrac()``. * Removing ``ArmiObject.printDensities()``. * Moving ``Composite.isOnWhichSymmetryLine()`` to ``Assembly``. * Removing ``Block.isOnWhichSymmetryLine()``. #. Removing the ``Block.reactor`` property. (`PR#1425 <https://github.com/terrapower/armi/pull/1425>`_) #. Moving several ``ArmiObject`` methods. (`PR#1425 <https://github.com/terrapower/armi/pull/1425>`_) * Moving ``ArmiObject.getNeutronEnergyDepositionConstants`` to ``Block``. * Moving ``ArmiObject.getGammaEnergyDepositionConstants`` to ``Block``. * Moving ``ArmiObject.getTotalEnergyGenerationConstants`` to ``Block``. * Moving ``ArmiObject.getFissionEnergyGenerationConstants`` to ``Block``. * Moving ``ArmiObject.getCaptureEnergyGenerationConstants`` to ``Block``. #. Removing the parameter ``rdIterNum``. (`PR#1704 <https://github.com/terrapower/armi/pull/1704>`_) #. Removing the parameters ``outsideFuelRing`` and ``outsideFuelRingFluxFr``. (`PR#1700 <https://github.com/terrapower/armi/pull/1700>`_) #. Removing the setting ``doOrificedTH``. (`PR#1706 <https://github.com/terrapower/armi/pull/1706>`_) #. Changing the Doppler constant params to ``VOLUME_INTEGRATED``. (`PR#1659 <https://github.com/terrapower/armi/pull/1659>`_) #. Change ``Operator._expandCycleAndTimeNodeArgs`` to be a non-static method. (`PR#1766 <https://github.com/terrapower/armi/pull/1766>`_) #. 
Database now writes state at the last time node of a cycle rather than during the ``DatabaseInterface.interactEOC`` interaction. (`PR#1090 <https://github.com/terrapower/armi/pull/1090>`_) #. Renaming ``b.p.buGroup`` to ``b.p.envGroup``. Environment group captures both burnup and temperature. (`PR#1987 <https://github.com/terrapower/armi/pull/1987>`_) Bug Fixes ^^^^^^^^^ #. Fixed four bugs with "corners up" hex grids. (`PR#1649 <https://github.com/terrapower/armi/pull/1649>`_) #. Fixed ``safeCopy`` to work on both Windows and Linux with strict permissions. (`PR#1691 <https://github.com/terrapower/armi/pull/1691>`_) #. When creating a new XS group, inherit settings from initial group. (`PR#1653 <https://github.com/terrapower/armi/pull/1653>`_, `PR#1751 <https://github.com/terrapower/armi/pull/1751>`_) #. Fixed a bug with ``Core.getReactionRates``. (`PR#1771 <https://github.com/terrapower/armi/pull/1771>`_) #. Fixed a bug with interactive versus batch mode checking on windows versus linux. (`PR#1786 <https://github.com/terrapower/armi/pull/1786>`_) Quality Work ^^^^^^^^^^^^ #. Creating a single-block test reactor, to speed up unit tests. (`PR#1737 <https://github.com/terrapower/armi/pull/1737>`_) #. Supporting MacOS in CI. (`PR#1713 <https://github.com/terrapower/armi/pull/1713>`_) #. We now enforce a maximum line length of 120 characters, using ``ruff``. (`PR#1646 <https://github.com/terrapower/armi/pull/1646>`_) #. Updating ``ruff`` to version ``0.5.1``. (`PR#1770 <https://github.com/terrapower/armi/pull/1770>`_) #. Move ``.coveragerc`` file information into ``pyproject.toml``. (`PR#1692 <https://github.com/terrapower/armi/pull/1692>`_) Changes that Affect Requirements ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. Removing unused argument to ``Block.coords()``. (`PR#1651 <https://github.com/terrapower/armi/pull/1651>`_) #. Touched ``HexGrid`` by adding a "cornersUp" property and fixing two bugs. (`PR#1649 <https://github.com/terrapower/armi/pull/1649>`_) #. 
Very slightly modified the implementation of ``Assembly.add()``. (`PR#1670 <https://github.com/terrapower/armi/pull/1670>`_) ================================================ FILE: doc/qa_docs/scr/0.5.rst ================================================ Release Notes for ARMI 0.5 ========================== These are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record. ARMI v0.5.1 ----------- Release Date: 2025-03-14 This release was all about QA documentation. We open-sourced our QA documentation, even our software requirements. There were also several bug fixes. New Features ^^^^^^^^^^^^ #. Move instead of copy files from ``TemporaryDirectoryChanger``. (`PR#2022 <https://github.com/terrapower/armi/pull/2022>`_) #. Creating the ``armi.testing`` module, to share ARMI testing tools. (`PR#2028 <https://github.com/terrapower/armi/pull/2028>`_) #. Using inner diameter for sorting components when outer diameter is identical. (`PR#1882 <https://github.com/terrapower/armi/pull/1882>`_) #. Invoking ``component.material.density()`` does not log an expensive stack trace for fluids. (`PR#2075 <https://github.com/terrapower/armi/pull/2075>`_) #. ARMI will now try to use the ``/tmp/`` directory for its temp files, on Linux and MacOS. (`PR#2092 <https://github.com/terrapower/armi/pull/2092>`_) API Changes ^^^^^^^^^^^ #. Removing ``Database3`` from the API, use ``Database``. (`PR#2052 <https://github.com/terrapower/armi/pull/2052>`_) Bug Fixes ^^^^^^^^^ #. Fixing check for jagged arrays during ``_writeParams``. (`PR#2051 <https://github.com/terrapower/armi/pull/2051>`_) #. Fixing BP-section ignoring tool in ``PassiveDBLoadPlugin``. (`PR#2055 <https://github.com/terrapower/armi/pull/2055>`_) #. Making sure SFPs have spatial grids. (`PR#2082 <https://github.com/terrapower/armi/pull/2082>`_) #. Fixing number densities when custom isotopics are combined with Fluid components. 
(`PR#2071 <https://github.com/terrapower/armi/pull/2071>`_) #. Fixing scaling of volume-integrated parameters on edge assemblies. (`PR#2060 <https://github.com/terrapower/armi/pull/2060>`_) #. Fixing strictness of ``HexGrid`` rough equality check. (`PR#2058 <https://github.com/terrapower/armi/pull/2058>`_) #. Fixing treatment of symmetry factors when calculating component flux and reaction rates. (`PR#2068 <https://github.com/terrapower/armi/pull/2068>`_) Quality Work ^^^^^^^^^^^^ #. Open-Sourcing the ARMI requirements. (`PR#2076 <https://github.com/terrapower/armi/pull/2076>`_) #. Significant revamp of the ARMI requirements. (`PR#2074 <https://github.com/terrapower/armi/pull/2074>`_) #. Adding PDF versions of the ARMI docs. (`PR#2072 <https://github.com/terrapower/armi/pull/2072>`_) #. Update docs build to occur with Python 3.13 and updated docs dependencies. (`PR#2050 <https://github.com/terrapower/armi/pull/2050>`_) #. Removing silent overwrite of ``shutil.copy``. (`PR#2081 <https://github.com/terrapower/armi/pull/2081>`_) ARMI v0.5.0 ----------- Release Date: 2024-12-14 New Features ^^^^^^^^^^^^ #. Supporting Python 3.12. (`PR#1813 <https://github.com/terrapower/armi/pull/1813>`_) #. Supporting Python 3.13. (`PR#1996 <https://github.com/terrapower/armi/pull/1996>`_) #. Adding data models for ex-core structures in ARMI. (`PR#1891 <https://github.com/terrapower/armi/pull/1891>`_) #. Opening some DBs without the ``App`` that created them. (`PR#1917 <https://github.com/terrapower/armi/pull/1917>`_) #. Adding support for ENDF/B-VII.1-based MC2-3 libraries. (`PR#1982 <https://github.com/terrapower/armi/pull/1982>`_) #. Adding setting ``mcnpLibraryVersion`` to chosen ENDF library for MCNP. (`PR#1989 <https://github.com/terrapower/armi/pull/1989>`_) #. Removing the ``tabulate`` dependency by ingesting it to ``armi.utils.tabulate``. (`PR#1811 <https://github.com/terrapower/armi/pull/1811>`_) #. ``HexBlock.rotate`` updates the spatial locator for children of that block. 
(`PR#1943 <https://github.com/terrapower/armi/pull/1943>`_) #. Provide ``Block.getInputHeight`` for determining the height of a block from blueprints. (`PR#1927 <https://github.com/terrapower/armi/pull/1927>`_) #. Provide ``Parameter.hasCategory`` for quickly checking if a parameter is defined with a given category. (`PR#1899 <https://github.com/terrapower/armi/pull/1899>`_) #. Provide ``ParameterCollection.where`` for efficient iteration over parameters whose definition matches a given condition. (`PR#1899 <https://github.com/terrapower/armi/pull/1899>`_) #. Flags can now be defined with letters and numbers. (`PR#1966 <https://github.com/terrapower/armi/pull/1966>`_) #. Provide utilities for determining location of a rotated object in a hexagonal lattice (``getIndexOfRotatedCell``). (`PR#1846 <https://github.com/terrapower/armi/pull/1846>`_) #. Allow merging a component with zero area into another component. (`PR#1858 <https://github.com/terrapower/armi/pull/1858>`_) #. New plugin hook ``getAxialExpansionChanger`` to customize axial expansion. (`PR#1870 <https://github.com/terrapower/armi/pull/1870>`_) #. New plugin hook ``beforeReactorConstruction`` to process settings before reactor init. (`PR#1945 <https://github.com/terrapower/armi/pull/1945>`_) #. Improving performance in the lattice physics interface by not updating cross sections at ``everyNode`` during coupled calculations. (`PR#1963 <https://github.com/terrapower/armi/pull/1963>`_) #. Updating ``copyOrWarn`` and ``getFileSHA1Hash`` to support directories. (`PR#1984 <https://github.com/terrapower/armi/pull/1984>`_) #. Improve efficiency of reaction rate calculations. (`PR#1887 <https://github.com/terrapower/armi/pull/1887>`_) #. Adding new options for simplifying 1D cross section modeling. (`PR#1949 <https://github.com/terrapower/armi/pull/1949>`_) #. 
Adding ``--skip-inspection`` flag to ``CompareCases`` CLI. (`PR#1842 <https://github.com/terrapower/armi/pull/1842>`_) #. Exposing skip inspection options for ``armi.init`` and ``db.loadOperator``. (`PR#2005 <https://github.com/terrapower/armi/pull/2005>`_) #. Exposing ``detailedNDens`` to components. (`PR#1954 <https://github.com/terrapower/armi/pull/1954>`_) #. Adding a method ``getPinMgFluxes`` to get pin-wise multigroup fluxes from a Block. (`PR#1990 <https://github.com/terrapower/armi/pull/1990>`_) API Changes ^^^^^^^^^^^ #. ``nuclideBases.byMcc3ID`` and ``getMcc3Id()`` return IDs consistent with ENDF/B-VII.1. (`PR#1982 <https://github.com/terrapower/armi/pull/1982>`_) #. Moving ``settingsValidation`` from ``operators`` to ``settings``. (`PR#1895 <https://github.com/terrapower/armi/pull/1895>`_) #. Allowing for unknown Flags when opening a DB. (`PR#1844 <https://github.com/terrapower/armi/pull/1844>`_) #. Renaming ``Reactor.moveList`` to ``Reactor.moves``. (`PR#1881 <https://github.com/terrapower/armi/pull/1881>`_) #. Transposing ``pinMgFluxes`` parameters so that leading dimension is pin index. (`PR#1937 <https://github.com/terrapower/armi/pull/1937>`_) #. ``Block.getPinCoordinates`` returns an ``(N, 3)`` array, rather than a list of arrays. (`PR#1943 <https://github.com/terrapower/armi/pull/1943>`_) #. Alphabetizing ``Flags.toString()`` results. (`PR#1912 <https://github.com/terrapower/armi/pull/1912>`_) #. ``copyInterfaceInputs`` no longer requires a valid setting object. (`PR#1934 <https://github.com/terrapower/armi/pull/1934>`_) #. Changing ``syncDbAfterWrite`` default to ``True``. (`PR#1968 <https://github.com/terrapower/armi/pull/1968>`_) #. Removing ``Assembly.rotatePins`` and ``Block.rotatePins``. Prefer ``Assembly.rotate`` and ``Block.rotate``. (`PR#1846 <https://github.com/terrapower/armi/pull/1846>`_) #. Removing broken plot ``buVsTime``. (`PR#1994 <https://github.com/terrapower/armi/pull/1994>`_) #. 
Removing class ``AssemblyList`` and ``assemblyLists.py``. (`PR#1891 <https://github.com/terrapower/armi/pull/1891>`_) #. Removing class ``globalFluxInterface.DoseResultsMapper``. (`PR#1952 <https://github.com/terrapower/armi/pull/1952>`_) #. Removing class ``SmartList``. (`PR#1992 <https://github.com/terrapower/armi/pull/1992>`_) #. Removing flags ``CORE`` and ``REACTOR``. (`PR#1835 <https://github.com/terrapower/armi/pull/1835>`_) #. Removing method ``Assembly.doubleResolution()``. (`PR#1951 <https://github.com/terrapower/armi/pull/1951>`_) #. Removing method ``buildEqRingSchedule``. (`PR#1928 <https://github.com/terrapower/armi/pull/1928>`_) #. Removing method ``prepSearch``. (`PR#1845 <https://github.com/terrapower/armi/pull/1845>`_) #. Removing method ``SkippingXsGen_BuChangedLessThanTolerance``. (`PR#1845 <https://github.com/terrapower/armi/pull/1845>`_) #. Removing setting ``autoGenerateBlockGrids``. (`PR#1947 <https://github.com/terrapower/armi/pull/1947>`_) #. Removing setting ``mpiTasksPerNode`` and renaming ``numProcessors`` to ``nTasks``. (`PR#1958 <https://github.com/terrapower/armi/pull/1958>`_) #. History Tracker: "detail assemblies" are now fuel and control assemblies. (`PR#1990 <https://github.com/terrapower/armi/pull/1990>`_) #. Removing ``Block.breakFuelComponentsIntoIndividuals()``. (`PR#1990 <https://github.com/terrapower/armi/pull/1990>`_) #. Moving ``getPuMoles`` from blocks.py up to composites.py. (`PR#1990 <https://github.com/terrapower/armi/pull/1990>`_) #. Requiring ``buReducingAssemblyRotation`` and ``getOptimalAssemblyOrientation`` to have pin-level burnup. (`PR#2019 <https://github.com/terrapower/armi/pull/2019>`_) Bug Fixes ^^^^^^^^^ #. Fixed spatial grids of pins in Blocks on flats-up grids. (`PR#1947 <https://github.com/terrapower/armi/pull/1947>`_) #. Fixed ``DerivedShape.getArea`` for ``cold=True``. (`PR#1831 <https://github.com/terrapower/armi/pull/1831>`_) #. Fixed error parsing command line integers in ``ReportsEntryPoint``. 
(`PR#1824 <https://github.com/terrapower/armi/pull/1824>`_) #. Fixed ``PermissionError`` when using ``syncDbAfterWrite``. (`PR#1857 <https://github.com/terrapower/armi/pull/1857>`_) #. Fixed ``MpiDirectoryChanger``. (`PR#1853 <https://github.com/terrapower/armi/pull/1853>`_) #. Changed data type of ``thKernel`` setting from ``bool`` to ``str`` in ``ThermalHydraulicsPlugin``. (`PR#1855 <https://github.com/terrapower/armi/pull/1855>`_) #. Update height of fluid components after axial expansion. (`PR#1828 <https://github.com/terrapower/armi/pull/1828>`_) #. Rotate hexagonal assembly patches correctly on facemap plots. (`PR#1883 <https://github.com/terrapower/armi/pull/1883>`_) #. Material theoretical density is serialized to and read from database. (`PR#1852 <https://github.com/terrapower/armi/pull/1852>`_) #. Removed broken and unused column in ``summarizeMaterialData``. (`PR#1925 <https://github.com/terrapower/armi/pull/1925>`_) #. Fixed hex block rotation in ``plotBlockDiagram``. (`PR#1926 <https://github.com/terrapower/armi/pull/1926>`_) #. Fixed edge case in ``assemblyBlueprint._checkParamConsistency()``. (`PR#1928 <https://github.com/terrapower/armi/pull/1928>`_) #. Fixed wetted perimeter for hex inner ducts. (`PR#1985 <https://github.com/terrapower/armi/pull/1985>`_) #. Fixing number densities when custom isotopics and material properties are combined. (`PR#1822 <https://github.com/terrapower/armi/pull/1822>`_) Quality Work ^^^^^^^^^^^^ #. Removing deprecated code ``axialUnitGrid``. (`PR#1809 <https://github.com/terrapower/armi/pull/1809>`_) #. Refactoring ``axialExpansionChanger``. (`PR#1861 <https://github.com/terrapower/armi/pull/1861>`_) #. Raising a ``ValueError`` when ``Database.load()`` fails. (`PR#1940 <https://github.com/terrapower/armi/pull/1940>`_) #. Making axial expansion-related classes more extensible. 
(`PR#1920 <https://github.com/terrapower/armi/pull/1920>`_) ================================================ FILE: doc/qa_docs/scr/0.6.rst ================================================ Release Notes for ARMI 0.6 ========================== Here you will find the release notes for previous ARMI releases. ARMI v0.6.4 ----------- Release Date: 2026-03-25 This was a very short-burn release. The biggest new feature is the addition of matProps, the materials library. This tool allows developers to flexibly define materials with properties in several flexible ways. This release also included a lot of clean up work, such as moving several parameters and settings out of ARMI. The HistoryTracker EOL interaction was optimized. And a block converter was added for mixed-pin assemblies. Code Changes, Features ^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2453 <https://github.com/terrapower/armi/pull/2453>`_) Adding matProps: a material library #. (`PR#2405 <https://github.com/terrapower/armi/pull/2405>`_) Remove ruamel.yaml dependency pin maximum #. (`PR#2436 <https://github.com/terrapower/armi/pull/2436>`_) Optimizing the HistoryTracker EOL interaction #. (`PR#2442 <https://github.com/terrapower/armi/pull/2442>`_) Defaulting the Database to read mode #. (`PR#2477 <https://github.com/terrapower/armi/pull/2477>`_) Adding a block converter for mixed pin assemblies #. (`PR#2478 <https://github.com/terrapower/armi/pull/2478>`_) Checking for unrepresented XS IDs #. (`PR#2479 <https://github.com/terrapower/armi/pull/2479>`_) Adding method to DefaultExecuter for final parameter updates Code Changes, Bugs and Fixes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2468 <https://github.com/terrapower/armi/pull/2468>`_) Fixing shuffle file reading in reload runs #. (`PR#2470 <https://github.com/terrapower/armi/pull/2470>`_) Removing unused and broken material UThZr Code Changes, Maintenance, or Trivial ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. 
(`PR#2422 <https://github.com/terrapower/armi/pull/2422>`_) Removing unused parameter fluxAdjPeak #. (`PR#2430 <https://github.com/terrapower/armi/pull/2430>`_) Moving 27 TH parameters out of ARMI #. (`PR#2451 <https://github.com/terrapower/armi/pull/2451>`_) Moving 7 Neutronics Settings out of ARMI #. (`PR#2456 <https://github.com/terrapower/armi/pull/2456>`_) Anonymizing example user names in IPYNBs #. (`PR#2457 <https://github.com/terrapower/armi/pull/2457>`_) Doing misc cleanup and temporary pyDOE issue #. (`PR#2458 <https://github.com/terrapower/armi/pull/2458>`_) Removing broken pyDOE dep and LatinHyperCubeSuiteBuilder #. (`PR#2459 <https://github.com/terrapower/armi/pull/2459>`_) Removing Core.getAssembliesOfType in favor of iterChildrenWithFlags #. (`PR#2460 <https://github.com/terrapower/armi/pull/2460>`_) Stopping intermittent CI failures #. (`PR#2461 <https://github.com/terrapower/armi/pull/2461>`_) Renaming _Material_Test to AbstractMaterialTest #. (`PR#2462 <https://github.com/terrapower/armi/pull/2462>`_) Switching from coveralls.io to codecov.io #. (`PR#2482 <https://github.com/terrapower/armi/pull/2482>`_) Ensuring codecov.io does not fail if coverage drops a tiny amount #. (`PR#2483 <https://github.com/terrapower/armi/pull/2483>`_) Producing an ARMI wheel with every merge to main Documentation-Only Changes ^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2441 <https://github.com/terrapower/armi/pull/2441>`_) Moving the README text up in the PDF ToC #. (`PR#2481 <https://github.com/terrapower/armi/pull/2481>`_) Getting nuclide_demo tutorial working again #. (`PR#2443 <https://github.com/terrapower/armi/pull/2443>`_) Starting release cycle for ARMI 0.6.4 ARMI v0.6.3 ----------- Release Date: 2026-02-02 This was a minor release. There were changes made to the temporary directory and unit test tooling to support read-only containers. There were also some improvements to the shuffle logic API. And the documentation had many small cleanup changes. 
For quality, when accessing a material property outside the temperature range it was defined for, ARMI now raises an exception by default. Code Changes, Features ^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2385 <https://github.com/terrapower/armi/pull/2385>`_) Set materials.FAIL_ON_RANGE to True by default #. (`PR#2399 <https://github.com/terrapower/armi/pull/2399>`_) New exception for mpi4py import #. (`PR#2402 <https://github.com/terrapower/armi/pull/2402>`_) Improving cleanPath and related tooling #. (`PR#2411 <https://github.com/terrapower/armi/pull/2411>`_) Refactoring Shuffling Logic #. (`PR#2423 <https://github.com/terrapower/armi/pull/2423>`_) Fix issue with recent runLog changes #. (`PR#2428 <https://github.com/terrapower/armi/pull/2428>`_) Copying assembly flags when creating a new assembly from type #. (`PR#2432 <https://github.com/terrapower/armi/pull/2432>`_) Adding forceClean to snapshot deletions Code Changes, Maintenance, or Trivial ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2393 <https://github.com/terrapower/armi/pull/2393>`_) Shortening some test class names that are too long #. (`PR#2407 <https://github.com/terrapower/armi/pull/2407>`_) Removing unused Inconel material properties #. (`PR#2420 <https://github.com/terrapower/armi/pull/2420>`_) Adding unit testing to HexBlock.getPinPitch #. (`PR#2421 <https://github.com/terrapower/armi/pull/2421>`_) Renaming misloadSwap to swap #. (`PR#2424 <https://github.com/terrapower/armi/pull/2424>`_) Removing deprecation warning from test #. (`PR#2425 <https://github.com/terrapower/armi/pull/2425>`_) Adding code coverage #. (`PR#2426 <https://github.com/terrapower/armi/pull/2426>`_) Moving DIF3D parameters out of ARMI Documentation-Only Changes ^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2386 <https://github.com/terrapower/armi/pull/2386>`_) Adding a count of how many settings are in ARMI #. (`PR#2390 <https://github.com/terrapower/armi/pull/2390>`_) Ensuring that SCRs will build correctly during main branch pushes #. 
(`PR#2395 <https://github.com/terrapower/armi/pull/2395>`_) Cleaning up the docs #. (`PR#2398 <https://github.com/terrapower/armi/pull/2398>`_) Starting version 0.6.3 release cycle #. (`PR#2403 <https://github.com/terrapower/armi/pull/2403>`_) Improving nuclidebases documentation - byMcnpId #. (`PR#2409 <https://github.com/terrapower/armi/pull/2409>`_) Clarifying PIP version in user install docs #. (`PR#2418 <https://github.com/terrapower/armi/pull/2418>`_) Adding descriptions to two impl tags #. (`PR#2419 <https://github.com/terrapower/armi/pull/2419>`_) Ensuring all impl and test tags show in the docs #. (`PR#2433 <https://github.com/terrapower/armi/pull/2433>`_) Adding a high-level Parameter Report to the docs #. (`PR#2439 <https://github.com/terrapower/armi/pull/2439>`_) Adding an AUTHORS file to ARMI ARMI v0.6.2 ----------- Release Date: 2025-12-17 This was a minor release. There was a refactoring cleanup done to NuclideBases. And there were some very minor API-breaking changes to the HistoryTrackerInterface and Interface.function was renamed to Interface.purpose. The coolest new feature is the improvements made to loadTestReactor, which can now cache a variety of test reactors to improve your test performance. Code Changes, Features ^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2335 <https://github.com/terrapower/armi/pull/2335>`_) Moving to use Reactor.nuclideBases, where possible #. (`PR#2374 <https://github.com/terrapower/armi/pull/2374>`_) Changing MPI_COMM to the Pickle Protocol 5 Code Changes, Bugs and Fixes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2364 <https://github.com/terrapower/armi/pull/2364>`_) Fixing HoledHexagon.holeRadFromCenter type Code Changes, Maintenance, or Trivial ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2177 <https://github.com/terrapower/armi/pull/2177>`_) Changing Interface.function to Interface.purpose #. (`PR#2334 <https://github.com/terrapower/armi/pull/2334>`_) Refactoring loadTestReactor to pickle multiple test reactors #. 
(`PR#2358 <https://github.com/terrapower/armi/pull/2358>`_) Adding code coverage #. (`PR#2359 <https://github.com/terrapower/armi/pull/2359>`_) Cleaning up DB version logic #. (`PR#2360 <https://github.com/terrapower/armi/pull/2360>`_) Cleaning out broken tryPickleOnAllContents3 #. (`PR#2366 <https://github.com/terrapower/armi/pull/2366>`_) Raising instead of returning errors #. (`PR#2369 <https://github.com/terrapower/armi/pull/2369>`_) Moving three test reactors to the testing module #. (`PR#2371 <https://github.com/terrapower/armi/pull/2371>`_) Moving more ARMI testing tools to the testing module #. (`PR#2375 <https://github.com/terrapower/armi/pull/2375>`_) Cleaning unused parts of HistoryTrackerInterface #. (`PR#2387 <https://github.com/terrapower/armi/pull/2387>`_) Removing Five Unused Settings #. (`PR#2389 <https://github.com/terrapower/armi/pull/2389>`_) Improving HexBlock.hasPinPitch Documentation-Only Changes ^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2351 <https://github.com/terrapower/armi/pull/2351>`_) Updating PR form to use the word Rationale #. (`PR#2363 <https://github.com/terrapower/armi/pull/2363>`_) Starting 0.6.2 release cycle #. (`PR#2365 <https://github.com/terrapower/armi/pull/2365>`_) Improving and correcting the ARMI version semantics #. (`PR#2367 <https://github.com/terrapower/armi/pull/2367>`_) Formatting code in docstrings #. (`PR#2372 <https://github.com/terrapower/armi/pull/2372>`_) Documenting database cycle and node values #. (`PR#2373 <https://github.com/terrapower/armi/pull/2373>`_) Fixing docs deploying across GitHub repos #. (`PR#2383 <https://github.com/terrapower/armi/pull/2383>`_) Clarifying parameter doc headers #. (`PR#2388 <https://github.com/terrapower/armi/pull/2388>`_) Updating SCR to use the term Rationale ARMI v0.6.1 ----------- Release Date: 2025-11-05 This was a minor release. While a lot of technical debt was addressed, no major feature work was done. There were some minor bugs fixed, but again nothing worth a release. 
This is being tagged as a release because the API is stable and this commit was tested heavily downstream and is trustworthy. Code Changes, Features ^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2303 <https://github.com/terrapower/armi/pull/2303>`_) Encapsulating global nuclide data in classes #. (`PR#2317 <https://github.com/terrapower/armi/pull/2317>`_) Simplify thermal scattering #. (`PR#2320 <https://github.com/terrapower/armi/pull/2320>`_) Providing interface method interactRestart for managing restarts #. (`PR#2321 <https://github.com/terrapower/armi/pull/2321>`_) Adding Core.hasLib to check if there is a XS library #. (`PR#2323 <https://github.com/terrapower/armi/pull/2323>`_) Enabling user-specified distribution of MPI actions across nodes #. (`PR#2324 <https://github.com/terrapower/armi/pull/2324>`_) Adding orientationBOL to full core modifier #. (`PR#2325 <https://github.com/terrapower/armi/pull/2325>`_) Creating a new setting to specify memory requirement for cross section calculation #. (`PR#2344 <https://github.com/terrapower/armi/pull/2344>`_) Changing default for guideTubeTopElevation to zero Code Changes, Bugs and Fixes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2328 <https://github.com/terrapower/armi/pull/2328>`_) Ensuring that users define isotopics for custom materials #. (`PR#2331 <https://github.com/terrapower/armi/pull/2331>`_) Resolve TEST_ROOT path #. (`PR#2336 <https://github.com/terrapower/armi/pull/2336>`_) Checking number of jobs against available ranks in runBatchedActions #. (`PR#2347 <https://github.com/terrapower/armi/pull/2347>`_) Fixing missing f-string in component blueprints #. (`PR#2350 <https://github.com/terrapower/armi/pull/2350>`_) Fixing compareLines for a multiple numbers edge case Code Changes, Maintenance, or Trivial ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2306 <https://github.com/terrapower/armi/pull/2306>`_) Removing volume from getMgFlux() #. 
(`PR#2309 <https://github.com/terrapower/armi/pull/2309>`_) Moving plotting functions into utils module #. (`PR#2310 <https://github.com/terrapower/armi/pull/2310>`_) Removing unnecessary code from Assembly.getBlocksBetweenElevations #. (`PR#2315 <https://github.com/terrapower/armi/pull/2315>`_) Moving some testing utils to testing module #. (`PR#2327 <https://github.com/terrapower/armi/pull/2327>`_) Adding a unit test to HexGrid.generateSortedHexLocationList #. (`PR#2348 <https://github.com/terrapower/armi/pull/2348>`_) Make singleMixedAssembly more flexible for downstream testing Documentation-Only Changes ^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2250 <https://github.com/terrapower/armi/pull/2250>`_) Logging redundant material modifications #. (`PR#2280 <https://github.com/terrapower/armi/pull/2280>`_) Improving the new shuffling docs #. (`PR#2311 <https://github.com/terrapower/armi/pull/2311>`_) Starting the ARMI 0.6.1 release cycle #. (`PR#2312 <https://github.com/terrapower/armi/pull/2312>`_) Updating XS group manager requirements #. (`PR#2314 <https://github.com/terrapower/armi/pull/2314>`_) Documenting ARMI’s testing tools for devs #. (`PR#2332 <https://github.com/terrapower/armi/pull/2332>`_) Adding settings header to the settings YAML #. (`PR#2337 <https://github.com/terrapower/armi/pull/2337>`_) Fixing docs build #. (`PR#2339 <https://github.com/terrapower/armi/pull/2339>`_) Adding contributor to SCR doc automation script #. (`PR#2342 <https://github.com/terrapower/armi/pull/2342>`_) Documenting how to use the zonesFile Setting #. (`PR#2343 <https://github.com/terrapower/armi/pull/2343>`_) Improving documentation for zoneDefinions Setting #. (`PR#2354 <https://github.com/terrapower/armi/pull/2354>`_) Clarifying that ARMI materials are of testing quality ARMI v0.6.0 ----------- Release Date: 2025-09-25 This was a big release. 
A lot of technical debt has been cleaned up (XML geom files are finally gone), but there was a lot of feature work: multi-pin blocks, axial expansion improvements, more powerful shuffle logic, and the ability to more freely load an ARMI database. Code Changes, Features ^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#1995 <https://github.com/terrapower/armi/pull/1995>`_) Improving HexBlock.getFlowArea #. (`PR#2031 <https://github.com/terrapower/armi/pull/2031>`_) Providing better composite iteration methods #. (`PR#2045 <https://github.com/terrapower/armi/pull/2045>`_) Adding a check on the grid/component consistency in the BPs #. (`PR#2092 <https://github.com/terrapower/armi/pull/2092>`_) Allowing ARMI to use tmp dir on Mac/Linux #. (`PR#2105 <https://github.com/terrapower/armi/pull/2105>`_) Removing support for XML geom files #. (`PR#2106 <https://github.com/terrapower/armi/pull/2106>`_) Add Core.iterBlocks and Assembly.iterBlocks #. (`PR#2107 <https://github.com/terrapower/armi/pull/2107>`_) Handling empty string defaults better in copyInterfaceInputs #. (`PR#2109 <https://github.com/terrapower/armi/pull/2109>`_) Store number densities in numpy arrays instead of dictionary #. (`PR#2114 <https://github.com/terrapower/armi/pull/2114>`_) Allowing component area to be queried at arbitrary temp #. (`PR#2118 <https://github.com/terrapower/armi/pull/2118>`_) Adding a FilletedHexagon shape #. (`PR#2121 <https://github.com/terrapower/armi/pull/2121>`_) Supporting growing DB to full core on db load #. (`PR#2135 <https://github.com/terrapower/armi/pull/2135>`_) Retooling single-warnings report as all warnings report #. (`PR#2138 <https://github.com/terrapower/armi/pull/2138>`_) Allowing the BOL orientations to be set in the blueprints #. (`PR#2162 <https://github.com/terrapower/armi/pull/2162>`_) Make axial linking aware of block grids for axial expansion #. (`PR#2173 <https://github.com/terrapower/armi/pull/2173>`_) Improving Core.libs to look for the current cycle and node #. 
(`PR#2175 <https://github.com/terrapower/armi/pull/2175>`_) Blocking duplicate flags from being added #. (`PR#2198 <https://github.com/terrapower/armi/pull/2198>`_) Updating Axial Expansion Changer for improved mass redistribution #. (`PR#2199 <https://github.com/terrapower/armi/pull/2199>`_) Add 3 nuclides to getDefaultNuclideFlags #. (`PR#2202 <https://github.com/terrapower/armi/pull/2202>`_) Provide Component.pinIndices for helping understand where pins are #. (`PR#2208 <https://github.com/terrapower/armi/pull/2208>`_) Making ParamMapper symmetry-aware #. (`PR#2209 <https://github.com/terrapower/armi/pull/2209>`_) Block collection nuclides #. (`PR#2218 <https://github.com/terrapower/armi/pull/2218>`_) Adding a method to get cycle/node combinations for a time interval #. (`PR#2219 <https://github.com/terrapower/armi/pull/2219>`_) Refactoring Shuffle Logic Inputs to YAML #. (`PR#2221 <https://github.com/terrapower/armi/pull/2221>`_) Update logic for number density arrays and other cleanup #. (`PR#2223 <https://github.com/terrapower/armi/pull/2223>`_) Move the zonesFile setting to the framework and add building of zones to the interface stack #. (`PR#2225 <https://github.com/terrapower/armi/pull/2225>`_) Advancing r.p.time in the Operator #. (`PR#2227 <https://github.com/terrapower/armi/pull/2227>`_) Symmetry testing #. (`PR#2233 <https://github.com/terrapower/armi/pull/2233>`_) Remove axialPowerProfile* parameters #. (`PR#2235 <https://github.com/terrapower/armi/pull/2235>`_) Adding two geometry parameters #. (`PR#2243 <https://github.com/terrapower/armi/pull/2243>`_) Updating wetted perimeter calculation #. (`PR#2251 <https://github.com/terrapower/armi/pull/2251>`_) Comparing special formatting parameters in DBs #. (`PR#2255 <https://github.com/terrapower/armi/pull/2255>`_) Add b10NumFrac attribute to B4C class to allow for flexible setDefaultMassFracs #. 
(`PR#2266 <https://github.com/terrapower/armi/pull/2266>`_) Allow assembly parameters to be symmetry aware during core transformations and move operations #. (`PR#2269 <https://github.com/terrapower/armi/pull/2269>`_) Track assemblies if discharged to SFP #. (`PR#2272 <https://github.com/terrapower/armi/pull/2272>`_) Adding new Component param enrichmentBOL #. (`PR#2275 <https://github.com/terrapower/armi/pull/2275>`_) Cleaning internal state out of some materials #. (`PR#2277 <https://github.com/terrapower/armi/pull/2277>`_) Enhance fuel handler logic to support module imports #. (`PR#2278 <https://github.com/terrapower/armi/pull/2278>`_) Adding support for moving assemblies from SFP to Core in YAML shuffle input #. (`PR#2292 <https://github.com/terrapower/armi/pull/2292>`_) Support mixed Blocks for smear density calculation #. (`PR#2305 <https://github.com/terrapower/armi/pull/2305>`_) Raising error if no driverBlock is found by latticePhysicsWriter Code Changes, Bugs and Fixes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#1654 <https://github.com/terrapower/armi/pull/1654>`_) Use clearer input syntax for hexagonal lattice pitch #. (`PR#1998 <https://github.com/terrapower/armi/pull/1998>`_) Fixing a couple of plots to use initial block height #. (`PR#2098 <https://github.com/terrapower/armi/pull/2098>`_) Removing the HTML reports feature #. (`PR#2102 <https://github.com/terrapower/armi/pull/2102>`_) Fixing issue in copyInterfaceInputs with one file #. (`PR#2111 <https://github.com/terrapower/armi/pull/2111>`_) fix side effects from tests #. (`PR#2115 <https://github.com/terrapower/armi/pull/2115>`_) Adding Reactor construction hook to Database.load() #. (`PR#2120 <https://github.com/terrapower/armi/pull/2120>`_) Cylindrical Cross Section model updates #. (`PR#2129 <https://github.com/terrapower/armi/pull/2129>`_) OperatorMPI doesn’t need to bcast quits if there are no other workers #. 
(`PR#2153 <https://github.com/terrapower/armi/pull/2153>`_) Hiding duplicate warning messages #. (`PR#2160 <https://github.com/terrapower/armi/pull/2160>`_) Fixing bad Return in safeCopy #. (`PR#2163 <https://github.com/terrapower/armi/pull/2163>`_) Using gamma groups instead of neutron groups in gamiso.addDummyNuclidesToLibrary #. (`PR#2176 <https://github.com/terrapower/armi/pull/2176>`_) Using np.int32 when reading GEODST files #. (`PR#2180 <https://github.com/terrapower/armi/pull/2180>`_) Remove assert statements from FilletedHexagon instantiation #. (`PR#2186 <https://github.com/terrapower/armi/pull/2186>`_) Ensuring full core BPs aren’t converted like 1/3 core #. (`PR#2187 <https://github.com/terrapower/armi/pull/2187>`_) Fixing bug in Uranium.pseudoDensity #. (`PR#2189 <https://github.com/terrapower/armi/pull/2189>`_) Fixing bug in finding ISOTXS libraries to merge #. (`PR#2191 <https://github.com/terrapower/armi/pull/2191>`_) Fixing issue with full core BP geometry conversion #. (`PR#2195 <https://github.com/terrapower/armi/pull/2195>`_) Fixing round trip of hex lattice maps #. (`PR#2226 <https://github.com/terrapower/armi/pull/2226>`_) Fix equality of MultiIndexLocator and CoordinateLocation #. (`PR#2228 <https://github.com/terrapower/armi/pull/2228>`_) Fixing bug in Air.pseudoDensity when given Celsius T #. (`PR#2229 <https://github.com/terrapower/armi/pull/2229>`_) Change initialization of modArea for database load #. (`PR#2231 <https://github.com/terrapower/armi/pull/2231>`_) Fixing issue initial time node in previous PR #. (`PR#2236 <https://github.com/terrapower/armi/pull/2236>`_) Handle pinIndices for blocks that don’t have fuel #. (`PR#2245 <https://github.com/terrapower/armi/pull/2245>`_) Fixing invalid any() signature #. (`PR#2248 <https://github.com/terrapower/armi/pull/2248>`_) Fixing issue loading from snapshots database #. (`PR#2253 <https://github.com/terrapower/armi/pull/2253>`_) Making a unit test thread safe #. 
(`PR#2259 <https://github.com/terrapower/armi/pull/2259>`_) Re-assigning pin indices when sorting a Block #. (`PR#2260 <https://github.com/terrapower/armi/pull/2260>`_) Fixing compareLines so that it doesn’t trip on zeros #. (`PR#2268 <https://github.com/terrapower/armi/pull/2268>`_) Fixing Uranium enrichment calculations #. (`PR#2276 <https://github.com/terrapower/armi/pull/2276>`_) Fixing Composite.extend to correctly set the parent #. (`PR#2282 <https://github.com/terrapower/armi/pull/2282>`_) Fixing incorrect variable name in Pitch class #. (`PR#2291 <https://github.com/terrapower/armi/pull/2291>`_) Conserve molesHmBOL / massHmBOL when performing axial expansion #. (`PR#2294 <https://github.com/terrapower/armi/pull/2294>`_) Ensuring settings file can be found when writing one DB from another #. (`PR#2298 <https://github.com/terrapower/armi/pull/2298>`_) Preserve loading of CoordinateLocation in db load #. (`PR#2302 <https://github.com/terrapower/armi/pull/2302>`_) Handle pin indices for fuel + non fuel on the same grid #. (`PR#2307 <https://github.com/terrapower/armi/pull/2307>`_) Clearing out Component.p.pinIndices prior to assignment Code Changes, Maintenance, or Trivial ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#1386 <https://github.com/terrapower/armi/pull/1386>`_) Improve “smallRun” settings names #. (`PR#2085 <https://github.com/terrapower/armi/pull/2085>`_) Dropping black formatter for ruff #. (`PR#2093 <https://github.com/terrapower/armi/pull/2093>`_) Speed up axial expansion unit tests #. (`PR#2096 <https://github.com/terrapower/armi/pull/2096>`_) Fixing spelling errors #. (`PR#2103 <https://github.com/terrapower/armi/pull/2103>`_) Fixing spelling in docs and docstrings #. (`PR#2104 <https://github.com/terrapower/armi/pull/2104>`_) Removing defunct references to Cinder #. (`PR#2110 <https://github.com/terrapower/armi/pull/2110>`_) Combining three .gitignore files into one #. 
(`PR#2116 <https://github.com/terrapower/armi/pull/2116>`_) Cleaning up the codeTiming reports #. (`PR#2117 <https://github.com/terrapower/armi/pull/2117>`_) Reducing the warnings from Block.autoCreateSpatialGrids #. (`PR#2123 <https://github.com/terrapower/armi/pull/2123>`_) Removing permanently skipped tests #. (`PR#2126 <https://github.com/terrapower/armi/pull/2126>`_) Removing old TODO comments from the codebase #. (`PR#2127 <https://github.com/terrapower/armi/pull/2127>`_) Removing 3 unused Settings #. (`PR#2128 <https://github.com/terrapower/armi/pull/2128>`_) Created a fast flux energy structure for calculating fast flux #. (`PR#2130 <https://github.com/terrapower/armi/pull/2130>`_) Removing unused Parameters #. (`PR#2132 <https://github.com/terrapower/armi/pull/2132>`_) Removing unused reactivity coeffs params #. (`PR#2133 <https://github.com/terrapower/armi/pull/2133>`_) Moving NeutronicsPlugin to its own file #. (`PR#2134 <https://github.com/terrapower/armi/pull/2134>`_) Removing unused Parameters #. (`PR#2136 <https://github.com/terrapower/armi/pull/2136>`_) Removing unused TH parameters #. (`PR#2139 <https://github.com/terrapower/armi/pull/2139>`_) Removing unnecessary DB load try/except #. (`PR#2140 <https://github.com/terrapower/armi/pull/2140>`_) Cleaning up Block constructor #. (`PR#2141 <https://github.com/terrapower/armi/pull/2141>`_) Changing format-style strings to f-strings #. (`PR#2142 <https://github.com/terrapower/armi/pull/2142>`_) Quieting warnings from Block.getComponent #. (`PR#2144 <https://github.com/terrapower/armi/pull/2144>`_) Improving the default value for Assembly.getArea() #. (`PR#2146 <https://github.com/terrapower/armi/pull/2146>`_) Some more fstring conversions #. (`PR#2155 <https://github.com/terrapower/armi/pull/2155>`_) Cleaning up strange counter line #. (`PR#2157 <https://github.com/terrapower/armi/pull/2157>`_) Removing overly-specific check from the Component constructor #. 
(`PR#2165 <https://github.com/terrapower/armi/pull/2165>`_) Removing old setting mpiTasksPerNode from ZPPR test file #. (`PR#2166 <https://github.com/terrapower/armi/pull/2166>`_) Removing commented out code #. (`PR#2167 <https://github.com/terrapower/armi/pull/2167>`_) Removing unused test code #. (`PR#2168 <https://github.com/terrapower/armi/pull/2168>`_) Removing Deprecation Warning on sortReactor setting #. (`PR#2170 <https://github.com/terrapower/armi/pull/2170>`_) Adding a collar flag #. (`PR#2171 <https://github.com/terrapower/armi/pull/2171>`_) Cleaning up Tests to have Fewer Side Effects #. (`PR#2183 <https://github.com/terrapower/armi/pull/2183>`_) Renaming old smallRun Setting to rmExternalFilesAtEOL #. (`PR#2190 <https://github.com/terrapower/armi/pull/2190>`_) Using iterators instead of getAssemblies where possible #. (`PR#2197 <https://github.com/terrapower/armi/pull/2197>`_) Using iterators more in our unit tests #. (`PR#2203 <https://github.com/terrapower/armi/pull/2203>`_) Slight refactor on b.getSmearDensity to accommodate downstream work #. (`PR#2210 <https://github.com/terrapower/armi/pull/2210>`_) Removing python-dateutil dependency #. (`PR#2211 <https://github.com/terrapower/armi/pull/2211>`_) Remove Component.p.puFrac #. (`PR#2212 <https://github.com/terrapower/armi/pull/2212>`_) Removing duplicate lines #. (`PR#2215 <https://github.com/terrapower/armi/pull/2215>`_) Removing defunct deprecation warning #. (`PR#2220 <https://github.com/terrapower/armi/pull/2220>`_) Adding a basic unit test of Block.computeSmearDensity #. (`PR#2230 <https://github.com/terrapower/armi/pull/2230>`_) Adding Composite.getFirstComponent method #. (`PR#2232 <https://github.com/terrapower/armi/pull/2232>`_) Handling BOL times better #. (`PR#2240 <https://github.com/terrapower/armi/pull/2240>`_) Cleaning trace and profile out of RunEntryPoint #. (`PR#2241 <https://github.com/terrapower/armi/pull/2241>`_) move attributes to __init__ #. 
(`PR#2242 <https://github.com/terrapower/armi/pull/2242>`_) ParamLocation for Duct Temp/DPAs #. (`PR#2257 <https://github.com/terrapower/armi/pull/2257>`_) Improving Code Coverage for Blocks and MPIAction #. (`PR#2263 <https://github.com/terrapower/armi/pull/2263>`_) Adding tests to improve code coverage #. (`PR#2265 <https://github.com/terrapower/armi/pull/2265>`_) Removing deprecated settingsValidation file #. (`PR#2283 <https://github.com/terrapower/armi/pull/2283>`_) Removing unused debugDB setting #. (`PR#2285 <https://github.com/terrapower/armi/pull/2285>`_) Improving the error messages for invalid settings data #. (`PR#2289 <https://github.com/terrapower/armi/pull/2289>`_) Improving extensibility of mass redistribution method in axial expansion #. (`PR#2297 <https://github.com/terrapower/armi/pull/2297>`_) Reducing log spam when creating a lot of spatial grids #. (`PR#2300 <https://github.com/terrapower/armi/pull/2300>`_) Shortening our longest unit test names Documentation-Only Changes ^^^^^^^^^^^^^^^^^^^^^^^^^^ #. (`PR#2090 <https://github.com/terrapower/armi/pull/2090>`_) Adding an SCR section to the docs #. (`PR#2095 <https://github.com/terrapower/armi/pull/2095>`_) Edits to STR test report #. (`PR#2100 <https://github.com/terrapower/armi/pull/2100>`_) Adding more info to STR intro #. (`PR#2101 <https://github.com/terrapower/armi/pull/2101>`_) Fixing issue with SCR on main branch #. (`PR#2119 <https://github.com/terrapower/armi/pull/2119>`_) Adding basic documentation for axial expansion #. (`PR#2131 <https://github.com/terrapower/armi/pull/2131>`_) Update docstring for Settings class to reflect mutability #. (`PR#2137 <https://github.com/terrapower/armi/pull/2137>`_) Improving description of rateProdNet parameter #. (`PR#2143 <https://github.com/terrapower/armi/pull/2143>`_) Improving the docs-build instructions #. (`PR#2148 <https://github.com/terrapower/armi/pull/2148>`_) Adding tooling to help people build the docs locally #. 
(`PR#2150 <https://github.com/terrapower/armi/pull/2150>`_) Clarifying setting disableBlockTypeExclusionInXsGeneration #. (`PR#2151 <https://github.com/terrapower/armi/pull/2151>`_) Adding SQA for the SFP and cycles setting #. (`PR#2174 <https://github.com/terrapower/armi/pull/2174>`_) Remove traces of black #. (`PR#2213 <https://github.com/terrapower/armi/pull/2213>`_) Ensuring non-main branch PRs do not yield SCRs #. (`PR#2214 <https://github.com/terrapower/armi/pull/2214>`_) Fixing error in recent doc change #. (`PR#2217 <https://github.com/terrapower/armi/pull/2217>`_) Improving documentation of axial expansion #. (`PR#2222 <https://github.com/terrapower/armi/pull/2222>`_) Make a duplicated test tag unique #. (`PR#2238 <https://github.com/terrapower/armi/pull/2238>`_) Trying to speed up docs build #. (`PR#2249 <https://github.com/terrapower/armi/pull/2249>`_) Improving docs on entry points creation #. (`PR#2264 <https://github.com/terrapower/armi/pull/2264>`_) Update the description of the mcnpLibraryVersion case setting #. (`PR#2270 <https://github.com/terrapower/armi/pull/2270>`_) Fixing sphinx warnings in the doc build #. (`PR#2274 <https://github.com/terrapower/armi/pull/2274>`_) Adding user documentation of core symmetry #. (`PR#2279 <https://github.com/terrapower/armi/pull/2279>`_) Fixing the SCR table in the docs #. (`PR#2286 <https://github.com/terrapower/armi/pull/2286>`_) Improving Docs for 0.6.0 Release ================================================ FILE: doc/qa_docs/scr/index.rst ================================================ Software Change Requests (SCR) ============================== You can find a Software Change Request (SCR) for each release below. ---------- ..
toctree:: :maxdepth: 1 :glob: :reversed: * ================================================ FILE: doc/qa_docs/scr/latest_scr.rst ================================================ SCR for ARMI 0.7.0 ================== This is a listing of all the Software Change Request (SCR) changes in the ARMI repository, as part of the current release. Below, this SCR is organized into the individual changes that comprise the net SCR for this release. Each SCR below explicitly lists its impact on ARMI requirements, if any. It is also important to note ARMI and all its requirements are tested entirely by the automated testing that happens during the ARMI build. None of the SCRs below will be allowed to happen if any single test fails, so it can be guaranteed that all SCRs below have fully passed all testing. SCR Listing ----------- The following lists display all the SCRs in this release of the ARMI framework. .. exec:: import os from automateScr import buildScrListing thisPrNum = int(os.environ.get('PR_NUMBER', -1) or -1) return buildScrListing("7b741a19", thisPrNum) ================================================ FILE: doc/qa_docs/sdid.rst ================================================ Software Design and Implementation Document (SDID) ================================================== Purpose and Scope ----------------- This document is the Software Design and Implementation Document (SDID) for ARMI. The purpose of this document is to define how the ARMI requirements are implemented. These are important user stories for anyone wanting to use ARMI or develop their own ARMI-based application. The implementation of the ARMI requirements is described in detail in an Implementation Traceability Matrix (ITM). Procedural Compliance ^^^^^^^^^^^^^^^^^^^^^ This document includes information on four topics: the (1) software environment, (2) measures to mitigate possible failures, (3) implementation of the computational sequence, and (4) technical adequacy.
Software Environment ^^^^^^^^^^^^^^^^^^^^ ARMI is built using the Python programming language and runs on Windows and Linux operating systems. Failure Mitigation ^^^^^^^^^^^^^^^^^^ ARMI provides a suite of unit tests which provide indication of the proper usage of the program. These tests are described in the software test report and are directly traceable to the requirements in the software requirements specification document. The purpose of these tests is to provide a way for downstream users to test and measure the utility of the ARMI framework for their own purposes, in their own environment. This allows users and developers to perform failure analysis. These tests allow for a push-button way to measure and mitigate consequences and problems including external and internal abnormal conditions and events that can affect the software. Implementation of Computational Sequence ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The computational sequence and relevant portions of the technical adequacy are specific to the implementation and are described for each implementation in the :ref:`Implementation Traceability Matrix <armi_impl_matrix>`. Technical Adequacy ^^^^^^^^^^^^^^^^^^ The internal completeness for each implementation is shown by providing traceability to the requirements as shown in the :ref:`Implementation Traceability Matrix <armi_impl_matrix>`. The consistency of the implementation is provided by a best practice used by the development team including revision control, ensuring that code content is reviewed by non-code originating team members, and ensuring training for developers. Clarity is provided by the descriptions of the implementations in the :ref:`Implementation Traceability Matrix <armi_impl_matrix>`. Figures are added as needed in the implementation in the :ref:`Implementation Traceability Matrix <armi_impl_matrix>`.
Design and Implementation ------------------------- To automate the process of tracking the implementation of all requirements in ARMI, we are using the :ref:`Implementation Traceability Matrix <armi_impl_matrix>` below. This will connect high-quality, in-code documentation with each requirement in a complete way. However, before giving a complete overview of the requirement implementations, this document will describe the design of two main features in the ARMI codebase: the plugin system and the reactor data model. These are the two major features which you need to understand to understand what ARMI is, and why it is useful. So, at the risk of duplicating documentation, the design of these two features will be discussed in some detail. Implementation of Plugin System ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The first important design idea to understand in ARMI is that ARMI is a framework for nuclear reactor modeling. What this means is that the science or engineering calculations for nuclear reactor modeling do not happen in ARMI. The point of ARMI is to tie together disparate nuclear modeling software that already exist. Thus, ARMI must be able to wrap external codes, and orchestrate running them at each time step we want to model. The second design idea is that at each time step, there is an ordered list of conceptual reactor modeling steps to be executed. ARMI calls these steps :py:class:`Interfaces <armi.interfaces.Interface>` and runs the code in each, in order, at each time step. While ARMI does have a default list of modeling steps, and a default order, none of the steps are mandatory, and their order is modifiable. An example interface stack would be: * preprocessing * fuel management * depletion * fuel performance * cross sections * critical control * flux * thermal hydraulics * reactivity coefficients * transient * bookkeeping * postprocessing So, how do we add Interfaces to the simulation? 
The third major design idea is that developers can create an ARMI :py:class:`Plugin <armi.plugins.ArmiPlugin>`, which can add one or more Interfaces to the simulation. Lastly, at the highest level of the design, a developer can create an ARMI :py:class:`Application <armi.apps.App>`. This is a flexible container that allows developers to register multiple Plugins, which register multiple Interfaces, which fully define all the code that will be run at each time step of the simulation. Below is a diagram from an example ARMI Application. Following this design, in the real world you would expect an ARMI Application to be made by various teams of scientists and engineers that define one Plugin and a small number of Interfaces. Then a simulation of the reactor would be carried out over some number of cycles / time nodes, where each of the Interfaces would be run in a specified order at each time node. .. figure:: /.static/armi_application_structure.png :align: center An example ARMI Application. If this high-level design seems abstract, that is by design. ARMI is not concerned with implementing scientific codes, or forcing nuclear modelers to do things a certain way. ARMI is a tool that aims to support a wide audience of nuclear reactor modelers. Implementation of Reactor Data Model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In the previous section, we described how an ARMI Application is put together. But that Application is only useful if it can pass information about the reactor between all the external codes that are being wrapped by each Interface. Thus, an important part of the ARMI design is that it has a robust and detailed software data model to represent the current state of the reactor. This data model can be queried and manipulated by each Interface to get data that is needed to run the external reactor modeling codes. The structure of the ARMI reactor data model is designed to be quite flexible, and heavily modifiable in code.
But most of the practical work done with ARMI so far has been on pin-type reactor cores, so we will focus on such an example. At the largest scale, the :py:class:`Reactor <armi.reactor.reactors.Reactor>` contains a :py:class:`Core <armi.reactor.reactors.Core>` and a :py:class:`Spent Fuel Pool <armi.reactor.assemblyLists.SpentFuelPool>`. The Core is made primarily of a collection of :py:class:`Assemblies <armi.reactor.assemblies.Assembly>`, which are vertical collections of :py:class:`Blocks <armi.reactor.blocks.Block>`. Each Block, and every other physical piece of the Reactor is a :py:class:`Composite <armi.reactor.composites.Composite>`. Composites have a defined shape, material(s), location in space, and parent. Composites have parents because ARMI defines all Reactors as a hierarchical model, where outer objects contain inner children, and the Reactor is the outermost object. The important thing about this model is that it is in code, so developers of ARMI Interfaces can query and modify the reactor data model in any way they need. .. figure:: /.static/armi_reactor_objects.png :align: center Structure of the ARMI reactor data model. .. _armi_hardware: Hardware/OS Compatibility ^^^^^^^^^^^^^^^^^^^^^^^^^ ARMI is a Python-based framework, designed to help tie together various nuclear models, all written in a variety of languages. ARMI officially supports Python versions 3.9 and higher. ARMI is also designed to work on modern versions of both Windows and Linux. The memory, CPU, and hardware needs of an ARMI simulation depend on the Reactor. Simulations run with lumped fission products will require more memory than those run without. Simulations with much larger, more detailed reactor core blueprints, or containing more components, will take up more memory than simpler blueprints. ARMI can also be run with only one process, but most users choose to run ARMI in parallel on a computing cluster of some kind. 
In practice, users tend to find that dozens or hundreds of parallel processes are helpful for speeding up ARMI runs, and each process will ideally have 1 or 2 GB of RAM. Error/Input Handling ^^^^^^^^^^^^^^^^^^^^ ARMI's internal error-handling library is the :py:mod:`runLog <armi.runLog>`. This tool handles the warnings and errors for internal ARMI code and all the plugins. The ``runLog`` system will handle both print-to-screen and log file messages. At the end of the run, all log messages from every plugin and from all parallel processes are tabulated into centralized log files. The ``runLog`` system will also tabulate a list of all warnings that occurred during a simulation. And it should be noted that most full "errors" will cause the ARMI simulation to fail and stop hard, ending the run early. This is the ideal solution, so people know the run results are invalid. To that effect, ARMI makes use of Python's robust `Exception` system. .. _armi_impl_matrix: Implementation Traceability Matrix ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The requirements and associated tests which demonstrate acceptance of the codebase with the requirements are in the Software Requirements Specification Document :ref:`(SRSD) <armi_srsd>`. This section contains a list of all requirement implementations. Here are some quick metrics for the requirement implementations in ARMI: * :need_count:`type=='req' and status=='accepted'` Accepted Requirements in ARMI * :need_count:`type=='req' and status=='accepted' and len(implements_back)>0` Accepted Requirements with implementations * :need_count:`type=='impl'` implementations linked to Requirements And here is a full listing of all the requirement implementations in ARMI, that are tied to requirements: .. needextract:: :filter: id.startswith('I_ARMI_') ================================================ FILE: doc/qa_docs/srsd/bookkeeping_reqs.rst ================================================ ..
_armi_bookkeeping: Bookkeeping Package ------------------- This section provides requirements for the :py:mod:`armi.bookkeeping` package within the framework, which handles data persistence, including storage and recovery, report generation, data visualization, and debugging. Functional Requirements +++++++++++++++++++++++ .. req:: The database package shall save a copy of the user settings associated with the run. :id: R_ARMI_DB_CS :subtype: functional :basis: This supports traceability and restart ability. :acceptance_criteria: Save and retrieve the user settings from the database. :status: accepted .. req:: The database package shall save a copy of the reactor blueprints associated with the run. :id: R_ARMI_DB_BP :subtype: functional :basis: This supports traceability and restart ability. :acceptance_criteria: Save and retrieve the blueprints from the database. :status: accepted .. req:: The database shall store reactor state data at specified points in time. :id: R_ARMI_DB_TIME :subtype: functional :basis: Loading a reactor from a database is needed for follow-on analysis. :acceptance_criteria: Save and load a reactor from a database at a specified point in time and show parameters are appropriate. :status: accepted .. req:: ARMI shall allow runs at a particular time node to be re-instantiated from a snapshot. :id: R_ARMI_SNAPSHOT_RESTART :subtype: functional :basis: Analysts need to do follow-on analysis on detailed treatments of particular time nodes. :acceptance_criteria: After restarting a run, the reactor time node and power have been correctly reset. :status: accepted .. req:: The database shall store system attributes during a simulation. :id: R_ARMI_DB_QA :subtype: functional :basis: Storing system attributes provides QA traceability. :acceptance_criteria: Demonstrate that system attributes are stored in a database after it is initialized. :status: accepted .. req:: ARMI shall allow for previously calculated reactor state data to be retrieved within a run.
:id: R_ARMI_HIST_TRACK :subtype: functional :basis: Retrieval of calculated run data from a previous time node within a run supports time-based data integration. :acceptance_criteria: Demonstrate that a set of parameters stored at differing time nodes can be recovered. :status: accepted .. ## Note: ARMI strongly suggests you use the Database for this purpose instead. Software Attributes +++++++++++++++++++ .. req:: The database produced shall be agnostic to programming language. :id: R_ARMI_DB_H5 :subtype: attribute :basis: Analysts should be free to use the data in any programming language they choose. :acceptance_criteria: Open an output file in the h5 format. :status: accepted I/O Requirements ++++++++++++++++ .. req:: ARMI shall allow extra data to be saved from a run, at specified time nodes. :id: R_ARMI_SNAPSHOT :subtype: io :basis: Analysts need to do follow-on analysis on detailed treatments of particular time nodes. :acceptance_criteria: Snapshot logic can be called for a given set of time nodes. :status: accepted ================================================ FILE: doc/qa_docs/srsd/cases_reqs.rst ================================================ .. _armi_cases: Cases Package ------------- This section provides requirements for the :py:mod:`armi.cases` package within the framework, which is responsible for running and analyzing ARMI-based cases and case suites for an application. This includes functionalities to serialize and deserialize case inputs for input modification, tracking the status of a case, and running simulations. Functional Requirements +++++++++++++++++++++++ .. req:: The case package shall provide a generic mechanism that will allow a user to run a simulation. :id: R_ARMI_CASE :subtype: functional :basis: Most workflows rely on this capability. :acceptance_criteria: Build a case and initialize a simulation. :status: accepted .. 
req:: The case package shall provide a tool to run multiple cases at the same time or with dependence on other cases. :id: R_ARMI_CASE_SUITE :subtype: functional :basis: Many workflows rely on this capability. :acceptance_criteria: Build a suite of cases with dependence and run them. :status: accepted .. req:: The case package shall provide a generic mechanism to allow users to modify user inputs in a collection of cases. :id: R_ARMI_CASE_MOD :subtype: functional :basis: This capability is needed by analysis workflows such as parameter studies and uncertainty quantification. :acceptance_criteria: Load user inputs and build a collection of cases that contain programmatically-perturbed inputs. :status: accepted I/O Requirements ++++++++++++++++ .. req:: The case package shall have the ability to load user inputs and perform input validation checks. :id: R_ARMI_CASE_CHECK :subtype: io :basis: Most workflows rely on this capability. :acceptance_criteria: Load user inputs and perform validation checks. :status: accepted ================================================ FILE: doc/qa_docs/srsd/cli_reqs.rst ================================================ .. _armi_cli: Command Line Interface Package ------------------------------ This section provides requirements for the :py:mod:`armi.cli` package. This package is responsible for providing user entry points to an ARMI-based application as a Command Line Interface (CLI). This package allows for developers to create their own automated work flows including: case submission, user setting validation, data migrations, and more. Functional Requirements +++++++++++++++++++++++ .. req:: The cli package shall provide a generic CLI for developers to build their own CLI. :id: R_ARMI_CLI_GEN :basis: Provides extensibility of the system behavior for an application to implement analysis workflows. :subtype: functional :status: accepted :acceptance_criteria: Create an entry point, pass it arguments, and invoke it. 
I/O Requirements ++++++++++++++++ .. req:: The cli package shall provide a basic CLI which allows users to start an ARMI simulation. :id: R_ARMI_CLI_CS :basis: This is relied upon for most users to submit jobs to a cluster. :subtype: io :status: accepted :acceptance_criteria: Invoke an ARMI CLI. ================================================ FILE: doc/qa_docs/srsd/framework_reqs.rst ================================================ .. _armi_framework: Framework-Related Concepts -------------------------- This section provides the highest-level requirements for the ARMI framework. These requirements are specific to the idea that ARMI is a framework, that allows for the connection of disparate scientific and nuclear engineer models. The four major pieces of the codebase covered by these requirements are: - :py:mod:`armi.apps` - An ARMI simulation is controlled by an ARMI :py:class:`Application <armi.apps.App>`. - :py:mod:`armi.plugins` - Each :py:class:`Application <armi.apps.App>` registers a list of :py:class:`Plugins <armi.plugins.ArmiPlugin>`. - :py:mod:`armi.interfaces` - Each :py:class:`Plugin <armi.plugins.ArmiPlugin>` registers a list of :py:class:`Interface <armi.interfaces.Interface>`. - :py:mod:`armi.operators` - The :py:class:`Operator <armi.operators.Operator>` contains a list of :py:class:`Interfaces <armi.interfaces.Interface>`, which are run in order at each time node. Functional Requirements +++++++++++++++++++++++ .. ## Note: These 12 requirements define ARMI at a high level. They will rarely change. .. req:: The operator package shall provide a means by which to communicate inputs and results between analysis plugins. :id: R_ARMI_OPERATOR_COMM :subtype: functional :basis: This is a foundational design concept in ARMI and is what makes it a framework. :acceptance_criteria: A plugin can access run input data and results from other plugins. :status: accepted .. req:: The operator package shall allow tight coupling between analysis plugins. 
:id: R_ARMI_OPERATOR_PHYSICS :subtype: functional :basis: Tight coupling is a mechanism that allows for simultaneous convergence of analysis results. :acceptance_criteria: An operator can call each interface multiple times at a given time node, subject to some convergence criteria. :status: accepted .. req:: The operator package shall provide a means to perform parallel computations. :id: R_ARMI_OPERATOR_MPI :subtype: functional :basis: Parallel computations provide scalable solutions to computational performance. :acceptance_criteria: An operator can execute logic dependent on its MPI rank. :status: accepted .. req:: ARMI shall allow users to customize how time is discretized for modeling. :id: R_ARMI_FW_HISTORY :subtype: functional :basis: Analysts will want to model the time evolution of reactors. And discretizing time is a common need to nearly all scientific modeling. :acceptance_criteria: Specify number of cycles and burn steps and observe the interfaces are run at those time nodes. :status: accepted .. req:: An application shall consist of a collection of plugins. :id: R_ARMI_APP_PLUGINS :subtype: functional :basis: Plugins are the major mechanism for adding code to a simulations. :acceptance_criteria: Construct an ARMI application from a collection of plugins. :status: accepted .. req:: An operator shall be built from user settings. :id: R_ARMI_OPERATOR_SETTINGS :subtype: functional :basis: Configuring an operator allows users to customize a simulation. :acceptance_criteria: Construct an operator that depends on user settings. :status: accepted .. req:: The operator package shall expose an ordered list of interfaces that is looped over at each time step. :id: R_ARMI_OPERATOR_INTERFACES :subtype: functional :basis: Reactor modeling is controlled by looping over an ordered list of interfaces at each time node. :acceptance_criteria: Show that interfaces are executed in order at each time step. :status: accepted .. 
req:: The interface package shall allow code execution at important operational points in time. :id: R_ARMI_INTERFACE :subtype: functional :basis: Defining code to be run at specific times allows users to control the reactor simulation and analysis. :acceptance_criteria: Show that interfaces allow code to be execute at BOL, EOL, BOC, and EOC. :status: accepted .. req:: The plugin module shall allow the creation of a plugin, which adds code to the application. :id: R_ARMI_PLUGIN :subtype: functional :basis: The primary way developers will add code to the simulation is by writing an ARMI plugin. :acceptance_criteria: Load a plugin into an application. :status: accepted .. req:: Plugins shall add interfaces to the operator. :id: R_ARMI_PLUGIN_INTERFACES :subtype: functional :basis: The mechanism by which plugins add code to the simulation is that plugins can register interfaces on the operator. :acceptance_criteria: Register multiple interfaces from a given plugin. :status: accepted .. req:: Plugins shall have the ability to add parameters to the reactor data model. :id: R_ARMI_PLUGIN_PARAMS :subtype: functional :basis: An important feature of plugins is that they can add parameters to the reactor model, thus increasing the variety of physical values the simulations can track. :acceptance_criteria: Register multiple parameters from a given plugin. :status: accepted .. req:: Plugins shall have the ability to add custom settings to the simulation. :id: R_ARMI_PLUGIN_SETTINGS :subtype: functional :basis: An important feature of plugins is that they can add settings that can be used to configure a simulation. :acceptance_criteria: Add multiple settings from a given plugin. :status: accepted .. ## Note: These 12 requirements define ARMI at a high level. They will rarely change. ================================================ FILE: doc/qa_docs/srsd/materials_reqs.rst ================================================ .. 
_armi_mats: Materials Package ----------------- This section provides requirements for the :py:mod:`armi.materials` package within the framework, which contains ARMI's system for defining materials. The materials system in ARMI allows for an extreme amount of flexibility in defining materials with temperature-dependent properties like density, linear expansion factor, and the like. ARMI also comes packaged with a small set of basic materials, though these are meant only as example materials and (because ARMI is open source) these materials can not include proprietary or classified information. As such, we explicitly forbid the use of the example ARMI materials in safety-related modeling and will not be writing requirements on those materials. Functional Requirements +++++++++++++++++++++++ .. req:: The materials package shall allow for material classes to be searched across packages in a defined namespace. :id: R_ARMI_MAT_NAMESPACE :subtype: functional :basis: This is just a design choice in ARMI, to define how new material definitions are added to a simulation. :acceptance_criteria: Import a material class from a package in the ARMI default namespace. :status: accepted .. req:: The materials package shall allow for multiple material collections to be defined with an order of precedence in the case of duplicates. :id: R_ARMI_MAT_ORDER :subtype: functional :basis: The ability to represent physical material properties is a basic need for nuclear modeling. :acceptance_criteria: Only the preferred material class is returned when multiple material classes with the same name are defined. :status: accepted .. req:: The materials package shall provide the capability to retrieve material properties at different temperatures. :id: R_ARMI_MAT_PROPERTIES :subtype: functional :basis: The ability to represent physical material properties is a basic need for nuclear modeling. 
:acceptance_criteria: Instantiate a Material instance and show that the instance has the appropriate method names defined and examine the methods signatures to ensure they allow for temperature inputs. :status: accepted .. req:: The materials package shall allow for user-input to impact the materials in a component. :id: R_ARMI_MAT_USER_INPUT :subtype: functional :basis: The ability to represent physical material properties is a basic need for nuclear modeling. :acceptance_criteria: Instantiate a reactor from blueprints that uses the material modifications and show that the modifications are used. :status: accepted .. req:: Materials shall generate nuclide mass fractions at instantiation. :id: R_ARMI_MAT_FRACS :subtype: functional :basis: The ability to represent physical material properties is a basic need for nuclear modeling. :acceptance_criteria: Show that the material mass fractions are populated when the object is created. :status: accepted .. req:: The materials package shall provide a class for fluids that defines the thermal expansion coefficient as identically zero. :id: R_ARMI_MAT_FLUID :subtype: functional :basis: Thermal expansion coefficients need to be zero for fluids so that fluid components cannot drive thermal expansion of their own or linked component dimensions. :acceptance_criteria: Instantiate a Fluid material and show that its linear expansion is identically zero. :status: accepted ================================================ FILE: doc/qa_docs/srsd/nucDirectory_reqs.rst ================================================ .. _armi_nuc_dirs: Nuclide Directory Package ------------------------- This section provides requirements for the :py:mod:`armi.nucDirectory` package within the framework, which is responsible for defining elemental and isotopic information that is used for reactor physics evaluations. Functional Requirements +++++++++++++++++++++++ .. 
req:: The nucDirectory package shall provide an interface for querying basic data for elements of the periodic table. :id: R_ARMI_ND_ELEMENTS :subtype: functional :basis: Element data is needed for converting between mass and number fractions, expanding elements into isotopes, and other tasks. :acceptance_criteria: Query elements by Z, name, and symbol. :status: accepted .. req:: The nucDirectory package shall provide an interface for querying basic data for important isotopes and isomers. :id: R_ARMI_ND_ISOTOPES :subtype: functional :basis: Isotope data is used to aid in construction of cross-section generation models, to convert between mass and number fractions, and other tasks. :acceptance_criteria: Query isotopes and isomers by name, label, MC2-3 ID, MCNP ID, and AAAZZZS ID. :status: accepted .. req:: The nucDirectory package shall store data separately from code. :id: R_ARMI_ND_DATA :subtype: functional :basis: Storing data separately from code is good practice in scientific programs. :acceptance_criteria: The nucDirectory element, isotope, and isomer data is stored in plain text files in a folder next to the code. :status: accepted ================================================ FILE: doc/qa_docs/srsd/nuclearDataIO_reqs.rst ================================================ .. _armi_nuc_data: Nuclear Data I/O Package ------------------------ This section provides requirements for the :py:mod:`armi.nuclearDataIO` package within the framework, which handles reading and writing of standard interface files for reactor physics software (e.g., cross section data). Functional Requirements +++++++++++++++++++++++ .. req:: The nuclearDataIO package shall be capable of reading and writing ISOTXS files into and out of mutable data structures. :id: R_ARMI_NUCDATA_ISOTXS :subtype: functional :basis: These files are the MC2 output format. 
:acceptance_criteria: Read one or more ISOTXS files and its basic input data correctly, and correctly write that data back out to a single file. :status: accepted .. req:: The nuclearDataIO package shall be capable of reading and writing GAMISO files into and out of mutable data structures. :id: R_ARMI_NUCDATA_GAMISO :subtype: functional :basis: These files are generated by MCC-v3. :acceptance_criteria: Read a GAMISO file and its basic input data correctly, and correctly write that data back out. :status: accepted .. req:: The nuclearDataIO package shall be capable of reading and writing GEODST files into and out of mutable data structures. :id: R_ARMI_NUCDATA_GEODST :subtype: functional :basis: These files are generated by DIF3D. :acceptance_criteria: Read a GEODST file and its basic input data correctly, and correctly write that data back out. :status: accepted .. req:: The nuclearDataIO package shall be capable of reading and writing DIF3D files into and out of mutable data structures. :id: R_ARMI_NUCDATA_DIF3D :subtype: functional :basis: These files are used in DIF3D. :acceptance_criteria: Read a DIF3D file and its basic input data correctly, and correctly write that data back out. :status: accepted .. req:: The nuclearDataIO package shall be capable of reading and writing PMATRX files into and out of mutable data structures. :id: R_ARMI_NUCDATA_PMATRX :subtype: functional :basis: These files are generated by MCC-v3 and used in GAMSOR. :acceptance_criteria: Read a PMATRX file and its basic input data correctly, and correctly write that data back out. :status: accepted .. req:: The nuclearDataIO package shall be capable of reading and writing DLAYXS files into and out of mutable data structures. :id: R_ARMI_NUCDATA_DLAYXS :subtype: functional :basis: These files are used to generate kinetics parameters. :acceptance_criteria: Read a DLAYXS file and its basic input data correctly, and correctly write that data back out. :status: accepted .. 
req:: The nuclearDataIO package shall be able to compute macroscopic cross sections from microscopic cross sections and number densities. :id: R_ARMI_NUCDATA_MACRO :subtype: functional :basis: Macroscopic cross sections are needed by many analysts. :acceptance_criteria: Compute macroscopic cross sections from microscopic cross sections and number densities. :status: accepted ================================================ FILE: doc/qa_docs/srsd/physics_reqs.rst ================================================ .. _armi_physics: Physics Package --------------- This section provides requirements for the :py:mod:`armi.physics` package within the framework, which contains interfaces for important physics modeling and analysis in nuclear reactors. It is important to note that ARMI is a framework, and as such does not generally include the actual science or engineering calculations for these topics. For instance, ARMI has an interface for "safety analysis", but this interface is just a *place* for developers to implement their own safety analysis code. It would be inappropriate to include the actual science or engineering calculations for a detailed safety analysis of a particular reactor in ARMI because ARMI is meant only to house the code to let nuclear modeling and analysis work, not the analysis itself. Functional Requirements +++++++++++++++++++++++ .. ## globalFlux ###################### .. req:: ARMI shall ensure that the computed block-wise power is consistent with the power specified in the reactor data model. :id: R_ARMI_FLUX_CHECK_POWER :subtype: functional :status: accepted :basis: This requirement ensures that the neutronics solver scales the neutron flux appropriately such that the computed block-wise power captures the specified global power. :acceptance_criteria: Test that throws an error when the summed block-wise powers do not match the specified total power. .. req:: ARMI shall provide an interface for querying options relevant to neutronics solvers. 
:id: R_ARMI_FLUX_OPTIONS :subtype: functional :status: accepted :basis: Reactor analysts will want to use popular neutronics solvers, e.g. DIF3D-Variant. :acceptance_criteria: The interface correctly returns specified neutronics solver options. .. req:: ARMI shall allow modification of the reactor geometry when needed for neutronics solver execution. :id: R_ARMI_FLUX_GEOM_TRANSFORM :subtype: functional :status: accepted :basis: Axial expansion can cause a disjointed mesh which cannot be resolved by deterministic neutronics solvers. :acceptance_criteria: Geometry transformations are performed before executing a neutronics solve. .. req:: ARMI shall calculate neutron reaction rates for a given block. :id: R_ARMI_FLUX_RX_RATES :subtype: functional :status: accepted :basis: This is a generic ARMI feature implemented to aid in calculating dose, converting results calculated on one mesh to another, and for comparing reaction rates against experiments. :acceptance_criteria: Calculate accurate reaction rates for a given multigroup flux and cross section library for a wide collection of Blocks. .. req:: ARMI shall be able to calculate DPA and DPA rates from a multigroup neutron flux and DPA cross sections. :id: R_ARMI_FLUX_DPA :subtype: functional :status: accepted :basis: DPA rates are necessary for fuel performance calculations. :acceptance_criteria: The DPA rate is calculated for a composite with an associated multi-group neutron flux. .. ## isotopicDepletion ###################### .. req:: The isotopicDepletion package shall have the ability to generate cross-section tables from a CCCC-based library in a user-specified format. :id: R_ARMI_DEPL_TABLES :subtype: functional :status: accepted :basis: Depletion solvers require cross-sections to be supplied from external sources if not using built-in cross sections. :acceptance_criteria: Produce a table with the specified formatting containing the appropriate cross sections. .. 
req:: The isotopicDepletion package shall provide a base class to track depletable composites. :id: R_ARMI_DEPL_ABC :subtype: functional :status: accepted :basis: Depletion analysis may want a way to track depletable composites. :acceptance_criteria: Store and retrieve depletable objects. .. ## energyGroups ###################### .. req:: The neutronics package shall provide the neutron energy group bounds for a given group structure. :id: R_ARMI_EG_NE :subtype: functional :basis: The bounds define the energy groupings. :acceptance_criteria: Return the correct energy bounds. :status: accepted .. req:: The neutronics package shall return the energy group index which contains the fast energy threshold. :id: R_ARMI_EG_FE :subtype: functional :basis: The energy groups are only useful if a developer can find the correct one easily. :acceptance_criteria: Identify the correct energy group for a given energy threshold. :status: accepted .. ## macroXSGenerationInterface ###################### .. req:: The neutronics package shall be able to build macroscopic cross sections for all blocks. :id: R_ARMI_MACRO_XS :subtype: functional :basis: Most steady-state neutronics workflows will rely on this capability. :acceptance_criteria: Calculate the macroscopic cross sections for a block. :status: accepted .. ## executers ###################### .. req:: The executers module shall provide the ability to run external calculations on an ARMI reactor with configurable options. :id: R_ARMI_EX :subtype: functional :basis: An ARMI plugin needs to be able to wrap an external executable. :acceptance_criteria: Execute a mock external calculation based on an ARMI reactor. :status: accepted .. ## fuelCycle ###################### .. req:: The fuel cycle package shall allow for user-defined assembly shuffling logic to update the reactor model based on reactor state. 
:id: R_ARMI_SHUFFLE :subtype: functional :basis: Shuffle operations can be based on assemblies' burnup state, which may not be known at the start of a run. :acceptance_criteria: Execute user-defined shuffle operations based on a reactor model. :status: accepted .. req:: The fuel cycle package shall be capable of leaving user-specified blocks in place during shuffling operations. :id: R_ARMI_SHUFFLE_STATIONARY :subtype: functional :basis: It may be desirable to leave certain blocks, such as grid plates, in place. :acceptance_criteria: Shuffle an assembly while leaving a specified block in place. :status: accepted .. req:: A hexagonal assembly shall support rotating around the z-axis in 60 degree increments. :id: R_ARMI_ROTATE_HEX :subtype: functional :basis: Rotation of assemblies is common during operation, and requires updating the location of physics data assigned on the assembly. :acceptance_criteria: After rotating a hexagonal assembly, spatial data corresponds to rotating the original assembly data. :status: accepted .. req:: The framework shall provide an algorithm for rotating hexagonal assemblies to equalize burnup. :id: R_ARMI_ROTATE_HEX_BURNUP :subtype: functional :basis: Rotating assemblies to equalize burnup helps maximize fuel utilization and reduces power peaking. :acceptance_criteria: After rotating a hexagonal assembly, confirm the pin with the highest burnup is in the same sector as the pin with the lowest power in the high burnup pin's ring. :status: accepted .. ## crossSectionGroupManager ###################### .. req:: The cross-section group manager package shall run before cross sections are calculated. :id: R_ARMI_XSGM_FREQ :subtype: functional :basis: The cross section groups need to be up to date with the core state at the time that the Lattice Physics Interface is called. :acceptance_criteria: Initiate the cross-section group manager by the same setting that initiates calculating cross sections. 
And ensure the cross-section group manager always runs before cross sections are calculated. :status: accepted .. req:: The cross-section group manager package shall create separate collections of blocks for each combination of user-specified XS type and burnup and/or temperature group. :id: R_ARMI_XSGM_CREATE_XS_GROUPS :subtype: functional :basis: This helps improve the performance of downstream cross section calculations. :acceptance_criteria: Create cross section groups and their representative blocks. :status: accepted .. req:: The cross-section group manager package shall provide routines to create representative blocks for each collection based on user-specified XS type and burnup and/or temperature group. :id: R_ARMI_XSGM_CREATE_REPR_BLOCKS :subtype: functional :basis: The Lattice Physics Interface needs a representative block from which to generate a lattice physics input file. :acceptance_criteria: Create representative blocks using volume-weighted averaging and custom cylindrical averaging. :status: accepted ================================================ FILE: doc/qa_docs/srsd/reactors_reqs.rst ================================================ .. _armi_reactors: Reactors Package ---------------- This section provides requirements for the :py:mod:`armi.reactors` package within the framework, unsurprisingly this is the largest package in ARMI. In this package are sub-packages for fully defining a nuclear reactor, starting from blueprints and all the way through defining the full reactor data model. It is this reactor data object that is critical to the framework; this is how different reactor modeling tools share information. Functional Requirements +++++++++++++++++++++++ .. ## reactors ###################### .. req:: The reactor data model shall contain one core and a collection of ex-core objects, all composites. :id: R_ARMI_R :status: accepted :basis: A shared reactor data model is a fundamental concept in ARMI. 
:acceptance_criteria: Build a reactor data model from a blueprint file, and show it has a core and a spent fuel pool. :subtype: functional .. req:: Assemblies shall be retrievable from the core object by name and location. :id: R_ARMI_R_GET_ASSEM :status: accepted :basis: Useful for analysis, particularly mechanical and control rod analysis. :acceptance_criteria: Retrieve assemblies from the core by name and location. :subtype: functional .. req:: The core shall be able to construct a mesh based on its blocks. :id: R_ARMI_R_MESH :status: accepted :basis: Preservation of material and geometry boundaries is needed for accurate physics calculations. :acceptance_criteria: Construct a mesh from a core object. :subtype: functional .. req:: ARMI shall support third-core symmetry for hexagonal cores. :id: R_ARMI_R_SYMM :status: accepted :basis: Symmetric model definitions allow for easier user setup and reduced computational expense. :acceptance_criteria: Construct a core of full or 1/3-core symmetry. :subtype: functional .. req:: The core shall be able to provide assemblies that are neighbors of a given assembly. :id: R_ARMI_R_FIND_NEIGHBORS :status: accepted :basis: Useful for analysis, particularly mechanical and control rod analysis. :acceptance_criteria: Return neighboring assemblies from a given assembly in a core. :subtype: functional .. req:: ARMI shall provide an ex-core composite to represent spent fuel pools (SFP) for spent fuel assemblies. :id: R_ARMI_SFP :status: accepted :basis: A SFP data model is a fundamental concept in modeling solid fuel reactors. :acceptance_criteria: Build a reactor data model with a SFP, then move an assembly from the reactor core to the SFP and back. :subtype: functional .. ## parameters ###################### .. req:: The parameters package shall provide the capability to define parameters that store values of interest on any Composite. 
:id: R_ARMI_PARAM :status: accepted :basis: The capability to define new parameters is a common need for downstream analysis or plugins. :acceptance_criteria: Ensure that new parameters can be defined and accessed on a Reactor, Core, Assembly, Block, and Component. :subtype: functional .. req:: The parameters package shall allow for some parameters to be defined such that they are not written to the database. :id: R_ARMI_PARAM_DB :status: accepted :basis: Users will require some parameters to remain unwritten to the database file. :acceptance_criteria: A parameter can be filtered from inclusion into the list of parameters written to the database. :subtype: functional .. req:: The parameters package shall provide a way to signal if a parameter needs updating across multiple processes. :id: R_ARMI_PARAM_PARALLEL :status: accepted :basis: Parameters updated on compute nodes must be propagated to the head node. :acceptance_criteria: A parameter has an attribute which signals its last updated status among the processors. :subtype: functional .. req:: The parameters package shall allow for a parameter to be serialized for reading and writing to database files. :id: R_ARMI_PARAM_SERIALIZE :status: accepted :basis: Users need to be able to understand what parameters were involved during a given run after it is completed, both for QA purposes and to begin a new analysis using data from previous analyses. :acceptance_criteria: The Serializer construct can pack and unpack parameter data. :subtype: functional .. ## zones ###################### .. req:: The zones module shall allow for a collection of reactor core locations (a Zone). :id: R_ARMI_ZONE :status: accepted :basis: This is a basic feature of ARMI and is useful for reactivity coefficients analysis. :acceptance_criteria: Store and retrieve locations from a zone that corresponds to a reactor. Also, store and retrieve multiple Zone objects from a Zones object. :subtype: functional .. ## blocks ###################### .. 
req:: The blocks module shall be able to homogenize the components of a hexagonal block. :id: R_ARMI_BLOCK_HOMOG :status: accepted :basis: Homogenizing blocks can improve performance of the uniform mesh converter. :acceptance_criteria: A homogenized hexagonal block has the same mass, dimensions, and pin locations as the block from which it is derived. :subtype: functional .. req:: Blocks shall include information on their location. :id: R_ARMI_BLOCK_POSI :status: accepted :basis: Simulations and post-simulation analysis both require block-level physical quantities. :acceptance_criteria: Any block can be queried to get absolute location and position. :subtype: functional .. req:: The blocks module shall define a hex-shaped block. :id: R_ARMI_BLOCK_HEX :status: accepted :basis: Hexagonal blocks are used in some pin-based reactors. :acceptance_criteria: Verify a block can be created that declares a hexagonal shape. :subtype: functional .. req:: The blocks module shall return the number of pins in a block, when applicable. :id: R_ARMI_BLOCK_NPINS :status: accepted :basis: This is a common need for analysis of pin-based reactors. :acceptance_criteria: Return the number of pins in a valid block. :subtype: functional .. ## assemblies ###################### .. req:: The assemblies module shall define an assembly as a composite type that contains a collection of blocks. :id: R_ARMI_ASSEM_BLOCKS :status: accepted :basis: ARMI must be able to represent assembly-based reactors. :acceptance_criteria: Validate an assembly's type and the types of its children. :subtype: functional .. req:: Assemblies shall include information on their location. :id: R_ARMI_ASSEM_POSI :status: accepted :basis: Assemblies are an important part of pin-type reactor cores, and almost any analysis that uses assemblies will want to know the location of the assemblies. :acceptance_criteria: Any assembly can be queried to get absolute location and position. :subtype: functional .. 
## flags ###################### .. req:: The flags module shall provide unique identifiers (flags) to enable disambiguating composites. :id: R_ARMI_FLAG_DEFINE :subtype: functional :basis: Flags are used to determine how objects should be handled. :acceptance_criteria: No two existing flags have equivalence. :status: accepted .. req:: The set of unique flags in a run shall be extensible without user knowledge of existing flags' values. :id: R_ARMI_FLAG_EXTEND :subtype: functional :basis: Plugins are able to define their own flags. :acceptance_criteria: After adding a new flag, no two flags have equivalence. :status: accepted .. req:: Valid flags shall be convertible to and from strings. :id: R_ARMI_FLAG_TO_STR :subtype: functional :basis: Flags need to be converted to strings for serialization. :acceptance_criteria: A string corresponding to a defined flag is correctly converted to that flag, and show that the flag can be converted back to a string. :status: accepted .. ## geometryConverters ###################### .. req:: ARMI shall be able to convert a hexagonal one-third-core geometry to a full-core geometry, and back again. :id: R_ARMI_THIRD_TO_FULL_CORE :subtype: functional :basis: Useful to improve modeling performance, if the analysis can accept the approximation. :acceptance_criteria: Convert a hexagonal 1/3 core reactor to full, and back again. :status: accepted .. req:: ARMI shall be able to add and remove assemblies along the 120 degree line in a 1/3 core reactor. :id: R_ARMI_ADD_EDGE_ASSEMS :subtype: functional :basis: Helpful for analyses that use 1/3 core hex reactors. :acceptance_criteria: Add and then remove assemblies in a 1/3 core reactor. :status: accepted .. req:: ARMI shall be able to convert a hex core to a representative RZ core. :id: R_ARMI_CONV_3DHEX_TO_2DRZ :subtype: functional :basis: Some downstream analysis requires a 2D R-Z geometry. :acceptance_criteria: Convert a hex core into an RZ core. :status: accepted .. 
## axialExpansionChanger ###################### .. req:: The axial expansion changer shall perform axial thermal expansion and contraction on solid components within a compatible ARMI assembly according to a given axial temperature distribution. :id: R_ARMI_AXIAL_EXP_THERM :subtype: functional :basis: Axial expansion is used to conserve mass and appropriately capture the reactor state under temperature changes. :acceptance_criteria: Perform thermal expansion due to an applied axial temperature distribution. :status: accepted .. req:: The axial expansion changer shall perform axial expansion/contraction given a list of components and corresponding expansion coefficients. :id: R_ARMI_AXIAL_EXP_PRESC :subtype: functional :basis: Axial expansion is used to conserve mass and appropriately capture the reactor state under temperature changes. :acceptance_criteria: Perform axial expansion given a list of components from an assembly and corresponding expansion coefficients. :status: accepted .. req:: The axial expansion changer shall perform expansion during core construction based on block heights at a user-specified temperature. :id: R_ARMI_INP_COLD_HEIGHT :subtype: functional :basis: The typical workflow in ARMI applications is to transcribe component dimensions, which are generally given at room temperatures. :acceptance_criteria: Perform axial expansion during core construction based on block heights at user-specified temperature. :status: accepted .. req:: The axial expansion changer shall allow user-specified target axial expansion components on a given block. :id: R_ARMI_MANUAL_TARG_COMP :subtype: functional :basis: The target axial expansion component influences the conservation of mass in a block. :acceptance_criteria: Set a target component and verify it was set correctly. :status: accepted .. req:: The axial expansion changer shall preserve the total height of a compatible ARMI assembly. 
:id: R_ARMI_ASSEM_HEIGHT_PRES :subtype: functional :basis: Many physics solvers require that the total height of each assembly in the core is consistent. :acceptance_criteria: Perform axial expansion and confirm that the height of the compatible ARMI assembly is preserved. :status: accepted .. ## uniformMesh ###################### .. req:: The uniform mesh converter shall make a copy of the reactor where the new reactor core has a uniform axial mesh. :id: R_ARMI_UMC :subtype: functional :basis: This is used in the global flux calculations. :acceptance_criteria: Convert a reactor to one where the core has a uniform axial mesh. :status: accepted .. req:: The uniform mesh converter shall map select parameters from composites on the original mesh to composites on the new mesh. :id: R_ARMI_UMC_PARAM_FORWARD :subtype: functional :basis: This is used in the global flux calculations. :acceptance_criteria: Create a new reactor with the uniform mesh converter and ensure that the flux and power density block-level parameters are mapped appropriately to the new reactor. :status: accepted .. req:: The uniform mesh converter shall map select parameters from composites on the new mesh to composites on the original mesh. :id: R_ARMI_UMC_PARAM_BACKWARD :subtype: functional :basis: This is used in the global flux calculations. :acceptance_criteria: Create a new reactor with the uniform mesh converter and ensure that the flux and power density block-level parameters are mapped appropriately back to the original reactor. :status: accepted .. req:: The uniform mesh converter shall try to preserve the boundaries of fuel and control material. :id: R_ARMI_UMC_NON_UNIFORM :subtype: functional :basis: Regions with extremely small axial size can cause difficulties for the deterministic neutronics solvers. :acceptance_criteria: Create a reactor with slightly non-uniform mesh and verify after the uniform mesh converter the mesh is still non-uniform. :status: accepted .. 
req:: The uniform mesh converter shall produce a uniform axial mesh with a size no smaller than a user-specified value. :id: R_ARMI_UMC_MIN_MESH :subtype: functional :basis: Regions with extremely small axial size can cause difficulties for the deterministic neutronics solvers. :acceptance_criteria: Create a reactor with a mesh that is smaller than the minimum size. After the uniform mesh conversion the new mesh conforms to the user-specified value. :status: accepted .. ## blockConverters ###################### .. req:: The block converter module shall be able to convert one or more given hexagonal blocks into a single user-configurable representative cylindrical block. :id: R_ARMI_BLOCKCONV_HEX_TO_CYL :subtype: functional :basis: Needed, for example, for generating 1D cross sections for control rods. :acceptance_criteria: Create a cylindrical block from one or more given hexagonal blocks and confirm that the cylindrical block has the appropriate volume fractions and temperatures. :status: accepted .. req:: The block converter module shall be able to homogenize one component into another on a block. :id: R_ARMI_BLOCKCONV :subtype: functional :basis: Needed, for example, for merging wire into coolant or gap into clad to simplify the model. :acceptance_criteria: Homogenize one component into another from a given block and confirm the new components are appropriate. :status: accepted .. ## components ###################### .. req:: The components package shall define a composite corresponding to a physical piece of a reactor. :id: R_ARMI_COMP_DEF :subtype: functional :basis: This is a fundamental design choice in ARMI, to describe a physical reactor. :acceptance_criteria: Create components, and verify their attributes and parameters. :status: accepted .. req:: A component's dimensions shall be calculable for any temperature. :id: R_ARMI_COMP_DIMS :subtype: functional :basis: Users require access to dimensions at perturbed temperatures. 
:acceptance_criteria: Calculate a component's dimensions at a variety of temperatures. :status: accepted .. req:: Components shall be able to compute dimensions, areas, and volumes that reflect their current state. :id: R_ARMI_COMP_VOL :subtype: functional :basis: It is necessary to be able to compute areas and volumes when state changes. :acceptance_criteria: Calculate volumes/areas, clear the cache, change the temperature, and recalculate volumes/areas. :status: accepted .. req:: Components shall allow for constituent nuclide fractions to be modified. :id: R_ARMI_COMP_NUCLIDE_FRACS :subtype: functional :basis: The ability to modify nuclide fractions is a common need in reactor analysis. :acceptance_criteria: Modify nuclide fractions on a component. :status: accepted .. req:: Components shall be made of one-and-only-one material or homogenized material. :id: R_ARMI_COMP_1MAT :subtype: functional :basis: This is an ARMI design choice. :acceptance_criteria: Create a component with a given material, and retrieve that material. :status: accepted .. req:: Components shall be associated with material properties. :id: R_ARMI_COMP_MAT :subtype: functional :basis: Users require access to material properties for a given component. :acceptance_criteria: Get material properties from a component material. :status: accepted .. req:: Components shall enable an ordering based on their outermost component dimensions. :id: R_ARMI_COMP_ORDER :subtype: functional :basis: It is desirable to know which components are located physically inside of others. :acceptance_criteria: Order a collection of components, based on their dimensions. :status: accepted .. req:: The components package shall define components with several basic interrogable shapes. :id: R_ARMI_COMP_SHAPES :subtype: functional :basis: Modeling real-world reactor geometries requires a variety of shapes. :acceptance_criteria: Create a variety of components with different shapes and query their shape information. 
:status: accepted .. req:: The components package shall handle radial thermal expansion of individual components. :id: R_ARMI_COMP_EXPANSION :subtype: functional :basis: Users need the ability to model thermal expansion of a reactor core. :acceptance_criteria: Calculate radial thermal expansion for a variety of components. :status: accepted .. req:: The components package shall allow the dimensions of fluid components to change based on the solid components adjacent to them. :id: R_ARMI_COMP_FLUID :subtype: functional :basis: The shapes of fluid components are defined externally. :acceptance_criteria: Determine the dimensions of a fluid component, bounded by solids. :status: accepted .. ## composites ###################### .. req:: The composites module shall define an arbitrary physical piece of a reactor with retrievable children in a hierarchical data model. :id: R_ARMI_CMP :subtype: functional :basis: This is a fundamental aspect of the ARMI framework. :acceptance_criteria: Create a composite with children. :status: accepted .. req:: Composites shall be able to be associated with flags. :id: R_ARMI_CMP_FLAG :subtype: functional :basis: Flags are used to provide context as to what a composite object represents. :acceptance_criteria: Give a composite one or more flags. :status: accepted .. req:: Composites shall have their own parameter collections. :id: R_ARMI_CMP_PARAMS :subtype: functional :basis: Parameters should live on the part of the model which they describe. :acceptance_criteria: Query a composite's parameter collection. :status: accepted .. req:: The total mass of specified nuclides in a composite shall be retrievable. :id: R_ARMI_CMP_GET_MASS :subtype: functional :basis: Downstream analysis will want to get masses. :acceptance_criteria: Return the mass of specified nuclides in a composite. :status: accepted .. req:: Composites shall allow synchronization of state across compute nodes. 
:id: R_ARMI_CMP_MPI :subtype: functional :basis: Parallel executions of ARMI require synchronization of reactors on different nodes. :acceptance_criteria: Synchronize a reactor's state across compute processes. :status: accepted .. req:: The homogenized number densities of specified nuclides in a composite shall be retrievable. :id: R_ARMI_CMP_GET_NDENS :subtype: functional :basis: The ability to retrieve homogenized number densities is a common need in reactor analysis. :acceptance_criteria: Retrieve homogenized number densities of specified nuclides from a composite. :status: accepted .. req:: Composites shall be able to return number densities for all their nuclides. :id: R_ARMI_CMP_NUC :subtype: functional :basis: Analysts not using lumped fission products need this capability. :acceptance_criteria: Return the number densities for all nuclides for a variety of composites. :status: accepted .. ## grids ###################### .. req:: The grids package shall allow for pieces of the reactor to be organized into regular-pitch hexagonal lattices (grids). :id: R_ARMI_GRID_HEX :subtype: functional :basis: This is necessary for representing reactor geometry. :acceptance_criteria: Construct a hex grid from pitch and number of rings, and return both. :status: accepted .. req:: The grids package shall be able to represent 1/3-symmetry or full hexagonal grids. :id: R_ARMI_GRID_SYMMETRY :subtype: functional :basis: Analysts frequently want symmetrical representations of a reactor for efficiency reasons. :acceptance_criteria: Construct a 1/3 symmetry and full grid and show they have the correct number of constituents. :status: accepted .. req:: A hexagonal grid with 1/3 symmetry shall be able to determine if a constituent object is in the first third. :id: R_ARMI_GRID_SYMMETRY_LOC :subtype: functional :basis: Helpful for analysts doing analysis on third-core hex grids. :acceptance_criteria: Correctly identify an object that is in the first 1/3 and one that is not. 
:status: accepted .. req:: A hexagonal grid with 1/3 symmetry shall be capable of retrieving equivalent contents based on 1/3 symmetry. :id: R_ARMI_GRID_EQUIVALENTS :subtype: functional :basis: This is necessary for shuffle of 1/3-core symmetry reactor models. :acceptance_criteria: Return the zero or 2 elements which are in symmetric positions to a given element. :status: accepted .. req:: Grids shall be able to nest. :id: R_ARMI_GRID_NEST :subtype: functional :basis: This is typical of reactor geometries, for instance pin grids are nested inside of assembly grids. :acceptance_criteria: Nest one grid within another. :status: accepted .. req:: Hexagonal grids shall be either x-type or y-type. :id: R_ARMI_GRID_HEX_TYPE :subtype: functional :basis: This is typical of reactor geometries, for instance pin grids inside of assembly grids. :acceptance_criteria: Construct a "points-up" and a "flats-up" grid. :status: accepted .. req:: The grids package shall be able to store components with multiplicity greater than 1. :id: R_ARMI_GRID_MULT :subtype: functional :basis: The blueprints system allows for components with multiplicity greater than 1, when there are components that are compositionally identical. :acceptance_criteria: Build a grid with components with multiplicity greater than 1. :status: accepted .. req:: The grids package shall be able to return the coordinate location of any grid element in a global coordinate system. :id: R_ARMI_GRID_GLOBAL_POS :subtype: functional :basis: This is a common need of a reactor analysis system. :acceptance_criteria: Return a hexagonal grid element's location. :status: accepted .. req:: The grids package shall be able to return the location of all instances of grid components with multiplicity greater than 1. :id: R_ARMI_GRID_ELEM_LOC :subtype: functional :basis: This is a necessary result of having component multiplicity. :acceptance_criteria: Return a hexagonal grid element's locations when its multiplicity is greater than 1. 
:status: accepted I/O Requirements ++++++++++++++++ .. req:: The blueprints package shall allow the user to define a component using a custom text file. :id: R_ARMI_BP_COMP :subtype: io :basis: This is a basic ARMI feature, that we have custom text blueprint files. :acceptance_criteria: Read a blueprint file and verify a component was correctly created. :status: accepted .. req:: The blueprints package shall allow the user to define a block using a custom text file. :id: R_ARMI_BP_BLOCK :subtype: io :basis: This is a basic ARMI feature, that we have custom text blueprint files. :acceptance_criteria: Read a blueprint file and verify a block was correctly created with shape, material, and input temperature. :status: accepted .. req:: The blueprints package shall allow the user to define an assembly using a custom text file. :id: R_ARMI_BP_ASSEM :subtype: io :basis: This is a basic ARMI feature, that we have custom text blueprint files. :acceptance_criteria: Read a blueprint file and verify an assembly was correctly created. :status: accepted .. req:: The blueprints package shall allow the user to define a core using a custom text file. :id: R_ARMI_BP_CORE :subtype: io :basis: This is a basic ARMI feature, that we have custom text blueprint files. :acceptance_criteria: Read a blueprint file and verify a core was correctly created. :status: accepted .. req:: The blueprints package shall allow the user to define a lattice map in a reactor core using a custom text file. :id: R_ARMI_BP_GRID :subtype: io :basis: This is a basic ARMI feature, that we have custom text blueprint files. :acceptance_criteria: Read a blueprint file and verify a lattice grid was correctly created at the assembly and pin levels. :status: accepted .. req:: The blueprints package shall allow the user to define a reactor, including both a core and a spent fuel pool using a custom text file. 
:id: R_ARMI_BP_SYSTEMS :subtype: io :basis: This is a basic ARMI feature, that we have custom text blueprint files. :acceptance_criteria: Read a blueprint file and verify a reactor was correctly created. :status: accepted .. req:: The blueprints package shall allow the user to define isotopes which should be depleted. :id: R_ARMI_BP_NUC_FLAGS :subtype: io :basis: This is a basic ARMI feature, that we have custom text blueprint files. :acceptance_criteria: Read a blueprint file and verify the collection of depleted nuclide flags. :status: accepted .. req:: The blueprints package shall allow the user to produce a valid blueprints file from an in-memory blueprint object. :id: R_ARMI_BP_TO_DB :subtype: io :basis: The capability to export custom blueprints input files from an in-memory blueprints object is a fundamental ARMI feature. :acceptance_criteria: Write a blueprint file from an in-memory blueprint object. :status: accepted ================================================ FILE: doc/qa_docs/srsd/runLog_reqs.rst ================================================ .. _armi_log: RunLog Module ------------- This section provides requirements for the simulation logging module, :py:mod:`armi.runLog`, which manages the reporting of messages to the user. Functional Requirements +++++++++++++++++++++++ .. req:: The runLog module shall allow for a simulation-wide log with user-specified verbosity. :id: R_ARMI_LOG :subtype: functional :status: accepted :basis: Logging simulation information is required for analysts to document and verify simulation results. :acceptance_criteria: Messages are written to the log with specified verbosity. I/O Requirements ++++++++++++++++ .. req:: The runLog module shall allow logging to the screen, to a file, or both. :id: R_ARMI_LOG_IO :subtype: io :status: accepted :basis: Logging simulation information is required for analysts to document and verify simulation results. :acceptance_criteria: Messages can be written to log files and log streams. 
.. req:: The runLog module shall allow log files to be combined from different processes. :id: R_ARMI_LOG_MPI :subtype: io :status: accepted :basis: Logging simulation information is required for analysts to document and verify simulation results. :acceptance_criteria: Messages in different log files can be concatenated. ================================================ FILE: doc/qa_docs/srsd/settings_reqs.rst ================================================ .. _armi_settings: Settings Package ---------------- This section provides requirements for the :py:mod:`armi.settings` package, which is responsible for providing a centralized means for users to configure an application. This package can serialize and deserialize user settings from a human-readable text file. When a simulation is being initialized, settings validation is performed to enforce things like type consistency, and to find incompatible settings. To make settings easier to understand and use, once a simulation has been initialized, settings become immutable. Functional Requirements +++++++++++++++++++++++ .. req:: The settings package shall allow the configuration of a simulation through user settings. :id: R_ARMI_SETTING :status: accepted :basis: Settings are how the user configures their run. :acceptance_criteria: Create and edit a set of settings that can be used to initialize a run. :subtype: functional .. req:: All settings must have default values. :id: R_ARMI_SETTINGS_DEFAULTS :status: accepted :basis: Enforcing a default recommendation for a setting allows for ease-of-use of the system :acceptance_criteria: A setting cannot be created without providing a default value. :subtype: functional .. req:: Settings shall support rules to validate and customize each setting's behavior. :id: R_ARMI_SETTINGS_RULES :status: accepted :basis: Validation of user settings adds quality assurance pedigree and reduces user errors. :acceptance_criteria: Query a setting and make decisions based on its value. 
:subtype: functional .. req:: The settings package shall supply the total reactor power at each time step of a simulation. :id: R_ARMI_SETTINGS_POWER :status: accepted :basis: Power history is needed by many downstream plugins and methodologies for normalization. :acceptance_criteria: Retrieve the power fractions series from the operator and access the value at a given time step. :subtype: functional .. req:: The settings package shall allow users to define basic metadata for the run. :id: R_ARMI_SETTINGS_META :status: accepted :basis: Storing metadata in the settings file makes it easier for analysts to differentiate many settings files, and describe the simulations they configure. :acceptance_criteria: Set and retrieve the basic metadata settings. :subtype: functional I/O Requirements ++++++++++++++++ .. req:: The settings package shall use human-readable, plain-text files as input and output. :id: R_ARMI_SETTINGS_IO_TXT :status: accepted :basis: Settings are how the user configures their run. :acceptance_criteria: Show a settings object can be created from a text file with a well-specific format, and written back out to a text file. :subtype: io ================================================ FILE: doc/qa_docs/srsd/utils_reqs.rst ================================================ .. _armi_utils: Utilities Package ----------------- This section provides requirements for the :py:mod:`armi.utils` package within the framework, which is one of the smaller high-level packages in ARMI. This package contains a small set of basic utilities which are meant to be generally useful in ARMI and in the wider ARMI ecosystem. While most of the code in this section does not rise to the level of a "requirement", some does. Functional Requirements +++++++++++++++++++++++ .. req:: ARMI shall provide a utility to convert mass densities and fractions to number densities. :id: R_ARMI_UTIL_MASS2N_DENS :subtype: functional :basis: This is a widely used utility. 
:acceptance_criteria: Provide a series of mass densities and fractions and verify the returned number densities. :status: accepted .. req:: ARMI shall provide a utility to expand elemental mass fractions to natural nuclides. :id: R_ARMI_UTIL_EXP_MASS_FRACS :subtype: functional :basis: This is a widely used utility. :acceptance_criteria: Expand an element's mass into a list of its naturally occurring nuclides and their corresponding mass fractions. :status: accepted .. req:: ARMI shall provide a utility to format nuclides and densities into an MCNP material card. :id: R_ARMI_UTIL_MCNP_MAT_CARD :subtype: functional :basis: This will be useful for downstream MCNP plugins. :acceptance_criteria: Create an MCNP material card from a collection of densities. :status: accepted ================================================ FILE: doc/qa_docs/srsd.rst ================================================ Software Requirements Specification Document (SRSD) =================================================== Purpose ------- This Software Requirements Specification Document (SRSD) is prepared for the Advanced Reactor Modeling Interface (ARMI) framework. The purpose of this document is to define the functional requirements, I/O requirements, relevant attributes, and applicable design constraints for ARMI. This SRSD will be accompanied by a Software Design and Implementation Document (SDID), that describes how the requirements are implemented within the software and a Software Test Report (STR), that documents the test plan and reporting of test results. .. _armi_srsd: Introduction ------------ The Advanced Reactor Modeling Interface (ARMI®) is an open-source framework for nuclear reactor design and analysis. Based on Python, ARMI provides a richly-featured toolset for connecting disparate nuclear reactor modeling tools. ARMI is not meant to directly implement the science or engineering aspects of nuclear reactor modeling, but to help the wealth of existing models work together. 
It does this by providing easy-to-use tools for coordinating reactor simulation and analysis workflows. A large part of the power of ARMI is that it provides a flexible in-memory data model of a reactor, which is used to pass information between different external tools. ARMI: * Provides a hub-and-spoke mechanism to standardize communication and coupling between physics kernels and the specialist analysts who use them, * Facilitates the creation and execution of detailed models and complex analysis methodologies, * Provides an ecosystem within which to rapidly and collaboratively build new analysis and physics simulation capabilities, and * Provides useful utilities to assist in reactor development. Because the ARMI software is just a framework for other, much larger nuclear models, ARMI does not contain any proprietary or classified information. This allows ARMI to be open-source software. It also greatly simplifies the software design and maintenance. For instance, ARMI does not have any performance requirements. ARMI has been used to model nuclear reactors for over a decade, and in that time the practical reality is that ARMI is quite light weight and >99% of the run time of a simulation occurs in running other nuclear models. Here are some quick metrics for ARMI's requirements: * :need_count:`type=='req'` Requirements in ARMI * :need_count:`type=='req' and status=='preliminary'` Preliminary Requirements * :need_count:`type=='req' and status=='accepted'` Accepted Requirements * :need_count:`type=='req' and len(implements_back)>0` Requirements with implementations * :need_count:`type=='req' and len(tests_back)>0` Requirements with tests * :need_count:`type=='test'` tests linked to Requirements * :need_count:`type=='impl'` implementations linked to Requirements .. Note each of these docs has their own section header .. include:: srsd/framework_reqs.rst .. include:: srsd/bookkeeping_reqs.rst .. include:: srsd/cases_reqs.rst .. include:: srsd/cli_reqs.rst .. 
include:: srsd/materials_reqs.rst .. include:: srsd/nucDirectory_reqs.rst .. include:: srsd/nuclearDataIO_reqs.rst .. include:: srsd/physics_reqs.rst .. include:: srsd/reactors_reqs.rst .. include:: srsd/runLog_reqs.rst .. include:: srsd/settings_reqs.rst .. include:: srsd/utils_reqs.rst Software Attributes ------------------- ARMI is a Python-based framework, designed to help tie together various nuclear models, written in a variety of languages. ARMI officially supports Python versions 3.9 and up. ARMI is heavily tested and used in both Windows and Linux. More specifically, ARMI is always designed to work in the most modern Windows operating system (Windows 10 and Windows 11 currently). Similarly, ARMI is designed to work with fairly modern versions of Ubuntu (22.04 and 24.04 at the time of writing) and Red Hat (RHEL 7 and 8 currently). Version control for ARMI is achieved using Git and is publicly hosted as open-source software on GitHub. To ensure ARMI remains portable and open-source, it only uses third-party libraries that are similarly fully open-source and that make no onerous demands on ARMI's distribution or legal status. ARMI makes use of a huge suite of unit tests to cover the codebase. The tests are run via Continuous Integration (CI) both internally and publicly. Every unit test must pass on every commit to the ARMI main branch. Also, as part of our rigorous quality system, ARMI enforces tight controls on code style using Ruff as our code formatter and linter. ================================================ FILE: doc/qa_docs/str.rst ================================================ Software Test Report (STR) ========================== Purpose and Scope ----------------- This document is the software test report (STR) for the ARMI framework. .. _ref_armi_default_test_criteria: Default Test Criteria --------------------- The acceptance tests for ARMI requirements are very uniform. They are all unit tests. 
Unless the test states otherwise, all of the following test criteria apply to each ARMI requirement test. Any deviation from these standard conditions will be documented in :numref:`Section %s <ref_armi_test_trace_matrix>` on a test-by-test basis. This section defines some test attributes that all tests here have in common. Testing Approach ^^^^^^^^^^^^^^^^ Software verification testing shall be a part of the software development process and leverage continuous integration (CI) testing to demonstrate the correctness of the software during the development process. CI testing shall occur for each Pull Request (PR) and shall consist of unit testing. No PR will be merged into the main branch until all CI passes successfully. The ARMI framework provides requirements with unit tests meeting acceptance criteria. Specifically, as the ARMI codebase cannot be run as a stand-alone application without external physics kernels or sensor data, any ARMI system tests will necessarily be limited. Thus, software projects leveraging ARMI capabilities are responsible for qualification of their end-use applications under their respective quality assurance commitments. Planned Test Cases, Sequence, and Identification of Stages Required """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" The test cases are described in the test traceability matrix in :numref:`Section %s <ref_armi_test_trace_matrix>`. All tests must be run, and the sequence can be in any order unless otherwise specified for the test in :numref:`Section %s <ref_armi_test_trace_matrix>`. Requirements for Testing Logic Branches """"""""""""""""""""""""""""""""""""""" Tests are written such that each test has only one primary logic path. For tests that do not conform to only one logic path, more information will be defined in the test traceability section of the STR (:numref:`Section %s <ref_armi_test_trace_matrix>`) defining the logic flow in more detail. .. 
_ref_armi_hardware_integration: Requirements for Hardware Integration """"""""""""""""""""""""""""""""""""" The ``ARMI`` software test will be run in modern versions Linux, Windows, and MacOS. Though for documentation brevity, we will only attach the verbose logging to this document for Linux. Criteria for Accepting the Software """"""""""""""""""""""""""""""""""" The acceptance testing must pass with satisfactory results for all tests associated with requirements in the :ref:`Software Requirements Specification Document (SRSD) <armi_srsd>` for the ``ARMI`` software. .. _ref_armi_input_data_requirements: Necessary Inputs to Run Test Cases """""""""""""""""""""""""""""""""" If inputs are necessary to run test cases or to return the system and data back to its original state, the processes will be documented in the test traceability matrix (TTM) in :numref:`Section %s <ref_armi_test_trace_matrix>` (The TTM provides traceability for each test to the required criteria). Otherwise, there are no special inputs necessary to run test cases or steps to restore the system. Required Ranges of Input Parameters for the Test Case(s) """""""""""""""""""""""""""""""""""""""""""""""""""""""" If a test uses a range of inputs, then it will be documented in the TTM in :numref:`Section %s <ref_armi_test_trace_matrix>`. Otherwise, there are no required ranges of inputs for the test case. Expected Results for the Test Case(s) """"""""""""""""""""""""""""""""""""" If a test expects a specific result, it will be documented in the TTM in :numref:`Section %s <ref_armi_test_trace_matrix>`. Otherwise, the expected test result is that no error is raised, which constitutes a passing test. Acceptance Criteria for the Test Case(s) """""""""""""""""""""""""""""""""""""""" The acceptance criteria for the test cases will be described. 
In cases where the SRSD requirement acceptance criteria is acceptable for the test case acceptance criteria, the SRSD requirement acceptance criteria can be referenced by default. .. _ref_armi_record_criteria: Test Record Criteria ^^^^^^^^^^^^^^^^^^^^ The default values for the remaining 12 criteria pertaining to the test record are given in this section below. A test record will be produced after the test is run which contains pertinent information about the execution of the test. This test record will be saved as part of the software test report (STR). Software Tested, Including System Software Used and All Versions """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" The ARMI version will be shown in the test record via standard output logs. Compute Platform and Hardware Used """""""""""""""""""""""""""""""""" The test record will reference the environment upon which the test is run. See :numref:`Section %s <ref_armi_hardware_integration>` for acceptable test environments. Test Equipment and Calibrations """"""""""""""""""""""""""""""" Not applicable for the ``ARMI`` software. .. _ref_armi_run_env: Runtime Environment Including System Software, and Language-Specific Environments """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" The runtime environment including the operating system, hardware, and software configuration will be specified in the test report. If necessary, more detail will be provided for individual tests which utilize custom runtime environments or have dependencies such as custom compiler options. Date of Test """""""""""" The date of the test execution is recorded in the output of the test. Tester or Data Recorder """"""""""""""""""""""" Acceptance tests will be run via automation. Simulation Models Used """""""""""""""""""""" If simulation models beyond what is described elsewhere in the documentation (SRSD, SDID, or STR) are used the simulation models will be documented in the test record. 
Otherwise, this test record criterion is not applicable to the test. Test Problems Identified During Test Planning """"""""""""""""""""""""""""""""""""""""""""" If specific problems such as textbooks or benchmarks are utilized for the test, then the test record will reference those problems. Otherwise, test problems are not applicable to the test record. All Input Data and Output Results and Applicability """"""""""""""""""""""""""""""""""""""""""""""""""" The input data will be recorded per :numref:`Section %s <ref_armi_input_data_requirements>`. Output data will be provided as a pass or fail of the test as part of the test record. Action Taken in connection with Any Deviations Noted """""""""""""""""""""""""""""""""""""""""""""""""""" No actions will have been assumed to be taken based on the test other than pass or fail for the test. If there are exceptions, to this statement, they will be noted in the TTM in :numref:`Section %s <ref_armi_test_trace_matrix>`. Person Evaluating Test Result """"""""""""""""""""""""""""" The reviewer of the document will evaluate the test results. Any failing unit test should result in a release failure. Acceptability """"""""""""" The test record states whether the tests pass or fail. .. _ref_armi_test_trace_matrix: Test Traceability Matrix ------------------------ The requirements and associated tests which demonstrate acceptance of the codebase with the requirements are in the :ref:`SRSD <armi_srsd>`. This section contains a list of all tests and will provide information for any non-default criteria (see :numref:`Section %s <ref_armi_default_test_criteria>` for default criteria). 
Here are some quick metrics for the requirement tests in ARMI: * :need_count:`type=='req' and status=='accepted'` Accepted Requirements in ARMI * :need_count:`type=='req' and status=='accepted' and len(tests_back)>0` Accepted Requirements with tests * :need_count:`type=='test' and id.startswith('T_ARMI')` tests linked to Requirements And here is a full listing of all the tests in ARMI, that are tied to requirements: .. needextract:: :types: test :filter: id.startswith('T_ARMI_') Test Results Report ------------------- This section provides the results of the test case runs for this release of ARMI software. .. _ref_armi_test_env: Testing Environment ^^^^^^^^^^^^^^^^^^^ This section describes the relevant environment under which the tests were run as required by :numref:`Section %s <ref_armi_run_env>`. Note that individual test records have the option to define additional environment information. System Information """""""""""""""""" The logged operating system and processor information proves what environment the software was tested on: .. exec:: from armi.bookkeeping.report.reportingUtils import getSystemInfo return getSystemInfo().replace("\n", "\n\n") Python Version and Packages +++++++++++++++++++++++++++ .. exec:: from pip._internal.operations.freeze import freeze return "\n\n".join(list(freeze())) .. _ref_armi_software_date: Software Tested and Date """""""""""""""""""""""" The software tested and date of testing are below: .. 
exec:: import os import sys from datetime import datetime from armi import __version__ as armiVersion armiCommit = str(os.environ["GIT_COMMIT"]).strip() txt = [f"Date: {datetime.now().strftime('%Y-%m-%d')}"] txt.append(f"Python version: {sys.version}") txt.append(f"ARMI version: {armiVersion}") if armiCommit: txt.append(f"ARMI commit: {armiCommit}") return "\n\n".join(txt) Record of Test Cases ^^^^^^^^^^^^^^^^^^^^ This section includes the resulting test record for each test which together with :numref:`Section %s <ref_armi_test_env>` satisfies the criteria necessary for the creation of the test record defined in :numref:`Section %s <ref_armi_record_criteria>`. .. needtable:: Acceptance test results :types: test :columns: id, title, result :filter: id.startswith('T_ARMI_') :style_row: needs_[[copy('result')]] :colwidths: 30,50,10 :class: longtable Appendix A Pytest Verbose Output -------------------------------- Shown here is the verbose output from pytest. Note that if a test says "skipped" in the first table below (serial unit tests), then it will appear in the "MPI-enabled unit tests" sections below. Some tests can be run in serial and parallel, but some can only be run in parallel. The preference in ARMI is to be explicit about which are which, as long as all the tests are run at least once. Serial unit tests: .. test-results:: ../test_results.xml MPI-enabled unit tests: .. test-results:: ../test_results_mpi1.xml .. test-results:: ../test_results_mpi2.xml .. test-results:: ../test_results_mpi3.xml ================================================ FILE: doc/readme.rst ================================================ .. include:: ../README.rst ================================================ FILE: doc/release/index.rst ================================================ ############# Release Notes ############# You can find a simplified version of the ARMI `Release Notes here <https://github.com/terrapower/armi/releases>`_. 
However, to meet our rigorous quality processes, you can look in the :doc:`/qa_docs/scr/index` section for the fully detailed software change log. ================================================ FILE: doc/skip_str.py ================================================ # Copyright 2025 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A simple helper script to create dummy data files for the STR. If the user wants to build the docs without going through the hassle of running the testing, they can run this simple script which will create some placeholder files for the STR: * pytest_verbose.log * test_results.xml * test_results_mpi1.xml * test_results_mpi2.xml * test_results_mpi3.xml """ def main(): # skip build the STR, if you are running locally with open("pytest_verbose.log", "w") as f: f.write("skipping STR") fileNames = [f"test_results_mpi{i}.xml" for i in range(1, 4)] fileNames.append("test_results.xml") for fileName in fileNames: with open(fileName, "w") as f: f.write("<metadata></metadata>") if __name__ == "__main__": main() ================================================ FILE: doc/tutorials/data_model.nblink ================================================ { "path": "../../armi/tests/tutorials/data_model.ipynb" } ================================================ FILE: doc/tutorials/index.rst ================================================ .. _armi-tutorials: ######### Tutorials ######### You should have ARMI installed and operational by this point. 
The following tutorials demonstrate in more detail how to interact with ARMI. -------------- .. toctree:: :maxdepth: 2 :numbered: nuclide_demo.ipynb materials_demo.ipynb walkthrough_inputs.rst walkthrough_lwr_inputs.rst data_model.ipynb making_your_first_app.rst param_sweep.ipynb pin-rotations.ipynb ================================================ FILE: doc/tutorials/making_your_first_app.rst ================================================ .. Note that this file makes use of Python files in a ``armi-example-app`` folder so that they can be put under testing. .. _armi-make-first-app: ******************************** Making your first ARMI-based App ******************************** In this tutorial we will build a nuclear analysis application that runs (dummy) neutron flux and thermal/hydraulics calculations. Applications that do real analysis can be modeled after this starting point. A complete, working version of this application can be found `here <https://github.com/terrapower/armi-example-app>`_. We'll assume you have the :doc:`ARMI Framework installed </user/user_install>` already. You can make sure it is ready by running the following command in a shell prompt:: (armi) $ python -c "import armi;armi.configure()" You should see an ARMI splash-screen and an ARMI version print out. If you do, you are ready to proceed. .. tip:: If you are having trouble getting it installed, see :ref:`getting-help`. You may need to ensure your ``PYTHONPATH`` variable includes the armi installation directory. .. note:: This tutorial is a companion for the :doc:`/developer/making_armi_based_apps` developer documentation. Starting a new app ================== ARMI-based applications can take on many forms, depending on your workflow. Examples may include: * Application and plugins together under one folder * Application in one folder, plugins in separate ones We will build an application that contains one plugin that runs neutronics and thermal hydraulics in one folder. 
This architecture will be a good starting point for many projects, and can always be separated if needed. From the command line, ``cd`` into a new directory where you'd like to store your application code. Make a folder structure that works as a `normal Python package <https://packaging.python.org/tutorials/packaging-projects/>`_, and create some empty files for us to fill in, like this:: my_armi_project/ myapp/ __init__.py __main__.py app.py plugin.py fluxSolver.py materials.py thermalSolver.py doc/ pyproject.toml README.md LICENSE.md These files are: * The outer :file:`my_armi_project` root directory is a container for your app. The name does not matter to ARMI; you can rename it to anything. * The inner :file:`myapp` directory is the actual Python package for your app. Its name is the Python package name you will use to import anything inside (e.g. ``myapp.plugin``). * :file:`myapp/__init__.py` tells Python that this directory is a Python package. Code in here runs whenever anything in the package is imported. * :file:`myapp/__main__.py` registers the application with the ARMI framework and provides one or more entry points for users of your app (including you!) to start running it. Since code here runs when the package is used as a main, it generally performs any app-specific configuration. * :file:`myapp/app.py` contains the actual app registration code that will be called by :file:`__main__.py`. This can be named anything as long as it is consistent with the registration code. * :file:`myapp/plugin.py` contains the code that defines the physics plugins we will create * :file:`myapp/fluxSolver.py` contains the flux solver * :file:`myapp/thermalSolver.py` contains the thermal/hydraulics solver * :file:`pyproject.toml` the `python package installation file <https://packaging.python.org/en/latest/flow/>`_ to help users install your application. 
* :file:`README.md` and :file:`LICENSE.md` are an optional description and license of your application that would be prominently featured, e.g. in a GitHub repo, if you were to put it there. * :file:`doc/` is an optional folder where your application documentation source may go. If you choose to use Sphinx you can run ``sphinx-quickstart`` in that folder to begin documentation. Registering the app with ARMI ============================= The ARMI Framework contains features to run the "main loop" of typical applications. In order to get access to these, we must register our new app with the ARMI framework. To do this, we put the following code in the top-level :file:`__main__.py` module: .. literalinclude:: armi-example-app/myapp/__main__.py :language: python :caption: ``myapp/__main__.py`` :start-after: tutorial-configure-start :end-before: tutorial-configure-end Similar code will be needed in scripts or other code where you would like your app to be used. .. tip:: You may find it appropriate to use the plugin registration mechanism in some cases rather than the app registration. More info on plugins vs. apps coming soon. Defining the app class ====================== We define our app in the :file:`myapp/app.py` module. For this example, the app class is relatively small: it will just register our one custom plugin. We will actually create the plugin shortly. .. admonition:: Apps vs. plugins vs. interfaces ARMI-based methodologies are broken down into three layers of abstraction. Apps are collections of plugins intended to perform analysis on a certain type of reactor. Plugins are independent and mixable collections of relatively arbitrary code that might bring in special materials, contain certain engineering methodologies, and/or Interfaces with one or more physics kernels. See :doc:`/developer/guide` for more info on architecture. .. 
literalinclude:: armi-example-app/myapp/app.py :language: python :caption: ``myapp/app.py`` Defining the physics plugin =========================== Now we will create the plugin that will coordinate our dummy physics modules. .. admonition:: What are plugins again? Plugins are the basic modular building block of ARMI-based apps. In some cases, one plugin will be associated with one physics kernel (like COBRA or MCNP). This is a reasonable practice when you expect to be mixing and matching various combinations of plugins between related teams. It is also possible to have a plugin that performs a whole cacophony of analyses using multiple codes, which some smaller research teams may find preferable. The flexibility is very broad. See :py:mod:`armi.plugins` more for info. Plugin code can exist in any directory structure in an app. In this app we put it in the :file:`myapp/plugin.py` file. .. note:: For "serious" plugins, we recommend mirroring the ``armi/physics/[subphysics]`` structure of the ARMI Framework :py:mod:`physics plugin subpackage <armi.physics>`. We will start the plugin by pointing to the two physics kernels we wish to register. We hook them in and tell ARMI the ``ORDER`` they should be run in based on the built-in ``STACK_ORDER`` attribute (defined and discussed :py:class:`here <armi.interfaces.STACK_ORDER>`). We will come back to this plugin definition later on to add a little more to the plugin. .. literalinclude:: armi-example-app/myapp/plugin.py :caption: ``myapp/plugin.py`` :language: python Defining custom settings ======================== An important facet of the above plugin is that it takes custom Settings, and has some validation built in for those ``Setting`` values. That is, the plugin registers new settings that can go in the settings file, and help the user define how the simulation runs. The following example boiler plate code defines three settings. 
We define two simple number settings (inlet and outlet temperatures), and we use :py:class:`Query <armi.settings.settingsValidation .Query>` to define validation on those settings. Here, the validation isn't very exciting, we just make sure the temperatures are above zero. That's not particularly physically meaningful, but serves as a simple example. The next setting is a little more complicated, we define a setting ``myAppVersion`` that defines a specific version of our app that this setting file is valid for. And if you try to run a different version you get a nasty warning printed to the screen. .. literalinclude:: armi-example-app/myapp/settings.py :caption: ``myapp/settings.py`` :language: python Creating the physics kernels ============================ So far we have basically been weaving an administrative thread to tell ARMI about the code we want to run. Now we finally get to write the guts of the code that actually does something. In your real app, this code will run your own industrial or research code, or perform your own methodology. Here we just have it make up dummy values representing flux and temperatures. Making the (dummy) flux kernel ------------------------------ In a previous tutorial, we made a function that sets a dummy flux to all parts of the core based on a radial distance from the origin. Here we will reuse that code but package it more formally so that ARMI can actually run it for us from a user perspective. The interface is responsible largely for scheduling activities to run at various time points. For a flux calculation, we want it to compute at every single time node, so we use the :py:meth:`armi.interfaces.Interface.interactEveryNode` hook. These interaction hooks can call arbitrarily complex code. The code could, for example: * Run an external executable locally * Submit an external code to a cloud HPC and wait for it to complete * Run an internal physics tool Here it just does a tiny bit of math locally. .. 
literalinclude:: armi-example-app/myapp/fluxSolver.py :caption: ``myapp/fluxSolver.py`` :language: python Making the thermal/hydraulics kernel ------------------------------------------ Since we told the ARMI plugin to schedule the flux solver before thermal/hydraulics solver via the ``ORDER`` attribute, we can depend on there being up-to-date block-level ``power`` state data loaded onto the ARMI reactor by the time this thermal/hydraulics solver gets called by the ARMI main loop. We'll make a somewhat meaningful (but still totally academic) flow solver here that uses energy conservation to determine an idealized coolant flow rate. To do this it will compute the total power produced by each assembly to get the required mass flow rate and then apply that mass flow rate from the bottom of the assembly to the top, computing a block-level temperature (and flow velocity) distribution as we go. .. math:: \dot{Q} = \dot{m} C_p \Delta T .. literalinclude:: armi-example-app/myapp/thermalSolver.py :caption: ``myapp/thermalSolver.py`` :language: python Adding entry points =================== In order to call our application directly, we need to add the :file:`__main__.py` file to the package. We could add all manner of :py:mod:`entry points <armi.cli.entryPoint>` here for different operations we want our application to perform. If you want to add :doc:`your own entry points </developer/entrypoints>`, you have to register them with the :py:meth:`armi.plugins.ArmiPlugin.defineEntryPoints` hook. For now, we can just inherit from the default ARMI entry points (including ``run``) by adding the following code to what we already have in :file:`myapp/__main__.py`: .. literalinclude:: armi-example-app/myapp/__main__.py :language: python :caption: ``myapp/__main__.py`` :start-after: tutorial-entry-point-start :end-before: tutorial-entry-point-end .. tip:: Entry points are phenomenal places to put useful analysis scripts that are limited in scope to the scope of the application. 
Running the app and debugging ============================= We are now ready to execute our application. Even though it still contains an issue, we will run it now to get a feel for the iterative debugging process (sometimes lovingly called ARMI whack-a-mole). We must make sure our ``PYTHONPATH`` contains both the armi framework itself as well as the directory that contains our app. For testing, an example value for this might be:: $ export PYTHONPATH=/path/to/armi:/path/to/my_armi_project .. admonition:: Windows tip If you're using Windows, the slashes will be the other way, you use ``set`` instead of ``export``, and you use ``;`` to separate entries (or just use the GUI). .. admonition:: Submodule tip In development, we have found it convenient to use git submodules to contain the armi framework and pointers to other plugins you may need. If you do this, you can set the ``sys.path`` directly in the ``__main__`` file and not have to worry about ``PYTHONPATH`` nearly as much. Make a run directory with some input files in it. You can use the same SFR input files we've used in previous tutorials for starters (but quickly transition to your own inputs for your own interests!). Here are the files you can download into the run directory. * :download:`Blueprints <../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml>` * :download:`Settings <../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml>` * :download:`Core map <../../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml>` * :download:`Fuel management <../../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py>` Then, run your app!:: (armi) $ python -m myapp run anl-afci-177.yaml The code will run for a while and you will see your physics plugins in the interface stack, but will run into an error:: NotImplementedError: Material Sodium does not implement heatCapacity The included academic Sodium material in the ARMI material library doesn't have any heat capacity! 
Here we can either add heat capacity to the material and submit a pull request to include it in the ARMI Framework (preferred for generic things), or make our own material and register it through the plugin. .. admonition:: Yet another way You could alternatively make a separate plugin that only has your team's special material properties. Adding a new material --------------------- Let's just add a subclass of sodium in our plugin that has a heat capacity defined. Make your new material in a new module called :file:`myapp/materials.py`: .. literalinclude:: armi-example-app/myapp/materials.py :caption: ``myapp/materials.py`` :language: python But wait! Now there are **two** materials with the name *Sodium* in ARMI. Which will be chosen? ARMI uses a namespace order controlled by :py:func:`armi.materials.setMaterialNamespaceOrder` which can be set either programmatically (in an app) or at runtime (via the ``materialNamespaceOrder`` user setting). In our case, we want to set it at the app level, so we will yet again add more to the :file:`myapp/__main__.py` file: .. literalinclude:: armi-example-app/myapp/__main__.py :language: python :caption: ``myapp/__main__.py`` :start-after: tutorial-material-start :end-before: tutorial-material-end .. admonition:: Why ``__main__.py``? We put this line in ``__main__.py`` rather than ``__init__.py`` so it only activates when we're explicitly running our app. If we put it in ``__init__`` it would change the order even in situations where code from anywhere within our app was imported, possibly conflicting with another app's needs. Now ARMI should find our new updated Sodium material and get past that error. Run it once again:: (armi) $ python -m myapp run anl-afci-177.yaml .. 
tip:: You may want to pipe the output to a log file for convenient viewing with a command like ``python -m myapp run anl-afci-177.yaml > run.stdout`` Checking the output =================== Several output files should have been created in the run directory from that past command. Most important is the ``anl-afci-177.h5`` HDF5 binary database file. You can use this file to bring the ARMI state back to any state point from the run for analysis. To visualize the output in a 3D graphics program like `ParaView <https://www.paraview.org/Wiki/ParaView>`_ or `VisIT <https://wci.llnl.gov/simulation/computer-codes/visit>`_, you can run the ARMI ``vis-file`` entry point, like this:: (armi) $ python -m myapp vis-file -f vtk anl-afci-177.h5 This creates several ``VTK`` files covering different time steps and levels of abstraction (assembly vs. block params). If you load up the block file and plot one of the output params (such as ``THcoolantOutletT`` you can see the outlet temperature going nicely from 360 |deg|\ C to 510 |deg|\ C (as expected given our simple TH solver). .. figure:: /.static/anl-acfi-177-coolant-temperature.jpg :alt: The coolant temperature as seen in ParaView viewing the VTK file. :align: center The coolant temperature as seen in ParaView viewing the VTK file. .. admonition:: Fancy XDMF format The ``-f xdmf`` produces `XDMF files <http://xdmf.org/index.php/XDMF_Model_and_Format>`_ that are lighter-weight than VTK, just pointing the visualization program to the data in the primary ARMI HDF5 file. However it is slightly more finicky and has slightly less support in some tools (looking at VisIT). A generic description of the outputs is provided in :doc:`/user/outputs`. You can add your own outputs from your plugins. Bonus: Ad-hoc UserPlugins ========================= It will often be the case that you are not building an ARMI application from scratch, but you are using a pre-existing ARMI application. 
And while working with this (potentially quite large) ARMI application, you want to add a one-off change. Maybe you want to make a special plot during the run, or do a quick "what-if" modification of the :py:class:`Reactor <armi.reactor.reactors.Reactor>`. These things come up for scientific or engineering work: a quick one-off idea you want to test out and probably only use once. This is where a :py:class:`UserPlugin <armi.plugins.UserPlugin>` come in. There are two parts to defining a :py:class:`UserPlugin <armi.plugins.UserPlugin>`: Define the UserPlugin in Python ------------------------------- This can be done by subclassing :py:class:`armi.plugins.UserPlugin`: .. code-block:: python from armi import plugins from armi.reactor.flags import Flags class UserPluginExample(plugins.UserPlugin): """ This plugin flex-tests the onProcessCoreLoading() hook, and arbitrarily adds "1" to the power ever each fuel block. """ @staticmethod @plugins.HOOKIMPL def onProcessCoreLoading(core, cs, dbLoad): for b in core.getBlocks(Flags.FUEL): b.p.power += 1.0 In most ways, ``UserPluginExample`` above is just a normal :py:class:`ArmiPlugin <armi.plugins.ArmiPlugin>`. You can implement any of the normal :py:class:`ArmiPlugin <armi.plugins.ArmiPlugin>` hooks, like: :py:meth:`exposeInterfaces() <armi.plugins.ArmiPlugin.exposeInterfaces>`, :py:meth:`defineParameters() <armi.plugins.ArmiPlugin.defineParameters>`, and so on. 
The :py:class:`UserPlugin <armi.plugins.UserPlugin>` class is more limited than a regular plugin though, you cannot implement: * :py:meth:`armi.plugins.ArmiPlugin.defineParameters` * :py:meth:`armi.plugins.ArmiPlugin.defineParameterRenames` * :py:meth:`armi.plugins.ArmiPlugin.defineSettings` * :py:meth:`armi.plugins.ArmiPlugin.defineSettingsValidators` Define a list of UserPlugins in the Settings File ------------------------------------------------- In order for your simulation to know about your custom :py:class:`UserPlugin <armi.plugins.UserPlugin>` you need to add a line to your Settings file: .. code-block:: userPlugins: - armi.tests.test_user_plugins.UserPlugin0 - //path/to/my/pluginz.py:UserPlugin1 - C:\\path\to\my\pluginZ.py:UserPlugin2 What we have above is actually an example of including three different plugins via your settings YAML file: * By providing a ``.``-separated ARMI import path (if you included your :py:class:`UserPlugin <armi.plugins.UserPlugin>` in your commit. * By providing a full Linux/Unix/MacOS file path, then a colon (``:``), followed by the class name. * By providing a full Windows file path, then a colon (``:``), followed by the class name. .. |deg| unicode:: U+00B0 ================================================ FILE: doc/tutorials/materials_demo.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# The ARMI Material Library\n", "\n", "While *nuclides* are the microscopic building blocks of nature, their collection into *materials* is what we interact with at the engineering scale. 
The ARMI Framework provides a `Material` class, which has a composition (how many of each nuclide are in the material), and a variety of thermomechanical properties (many of which are temperature dependent), such as:\n", "\n", "* Mass density \n", "* Heat capacity\n", "* Linear or volumetric thermal expansion\n", "* Thermal conductivity\n", "* Solidus/liquidus temperature\n", "\n", "and so on. \n", "\n", "Many of these properties are widely available in the literature for fresh materials. As materials are irradiated, the properties tend to change in complex ways. Material objects can be extended to account for such changes. \n", "\n", "The ARMI Framework comes with a small set of example material definitions. These are generally quite incomplete (often missing temperature dependence), and are of academic quality at best. To do engineering design calculations, users of ARMI are expected to make or otherwise prepare materials. As the ecosystem grows, we hope the material library will mature.\n", "\n", "In any case, here we will explore the use of `Material`s. Let's get an instance of the Uranium Oxide material." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from armi.materials import uraniumOxide\n", "\n", "uo2 = uraniumOxide.UO2()\n", "density500 = uo2.density(Tc=500)\n", "print(f\"The density of UO2 @ T = 500C is {density500:.2f} g/cc\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Taking a look at the composition" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(uo2.massFrac)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The mass fractions of a material, plus its mass density, fully define the composition. 
Conversions between number density/fraction and mass density/fraction are handled on the next level up (on `Component`s), which we will explore soon.\n", "\n", "ARMI automatically thermally-expands materials based on their coefficients of linear expansion. For instance, a piece of Uranium Oxide that's 10 cm at room temperature would be longer at 500 C according to the formula:\n", "\n", "\\begin{equation}\n", "\\frac{\\Delta L}{L_0} = \\alpha \\Delta T\n", "\\end{equation}\n", "\n", "On the reactor model, this all happens behind the scenes. But here at the material library level, we can see it in detail. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "L0 = 10.0\n", "dLL = uo2.linearExpansionFactor(500, 25)\n", "L = L0 * (1 + dLL)\n", "print(f\"Hot length is {L:.4f} cm\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's plot the heat capacity as a function of temperature in K." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", "Tk = np.linspace(300, 2000)\n", "heatCapacity = [uo2.heatCapacity(Tk=ti) for ti in Tk]\n", "plt.plot(Tk, heatCapacity)\n", "plt.title(\"$UO_2$ heat capacity vs. temperature\")\n", "plt.xlabel(\"Temperature (K)\")\n", "plt.ylabel(\"Heat capacity (J/kg-K)\")\n", "plt.grid(ls=\"--\", alpha=0.3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Different physics plugins require different properties to be defined. For pure neutronics runs, mass density and composition is enough. But for thermal/hydraulics runs, heat capacity and thermal conductivity is needed for solids, and more is needed for coolants. As irradiation models are investigated, creep, corrosion, porosity, swelling, and other factors will be necessary. 
" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.13" }, "varInspector": { "cols": { "lenName": 16, "lenType": 16, "lenVar": 40 }, "kernels_config": { "python": { "delete_cmd_postfix": "", "delete_cmd_prefix": "del ", "library": "var_list.py", "varRefreshCmd": "print(var_dic_list())" }, "r": { "delete_cmd_postfix": ") ", "delete_cmd_prefix": "rm(", "library": "var_list.r", "varRefreshCmd": "cat(var_dic_list()) " } }, "types_to_exclude": [ "module", "function", "builtin_function_or_method", "instance", "_Feature" ], "window_display": false } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: doc/tutorials/nuclide_demo.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# The ARMI Nuclide and Element Packages\n", "\n", "One of the key features that the ARMI framework offers is access to nuclide data across an application (recall: a *nuclide* is a particular isotope of an element. Iron-56, Uranium-238, and Boron-10 are all nuclides). This is specifically useful for nuclear engineers so that manual look-ups of nuclide attributes on sources like Wikipedia, Chart of the Nuclides, etc. are not needed. 
\n", "\n", "The available attributes for each nuclide are:\n", "\n", "- Atomic weight/mass, in amu\n", "- Natural abundance\n", "- Atomic number, Z\n", "- Mass number, A\n", "- Half-life\n", "- Neutron yield from spontaneous fission\n", " \n", "Accessing the nuclide data begins with importing the nuclide bases, and optionally, the elements packages:" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "from armi.nucDirectory.elements import Elements\n", "from armi.nucDirectory.nuclideBases import NuclideBases\n", "\n", "elements = Elements()\n", "elements.factory()\n", "nuclideBases = NuclideBases()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "One these packages are imported, there are several module-level global dictionaries that are important to know about, since these are likely what you will be working with when implementing code that requires nuclide data or just when performing data look-ups:\n", "\n", "**Nuclide Bases Global Dictionaries**\n", "\n", "- nuclideBases.byName\n", "- nuclideBases.DBName\n", "- nuclideBases.byLabel\n", "- nuclideBases.byMcc2Id\n", "- nuclideBases.byMcc3Id\n", "- nuclideBases.byMcnpId\n", "- nuclideBases.byAAAZZZSId\n", " \n", "**Elements Global Dictionaries**\n", "\n", "- elements.byZ\n", "- elements.bySymbol\n", "- elements.byName" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Accessing Individual Nuclide Data/Attributes\n", "\n", "Here we will explore retrieving data from a couple nuclides and showing the coupling between nuclide and element definitions. 
For these examples, let's try to answer the following questions:\n", "\n", "- How many total nuclides and elements are defined in the framework?\n", "- What is atomic weight of a selected nuclide?\n", "- What is the natural abundance a selected nuclide?\n", "- Are there any spontaneous fission neutrons for a selected nuclide?\n", "- What is the half-life in seconds for a selected nuclide?\n", "- How many other nuclides for the same element exist for a selected nuclide?\n", " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### How many total nuclides and elements are defined in the framework?" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Number of elements defined in the framework: 120\n", "\n", "Number of nuclides defined in the framework: 4706\n", " - Number of nuclides of type `<class 'armi.nucDirectory.nuclideBases.NuclideBase'>`: 4614\n", " - Number of nuclides of type `<class 'armi.nucDirectory.nuclideBases.NaturalNuclideBase'>`: 84\n", " - Number of nuclides of type `<class 'armi.nucDirectory.nuclideBases.DummyNuclideBase'>`: 2\n", " - Number of nuclides of type `<class 'armi.nucDirectory.nuclideBases.LumpNuclideBase'>`: 6\n" ] } ], "source": [ "import collections\n", "\n", "print(f\"Number of elements defined in the framework: {len(elements.byZ.values())}\")\n", "print(\"\")\n", "print(f\"Number of nuclides defined in the framework: {len(nuclideBases.instances)}\")\n", "nucsByType = collections.defaultdict(list)\n", "for n in nuclideBases.instances:\n", " nucsByType[type(n)].append(n)\n", "\n", "for typ, nucs in nucsByType.items():\n", " print(f\" - Number of nuclides of type `{typ}`: {len(nucs)}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Looking up nuclide and elemental data for U-235" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ 
"<NuclideBase U235: Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\n", "Atomic Weight (amu): 235.043929425\n", "Natural Abundance: 0.007204\n", "Spontaneous Fission Neutron Yield: 1.87\n", "Half-life (seconds): 2.22160758861e+16\n", "\n", "Other nuclides for Uranium:\n", " - <NaturalNuclideBase U: Z:92, W:2.380289e+02, Label:U>\n", " - <NuclideBase U215: Z:92, A:215, S:0, W:2.150262e+02, Label:U215>, HL:7.00000000000e-04, Abund:0.000000e+00>\n", " - <NuclideBase U216: Z:92, A:216, S:0, W:2.160240e+02, Label:U216>, HL:4.50000000000e-03, Abund:0.000000e+00>\n", " - <NuclideBase U217: Z:92, A:217, S:0, W:2.170244e+02, Label:U217>, HL:1.60000000000e-02, Abund:0.000000e+00>\n", " - <NuclideBase U218: Z:92, A:218, S:0, W:2.180235e+02, Label:U218>, HL:6.50000000000e-04, Abund:0.000000e+00>\n", " - <NuclideBase U219: Z:92, A:219, S:0, W:2.190249e+02, Label:U219>, HL:6.00000000000e-05, Abund:0.000000e+00>\n", " - <NuclideBase U220: Z:92, A:220, S:0, W:2.200247e+02, Label:U220>, HL:1.11110000000e+01, Abund:0.000000e+00>\n", " - <NuclideBase U221: Z:92, A:221, S:0, W:2.210264e+02, Label:U221>, HL:6.60000000000e-07, Abund:0.000000e+00>\n", " - <NuclideBase U222: Z:92, A:222, S:0, W:2.220261e+02, Label:U222>, HL:4.70000000000e-06, Abund:0.000000e+00>\n", " - <NuclideBase U223: Z:92, A:223, S:0, W:2.230277e+02, Label:U223>, HL:1.80000000000e-05, Abund:0.000000e+00>\n", " - <NuclideBase U224: Z:92, A:224, S:0, W:2.240276e+02, Label:U224>, HL:8.40000000000e-04, Abund:0.000000e+00>\n", " - <NuclideBase U225: Z:92, A:225, S:0, W:2.250294e+02, Label:U225>, HL:6.90000000000e-02, Abund:0.000000e+00>\n", " - <NuclideBase U226: Z:92, A:226, S:0, W:2.260293e+02, Label:U226>, HL:2.68000000000e-01, Abund:0.000000e+00>\n", " - <NuclideBase U227: Z:92, A:227, S:0, W:2.270312e+02, Label:U227>, HL:6.60000000000e+01, Abund:0.000000e+00>\n", " - <NuclideBase U228: Z:92, A:228, S:0, W:2.280314e+02, Label:U228>, HL:5.46000000000e+02, Abund:0.000000e+00>\n", 
" - <NuclideBase U229: Z:92, A:229, S:0, W:2.290335e+02, Label:U229>, HL:3.48000000000e+03, Abund:0.000000e+00>\n", " - <NuclideBase U230: Z:92, A:230, S:0, W:2.300339e+02, Label:U230>, HL:1.74787200000e+06, Abund:0.000000e+00>\n", " - <NuclideBase U231: Z:92, A:231, S:0, W:2.310363e+02, Label:U231>, HL:3.62880000000e+05, Abund:0.000000e+00>\n", " - <NuclideBase U232: Z:92, A:232, S:0, W:2.320372e+02, Label:U232>, HL:2.17427219965e+09, Abund:0.000000e+00>\n", " - <NuclideBase U233: Z:92, A:233, S:0, W:2.330396e+02, Label:U233>, HL:5.02354704590e+12, Abund:0.000000e+00>\n", " - <NuclideBase U234: Z:92, A:234, S:0, W:2.340410e+02, Label:U234>, HL:7.74722532676e+12, Abund:5.400000e-05>\n", " - <NuclideBase U235: Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\n", " - <NuclideBase U235M: Z:92, A:235, S:1, W:2.350439e+02, Label:U23F>, HL:1.56000000000e+03, Abund:0.000000e+00>\n", " - <NuclideBase U236: Z:92, A:236, S:0, W:2.360456e+02, Label:U236>, HL:7.39063206325e+14, Abund:0.000000e+00>\n", " - <NuclideBase U237: Z:92, A:237, S:0, W:2.370487e+02, Label:U237>, HL:5.83372800000e+05, Abund:0.000000e+00>\n", " - <NuclideBase U238: Z:92, A:238, S:0, W:2.380508e+02, Label:U238>, HL:1.40996345254e+17, Abund:9.927420e-01>\n", " - <NuclideBase U239: Z:92, A:239, S:0, W:2.390543e+02, Label:U239>, HL:1.40700000000e+03, Abund:0.000000e+00>\n", " - <NuclideBase U240: Z:92, A:240, S:0, W:2.400566e+02, Label:U240>, HL:5.07600000000e+04, Abund:0.000000e+00>\n", " - <NuclideBase U241: Z:92, A:241, S:0, W:2.410603e+02, Label:U241>, HL:inf , Abund:0.000000e+00>\n", " - <NuclideBase U242: Z:92, A:242, S:0, W:2.420629e+02, Label:U242>, HL:1.00800000000e+03, Abund:0.000000e+00>\n", " - <NuclideBase U243: Z:92, A:243, S:0, W:2.430674e+02, Label:U243>, HL:inf , Abund:0.000000e+00>\n", " - <NuclideBase U244: Z:92, A:244, S:0, W:2.440679e+02, Label:U244>, HL:inf , Abund:0.000000e+00>\n", " - <NuclideBase U245: Z:92, A:245, S:0, W:2.450708e+02, 
Label:U245>, HL:inf , Abund:0.000000e+00>\n", " - <NuclideBase U246: Z:92, A:246, S:0, W:2.460702e+02, Label:U246>, HL:inf , Abund:0.000000e+00>\n" ] } ], "source": [ "u235 = nuclideBases.byName[\"U235\"]\n", "\n", "print(u235)\n", "print(f\"Atomic Weight (amu): {u235.weight}\")\n", "print(f\"Natural Abundance: {u235.abundance}\")\n", "print(f\"Spontaneous Fission Neutron Yield: {u235.nuSF}\")\n", "print(f\"Half-life (seconds): {u235.halflife}\")\n", "print(\"\")\n", "print(f\"Other nuclides for {elements.byZ[u235.z].name}:\")\n", "for n in elements.byZ[u235.z].nuclides:\n", " print(f\" - {n}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Looking up nuclide and elemental data for Li-7" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "<NuclideBase LI7: Z:3, A:7, S:0, W:7.016004e+00, Label:LI07>, HL:inf , Abund:9.241000e-01>\n", "Atomic Weight (amu): 7.01600439548\n", "Natural Abundance: 0.92410004\n", "Spontaneous Fission Neutron Yield: 0.0\n", "Half-life (seconds): inf\n", "\n", "Other nuclides for Lithium:\n", " - <NaturalNuclideBase LI: Z:3, W:6.940038e+00, Label:LI>\n", " - <NuclideBase LI3: Z:3, A:3, S:0, W:3.030775e+00, Label:LI03>, HL:1.11110000000e+01, Abund:0.000000e+00>\n", " - <NuclideBase LI4: Z:3, A:4, S:0, W:4.027185e+00, Label:LI04>, HL:inf , Abund:0.000000e+00>\n", " - <NuclideBase LI5: Z:3, A:5, S:0, W:5.012538e+00, Label:LI05>, HL:3.70924971603e-22, Abund:0.000000e+00>\n", " - <NuclideBase LI6: Z:3, A:6, S:0, W:6.015123e+00, Label:LI06>, HL:inf , Abund:7.590000e-02>\n", " - <NuclideBase LI7: Z:3, A:7, S:0, W:7.016004e+00, Label:LI07>, HL:inf , Abund:9.241000e-01>\n", " - <NuclideBase LI8: Z:3, A:8, S:0, W:8.022488e+00, Label:LI08>, HL:8.39900000000e-01, Abund:0.000000e+00>\n", " - <NuclideBase LI9: Z:3, A:9, S:0, W:9.026789e+00, Label:LI09>, HL:1.78300000000e-01, Abund:0.000000e+00>\n", " - <NuclideBase LI10: Z:3, A:10, S:0, W:1.003548e+01, 
Label:LI10>, HL:inf , Abund:0.000000e+00>\n", " - <NuclideBase LI11: Z:3, A:11, S:0, W:1.104380e+01, Label:LI11>, HL:8.75000000000e-03, Abund:0.000000e+00>\n", " - <NuclideBase LI12: Z:3, A:12, S:0, W:1.205378e+01, Label:LI12>, HL:inf , Abund:0.000000e+00>\n", " - <NuclideBase LI13: Z:3, A:13, S:0, W:1.306117e+01, Label:LI13>, HL:1.11110000000e+01, Abund:0.000000e+00>\n" ] } ], "source": [ "li7 = nuclideBases.byName[\"LI7\"]\n", "\n", "print(li7)\n", "print(f\"Atomic Weight (amu): {li7.weight}\")\n", "print(f\"Natural Abundance: {li7.abundance}\")\n", "print(f\"Spontaneous Fission Neutron Yield: {li7.nuSF}\")\n", "print(f\"Half-life (seconds): {li7.halflife}\")\n", "print(\"\")\n", "print(f\"Other nuclides for {elements.byZ[li7.z].name}:\")\n", "for n in elements.byZ[li7.z].nuclides:\n", " print(f\" - {n}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Exploring elemental Lithium data" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "<Element LI (Z=3), Lithium, ChemicalGroup.ALKALI_METAL, ChemicalPhase.SOLID>\n", "\n", "Average Atomic weight: 6.940037501798687\n", "Is Naturally Occurring?: True\n", "Is a Heavy Metal Atom?: False\n" ] } ], "source": [ "liElement = elements.bySymbol[\"LI\"]\n", "\n", "print(liElement)\n", "print(\"\")\n", "print(f\"Average Atomic weight: {liElement.standardWeight}\")\n", "print(f\"Is Naturally Occurring?: {liElement.isNaturallyOccurring()}\")\n", "print(f\"Is a Heavy Metal Atom?: {liElement.isHeavyMetal()}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Plotting the Chart of the Nuclides" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAtQAAAHwCAYAAACG+PhNAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAABeRklEQVR4nO3deZwsV13///enZ+3puRCWEAJJSEgCGEMWcokgosgmQiSIrPILkiAxggiKSAIYVgm7ol9BUaKISEDDEhEEZHOFcENCVkM2IAnZIBDu7emZ6Zn+/P6o6p7qmurq6u6q3ub1fDxCV/epOnWq7iScW/PuzzF3FwAAAID+lEY9AAAAAGCSMaEGAAAABsCEGgAAABgAE2oAAABgAEyoAQAAgAEwoQYAAAAGwIQaADows9eb2T+MehxNFvhbM/uRmV2Y8Zi/M7M3Fz22QZiZm9kR4fZfmtkfZdkXAMYFE2oAO5qZ/bqZ7TGzfWZ2i5l91sx+rqBzvcDM/muALn5O0hMkHeTuJxbQ/8i5+xnu/qZRjwMAesGEGsCOZWa/L+lPJb1F0gGSDpH0XkknF3Cu2Ry6eYCk77h7NYe+AAA5YUINYEcys7tLeqOkl7j7x9296u51d/8Xd39lZNd5M/t7M9trZleY2e5IH2ea2XVh25Vm9quRtheY2X+b2Z+Y2Q8lfVTSX0p6ZPg0/McdxnU/M7vAzO40s2vN7EXh5y+U9DeR498QO+6nUvq/h5n9azjOr5vZ4ZHjHmJmXwjPd7WZPSvlnn3FzN4UXtdeM/u8md07bHuMmd0U2/87Zvb4cHvGzF4duV8XmdnBCedoi6iY2SvD3xx838xOi+27YGbvNLPvmdltYVykHLbd28w+bWY/Dq/tP82M/88DUAj+4wJgp3qkpEVJn+iy31MlnSdpP0kXSPp/kbbrJD1a0t0lvUHSP5jZgZH2n5F0vYKn3/+fpDMk/a+7L7v7fh3Od56kmyTdT9IzJL3FzB7r7h+IHf+66EHuflVK/88Jx3cPSddK+mNJMrOKpC9I+kdJ9wn3e6+ZHZVyP35d0qnh/vOS/iBl36jfl/RcSU+WdDdJp0laSTvAzJ4U9v8ESUdKenxsl7dKepCk4yQdIen+ks4O216h4D7ur+D+v1qSZxwrAPSECTWAnepekn7g7htd9vsvd/+Mu29K+pCkY5sN7v5P7v59d2+4+0clXSMpmm3+vrv/ubtvuHut24DCJ7aPkvQqd19190sUPJV+fm+Xts0n3P3C8Fo/rGACKkknKYiQ/G04xoslnS/pmSl9/a27fzu8no9F+urmNyW91t2v9sC33P2HXY55Vni+y8OYy+ubDWZmkk6X9Hvufqe771UQ3XlOuEtd0oGSHhD+5uE/3Z0JNYBCMKEGsFP9UNK9M2Sbb41sr0habB5jZs83s0vCWMGPJR0t6d6R/W/scUz3k9ScHDZ9V8GT10HEr2E53H6ApJ9pjj+8hudJum8ffXVzsIIn+r24n9rv4Xcj2/tLWpJ0UWTs/xZ+LknvUPA0/vNmdr2ZndnjuQEgMybUAHaq/5W0Julp/RxsZg+Q9NeSfkfSvcKIxeWSLLJb/Ilotyek35d0TzPbFfnsEEk3ZxxWr09gb5T0VXffL/LPsrv/do/9SFJVwQRXUpCZ1tbktnmuw+MHdXGLgol40yGR7R9Iqkn66cjY7+7uy5Lk7nvd/RXu/kAFsZ3fN7PH9Xh+AMiECTWAHcnd71KQt/0LM3uamS2Z2ZyZ/bKZvT1DFxUFE9g7JMnMTlXwhDrNbZIOMrP5DmO6UdL/SDrHzBbN7BhJL5SUtRZ2av8JPi3pQWZ2Snjtc2b28PALjr36toKn908xszlJr5W0EGn/G0lvMrMjLXCMmd2rS58fk/QCMzvKzJYktXLj7t5Q8BeaPzGz+0iSmd3fzH4p3D7JzI4IoyF3SdqU1OjjugCgKybUAHYsd3+Xgi/LvVbBxPhGBU+cP5nh2CslvUvBk+7bJD1U0n9
3OexLkq6QdKuZ/aDDPs+VdKiCp9WfkPQ6d//3buPpof+WMFryRAW54+8riHO8Te0T4UzCv6C8WMHE+WYFT6yjVT/erWCC/HlJP5H0AUnlLn1+VkFZwy8piG98KbbLq8LPv2ZmP5H075IeHLYdGb7fp+DP6L3u/uVerwsAsjC+owEAAAD0jyfUAAAAwACYUAMAAAADYEINAAAADIAJNQAAADAAJtQAAADAALqtEDbW7n3ve/uhhx466mEAAABgyl100UU/cPf9k9omekJ96KGHas+ePaMeBgAAAKacmX23UxuRDwAAAGAATKgBAACAATChBgAAAAbAhBoAAAAYABNqAAAAYABMqAEAAIABMKEGAAAABlDYhNrMzjWz283s8shn7zCz/zOzS83sE2a2X6TtLDO71syuNrNfKmpcAAAAQJ6KfEL9d5KeFPvsC5KOdvdjJH1b0lmSZGZHSXqOpJ8Oj3mvmc0UODYAAAAgF4VNqN39PyTdGfvs8+6+Eb79mqSDwu2TJZ3n7mvufoOkayWdWNTYAAAAgLyMMkN9mqTPhtv3l3RjpO2m8DMAAABgrI1kQm1mr5G0IenDfRx7upntMbM9d9xxR/6DAwAAAHow9Am1mb1A0kmSnufuHn58s6SDI7sdFH62jbu/3913u/vu/fffv9CxAgAAAN0MdUJtZk+S9IeSnuruK5GmCyQ9x8wWzOwwSUdKunCYYwMAAAD6MVtUx2b2EUmPkXRvM7tJ0usUVPVYkPQFM5Okr7n7Ge5+hZl9TNKVCqIgL3H3zaLGBgAAAOTFtlIXk2f37t2+Z8+eUQ8DAAAAU87MLnL33UlthT2hBgAAAHrx0Pf+uar1uiSpMjcnSarW623bzbbLXvzS0QwyARNqAAAAjIXmhDltO+n9qI2yDjUAAAAw8ZhQAwAAYCw0ox3N7eb76HZ8v3FA5AMAAAAj08xNj9skuRdMqAEAADAyzTx0Wk6aDDUAAADQQVqsg8gHAAAAEJNUGm/SMaEGAADA0PRTGo/IBwAAADDFmFADAABgaHrJSZOhBgAAADQdpfHSMKEGAABAoQYtjUeGGgAAADtav7EOIh8AAADYMZLK4U1zzCOKCTUAAAAGlqUcHpEPAAAAoIMscQ0iHwAAAJhK8SocadGNnRrrSMOEGgAAYIcrsgoHkQ8AAAAAqZhQAwAA7HBFZJzJUAMAAGBqJZW4G3eXvfilox5CR0yoAQAAdphhZpyL6H/cEPkAAADYYYqMdaS1DdLHOOMJNQAAwJTotlrhpJW4G+eYRxQTagAAgCkxqtUKi458jDsiHwAAAMAAeEINAAAwZrJEN5ptk/Qkt5vo9UxSNIUJNQAAwJjpJzIxDSYlMx1H5AMAAAAYABNqAACAMZPXioFFr1aYd9m8SUXkAwAAYAw0c9O9TiybueOkfHUemv0VVXJvUmMeUUyoAQAAxkB04pr0eV5tg5SuG7TPac2AE/kAAAAYA+MSu0iLlBRx7mnAE2oAAIARSCqN15T2hHfYk9B4pCSqnzjINEQ84phQAwAAjEDWWES/xw1zpcSs55uWiEcckQ8AAABgAEyoAQAARqDf8nHDzlDnma+elsx0HJEPAACAAsXL4TW307LRSUtwjyI/3U1avjpqGnPTUUyoAQAACpRUDi8pSzzuZfPyaptGRD4AAAAKlDX6MK5l8/Jom3Y8oQYAAMhRWjm8qKRYRxajLqPXybTHOtIwoQYAAMhRP+XjBolP9BoVIdaRPyIfAAAAOSo6dtHpfKOOfOxkPKEGAADIIKlah7S9CkfRqxz2GxUpwk6OeUQxoQYAAMignyocefQxCVU+djoiHwAAAMAAmFADAABkkEeWeJSl64roHwEiHwAAAAni5e/S4hppqxz2E42gNN5kYUINAACQIO9sdNb9il5FkdJ4+SPyAQAAkKCXeEPWWMSwSuOltVEaL388oQYAADtaUjm8TpPapNJ4cUlxjV5jG5TGmyxMqAEAwI6WFKfIK5KRR//
jUjYPnRH5AAAAAAbAhBoAAOxo/eSM0/rIo/9xKpuH7oh8AAAAJMgamZg2ZKZ7x4QaAADsaFkyzv300Uv/45qhRjZEPgAAwI7WKRbRbx9Z+x/XyAd6xxNqAAAwdeKrHErtJeySdCttFy9/16msXaeVEnspnTfslRKJeQymsAm1mZ0r6SRJt7v70eFn95T0UUmHSvqOpGe5+4/MzCS9R9KTJa1IeoG7f7OosQEAgOnWb/651+hGt/4nbaVE9KfIyMffSXpS7LMzJX3R3Y+U9MXwvST9sqQjw39Ol/S+AscFAACmXL+RhiJWK+ynj7zOnTXygcEU9oTa3f/DzA6NfXyypMeE2x+U9BVJrwo//3t3d0lfM7P9zOxAd7+lqPEBAIDJlhbrSJO0CmE8DhJ/H++zl0hG0gqLWY9JOnceiHjka9gZ6gMik+RbJR0Qbt9f0o2R/W4KP2NCDQAAEuW9EmC/kYnoZ6NcyXCQ8WMwI6vyET6N9l6PM7PTzWyPme254447ChgZAAAAkN2wJ9S3mdmBkhS+3h5+frOkgyP7HRR+to27v9/dd7v77v3337/QwQIAgPFVRGm5rP13Gkte/RddNg/5Gnbk4wJJvyHpreHrpyKf/46ZnSfpZyTdRX4aAAD0m5PuV1LeOemc/ZbGy9p/EWXzyE0Xp8iyeR9R8AXEe5vZTZJep2Ai/TEze6Gk70p6Vrj7ZxSUzLtWQdm8U4saFwAAmBzjlDPOkpPO2mcv/Wc9Nysgjk6RVT6e26HpcQn7uqSXFDUWAAAwmdIqcjS3i2hLWgAmbWGX6L5Zxtyt/17O3cu1oRislAgAAMZKM+YxSZPA+IS3l2Oa21nbekHMYziYUAMAgLGSd7xhJ5bG63QNKMbIyuYBAAAkGVa1i7S2QSp5ZO1/GNeG4eAJNQAAGLphV+/oRy+xi6xVPopGxGM0mFADAICh66cSxigjH71EK8bl2jA8RD4AAACAATChBgAAQzdOOeM8zj0u14bRIPIBAACGYtLK4fWyWmE/ZfPyQm569JhQAwCAoei17Nw4Zaj7GX8v/VMab7IR+QAAAEMxrqXlJr1sHkaPJ9QAAEDS9khG0tPPpBJ3vSyDnae0SMagbfElwKPbSdGOYZbNI+IxfphQAwAASZ0jDUn7JO03ytJ13cbfT1uR10NpvOlC5AMAAAAYABNqAAAgqXNGOL7PuJSuSxt/Hm29lMZLa8s7Q43xQ+QDAIAdKmn573GQNY8czzhnNehxnSblReanyU2PNybUAADsUL2UjMuy3yTmq0dV8o7SeNOFyAcAADtU1shB/JiiIx/9jDGvknd5nLuIyAfGG0+oAQDYQSZhtcKi4xP9Kro0HrGOycWEGgCAHaSf6EPW/cYlFjEupfH63Q+Th8gHAAA7SD+Rg/jx41LlY9iRj7S2PK4Nk4sn1AAATLikah3DWq1wnBx21oWq7V1VedeirnzNMWoszEiSSmubUsq1X/xrfyN5VbKKjj//N4dWyYOIx/RgQg0AwIQb5mp84xz5qO1dlSTV9q62JtOS1FiYSY98eDV449WRRUMw2Yh8AAAAAANgQg0AwIQrMsecdK5xzVCXdy1Kksq7FoOYR6i0tpmeXbZK8MYqQ702TA8iHwAAjKks2eheJ2ZpkYZJmOR969fObeWdjz3/tLa2G845sXUdjcj1ReMfSdJy03kjNz2dmFADADCmsuRv81xxr9Nku5/+C8tQJ+Sdux0X/WzYJfvITe8MRD4AABhTRcQispxvnCMfSfGMpOOyXlte9zVrG6YTT6gBABihbrEOtDv2/NM63p94HGRcygUS85h+TKgBABihIlfcm8rIR1pbH3GQYUY+ML2IfAAAAAAD4Ak1AAAFi8c6eGpZjGp9TpW5uqr10UY8on/Go46bYDiYUAMAUDAqPQzHsR9/4aiHIInM9E5E5AMAAAAYABNqAAAKlla6rpe2vMu7dRrnIGXzeh1/Udc2ynNj5yHyAQBAn5rZ6Gi5O2l7Tnq
Uk6y0Khb9Zn2j1UCSJtE7sewfMY+djQk1AAB9yrL6Xvx93uXkBlmZr5eVBgcZY79tw1itcBjlCDH9iHwAANCnfqMP/bb1E03IMv5ufXY6ppc+pj3ygZ2NJ9QAAMREoxzxp4+dIg1FPD3NQ1KsI+nadmJMo1fEOtAJE2oAAGLSVgxMaivqV/55Rz66jbnXlRKLHte4Rj6AOCIfAAAAwACYUAMAENNrabmiohLDLu9GhpqcNPpD5AMAsCPEl/+WkrPE0fZu4uXj0srmZW2L9tftfINKOncvxzHJBAJMqAEAO0IvJeJ6zVB367OIDO+wxhE3LjnmUd9/IIrIBwBgR+g3+pC1LS0e0Etb1sjBMGMR8fOOMnZB5APjiCfUAICp9cD3vDvx816eUHeKaxRd2SMpUpF31KLfsnl5R0/GCaXx0A8m1AAApBj3KARl84prA7Ii8gEAQIqsv/bPK/KR1pZ3LKLTNXSLfPQ6xn7bRhH5APrBE2oAwNhLq9DR3E5qi0Ykkip5dGqLSopFZDmuCJ2iFv1GN7L2H5d39GTUiHlgUEyoAQBjb9BIQL/xhrSxjDqOkHclkk59pH0+bVU+gH4R+QAAAAAGwIQaADD28sjiduqz19hCp+OGkaHOMo54W7954bQM9ShL1xXRBgyKyAcAYCxFS97FJz29RhomKfLRKaPdyzjTxnjtcz4keVWyio4475S29n7y4Wn3fFwnq2SmkTcm1ACAsbeTSptlnej3+xcCebX9tcc++81hj2uGGsgDkQ8AwNjbSb+i7zWS0fP9sEr7a4999hsbGafIB5A3nlADAHLXLHPXS4m7pLYsovGEeFSh37J5/bRlGUcvbd2epNb2rUoLM63XKNuUfCZ4NZca4f/blzakh/7LqarW11WZm1dlzjre86yxj/j4m2Mft4krMQ8UiQk1ACB3SZPCQUqZDbO0XL9tw44jNMJJdCM2mZaCyXTz1aPHzErV+np4rnWV6lu/qM46jkkvmwcUgcgHAAAAMACeUAMABpa0kiGKZaub8sWZ1mtb24bLZ022ETyf9llrfa45Sab2R9cTqlvchJ9JDMtIJtRm9nuSflPBv86XSTpV0oGSzpN0L0kXSTrF3ddHMT4AQG+oojB897qyOYme0Q8e1t52xO4bW9vXXHJIa9tnTaW6DWF0w0EuGuNi6JEPM7u/pN+VtNvdj5Y0I+k5kt4m6U/c/QhJP5L0wmGPDQAAAOjVqDLUs5LKZjYraUnSLZIeK+mfw/YPSnraaIYGAOhVHiv65dW2UywtzrW9RpVnFtpeO8ladi5+zLislAiMi6FHPtz9ZjN7p6TvSapJ+ryCiMeP3X0j3O0mSfcf9tgAYCdKKnEnZcukdiqPFi0tl/R5tI+828axbN7RZ1+s2t5VSVJ516Iuf+Pxrbbj/vZHqlXXgrbKgi49ZT81Zk2lDdcRF85qpRakH5fK87r24RtqzJlKdQ9y0M1/YkqnbapU3VSpsim9pL2t05/NuCLWgUkw9Am1md1D0smSDpP0Y0n/JOlJPRx/uqTTJemQQw7psjcAoJthlYEbRpm5cS2b15xMx7cltSbTze1G+AXCxqy1JtOStFJbV2MuLJU3Z1pZDc7RfE3qM9r3IOPPul/RPzPAuBpF5OPxkm5w9zvcvS7p45IeJWm/MAIiSQdJujnpYHd/v7vvdvfd+++//3BGDABTbFx+fZ9XW6drS7vuPNrSxlHetZi4LQVPpaPbpbAyR2nDtVSeb7UtleeDJ9OSSnVPj3yEfUb7Thr/JEQ+gEkwiiof35P0CDNbUhD5eJykPZK+LOkZCip9/IakT41gbAAw9dJK3PX7dLlTP936KHrCFB1Hp0hGt7a0PpM+T2qLRjzirnnRsqob9wjOPVtXY2PrCfW3f3ZTwXf3JWlTzXxHY870w2Okal2qzEkVtY//mpc/oHVdlch4kq57XBDtwCQbRYb662b2z5K+KWlD0sWS3i/pXyWdZ2ZvDj/7wLDHBgA7QRG
/Xu+3nyLjJr2McZQxg+rGXOJ21+MGvHdp+44y8gFMopHUoXb310l6Xezj6yWdOILhAAAAAH1j6XEA2GGKKEOWlh/OclxRGeqsY+x3/HmozNYTt7sel2PGvNtx8WOG+ecGTAKWHgeAKdWpHF6aeJa4eVy8j6S2pH6y9DEsSbnvLOOP37dmCbx4+buHvuHSttJ4l73umFbb/a6f18pa0M/SwpxuOXRdXpKsIR34v+VIabyyrn34Ruu44957S1tJvUtefGBP15p0fWnXPUxkpjFNmFADwJRK+2JdfJ+k/aahbF6afsvHNSfN28rfpZTGa06mm9se/n7YS9pWGi/6y+N4Sb1ex592PeNUNg+YdEQ+AGBKjWMJtCL7j15zFln7j2uWvdtW/i6lNN7SwlzbtjWCbWtoW2m8tj5jJfWyjj9pv6RrG2XkA5gmPKEGgCkRL4c3jk+ohzGR6hQ36ec+JJXU61QC77Kzj9latdDb277/wDU1G3+srW0vKYx4NJ9vbbQdF494ZI1rDDPWQXQDYEINAFNjEn6lPqo4SC/1pLOOeRvrsN29MbOkMQ8jStPPfsBOQuQDAKbEJFRNGEbko1PcpNNYsjztTdrexjtsd2/MbFRRmvgYiG4A7XhCDQATLF7JIyop+pC0T7Mtvl/ebcOafGV5iitJh511Yataxw3nnNg2/sPPvkS1fasqLwdZ6Nq+sHrH8qLsQYeptrKu8tK8Lnt6WY05U6nu8pLJw0UNraHWtiTJLRIHMVXmk/9sstzXYSHKAWTHhBoAJli/lSqy7DftVT6i1TrifTYn0M3X1jH7VmUrQVWO2sq6GnNLkoKlwKPaJtPStsRHP39uo7yvANIR+QAAAAAG0HVCbWb3MbNfNbOXmNlpZnaimTERB4AxMC4l0LLmmIeRoc4qWv4u3kcz6lFeXmxtt94vBaXtykvzKtWDPHSp7rLNrb6j25K2Ragn4b4CyK5j5MPMflHSmZLuKeliSbdLWpT0NEmHm9k/S3qXu/9kCOMEAEh64Hve3fY+OvHppcJFp/3iE6loNKGXtn7261enMnfxPHL8PrSVv4v1ceuLjld1ra7Kwpxuv+9623H3fOCaqhszqszOqHF9kOXoFvloZqbj24Pc1zyQkwbykZahfrKkF7n79+INZjYr6SRJT5B0fkFjAwB0kbX0W6/9JfVZZL43z6xv1i8lpqmGKxtW1xL631iPvC5sa0/sr8D7Sk4aGL2O0Q13f2XSZDps23D3T7o7k2kAGKG8f0WfFp/opa3TuIYRTegUi+hFJVzZsLKQ0P/sfNtrpv4KuK/EOoDxkRb5+F9Jr3H3LyW0fdHdH1foyAAAktJL40VFYw1p5dfix8T3S+qz17Z+9utX/LqjT6hLa5vSwoxq+1ZVWV5s2++Ar0krq+taWpzXd45Zb8U2SnXXwl2uzVXXwqJL920/390+tazZ1bqWFue09rBspQO7xVKSrqeo+0XMA8hfWuTjEEn/z8w+I+ksd4/+l/iexQ4LANCUFuugbF562bzGwkzrNX7cymrwTcGV1fW2DHRjzrSyWg/btt+7aFu13vsYo5+N8r4CyE9atY7bJO2WtJ+kr5vZgyNt/S/zBAAAAEyR1PJ37r7i7r8p6c2SvmBmZ4RNlnIYACBHveaF4+/jbfHjh1XWruj+k+5RaW2z9Rrfb2kxyEAvLW6Vv5OCyMfS4lzYtv2+RdvyzHn3e92D3B8A+ci0UqK7f9zMvi7p78zsyZKWix0WAOxczcy0tD0THDdo5GPaRSMf93nFf7cm2I2FGd32zuNV3ZhTZXZFjY2tiWZjznTn0VK1Lq3OSRW1Z7TveFjQVulhbpqUbx8UWWhgfKRNqG+PvnH3myU9wcxeKemJhY4KAHawYU6Gpz1DHdWcTDe3q+EkurqxfYLb619Uhn1tO/kvScA4Siub96QOn7/D3ReT2gAAgxvmr+inPfIR1Xxa3dyuzIZPjGc7Vz8Zt2tLi5EAGJ20snmvlfRed7+
zQ/tjJS25+6eLGhwA7BRppfGacYFobCCpTepcti3aNk6iT4LTJomHnXWhantXJQVLhd9wzomttiPfca1q+9aCtuUFXfKyQyQzyV2SBd/6cemGcx4efC5J7rJ1k0rSyvrctsdLaePKOuYiEPMAxlNa5OMySf9iZquSvinpDgVLjx8p6ThJ/y7pLUUPEAB2gn6z0NNSNq/bcc3JtCTV9q62XXdzMi0p2G5Omi3y/Xlr/U+rzZtz69Sv52e7hlHHWQCMVlrk41Pu/ihJZ0i6QtKMpJ9I+gdJJ7r777n7HcMZJgBMt2FWi4gfP8rIR9K1JB1X3rWVNCzvWmxvW95a/ru8vBA+mVbw2ize4dr6PGyzRrDZfO0ky5hHHWcBMFpdq3y4+zWSrhnCWABgqj3wPe/u2BadLGV9whv9LOlpZpYJWLcnolkncVnGPMgT6hvOObFj5ZNz/+dzkleDna2i3R96oRoLMyqtN7Zy012eUEf7TIrLRMVjNkUi4gFMhkxl8wAAxUorjTfKPselLF/q+ZqT6XA7Wipv0P7HqcoHgPHVJTkGAAAAIA0TagAYA0WUQ8uSoe63j2GXcEvNElulbTu6OmK//Y9L2TwAk6Fr5MPM3q5g6fGapH+TdIyk33P3fyh4bAAw8aLl8NJyulnjGd2yvvG2bvLoI+u4KnNz28rfXf7G41t9HHPO1artC9uWF3X5HzxIjTlTqe46+KubWqltSJKWyiXN3VVXbWVd5aV5ndA4vVWpwxrS0nIwlvLyomor660+GnORDLWGm4VOQ04amHxZMtRPdPc/NLNflfQdSU+X9B8Kqn0AAFLknUHOI8Obdb8iyubFy99FNSfTze3mBLgxZ1qprbfaVmrrml8J3tdW1uWlrQmxl2L3PNJHXJ4l78hJAztblshHc9L9FEn/5O53FTgeAJgqef9qP49YQby/YZbNi5e/iyovL7Ztl+pBmbtS3bVUnm+1LZXnVV4K3peX5tvK3lmj/Z5H+0i69jyvjVgHsHNleUL9aTP7PwWRj982s/0lrXY5BgCmVjPGIXWPTESfguYxeerlCWnWsnlpJfvybEsqf1eJ7P/xb13YVv7u2POPDqIbS/O68fFStT4bHjejan1O0taktNnn0kL7NZeX5lt9RMdSxJ9NHFEOYOfIUof6zDBHfZe7b5pZVdLJxQ8NAMZTEXWWi5A1l522X95tqfcgVv4uSx9d+8xwTL/H9dI/gOmWtcrHQyQ928yeL+kZkp5Y3JAAYLz18qv9rMcVOc5uVT7SxpF3W+o9iFXryOO+5tnHIP0DmG5Zqnx8SNLhki6R1KxB5JL+vrhhAcB4iVbrSJMUfYhX8kjaLypeMSPanqWaRrStV0n9d2prVuwo71rcFuU4/OxLVNu3qvLyoq5743GtMd7926a5teALgksLplsPXpdmpVqtrieceZpWVoMvGy4tzksP3zr34R/dp1r4RcTy0ryue/bywFGN6J9Nv4h1AJCyZah3SzrK3bd/owMAdoh+q3VkjV1k7TPvKh9pY+zWf7NKR23v6vZqHmHFjtq+1bb+m5NpSVpZq6sR/r9QY1atybQUbFfrttXfylZbbWW9v0hJD9fWTxuAnStL5ONySfcteiAAAADAJMoyob63pCvN7HNmdkHzn6IHBgDjpN8SaP3kpIvM+qadr9fSb82yd+Vdi9vbwhJ45eXFtj6iVTiWFuZUCtZqUWkjjHk02xbn245rVumQgshHHhnnPNoAQJKsW5LDzH4h6XN3/2ohI+rB7t27fc+ePaMeBoAp1EtpvDzaRhkf6JTzLsK1z/lQW2m8I847Zds44tu9tknb73mef27kpoGdycwucvfdSW1ZyuZ91cwO0NbXQy5099vzHCAAjJudlMXtJ+fdt1hpvKRxJI1l0Fx5EX9uANDUNfJhZs+SdKGkZ0p6lqSvm9kzih4YAIxSEfGAcY0ODHUcsdJ4SeNIGsugMZg8/9wAIC5LlY/XSHp486l0uFLiv0v65yI
HBgDDlrU0Xpqksnmd2pK2pXzL5iW1xc8VLX93+RuPb2s/+uyLO7Yd8+arWqXxNk58SFtZu7V7zGlldV1Li/Myl1Zq61oqz+vxl52mldVgHEuLc9JDOz/xTStrl0fJu6yIeADoJsuEuhSLePxQ2ReEAYCJ0W/5uE5taX1mGUcv5+s3UhIvfxeX2hYpjbcRK2u3Wg6+n7Oyui5rBJ+v1Na1YVv/99GcWOd5bUVHPgAgSZYJ9b+Z2eckfSR8/2xJny1uSAAAAMDk6Pqk2d1fKemvJB0T/vN+d//DogcGAMNWRBa6n3zyoHnhbm3x/qLl7+JS2yKl8eJl7Zol8JYW57VUDrfL80HMIxTdzuvaishQA0A3WZYef5u7v0rSxxM+A4CJ0q0cXj/SYh1pbZ3K1Q078tFcNrwyNyfF9k1r+/jle4JKHVbRsecf01rZcOu+llSZs/D8JUkb4bU2r1+qaHhZ6F6QmwbQiyyRjydIik+efznhMwAYe0WWWOt3LKPO6fb7F4JW2TuvDvUvBMPsHwCy6Bj5MLPfNrPLJD3YzC4N/7nMzG6QdOnwhggA+SkiHjDoWEb9dLbvyEqz7J1VMkcmiox1pLVRGg9AkdKeUP+jgi8fniPpzMjne939zkJHBQA5yqMcXlRSabyk6IbU3yqEeZTGi7c1S+NJ0nXvfoRk1jrf4a+7tFX+7vJXPUSNuaCtVHcd8qUNrdTqWiqbrn60t7U96fdPbZXD06Par+Hgf9/USm1DS+WSbnz8eMY6ooh4ABhExwm1u98l6S5JzzWzYyU9Omz6T0lMqAFMjF7L4eVZGq+ftiJiEW1l7yKTaam9/F1zwixJjTnTSi0oh7dSW1cjMiGOt1XrW7/wrNbrWqltRNpmBx7/oG2UxgNQpCwrJf6upA9Luk/4zz+YGX+VBzAx8o4HpPWfR1sR0Ye2Kh3ubeeLVuso1bfaSnVvq9CR1hY/d1Jb0bGOtDYqeQAoUpYvJf6mpJ9xD759YmZvk/S/kv68yIEBQC/isY549KFXSbGOpngko+8v9UVEoyFpffRyPdHj4qscRl3zloeqWl9XZW5ejZWN1ueNOdP3fmlW1bqrMjercqTP8tK8bnnUViWPuBsfP6NqfVaVuZm+xz9M8eovREAA9CLLhNokbUbeb4afAcDY6DU+kdbWbzyjV0l9duu/n0hD93Gst15LsV9c5nlf+x0/VT4AjLssE+q/lfR1M/tE+P5pkj5Q2IgAAACACZKaoTazkqSvSTpVwRcR75R0qrv/afFDA4Ds8s7b9tJ/nmPu1n8Rpd8qc/Ntr93GmOXcw8yH55GhzuvPFMDOlPqE2t0bZvYX7n68pG8OaUwA0FXSiof96JTvjeek00Tzz/2WzYuO45hzrm5V3igvL+q6s4/umD8+/KP7VFtZV3lpXqXqmmrVteC4yoIaC3NbbT/4cavtmpcdosZ8qTW+A/5LWlk1LS2arn9Ye/8HfaWhldqmlsozuukxXb/H3vX+TIpoJp88NYBuskQ+vmhmvybp4+6xr4YDwIgUkZXNOy+c1mdaW3My3dxOG1dtJcg/11bWVQonzJJUq66psemJbc3JdLOPldVgv5XVdcW/ItNeGm8m03WPKic9jHMDQJIsjxt+S9I/SVo3s73hPz8peFwAkCqveEBSn3nFG+J9Zm1rlrFrbqedu7wUxDTKS/MqVxa2jqssdGwrrTfa+lhaDEvcLW6PfKSVxis6djHKyEfanxsAxHV9Qu3uu4YxEADoJu8VD6XkSEavTzPTxpO1bF5UM+LRHF/qvs9ejoxhOaGcXzl8f7eOY7jtZ6Rq3VSZkxTb5abHlFStz6gyV0otJZjWNgmIdQAYRJbIh8zs6ZJ+TpJL+k93/+QgJzWz/ST9jaSjwz5Pk3S1pI9KOlTSdyQ9y91/NMh5AEyXYZVwy2tc/R5XZJyl0xg67dNPZKVT26REPgCgV1lWSnyvpDMkXSbpcklnmNlfDHje90j6N3d/iKRjJV0l6Ux
JX3T3IyV9MXwPAAAAjLUsT6gfK+mnml9INLMPSrqi3xOa2d0l/bykF0iSu68ryGefLOkx4W4flPQVSa/q9zwApkMRMY9xZ5uSz2xtW8PVmDOV6i43k4f/5bYNyRoNNeZLQS66ZGrMBl8qLG14a1sKctOtLyO6S7a9rW2fHWASq48AGE9ZJtTXSjpE0nfD9weHn/XrMEl3SPpbMztW0kWSXibpAHe/JdznVkkHDHAOAFOi3zjFJLvPN9oLKt398jtb2ysP3K+trfKt77e2Nw66d1vbdc8st7aPfOd1re2rX31k235pbdOM3DSAvGR5FLFL0lVm9hUz+7KkKyXdzcwuMLML+jjnrKSHSXpfWN+6qli8I3wanliiz8xON7M9Zrbnjjvu6OP0AAAAQH6yPKE+O+dz3iTpJnf/evj+nxVMqG8zswPd/RYzO1DS7UkHu/v7Jb1fknbv3k1dbGDKxX8tH99u7pNnW7dFWJIWfRl0YZeopcX5sCZ0sF2uzKtWXVe5Mi+V51u1oZfKQTm8WnVN5cqCNpbmW3Wpy0vzbecuLy+oti+oRR2NdkTbyssLqdeWx30dtI+8zw0AechSNu+reZ7Q3W81sxvN7MHufrWkxyl46n2lpN+Q9Nbw9VN5nhfA+IrnpEc16YnGS+ITyyzVI5KOS9PpLwtfeOu5kleDnayiI847JXJMqbXASmWupOoj7xPrb6a1HXXNKw7vWI2j2TaNk0xiHQCGIVPZvAK8VNKHzWxe0vWSTlUQP/mYmb1QQV77WSMaG4AhG1b5uGGU1Ms65uhn2/ZrTqal9u0+x9xt/MMqRzjOf24AMIiRTKjd/RJJuxOaHjfkoQAYA0lPapufN98PIx4QnYClPaHOci3d+u903ZW5OckqbU+o0/pPG3On6+405lHFLopuA4CidZxQm9kX3f1xZvY2d6d8HYDcNCMe0nBiHfEoRyed8s7dJtNpxx199sWq7V1VedeiLn/j8W3H1fatSgszqu1bVWNhpnXMv60saNM3JUkzttDW/5Hn/lC1apCFLlcWdM1p9xpo8jiNk05iHgCGLe0J9YFm9rOSnmpm50myaKO7f7PQkQGYWoNGAvqNAGTts9eoQKfIhCTV9q62vUY1J9HN16ZNX2nbjvbfnExLUq261ldcI+t+kx75AIBhSZtQny3pjyQdJOndsTZXsOALAAAAsKN1rEPt7v/s7r8s6e3u/ouxf5hMA+hbNGZQmZtrvY9up7Vl3S9+rqzn7jUGkXZceddi22tUaW2z7bVpxpbatqP9lysLW31XFvq6J/Gx93tfi/5zy+PPGwCGIUvZvDeZ2VMVLBcuSV9x908XOywAky6ek45/eS5vWaIP8fdp2/1+KTHuhnNO3Mo4x/ooLy+qWq+rvLyow866sJW1ftJz1iQPIyI2o+iXWJqZ6ea4BjWpX94jJw1gnHSdUJvZOZJOlPTh8KOXmdnPuvurCx0ZgIlWZMZ2GLnZXjLVaRnqrG1tWetY2bx+ctJ5jH8SMtQAMA6yLD3+FElPcPdz3f1cSU+SdFKxwwIw6QaNXaS1DePX/L30nxb5yNrWFg2JlsqzSu6xiLRxTErkAwDGSdY61PtJujPcvnsxQwEwaZLK33WKEDRjE0VNhuL9Z61ZnFTHOf55tV5XaW1TjYWZ1mtUNK4RL413+BsuU23fqsrLi7r8Dx7UWvK7tN5QbX1Nmi+pVl3TlX90nBpzplLd9ZzrDlBtMyyNN7OgIo3z5JRYB4BJkWVCfY6ki83sywpK5/28pDMLHRWAiTCq8mhFRQfS4hmdStxJ6aXxavtWW6/NybSkzttz1ppMS1Jts7/SeNNUNg8Axl3XyIe7f0TSIyR9XNL5kh7p7h8temAAxt8wKjYMMzqQFs/oVJFDSq/kUV5ebL2W1htb/a03Wu9L6w2V6h5s173tqXR5pr9KHlkjH+P85wYAkyJT5MPdb5F0QcFjATABmjGPQSY8RX4psde2pJhHUpUPhe+blTmiopU
8KpHzVObm9PFLvxF8ydAqOuK8h7SOSXtCfdUlD0iMpRQRmRmnKh9EPABMqqwZagCQlH9FiFHr9Xp6qeRRrde3KnZEK3dkHNMg45r0Kh8AMEmyVPkAAAAA0EHqhNrMZszs/4Y1GADjL49M7TjpNK5exp+aA26WwIuWwss4pl7H1U+Gutv4h5mhBoBJlRr5cPdNM7vazA5x9+8Na1AARquXcnj92JZPjvXfrS3rfkltzRJ3ktrK3FXrdR199sWJ5e+q9bqOedMVrfJ3l736p+VhsQ/blI5459VbbS9/YOu/rLVaXUd++BT5TLCfYgVCSuuuxry1XpuOfO/3VauGZfMqC7rmxfcbq6zzIMhJA5hGWTLU95B0hZldKKkVAnT3pxY2KgAjlSUDW3TetqhzR0vbxcvcZS1/55GJsc/ESuNF/qsa3fbt1fZak+joZFpSazLd3C46tz6qDDUATIssE+o/KnwUAMZKlifIgzwlHrRtkD7KuxbbnlBHNds6lb9rPoW2TbU9oY62lTa2JtKlDclNrSfU8Ul1pyfU5cpC2xPqaCWSPO5rXN79d/tzA4Bp03VC7e5fNbMHSDrS3f/dzJa07ReXACZdHuXwsoo/tcx6zrTycVlLyzVL3ElqK3NXmZtLL393xTdb5e+OPf+4VtvS4pyuPevBreMakWtrzG5NVpcW57ZNaDs9oW5GPJrnLrJs3jD+vIl5AJh2XSfUZvYiSadLuqekwyXdX9JfSnpcsUMDMEyDlo8bJH4wbmXbEveLlL/rJdLQ6drSDLMk3bBjPAAwjbKUzXuJpEdJ+okkufs1ku5T5KAAAACASZElQ73m7utmwa8kzWxWkhc6KgCFS6rkMS1Ka5tqLMxs205smy9JZpK7SnVXY76k0npDjTkLPpckd63U5rRUrmultv1eWUPy0tZr1rZp0i13DwDTLMuE+qtm9mpJZTN7gqQXS/qXYocFoGjT/Gv5w868sO39dX/yyNb24W+4rK3tmrccF2yY6Yi3Xd36/Nt/tLVMuMx08mn/39b7p7Wfb78rtzLQPzq6/XnD3a7darvrQdP7LIKcNICdLMvzkjMl3SHpMkm/Jekzkl5b5KAAAACASdF1Qu3uDUkflPQmSW+Q9EF3n97HLMAOUfSqd3mtoph1JcOoaNm7baXxlhfbtktrm5KC+Eezrby8qNJ6o7Vfab2h8tJ80Ba+Ri0tzrW9trUtzLW9ZlHkaoVJ5yrizw0AdpIsVT6eoqCqx3WSTNJhZvZb7v7ZogcHIF/DLI1XtLTISnSVw7hLX/OQrW+BmFrbjYUZfevlh259Fqlk15gv6btPmVO1bsG9i53vxz/lqtZd63MuxdIztzxgXZJ0l9a3jSVa/zlLJY4i/uwG6ZOYBwAEsmSo3yXpF939Wkkys8Ml/askJtTAhMmzPN2wSqz1U3YulXfYlrYm0aZt0sbR7xhH9eeR9zgAYKfLkqHe25xMh66XtLeg8QAoUJ7RgVFHPvpmHbalrQl2QqitiDH2E2fJO/IRH0cv/QMAAh2fUJvZ08PNPWb2GUkfU/B/M8+U9I0hjA1AH+KxjnEsjRd/8vnQN1zaWvL7stcd07bvYa/e02q78tVHq7Ewo9Lapo7+dF21lSBGUV6a1+W/Mt8qeXf4xfNaqQVtS+V5XXf8eqvtvjcuamUtXOVwYU7fP3wrilFab6ixUGq9RtWqa9J8qfWa533o9rQ3Gg3JW7c+iXUAQHdpkY9fiWzfJukXwu07JJULGxGAgYzbqoPd2iSptne17bVTW7N+dGNhRrWV6tY+K+tqzAdfKGzMl1qTaUlaqQWT6Vbb2tb5o9tBv6W217a2SB9xecdShnHPe+0DANBZxwm1u586zIEAAAAAk6jr7y3N7DAze7eZfdzMLmj+M4zBAehd0RnnInLSzdJ28RJ38ba2EneR8nXlpflWmbvSekNL5a22pXKsLVK+Ll7KrrTWaHtta4v0EZd3znucyuYBALqzbiWlzexbkj6gYGG
X1v+TuPtXix1ad7t37/Y9e/aMehjA0EWXDY+Kl1+Lvo9mqjvlq7OUcBu20lqYa15rzzUvza5rZWN+2/Y0SiuvF89XZ/nzjh8f3Y/MNAAkM7OL3H13UluWsnmr7v5nOY8JwAA6TXiHUWJt2DrlmqMT6GmeTEv9lezLI0MNAMgmy1fV32NmrzOzR5rZw5r/FD4yAB2l/fq+0/tBIhmj1CmGsTS7nrg9jbKW7Msj8gEA6F2WJ9QPlXSKpMdqK/Lh4XsAQ5J1lcM8SqxliYokRQwe+EcXt0rcXf+m49vbXntRx9J4x35kr2rVdZUr87rsGctqzAUFokt11+EXz2mlth6UvzthQ405U6nu2v/yilZWw/J3ixV99+itp6vHve/WoLydpHJlQZf89n07XltSRKZbtKLftvj9it7LblGdLG1ZEe0AgHxlmVA/U9ID3X26HwEBYy5LabZhrmSYtF+0xF1839TSeNX11mtzMi1JjTlrlcBbqa2rMTez9flqpPzdavtYmpPp+HbWa+11NcS8IxlFl80j2gEA+coS+bhc0n4FjwNAF3lUbCi6yke0IkdaW1y5Mt96LdW3vihdqnurYsdSeautVHctLUaqdSy2349yZSFxuzmWpO3o+27Rin7bssZsiq7yQbQDAPKVpcrHVyQdo2B1xNajHnd/aqEjy4AqH5hm0Uoe/VbhSItuTOJTyrRoxSjPnWccpJdqHd3aiHUAQH4GrfLxupzHAyCDrL/2L7qPcZL3ioR5nTvPNmIdADB5uk6ox6HeNAAAADCusqyUuNfMfhL+s2pmm2b2k2EMDtjJ8ihrNyml8bIa5fiHla8mJw0AkyfLE+pdzW0zM0knS3pEkYMCdqpeS+Nl/bxbH73kdKPl765/8wlt+x157g9Vq66pXFnQpc+9mxrhf2FKG9IDLp/Tyuq6lhbndcNx7WN84EVbbdef0N527Ie3Sup963m72sbSrfxdv22d7kHavevWf9qYBy1z2ERmGgBGI0uGusWDbzB+0sxeJ+nMYoYE7Fx5rnqXdtwgOd3U0nhhibpada01mZakxqy0shqWv1tdl7RVGm/rs+S2aEm9fsfca1tROeZhnhsAMDxZIh9Pj/zzDDN7q6TthWQBDKyfX+0nHZ/0Pq9YQWppvLBEXbmyoNLG1jhKG9LSYlj+bnH7MuFpbdGSemnXlmdbUbGLos8NABiNLE+ofyWyvSHpOwpiHwAGlFQar2mYVT56aWtb5TDWds2pZVU37qbKbF2NyIS6Masw5mGSto//tkdI1bqpMre9uRnzmFTxGEnWtk6IdQDA+MmSoT51GAMBdqK8y9qN+tf+1Y25ttfMx/VRDm/SIh9FxHEAAOOh44TazM5OOc7d/U0FjAcAAACYKGkZ6mrCP5L0QkmvKnhcwI6Qdx521Dnaymy97TXzcTmUBMyzLc8Mda9t5KQBYPJ0fELt7u9qbpvZLkkvk3SqpPMkvavTcQDapS0hniZrWbteRDO7R77rOtX2BVU5yssLuuYVh7faDj/7EtX2rYZti7IjHqDayrrKS8EXA5vblz2zosZsUJWjtOG6xzVLWlita2lxSdUj28d31OcareO+e9JCTyXp4iXnul1bv/dkXJCTBoDJkpqhNrN7Svp9Sc+T9EFJD3P3Hw1jYMC0KOKLgVnOldZerddbk2lJqu1bi7WtRtpWZSth6bqVrdJ1tZV1NWaXW+8bs6aV1aCP5mtULdJHtb71y7FhZpCzHjdOGWoAwPjrGPkws3dI+oakvZIe6u6vZzIN9K7o6ECnc6W1V+bmVF5eaH1eXl6ItS1G2hZbT6bLS/Nt26UNb+1X2nAtLQZ9NF+josflURYur9J1/cQuio58AAAmS9oT6ldIWpP0WkmvCRZJlBTUvXJ3v1vBYwMmVtYVD9P08wQzGo1IikhEn0Jf8ruHtB8cabv0j3460xij5yiX53VTGPO4M6E0XhDzKA10T4qIdUTvybAns0Q7AGA6pGWouy76AiDZKFe
966cE3aDnynK+PO9JEbGLvMfY67kBAJOLSTNQgDwiAHmcu2hp0YpO+w6jEsagcZBhRT4AANMhy0qJADLII+YR1U+1i2hbtV7XMeevtr4MWF6a16W/tpWNPu5vfqhaNazyUVnQ5v67WlU4vFRqO272jp+oVl1TubKg6lH7a6W2rqXyvL7/6PbzH3funa39rvmtAzpW8hi2tFhH0VU+iHUAwPRjQg3kJCkuMIzoQKf9pO1VOaStCXVzMt3c3qwstPbzUqntuNlw31p1TSu1oM+V2rqq9Zm2c9ci+xUZ18gj1lFE/8Q6AGBnIvIBAAAADIAJNZCTYZZf6zaGpmapuvi2FMQ8ottJpfFa78N9y5UFLZWDtqXy9vJ30f2KLi03aE46z/7JSQPAzkbkA+hT0gqIeeq3bF5UkJleTNz3mjPupupGOBGcrYfb0Un31n8eKnMHxPLbM5I2pfpm27kvOe2eW4fHxhy9R/HrydrWL3LSAIAijWxCbWYzkvZIutndTzKzwxQsa34vSRdJOsXd19P6AEZpmFncIjQn0/HtxH1zKMWX1kevbcNerZCcNAAgzSgjHy+TdFXk/dsk/Ym7HyHpR5JeOJJRARkVHR0ofPyz9cTtxH1zGFdaH1nbhlXWjlgHAKAXI3lCbWYHSXqKpD+W9PsWLMP4WEm/Hu7yQUmvl/S+UYwP6CTv0ni9iMYWspTNS3Lch7bK323cY0m1lRVJQU76hl/d6v+wT7aX27vhaVvXe8w5V6u2b1Xl5UVdd/bRHUv7NbejbUnj7KVtXBDxAABEjSry8aeS/lDSrvD9vST92N03wvc3Sbp/0oFmdrqk0yXpkEMOSdoFKEyvK+nlGfnIErvoFj+IlrXbWNj617+2sq5qfbbVR7zcXrM8XrVeV23favD5vtWhlp0b1/4BABh65MPMTpJ0u7tf1M/x7v5+d9/t7rv333//nEcHpCs6OtBvLCK+TyfRKhzxSh7R/lPbloMvOZaXFwuNXaS1jbp/AACiRvGE+lGSnmpmT1ZQfuBukt4jaT8zmw2fUh8k6eYRjA1o00slj06r8fVStaLfL+4lnUuKVu8Iti855W6xIyKVPCKfXv6UGUnlxLZmzCMeL+n2hHeUVT76QawDAJDV0CfU7n6WpLMkycweI+kP3P15ZvZPkp6hoNLHb0j61LDHBsT1EwmIvu9lYlxElCCvSh5ZKm10HcsEV/kAACDNOC3s8ioFX1C8VkGm+gMjHg8AAADQ1Ugn1O7+FXc/Kdy+3t1PdPcj3P2Z7r42yrEBUrbSePH9ou+zloHrtO+g8iqN1yln3NNYJqxsHgAAWbFSIhDTb2m8aFm7pM+b21n7iMcOjn7dt1Tbu6ryrkVd/oZjW58f8Ve3aabukoIvG176vLurMV9Sab2hFz3pf7XeqEmS5ktl/fUXHzdwWbtu+01y2Txy0wCAfjChBmJ6LY2X1pZn2bza3tW216bmZFoKyuE15oNfPDXmS63JtCStN2pjeW3jem4AALIapww1MBbGpbxbXHnXYttr0+acbe1TWVBpvSFJKq03NF/aqtQxXyqPzbWN67kBAOgHT6ix4/VSGq8feZXNu+Etu7eOj7TPv29FMzPBU+r5mU01LryHpOAJ9Sff/jitrAaLtCwtzksndB/nMErVjbI0HrEOAEDemFBjxysyVpBn2bxOxzUn05JU22z/Lm9zMt3crtYt07mzXE+e0YpeoyjEOgAA44TIBwAAADAAJtTY8YZdfi2tjyzjjO+3uRnJUM8stLUtLc63bWc997Dz4aPMpgMAMCgiH5ha8fJ3aeXdihQvA3fYWRdulb974/Gt/ar1uo5581Wq7VtVeXlRl772p9r6OfJv71StuhaUxnv2shrhlxEvv+Gg1l+NK7PzkrYiIMs3rmpmJYh9lJcauuOE9i80Jo1zFPckb+SkAQDDxIQaUyvPEnH9tiXt16n8nSTV9q22vba1Vddar425XVsNkd8zVTfWVdLWBLW2st62Xa3
PFHptw2ojJw0AGCdEPjC1xrW8W6fyd5JUXl5se21rqyy0XkuR2tNqRK55dr79mKX5tu1JKF2Xx7kBABgmnlBjqvS7ymFUWvm4ftriT0+jMY+4eMwj6poX3LPVd2Njq8/SXVvnKs3NStpqu+Fpi62n0tGYxbBL1RWBWAcAYFwwocZUSYp59BsxyKOfPOMHbX1a+j69jLHo/YpoI9YBABgnRD4wVfKIJiT1129bnk+Bs/Q5yPgnLfIBAMC44Ak1JloRqxymVbvIWgnjga+9qFXJ4/o3n9A2xiPfd+tWtY7n30ON+eDvtaX1ho64aE4rtXUtlef17UdutndqkX8ijviLmzUTZqrLlQVd8qL9+7rucUW0AwAw7phQY6INc0W/XqIi0Uoe8ePaqnXMb/2SqDFf0kotqMoRvM4knmPbSomRLyg2+x7kesY18gEAwLgi8gEAAAAMgAk1Jto4ZX2joqXxtpXNi5a/W9+qeVdab2ipHJS5a74mXWv8XJtzkZUSKwuJxyQdNykZagAAxh2RD4y9pJz0OJZ9i+arm7npaKk6KRj3NWfcTdWNOVVm6yrbQqu9XFnQtx9ZVxD12Gw7Nmky3Gy79iX3b/tcExiTICcNAJhkTKgx9iYt69utvFt1Yy7ymm/JuEkvmwcAwCQi8oGxNynRhKzl3Sqz9dZrHpGMTsck9dFP/8O4rwAATDKeUGMs5bHiYdEO+dyGVmrB5HipbLrqsUG1jWq9rsMumdPKqmtpUfruUY220niVWyuytbqWFiq6/b7rrf6q9bqO+8vbWiX1LjnjgLa26L3o96lu1rJ/RSPiAQCYJkyoMZY6lYgbl2hCtV5vTaalZpm7rUnqyup663Vbaby14Ljma1S0pF5cltUXh3lP8rqvAABMOiIfAAAAwAB4Qo2xUMSKh0VrzEml+tZ2lJcka4Sv6w15+JTa1htym5G55LEVDyWpMW8qrbsa8wmNEyapSsk4xE0AAMgbE2qMhUmMBNz0/I32D27dmijuPWTrlz8+75HtkqqHuDq55qUH5zfAESMnDQDYKYh8AAAAAANgQo2xUHTpukL6n91azbAyO9/ethBuLySUzUsrqdfHuJKOT3o/7PsKAMBOQeQDI1N0abxoVYy0c9hm8I8kWUnyma22/S+SKqtBRGNpUbr7569Wbd+qysuLqj/0UNVWgqxzecl05S9tne+wP/2WantXVd61qB+84djCri3JKEvjEfMAAOxETKgxMnmWxuu3rVqvtybTzffe2NpuTqaloATe/L5VSVJt36rqK1s1pGsr65IWt97vXW17TRpL4iqKfdyTrPsNs2weAAA7CZEPjEzRkYykc/Xax9LiVqxjaXFe5eVg0lxeXlR5aastui1J5V2Lba+drrvfe9LJUKIurHgIAEAbnlBjqIa5AmLW6MPBx9yilc1gIZWlmQV974oDW22bC9JmWN9uc0G69qwHt/VZrZda51LkCe0Nb9m9tV+GeEarjwlArAMAgHZMqDFUSZGGUUYTqvV6azItSSuba21jXIwkNlZW66rWu/cZfZ9XrGMcIx8AACBA5AMAAAAYABNqDNVQy9plzUnPLLTalmYWYhnqreOWFrPnh/MujTdOGWoAANCOyAcGlrRseHvOeHgZ4QN/6natbK5paWZBt1x1n9bnB3xNWomUv6t8+qJWWbtSeUmbYfWO+eVF6TVbGer9zrtIC2GljvKuRd355hM6njua2U6LfETF4xRZ71H0HMNAbhoAgM6YUGNg/WaXe90vS1szDx3PQq/Eyt+VImXtSptbv6ip7VttOy5a9q62d7XjRLmXnPEkl80DAADbEfnAwIqOZPTS1oxvbI9uxMrfRcraNUvhSUE5vOhx0bJ35V2LmceV5X7lFQeJH1/EfQUAAJ3xhBpjKe1JbdoE75ar7pMYu/j8Oz4keTXYySo64oRTWsfES9dFj7v+zSf0FVlJin+k7ddr/0Ug1gEAQH+YUGNgRUY+0s7XUyyiOZmObxcw5m7
xjHEvmwcAAHpD5AMDKyLykeV8PcUurJK8nTD+vK+t3/EPO/IBAAD6wxNq9GWYKx7GHfmu61Tbt6by8oKuecXhbW3nnvRXanhVJavoj094pGr7VlVeXtTTzj669QXD8q5FVc7pvIpiNK7Ri+iT50mYoBLxAAAgH0yo0Zc8Ywu9xg9q+9Zar/F+GmGco+FV1cJSeLV9q2qsDV6to5cxT9pKiQAAoH9EPgAAAIABMKFGX/LMAfea5y0vL7Re48eVwnx0ySqtcnjl5cW+y9/1m0GehAw1AADIB5EPZJK0GmKeklYM7FR27ppXHN4xp3zCwpzkc5LN6bo3HNNxxca0vHO/bZ2MsjQeOWkAAIrHhBqZFLnKYS8547S2ar2+VRLPq5myykVcT9cx5nyufvYDAAD5IfKBTPKKRfRTWi6tbVvsolkSzyo9xzOKaMszUtJP5AMAABSPJ9ToaJil8eKxiCPe+u1Wybtrz3xQW9vvPP7fVG/UNFcq63PPPF61alD1o1xZ0NPf8PDWcXpd9/P1em1px/XbZ96IeQAAMFxMqNHRsGILSftFS97F9603apKkeqPWmkxLUq26pkbCcXmUv8vz2vJuSxs/AAAoHpEPAAAAYAA8oUZL0ZU8etGYL6m03ghe1zbVWJiRJJXWNjUnV12mObkac6ZS3YNj5kyNhZJKaw01Fqb774pJ1U+anwMAgOFiQo2WcaoQce1ZD25tH/57/9vW9qLn39zaftsZvxI78r5FDmtskJMGAGB8TPdjPAAAAKBgTKjRUuSKgWlt3fqPr3KYVBovbVxFr4Y4rHNTDg8AgPFE5GOHK6I0XtbSckf+xc2qVddUrixIm4228neXveQ+kkpa3VzT7D3vqdn5oG12eUHPeMTPq7ZvLViC/JW5DTuTYZbGI9YBAMBkYEK9ww1zRb94W3MCXauuSZuNVlutuqbNRvDLk81GSbV9kdJ4se0s5epGcW159w8AAMYXkY8dbpgr+m2LclQWJAVPpJvbzfczpWCCPVNqBE+im23LC6335eWFkcYuij43AACYDDyh3oGKXgEx61Pva158D1U35lSZrau6Ecsnz8yp2qhrcWZB17zyiG3l/Jrjj/eZxzVFx1/0xJZYBwAAk2/oE2ozO1jS30s6QJJLer+7v8fM7inpo5IOlfQdSc9y9x8Ne3w7QdKEt4joQ9dxhJPo+GS60xi7nbvTcZOyUiIAAJhMo4h8bEh6hbsfJekRkl5iZkdJOlPSF939SElfDN8DAAAAY23oE2p3v8Xdvxlu75V0laT7SzpZ0gfD3T4o6WnDHttOMczScqnjmK23vXYbY9K5i7i2pP767ZOcNAAA02+kGWozO1TS8ZK+LukAd78lbLpVQSQEOSh6SfGv/Or/k7wavLGKHvOJ32nljw979R7V9q5KCmtIe0O1vasq71pUaX5RtX2rKi8v6ro3HNM2xsPOurC13w3nnNh2vnhb3tdWdGk8ctMAAEyXkU2ozWxZ0vmSXu7uPzGzVpu7u5l5h+NOl3S6JB1yyCHDGOrEK7y0XHMyLUlebcsxNyfTkoJtb7S2S2Hxjtq+1e0l9cLjant7a8v92gZsGyRjDgAAJsNIyuaZ2ZyCyfSH3f3j4ce3mdmBYfuBkm5POtbd3+/uu9199/777z+cAU+4wsu7NVculNpWL6zMbV/lsPm+vGtR5eVwe3lxW//R/Xppm4SyeQAAYLqMosqHSfqApKvc/d2Rpgsk/Yakt4avnxr22KbJMEvj3bopNcLfJ5SsPZIxe8ABmq2EqxxWFnTNyx/QNq5OY2xGOXptG0dEPAAAmG6jiHw8StIpki4zs0vCz16tYCL9MTN7oaTvSnrWCMY2NXotOzdIablGJPLR8GpbJGN2ObKyYXUtcVyTslphHucGAADTZ+gTanf/L0nWoflxwxzLNIt/sa7TwihJ22n7RftuKlmlNakuWUXlXYtbT6grC60lxsuVhcRx9XruXtuK7j/LuQEAwPRipcQpUXQ
lj2aMQ1JbdY3K3JzO+ukTO7Zd/Gt/01YB5PjzfzP3sQ0T8Q0AABDHhHpKFL2iX7xaR1olj7ZYR0oFkF7HWNS19dMGAADQNJIqHwAAAMC0YEI9JYpe0S9e/i5rW1pJvVGWruu3DQAAII7IBzqKxjPu/6VlrTaCH5fF0qJu+PrWfmtPebhWa+uSpFJ5vu2448//zUKz3UUjMw0AALphQj0lisxQS9JqY7VtOzppXqk1Wm0rtXVFf/ExDisZ5tU/AABAEiIfU6LoFf0WS4tt29HjlsrzrbbodtZxjXPkAwAAoBueUE+wvFdD/PTJfyb3fZIks2W99Kd/tlVP+tZdu1TbNyNJKi/PSGdtHbfwr99QI6z0sbBrUZWfYyVDAACwczChnmC9roaY1lat11uTaUly39e24uGMbU2Oa/t6KJuX8dyDjj+v/gEAAHpF5AMAAAAYABPqCZZ3zthsubWf2XKrHF5516LKy5HSeMvZy+ZNQoYaAABgEEQ+JkjRy4sfPLssuQVvrKIb3rK7LQvd6dzNpcaLGlcRyE0DAIC8MKGeIEVmkLMuE17YuUfUPwAAwKCIfEyQomMRWVY1LOrcw+4fAAAgLzyhHnN5l8aTpA/9ynvV8KpKVtEbHvV41fatqry8qKe/8UTV9gUVO8rLi9LZuZ1y5Ih4AACAojChHnNFxC4aYbSj4dXWBLq2b1W+vt7aL14aL69zF9VHL/0DAADkicjHmCsiFlEKox0lq7Sqd5SX0yt5THrkAwAAoCg8oZ4S8Sey0UlkvO2Y+UXJNyVb1LVnPihTJY9JRMwDAAAMAxPqMddP7CLtOElb1TyGWMkjrW0Y5wYAACgKkQ8AAABgAEyox1y/q/2l5oeb5fGGWBovra2o/gEAAIaByMeY6Xc1xMrcXNtxT3v0hVprrGqhtKgrTjlUteqaJKlcWdAzPnSyatU1lSsL0mn5X8MokZsGAADDxoR6zOSVM15rBOXw1hqrrcm0pGB7dqa1Pa0ZagAAgGEh8jFm8oo+LJSCEngLpcXgSXSoXFlovS9XFqYu8gEAADBsPKEeA0WshviG+1weVPOwio449fi2tmY8ZBomoEQ8AADAqDGhHgOFxC4ipfGynG8aIh8AAACjQORjDBQSfYhU8shyvkmNfAAAAIwaT6hHJI+Yx/uecq42vaoZq+jtxxyt2t7gi4jlXYt6xvue3KrkUfnt9gog44joBgAAmFRMqEckj9jFZhjn2PRqazItSbW9q5oNK3tEK3mk9TlOkQ8AAIBJQuQDAAAAGAAT6hHJI0s8E+ajZ6yi8q7FVlt512Jiaby0PkedoQYAAJhURD6GpN8VENP87KIkl2TSDeecuK3/QTPa0VhKXmMmKw0AAKYNE+ohKSTHHCmNN2if3VYdzDt7DQAAMC2IfAxJEbGLaGm8IkrX9Tr+XvsEAACYBjyhLlAepfG+9WvntlY8fNqD2kvjPe2s4H1516J0Tl6j3tLviorEOgAAwE7ChLpAva6A2C3WES+NF90uMvJBrAMAAKAzIh8AAADAAJhQFyiXHHMkJ72tNF74vrxrcaTLf5OTBgAAOxmRjzHTykxLbbnp8q7FQkrj9YucNAAAQIAJdYH6ylA3J9NSW246j5x013P30QcAAMBOR+SjQH2tGNiMeEhtMY88Yh3dzk2sAwAAoHc8oc5Rv6sh7nn6+1ul8X71p05oK413wzknjCzWEUXEAwAAIBkT6hz1HadIKY3Xa2yk37Ze+gAAAMAWIh856jd2kVbJo4gqHP1EPgAAAJCMJ9QD6nc1xEM+U1ettq5yWXr7LzxU642a5ktlXf/mE/qKjRSBmAcAAEB3TKgH1G8ko1ZblyTVautab9QkSeuN2lBXMqSSBwAAwOCIfAAAAAADYEI9oH5zzOXyvCSpXJ7XfKksSZovlQvNSae1URoPAACgP0Q+RuRfPvChVqm8Y88/rdDSeGShAQAAisOEekB9l7WLlMobVmk8AAAA5I/Ix4D6XmkwUiq
v6FgHAAAAisMT6h71shrikX92o2rVNZUrCzr7C/+iRvhUumQVHXv+i3ONeRDrAAAAGA0m1D3qpexcrbomSapV11qTaUlqdIh5EOsAAACYPEQ+etRTJY/KgiSpXFlQqRnxUPCEup/VCol1AAAAjB+eUPcoHq146t1PUW3vqsq7FvXJb1++9WVDq+hYbVXvOOVfXjzwCojEOgAAAMYPE+oB1faubr1GYh39VO8g1gEAADB5iHwAAAAAA2BCPaDyrsWt10hOup9yeOSkAQAAJg+Rjx41bju+LSctHd1qa654KJGTBgAA2CnG7gm1mT3JzK42s2vN7MxRj2ebWE46mqGOZ56jGeosbeSkAQAAJs9YTajNbEbSX0j6ZUlHSXqumR012lHFxGId0cjHoKscEusAAACYPOMW+ThR0rXufr0kmdl5kk6WdOVIRxVROuDitvcX3DWigQAAAGAsjNUTakn3l3Rj5P1N4WctZna6me0xsz133HHHUAcHAAAAxI3bhLord3+/u+92993777//qIcDAACAHW7cJtQ3Szo48v6g8DMAAABgLI3bhPobko40s8PMbF7ScyRdMOIxAQAAAB2N1ZcS3X3DzH5H0uckzUg6192vGPGwAAAAgI7GakItSe7+GUmfGfU4AAAAgCzGLfIBAAAATBQm1AAAAMAAmFADAAAAA2BCDQAAAAyACTUAAAAwACbUAAAAwACYUAMAAAADYEINAAAADIAJNQAAADAAJtQAAADAAJhQAwAAAAMwdx/1GPpmZndI+u6ITn9vST8Y0bmnGfe1GNzXYnBfi8F9LQb3tRjc1/yN6z19gLvvn9Qw0RPqUTKzPe6+e9TjmDbc12JwX4vBfS0G97UY3NdicF/zN4n3lMgHAAAAMAAm1AAAAMAAmFD37/2jHsCU4r4Wg/taDO5rMbivxeC+FoP7mr+Ju6dkqAEAAIAB8IQaAAAAGAAT6h6Z2ZPM7Gozu9bMzhz1eCaVmR1sZl82syvN7Aoze1n4+evN7GYzuyT858mjHuukMbPvmNll4f3bE352TzP7gpldE77eY9TjnCRm9uDIz+QlZvYTM3s5P6+9M7Nzzex2M7s88lniz6cF/iz87+2lZvaw0Y18vHW4r+8ws/8L790nzGy/8PNDzawW+bn9y5ENfMx1uK8d/703s7PCn9erzeyXRjPq8dfhvn40ck+/Y2aXhJ9PxM8rkY8emNmMpG9LeoKkmyR9Q9Jz3f3KkQ5sApnZgZIOdPdvmtkuSRdJepqkZ0na5+7vHOX4JpmZfUfSbnf/QeSzt0u6093fGv5F8B7u/qpRjXGShf8duFnSz0g6Vfy89sTMfl7SPkl/7+5Hh58l/nyGE5WXSnqygvv9Hnf/mVGNfZx1uK9PlPQld98ws7dJUnhfD5X06eZ+6KzDfX29Ev69N7OjJH1E0omS7ifp3yU9yN03hzroCZB0X2Pt75J0l7u/cVJ+XnlC3ZsTJV3r7te7+7qk8ySdPOIxTSR3v8Xdvxlu75V0laT7j3ZUU+1kSR8Mtz+o4C8v6M/jJF3n7qNaVGqiuft/SLoz9nGnn8+TFfwfrrv71yTtF/5lHDFJ99XdP+/uG+Hbr0k6aOgDm3Adfl47OVnSee6+5u43SLpWwbwBMWn31cxMwcO1jwx1UANiQt2b+0u6MfL+JjEJHFj4t8/jJX09/Oh3wl9Rnks0oS8u6fNmdpGZnR5+doC73xJu3yrpgNEMbSo8R+3/oefndXCdfj75b25+TpP02cj7w8zsYjP7qpk9elSDmmBJ/97z85qPR0u6zd2viXw29j+vTKgxUma2LOl8SS93959Iep+kwyUdJ+kWSe8a3egm1s+5+8Mk/bKkl4S/WmvxIOdF1qsPZjYv6amS/in8iJ/XnPHzmT8ze42kDUkfDj+6RdIh7n68pN+X9I9mdrdRjW8C8e99sZ6r9ocWE/HzyoS6NzdLOjjy/qDwM/TBzOYUTKY/7O4flyR3v83dN929Iemvxa/
LeubuN4evt0v6hIJ7eFvzV+Xh6+2jG+FE+2VJ33T32yR+XnPU6eeT/+YOyMxeIOkkSc8L/7KiMJLww3D7IknXSXrQyAY5YVL+vefndUBmNivp6ZI+2vxsUn5emVD35huSjjSzw8InVc+RdMGIxzSRwozUByRd5e7vjnwezUf+qqTL48eiMzOrhF/ylJlVJD1RwT28QNJvhLv9hqRPjWaEE6/tyQk/r7np9PN5gaTnh9U+HqHgS0q3JHWA7czsSZL+UNJT3X0l8vn+4ZdrZWYPlHSkpOtHM8rJk/Lv/QWSnmNmC2Z2mIL7euGwxzfhHi/p/9z9puYHk/LzOjvqAUyS8JvSvyPpc5JmJJ3r7leMeFiT6lGSTpF0WbM0jqRXS3qumR2n4Fe+35H0W6MY3AQ7QNIngr+vaFbSP7r7v5nZNyR9zMxeKOm7Cr7wgR6Ef0F5gtp/Jt/Oz2tvzOwjkh4j6d5mdpOk10l6q5J/Pj+joMLHtZJWFFRVQYIO9/UsSQuSvhD+N+Fr7n6GpJ+X9EYzq0tqSDrD3bN+8W5H6XBfH5P07727X2FmH5N0pYKIzUuo8JEs6b66+we0/Tsq0oT8vFI2DwAAABgAkQ8AAABgAEyoAQAAgAEwoQYAAAAGwIQaAAAAGAATagAAAGAATKgBAACAATChBoCQmbmZvSvy/g/M7PU59f13ZvaMPPrqcp5nmtlVZvblAs9xnJk9uaj+U877z+HCDjKz75jZ+ZG2Z5jZ34XbJ5nZG4c9PgA7FxNqANiyJunpZnbvUQ8kKlyON6sXSnqRu/9iUeORdJyCBVe26XGsmZnZT0uacffoCmknmNlRCbv/q6RfMbOlIsYCAHFMqAFgy4ak90v6vXhD/Amzme0LXx9jZl81s0+Z2fVm9lYze56ZXWhml5nZ4ZFuHm9me8zs22Z2Unj8jJm9w8y+YWaXmtlvRfr9TzO7QMHKa/HxPDfs/3Ize1v42dmSfk7SB8zsHbH9H2NmXwmf8v6fmX3YwuXzzOyE8BouMrPPNZdWDvffHW7fO3wqPC/pjZKebWaXmNmzzez1ZvYhM/tvSR8ys0PN7Evh9XzRzA6J3MM/M7P/Ce/VM8LPDzSz/wj7u9zMHp3wZ/M8bS1J3vQuSa+J7+jBimVfkXRSQj8AkDsm1ADQ7i8kPc/M7t7DMcdKOkPST0k6RdKD3P1ESX8j6aWR/Q6VdKKkp0j6SzNbVPBE+S53f7ikh0t6kZkdFu7/MEkvc/cHRU9mZveT9DZJj1XwtPjhZvY0d3+jpD2Snufur0wY5/GSXi7pKEkPlPQoM5uT9OeSnuHuJ0g6V9Ifd7pQd1+XdLakj7r7ce7+0bDpKEmPd/fnhv190N2PkfRhSX8W6eJABZP+kxQsOS5Jvy7pc+5+XHgvL0k49aMkXRT77GOSHmZmRyTsv0dS0sQcAHJXyK/mAGBSuftPzOzvJf2upFrGw77h7rdIkpldJ+nz4eeXSYpGLz7m7g1J15jZ9ZIeIumJko6JPP2+u6QjJa1LutDdb0g438MlfcXd7wjP+WFJPy/pk13GeaG73xQec4mCCf6PJR0t6QvhA+sZSbdkueiYC9y9eb8eKenp4faHJL09st8nw3twpZkdEH72DUnnhpP7T7r7JQn9Hyjpjthnm5LeIeksSZ+Ntd0u6X59XAcA9Iwn1ACw3Z8qeHJciXy2ofC/mWZWkjQfaVuLbDci7xtqf3DhsfO4JJP00vBp73Hufpi7Nyfk1UEuIkF0nJvh2EzSFZHzP9Tdnxju07pmSYtd+s461ugYTJLc/T8U/IXgZkl/Z2bPTziu1mEMHwqPPTj2+aKy/4UIAAbChBoAYtz9TgVxghdGPv6OpBPC7adKmuuj62eaWSnMVT9Q0tWSPifpt8OnszKzB5lZJa0TSRdK+oUw1zwj6bmSvtrHeBSOYX8ze2R4/rnwC4BS+zVHK5TslbQrpc//kfSccPt5kv4zbQBm9gBJt7n7XyuIyTwsYberJG2
Ldrh7XdKfaHvu/UGSLk87LwDkhQk1ACR7l6RotY+/VjCJ/ZaCSEM/T4+/p2Ay/FlJZ7j7qoIJ5JWSvmlml0v6K3WJ44XxkjMlfVnStyRd5O7xL+xlEmainyHpbeG1XSLpZ8PmdyqY7F+s9nvxZUlHNb+UmNDtSyWdamaXKsiUv6zLMB4j6VvheZ4t6T0J+/xruF+SD2j7PfvF8BgAKJwFX4YGAGB8mVlZwUT+Ue6+2WXfAyT9o7s/biiDA7DjMaEGAEwEM/slSVe5+/e67PdwSfUOX24EgNwxoQYAAAAGQIYaAAAAGAATagAAAGAATKgBAACAATChBgAAAAbAhBoAAAAYwP8PhhLrqnAMLTwAAAAASUVORK5CYII=\n", "text/plain": [ "<Figure size 864x576 with 1 Axes>" ] }, "metadata": { "needs_background": "light" }, "output_type": "display_data" } ], "source": [ "import matplotlib.pyplot as plt\n", "\n", "xyc = []\n", "for name, base in nuclideBases.byName.items():\n", " if not base.a:\n", " continue\n", " xyc.append((base.a - base.z, base.z, base.abundance or 0.5))\n", "x, y, c = zip(*xyc)\n", "plt.figure(figsize=(12, 8))\n", "plt.scatter(x, y, c=c, marker=\"s\", s=6)\n", "plt.title(\"Chart of the nuclides\")\n", "plt.xlabel(\"Number of neutrons (N)\")\n", "plt.ylabel(\"Number of protons (Z)\")\n", "plt.show()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.13" }, "varInspector": { "cols": { "lenName": 16, "lenType": 16, "lenVar": 40 }, "kernels_config": { "python": { "delete_cmd_postfix": "", "delete_cmd_prefix": "del ", "library": "var_list.py", "varRefreshCmd": "print(var_dic_list())" }, "r": { "delete_cmd_postfix": ") ", "delete_cmd_prefix": "rm(", "library": "var_list.r", "varRefreshCmd": "cat(var_dic_list()) " } }, "types_to_exclude": [ "module", "function", "builtin_function_or_method", "instance", "_Feature" ], "window_display": false } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: doc/tutorials/param_sweep.nblink 
================================================ { "path": "../../armi/tests/tutorials/param_sweep.ipynb", "extra-media": [ "../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml", "../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml", "../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml", "../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py" ] } ================================================ FILE: doc/tutorials/pin-rotations.nblink ================================================ { "path": "../../armi/tests/tutorials/pin-rotations.ipynb", "extra-media": [ "../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml", "../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml", "../../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml", "../../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py" ] } ================================================ FILE: doc/tutorials/walkthrough_inputs.rst ================================================ .. _walkthrough-inputs: *************************************** Building input files for a fast reactor *************************************** The true power of ARMI comes when you have a reactor at your fingertips. To get this, you must describe the reactor via input files. This tutorial will walk you through building input files from scratch for a reactor. We will model the CR=1.0 sodium-cooled fast reactor documented in `ANL-AFCI-177 <https://publications.anl.gov/anlpubs/2008/05/61507.pdf>`_. The full :doc:`documentation for input files is available here </user/inputs>`. .. tip:: The full inputs created in this tutorial are available for download at the bottom of this page. Setting up the blueprints ========================= First we'll set up the fuel assembly design in the blueprints input. Make a new file called ``anl-afci-177-blueprints.yaml``. We'll be entering information based on Table 4.4 of the reference. 
To define the pin cell we need dimensions of the fuel pin, cladding, ducts, wire wrap, and so on. The cladding dimensions are clear from the table. The outer diameter is given as the pin diameter, and the inner diameter is simply that minus twice the cladding thickness. We will use the ``Circle`` shape, and make the material ``HT9`` steel. Since we're inputting cold dimensions, we'll set ``Tinput`` to room temperature and let ARMI thermally expand the clad up to an average operating temperature of 450 °C. Lastly, since there are 271 pins in the assembly, we'll set the ``mult`` (short for *multiplicity*) to 271: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: start-block-clad :end-before: end-block-clad .. note:: In fast reactors, neutrons aren't as affected by spatial details at a pin level, and compositions are often quite spatially flat across an assembly. Thus we can often just copy a component using the ``mult`` input equal to the number of pins, and neutronic modeling is sufficient. For subchannel T/H, the spatial details are of course much more important. .. note:: The ``&block_fuel`` is a YAML anchor which will be discussed more below. Next, let's enter the wire wrap. This is a helical wire used in fast reactors to mix coolant and keep pins separate (used in lieu of a grid spacer). ARMI has a special shape for this, called a ``Helix``. Helices are defined by their axial pitch (how much linear distance between two wrappings axially), the wire diameter, and the diameter of the pin they're wrapping around (called ``helixDiameter``). Thus, we input the wire wrap into the blueprints as follows. .. note:: The wire axial pitch isn't specified in the table so we just use a typical value of 30 cm. .. 
literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-clad :end-before: end-block-wire We set the wire inner diameter to 0 to make it a solid wire. If we set it to something non-zero, the wire itself would be hollow on the inside, which would be crazy. Now, it's time to do the fuel. This example reactor uses UZr metal fuel with a liquid sodium thermal bond in the large gap between the fuel and the cladding. The fraction of space inside the clad that is fuel is called the "smeared density", so we can figure out the actual fuel slug dimensions from the information in the table. Specifically, the smeared density is 75%, which means that 75% of the area inside the circle made by the inner diameter of the cladding (0.6962 cm) is fuel. Thus, the fuel outer diameter is given by solving: .. math:: 0.75 = \frac{\pi d^2}{\pi 0.6962^2} which gives :math:`d = 0.6029`, our fuel outer diameter. Now we can enter our fuel slug component into blueprints: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-wire :end-before: end-block-fuel .. note:: We upped the hot temperature to 500 °C, indicative of the fact that fuel will be running a bit hotter than cladding. Let's enter a description of the thermal bond next. This is an annulus of sodium between the fuel and the cladding. Since those dimensions are already set, we will use **linked dimensions**. Thus, no numbers (beyond temperatures) are needed! .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-fuel :end-before: end-block-bond The next physical component we need to model is the hexagonal assembly duct. This information is provided in Table 4.3 of ANL-AFCI-177. For the ``Hexagon`` shape, we enter inner and outer flat-to-flat distances ("pitch") instead of diameters. 
The outer pitch is given as ``15.710``, and we can calculate the inner pitch from that and the duct thickness. It ends up looking like this: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-bond :end-before: end-block-duct It's essential to capture the spacing between adjacent ducts too (the assembly pitch, also defined in Table 4.3), and we define this by defining a special ``Hexagon`` full of interstitial coolant outside the duct: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-duct :end-before: end-block-intercoolant That defines everything in our assembly except for the coolant. The shape of the coolant is geometrically complex, it's a hexagon with holes punched through it (one for each cladding tube/wire wrap). Rather than explicitly defining this shape, ARMI allows you to input a ``DerivedShape`` in certain conditions (e.g. when the rest of the assembly is filled and only one ``DerivedComponents`` is defined. It can simply back-calculate the area of this shape automatically. And that's just what we'll do with the coolant: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-intercoolant :end-before: end-block-coolant And that completes our generic fuel block description. Defining non-fuel blocks ------------------------ For this core model, we will need some reflectors, shields, and control blocks as well. In detailed models, it can often be important to model these in detail. For this example, we'll keep it simple. Control blocks will simply be filled with sodium (representing an all-rods-out condition), reflectors will just be full pins of HT9 steel with coolant around them, and shields will be unclad B4C in sodium. 
Normally the pin sizes would be different, but again for simplicity, we're just duplicating the pin dimensions. For brevity, we will simply provide the definitions as described. Radial Shields ^^^^^^^^^^^^^^ Here is a very simplified radial shield: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-coolant :end-before: end-block-radialshield Reflectors ^^^^^^^^^^ Here is a reflector block definition. We can use this for radial reflectors and axial reflectors. We include wire wrap so the axial reflector will work with our basic thermal hydraulic solver: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-radialshield :end-before: end-block-reflector Control ^^^^^^^ Here is a big empty sodium duct (what you'd find below a control absorber bundle): .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-reflector :end-before: end-block-control Plenum ^^^^^^ We also need to define empty cladding tubes above the fuel for the fission gasses to accumulate in. This just has a ``gap`` component made of the ``Void`` material, which is just empty space: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-control :end-before: end-block-plenum That should be enough to define the whole core. Defining how the blocks are arranged into assemblies ---------------------------------------------------- With block cross-sections defined, we now set their heights and stack them up into assemblies. While we're at it, we can conveniently adjust some frequently-modified material parameters, such as the uranium enrichment. Defining the fuel assemblies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There are three fuel assemblies defined in ANL-AFCI-177, each with different enrichments. 
We can specify some assembly data to be shared across all assemblies and just overlay the differences. We define the ``assemblies`` section of the blueprints input file. We get core and plenum height from table 4.4, and split the core into 5 equally-sized sections at 20.32 cm tall each. This defines the depletion mesh. Each of these 5 blocks will deplete and accumulate state independently. In the ``axial mesh points`` section, we specify a roughly even neutronic/transport mesh, with slightly larger neutronic mesh points in the very tall single-block plenum: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-block-plenum :end-before: end-assemblies-common Now that the common heights and neutronic mesh are specified, we start applying them to the various individual assemblies. We start with the inner core and refer to the heights and mesh with YAML anchors. As described in Section 2.0 of the reference, an enrichment splitting of 1.0, 1.25, and 1.5 was used for inner, middle, and outer core in order to help minimize radial power peaking. The specific enrichments of each zone are shown in Table 4.8. For simplicity, let's just use these as uranium enrichments rather than the detailed material from the paper. Specifying more details is possible via the **custom isotopics** input fields.: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-assemblies-common :end-before: end-assemblies-ic .. warning:: The weirdest thing about this input section is the use of YAML anchors for the blocks. Under the hood, this copies the entire block definition into each entry of that list. This is a bit strange, and we plan to switch this to a string-based block name rather than a full YAML anchor in the ``blocks`` list. .. note:: Notice the blank strings in the ``U235_wt_frac`` section? 
Those are placeholders indicating that the material in those blocks does not have uranium in it, and thus adjusting uranium enrichment doesn't make sense. These are the axial reflectors, plena, grid plates, etc. For the middle core, we can use the same stack of blocks (using an anchor), but we need different enrichments. We can choose whether or not to use the same ``xs types``. When composition is different, one often uses independent cross section types so you get cross sections specific to different enrichments. This is a trade-off, since more cross section types means more lattice physics calculations, which can require either more time or more processors: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-assemblies-ic :end-before: end-assemblies-mc Same deal for the outer core. .. note:: The columnar form of YAML lists is very convenient when using text editors with column-edit capabilities. It is highly recommended to make sure you know how to column edit. .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-assemblies-mc :end-before: end-assemblies-oc Defining the non-fuel assemblies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Let's make some shield, reflector, and control assemblies. It's fine for these to have different numbers of blocks. Some physics kernels (like DIF3D) have some requirements of axial mesh boundaries at least lining up between assemblies, but there are some ARMI features that can automatically adjust the mesh if you have very complicated assemblies: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-assemblies-oc :end-before: end-assemblies-rr .. note:: Here we just reuse the fuel block cross sections. In more precise models, a different approach may be used. Here is the radial shield: .. 
literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-assemblies-rr :end-before: end-assemblies-sh Here are the control blocks: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-assemblies-sh :end-before: end-assemblies-section And that's it! All blueprints are now defined. Specifying the core map ======================= With blueprints defined we can now arrange assemblies into the core. This is with the geometry input file. .. note:: There are GUI tools to help making the core map easy to set up. .. note:: We plan to converge on consistent input between pin maps and core maps for the physics kernels and analyses that require finer detail of how the pins are arranged within blocks. Geometry can be input various ways. The most straightforward is to provide a simple ASCII-based map of the core. For this problem, a 1/3 hexagonal model can be input as follows (see Figure 4.3 in the reference). First, we refer to a geometry file from the ``systems`` section of the ``blueprints`` file: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml :language: yaml :start-after: end-assemblies-section :end-before: end-systems-section And then, in the core map file (``anl-afci-177-coreMap.yaml``): .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml :language: yaml .. note:: The two-letter values here can be any contiguous strings, and correspond with the ``specifier`` field in the blueprints input. .. note:: GUI utilities are also useful for building core maps like this. Specifying settings =================== Now we need to specify some **settings** that define fundamental reactor parameters, as well as modeling approximation options. For this, we make a **settings file**, called ``anl-afci-177.yaml``. The thermal power in this reference is 1000 MWt. 
The thermal efficiency isn't specified, so let's assume 0.38. From Table 4.8, the cycle length is 370 EFPD. Let's also assume a 0.90 capacity factor which will gives full cycles of 411.1 days. .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml :language: yaml :start-after: begin-settings :end-before: end-section-1 We need to tell the system which other input files to load by bringing in the blueprints and geometry (the shuffling and fuel handler info will be described momentarily): .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml :language: yaml :start-after: end-section-1 :end-before: end-section-2 In terms of our simulation parameters, let's run it for 10 cycles, with 2 depletion time steps per cycle: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml :language: yaml :start-after: end-section-2 :end-before: end-section-3 Set some physics kernel and environment options: .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml :language: yaml :start-after: end-section-3 .. note:: The :ref:`ARMI GUI <armi-gui>` is simply an optional frontend to this settings file. Behind the scenes it just reads and writes this. It is quite convenient for discovering important settings and describing what they do, however. Defining fuel management ======================== Finally, let's specify the fuel management file that we referred to above by creating the file ``anl-afci-177-fuelManagement.py``. Fuel management is very wide-open, so we use Python scripts to drive it. It's generally overly constraining to require any higher-level input for such a general problem. In ANL-AFCI-177, section 2 says no shuffling was modeled, and that the core is in a batch shuffling mode, limited by a cladding fast fluence of 4.0e23 n/cm\ :sup:`2`. Often, SFR studies use the REBUS code's implicit equilibrium fuel cycle mode. 
There is an ARMI equilibrium module at TerraPower that performs this useful calculation (with different inputs), but for this sample problem, we will simply model 10 cycles with explicit fuel management. The shuffling algorithm we'll write will simply predict whether or not the stated fluence limit will be violated in the next cycle. If it will be, the fuel assembly will be replaced with a fresh one of the same kind. .. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py :language: python There! You have now created all the ARMI inputs, from scratch, needed to perform a simplified reactor analysis of one of the SFRs in the ANL-AFCI-177 document. The possibilities from here are only limited by your creativity, (and a few code limitations ;). As you load the inputs in ARMI it will provide some consistency checks and errors to help identify common mistakes. Here are the full files used in this example: * :download:`Blueprints <../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml>` * :download:`Settings <../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml>` * :download:`Core map <../../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml>` * :download:`Fuel management <../../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py>` The next tutorial will guide you through inputs for a classic LWR benchmark problem (C5G7). ================================================ FILE: doc/tutorials/walkthrough_lwr_inputs.rst ================================================ .. _walkthrough-lwr: ****************************************** Building input files for a thermal reactor ****************************************** In the :doc:`previous tutorial </tutorials/walkthrough_inputs>`, we introduced the basic input files and made a full input for a sodium-cooled fast reactor. 
In this tutorial, we will build simple inputs for the light-water reactor (LWR) benchmark problem called C5G7 as defined in `NEA/NSC/DOC(2003)16 <https://www.oecd-nea.org/upload/docs/application/pdf/2019-12/nsc-doc2003-16.pdf>`_. The compositions are documented in `NEA/NSC/DOC(96)2 <https://www.oecd-nea.org/upload/docs/application/pdf/2020-01/nsc-doc96-02-rev2.pdf>`_. .. tip:: The full inputs created in this tutorial are available for download at the bottom of this page. .. warning:: C5G7 is a problem with defined 7-group macroscopic cross sections. Rather than Using those cross sections directly, this input is meant to regenerate them rather than to using the provided macros directly. .. warning:: ARMI was historically developed in support of fast reactors and most features have been used and tested in fast reactor contexts. This tutorial shows that simple LWR cases can be defined in input, but there is still a lot of work to make sure all ARMI capabilities are operational in this context. Thus, be warned that as of 2020, doing LWR analysis with ARMI will certainly require new developments. We are excited to expand ARMI scope fully into LWR relevant analysis. In particular, the handling of detailed locations within a block is relatively experimental (fast reactors usually just smear it out). Setting up the blueprints ========================= This tutorial is shorter than the previous, focusing mostly on the new information. Custom isotopic vectors ----------------------- When using materials that differ in properties or composition from the materials in the ARMI material library, you can use custom isotopics to specify their composition. The composition details below are documented in Table 2 of `NEA/NSC/DOC(96)2 <https://www.oecd-nea.org/upload/docs/application/pdf/2020-01/nsc-doc96-02-rev2.pdf>`_. .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: start-custom-isotopics :end-before: end-custom-isotopics .. 
tip:: Scripts that load the prescribed cross sections from the benchmark into the ARMI cross section model could be written fairly easily, allowing users to quickly evaluate this full benchmark problem with various global solvers. The UO2 block ------------- Now we define the pins and other components of the UO2 block. What's new here is that we're pointing to custom isotopics in many cases, and we're using the ``latticeIDs`` input to add textual specifiers, which will be used in the ``grids`` input section below to count and place the pins into a square-pitch lattice. Note that the ``latticeIDs`` section is a list. The component will fill every position in the grid that has any of the specifiers in this list. You will see the `<<: *guide_tube` notation below. This means use the specifications of guide_tube, but make the modifications that appear below. .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-custom-isotopics :end-before: end-block-uo2 .. note:: The dummy pitch component has no material and is simply used to define the assembly pitch. In a future upgrade, this information will be taken directly from the ``lattice pitch`` grid definition below. The MOX block ------------- The next assembly is very similar. We define three separate fuel pins, each with different ``latticeIDs``, and then use YAML anchors to just copy the moderator, guide tube, and fission chamber from the previous assembly. .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-block-uo2 :end-before: end-block-mox The moderator block ------------------- The moderator block for the radial and axial reflectors is very simple: .. 
literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-block-mox :end-before: end-block-mod The 3-D Assembly definitions ---------------------------- Now that the pins are defined, we stack them into assemblies, very similar to what we did in the SFR tutorial. There are three distinct assembly definitions. .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-block-mod :end-before: end-assemblies The Systems definition ---------------------- This problem only considers a core, so we will only have a core system in this problem. If pumps, heat exchangers, spent fuel pools, etc were to be modeled, they would be here alongside the core. We also anchor the core at the global coordinates (0,0,0). If we wanted the core at some other elevation, we could adjust that here. .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-assemblies :end-before: end-systems The Grids definitions --------------------- Now we define the core map and the assembly pin maps using the generic grid input section. In the previous tutorial, we loaded the grid definition from an XML file. In this tutorial, we define the grid directly with an textual ``lattice map`` input section. The core map is particularly simple; it only has 9 assemblies. .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-systems :end-before: end-grid-core The pin map for the UO2 assembly is larger, but still relatively straightforward. Recall that on the ``uo2`` block above we said that we want to apply the grid with the name ``UO2 grid``, and wanted to fill any ``U`` position with the ``fuel`` component defined up there. Here's where we define that grid. .. 
literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-grid-core :end-before: end-grid-UO2 Similarly, we define the ``MOX grid`` as follows: .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-grid-UO2 :end-before: end-grid-MOX This grid is more complex in that it has different enrichment zones throughout the assembly. Nuclide Flags ------------- .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml :language: yaml :start-after: end-grid-MOX :end-before: end-nucflags The default nuclide flags provided do not contain oxygen or hydrogen, but these elements are present in the ``SaturatedWater`` material. Thus, we list them in this input section, and specifically leave out the trace isotope, ``O18``. The settings file ================= Really, the only thing the settings file does in this case is point to the blueprints file. As we turn this case into an actual run, we may add various cross section and neutrons options to evaluate the benchmark. .. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-settings.yaml :language: yaml Defining fuel management ======================== By not defining any fuel management settings, we skip fuel management for this benchmark problem entirely. There! You have now created all the ARMI inputs, from scratch, needed to represent the C5G7 benchmark problem. Ok, so now what? ================ You can run the default ARMI app on these inputs, which will run a few cycles and make an output database:: $ python -m armi run c5g7-settings.yaml But since the baseline app doesn't do any real calculations, it won't have a lot in it. You have to add plugins to do calculations (see the `plugin directory <https://github.com/terrapower/armi-plugin-directory>`_). Of course, you can fiddle around with the reactor in memory. For example, in an ipython session, you can plot one of the assembly's pin locations. .. 
code-block:: python import matplotlib.pyplot as plt import armi from armi.reactor.flags import Flags armi.configure() o = armi.init(fName = "c5g7-settings.yaml") b = o.r.core.getFirstBlock(Flags.MOX) flags = [Flags.LOW, Flags.MEDIUM, Flags.HIGH] colors = ["green", "yellow", "red"] for f, c in zip(flags, colors): x, y=[], [] pin = b.getComponent(Flags.FUEL| f) for loc in pin.spatialLocator: xi, yi, zi = loc.getGlobalCoordinates() x.append(xi) y.append(yi) plt.scatter(x, y, color=c) plt.show() This should show a simple representation of the block. .. figure:: https://terrapower.github.io/armi/_static/c5g7-mox.png :figclass: align-center A representation of a C5G7 fuel assembly. Here are the full files used in this example: * :download:`Blueprints <../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml>` * :download:`Settings <../../armi/testing/reactors/c5g7/c5g7-settings.yaml>` ================================================ FILE: doc/user/_gallery/index.rst ================================================ .. This is just here as a placeholder to avoid 404s after we moved gallery up one. A variety of sphinx extensions may support redirects but none seem mature yet. .. include:: /gallery/index.rst ================================================ FILE: doc/user/accessingEntryPoints.rst ================================================ ********************** Accessing Entry Points ********************** Reports Entry Point =================== There are two ways to access the reports entry point in ARMI. The first way is through a yaml settings file. Here, the call is as follows:: (venv) $ armi report anl-afci-177.yaml It is also possible to call this on an h5 file:: (venv) $ armi report -h5db refTestBase.h5 .. note:: When working with a h5 file, -h5db must be included Once these are called, a report is generated and outputted as an html file in reportsOutputFiles. 
================================================ FILE: doc/user/index.rst ================================================ ######### User Docs ######### Here you will learn how to use :term:`ARMI`. It will cover getting and installing ARMI and its prerequisites, making ARMI input files, running various kinds of ARMI runs, analyzing ARMI output files, etc. -------------- .. toctree:: :maxdepth: 2 :numbered: inputs outputs settings_report params_report manual_data_access spatial_block_data physics_coupling accessingEntryPoints radial_and_axial_expansion symmetry_handling ================================================ FILE: doc/user/inputs.rst ================================================ ****** Inputs ****** ARMI input files define the initial state of the reactor model and tell ARMI what kind of analysis should be performed on it. .. note:: We have a :ref:`walkthrough-inputs` tutorial for a quick overview of the inputs. There are several input files: Settings file Contains simulation parameters (like full power, cycle length, and which physics modules to activate) and all kind of modeling approximation settings (e.g. convergence criteria) Blueprints file Contains dimensions and composition of the components/blocks/assemblies in your reactor systems, from fuel pins to heat exchangers Fuel management file Describes how fuel moves around during a simulation Depending on the type of analysis, developers may create other input files for things like: control logic, ex-core models for transients and shielding, etc. YAML Files ========== ARMI's input files all use the `YAML <https://en.wikipedia.org/wiki/YAML>`_ format. This is a well-known file format, chosen because it is human-readable and easy to hand-write. 
That being said, there are two details about the YAML format that are important to know: Ordering YAML is not order specific; however, one of the techniques used to limit the size of the input includes using YAML anchors to reuse block and component definitions. YAML anchors (e.g. ``&block_name``) must be defined before their corresponding alias (e.g. ``*block_name``) used. Duplicate Keys YAML allows for duplicate keys. However, in ARMI, duplicates might be erroneous. Unfortunately, because the international YAML specification allows for duplicates, none of the YAML-parsing libraries see it as an error. You will have to hand-verify your inputs are correct. The Settings Input File ======================= The **settings** input file defines a series of key/value pairs the define various information about the system you are modeling as well as which modules to run and various modeling/approximation settings. For example, it includes: * The case title * The reactor power * The number of cycles to run * Which physics solvers to activate * Whether or not to perform a critical control search * Whether or not to do tight coupling iterations * What neutronics approximations specific to the chosen physics solver to apply * Environment settings (paths to external codes) * How many CPUs to use on a computer cluster This file is a YAML file that you can edit manually with a text editor or with the ARMI GUI. Here is an excerpt from a settings file: .. literalinclude:: ../../armi/tests/armiRun.yaml :language: yaml :lines: 1-14 A full listing of settings available in the framework may be found in the :ref:`Table of all global settings <settings-report>` . Many settings are provided by the ARMI Framework, and others are defined by various plugins. .. _armi-gui: The ARMI GUI ------------ The ARMI GUI may be used to manipulate many common settings (though the GUI can't change all of the settings). 
The GUI also enables the graphical manipulation of a reactor core map, and convenient automation of commands required to submit to a cluster. The GUI is a front-end to these files. You can choose to use the GUI or not, ARMI doesn't know or care -- it just reads these files and runs them. Note that one settings input file is required for each ARMI case, though many ARMI cases can refer to the same Blueprints, Core Map, and Fuel Management inputs. .. tip:: The ARMI GUI is not yet included in the open-source ARMI framework, but a simple grid editor GUI is, as described in :ref:`grids` The assembly clicker ^^^^^^^^^^^^^^^^^^^^ The assembly clicker (aka the :py:mod:`Grid Editor <armi.utils.gridEditor>`) allows users to define the 2-D layout of the assemblies defined in the :ref:`bp-input-file`. This can be done in hexagon or cartesian. The results of this arrangement get written to grids in blueprints. Click on the assembly palette on the right and click on the locations where you want to put the assembly. By default, the input assumes a 1/3 core model, but you can create a full core model through the menu. If you want one assembly type to fill all positions in a ring, right click it once it is placed and choose ``Make ring like this hex``. Once you submit the job or save the settings file (File -> Save), you will be prompted for a new name of the geometry file before the settings file is saved. The geometry setting in the main tab will also be updated. The ARMI Environment Tab ^^^^^^^^^^^^^^^^^^^^^^^^ The environment tab contains important settings about which version of ARMI you will run and with which version of Python, etc. Most important is the ``ARMI location`` setting. This points to the codebase that will run. If you want to run the released version of ARMI, ensure that it is set in this setting. If you want to run a developer version, then be sure to update this setting. 
Other settings on this tab may need to be updated depending on your computational environment. Talk to your system admins to determine which settings are best. Some special settings --------------------- A few settings warrant additional discussion. .. _detail-assems: Detail assemblies ^^^^^^^^^^^^^^^^^ Many plugins perform more detailed analysis on certain regions of the reactor. Since the analyses often take longer, ARMI has a feature, called *detail assemblies* to help. Different plugins may treat detail assemblies differently, so it's important to read the plugin documentation as well. For example, a depletion plugin may perform pin-level depletion and rotation analysis only on the detail assemblies. Or perhaps CFD thermal/hydraulics will be run on detail assemblies, while subchannel T/H is run on the others. Detail assemblies are specified by the user in a variety of ways, through the GUI or the settings system. .. warning:: The Detail Assemblies mechanism has begun to be too broad of a brush for serious multiphysics calculations with each plugin treating them differently. It is likely that this feature will be extended to be more flexible and less surprising in the future. Detail Assembly Locations BOL The ``detailAssemLocationsBOL`` setting is a list of assembly location strings (e.g. ``004-003`` for ring 4, position 3). Assemblies that are in these locations at the beginning-of-life will be activated as detail assemblies. Detail assembly numbers The ``detailAssemNums`` setting is a list of ``assemNum``\ s that can be inferred from a previous case and specified, regardless of when the assemblies enter the core. This is useful for activating detailed treatment of assemblies that enter the core at a later cycle. Detail all assemblies The ``detailAllAssems`` setting makes all assemblies in the problem detail assemblies .. 
_kinetics-settings: Kinetics settings ^^^^^^^^^^^^^^^^^ In reactor physics analyses it is standard practice to represent reactivity in either absolute units (i.e., dk/kk' or pcm) or in dollars or cents. To support this functionality, the framework supplies the ``beta`` and ``decayConstants`` settings to apply the delayed neutron fraction and precursor decay constants to the Core parameters during initialization. These settings come with a few caveats: 1. The ``beta`` setting supports two different meanings depending on the type that is provided. If a single value is given, then this setting is interpreted as the effective delayed neutron fraction for the system. If a list of values is provided, then this setting is interpreted as the group-wise (precursor family) delayed neutron fractions (useful for reactor kinetics simulations). 2. The ``decayConstants`` setting is used to define the precursor decay constants for each group. When set, it must be provided with a corresponding ``beta`` setting that has the same number of groups. For example, if six-group delayed neutron fractions are provided, the decay constants must also be provided in the same six-group structure. 3. If ``beta`` is interpreted as the effective delayed neutron fraction for the system, then the ``decayConstants`` setting will not be utilized. 4. If both the group-wise ``beta`` and ``decayConstants`` are provided and their number of groups are consistent, then the effective delayed neutron fraction for the system is calculated as the summation of the group-wise delayed neutron fractions. .. _cycle-history: Cycle history ^^^^^^^^^^^^^ For all cases, ``nCycles`` and ``power`` must be specified by the user. In the case that only a single state is to be examined (i.e. no burnup), the user need only additionally specify ``nCycles = 1``. In the case of burnup, the reactor cycle history may be specified using either the simple or detailed option. 
The simple cycle history consists of the following case settings: * ``power`` * ``nCycles`` (default = 1) * ``burnSteps`` (default = 4) * ``availabilityFactor(s)`` (default = 1.0) * ``cycleLength(s)`` (default = 365.2425) In addition, one may optionally use the ``powerFractions`` setting to change the reactor power between each cycle. With these settings, a user can define a history in which each cycle may vary in power, length, and uptime. The history is restricted, however, to each cycle having a constant power, to each cycle having the same number of burnup nodes, and to those burnup nodes being evenly spaced within each cycle. An example simple cycle history might look like .. code-block:: yaml settings: power: 1000000 nCycles: 3 burnSteps: 2 cycleLengths: [100, R2] powerFractions: [1.0, 0.5, 1.0] availabilityFactors: [0.9, 0.3, 0.93] Note the use of the special shorthand list notation, where repeated values in a list can be specified using an "R" followed by the number of times the value is to be repeated. The above scheme would represent 3 cycles of operation: 1. 100% power for 90 days, split into two segments of 45 days each, followed by 10 days shutdown (i.e. 90% capacity) 2. 50% power for 30 days, split into two segments of 15 days each, followed by 70 days shutdown (i.e. 15% capacity) 3. 100% power for 93 days, split into two segments of 46.5 days each, followed by 7 days shutdown (i.e. 93% capacity) In each cycle, criticality calculations will be performed at 3 nodes evenly-spaced through the uptime portion of the cycle (i.e. ``availabilityFactor``*``powerFraction``), without option for changing node spacing or frequency. This input format can be useful for quick scoping and certain types of real analyses, but clearly has its limitations. To overcome these limitations, the detailed cycle history, consisting of the ``cycles`` setting may be specified instead. 
For each cycle, an entry to the ``cycles`` list is made with the following optional fields: * ``name`` * ``power fractions`` * ``cumulative days``, ``step days``, or ``burn steps`` + ``cycle length`` * ``availability factor`` An example detailed cycle history employing all of these fields could look like .. code-block:: yaml settings: power: 1000000 nCycles: 4 cycles: - name: A step days: [1, 1, 98] power fractions: [0.1, 0.2, 1] availability factor: 0.1 - name: B cumulative days: [2, 72, 78, 86] power fractions: [0.2, 1.0, 0.95, 0.93] - name: C step days: [5, R5] power fractions: [1, R5] - cycle length: 100 burn steps: 2 availability factor: 0.9 Note that repeated values in a list may be again be entered using the shorthand notation for ``step days``, ``power fractions``, and ``availability factors`` (though not ``cumulative days`` because entries must be monotonically increasing). Such a scheme would define the following cycles: 1. A 2 day power ramp followed by full power operations for 98 days, with three nodes clustered during the ramp and another at the end of the cycle, followed by 900 days of shutdown 2. A 2 day power ramp followed by a prolonged period at full power and then a slight power reduction for the last 14 days in the cycle 3. Constant full-power operation for 30 days split into six even increments 4. Constant full-power operation for 90 days, split into two equal-length 45 day segments, followed by 10 days of downtime As can be seen, the detailed cycle history option provides much flexibility for simulating realistic operations, particularly power ramps or scenarios that call for unevenly spaced burnup nodes, such as xenon buildup in the early period of thermal reactor operations. .. note:: Although the detailed cycle history option allows for powers to change within each cycle, it should be noted that the power over each step is still considered to be constant. .. 
note:: The ``name`` field of the detailed cycle history is not yet used for anything, but this information will still be accessible on the operator during runtime. .. note:: Cycles without names will be given the name ``None`` .. warning:: When a detailed cycle history is combined with tight coupling, a subclass of :py:meth:`LatticePhysicsInterface.interactCoupled <armi.physics.neutronics.latticePhysics.latticePhysicsInterface.LatticePhysicsInterface.interactCoupled>` should be used. .. _restart-cases: Restart cases ^^^^^^^^^^^^^ Oftentimes the user is interested in re-examining just a specific set of time nodes from an existing run. In these cases, it is sometimes not necessary to rerun an entire reactor history, and one may instead use one of the following options: 1. Snapshot, where the reactor state is loaded from a database and just a single time node is run. 2. Restart, where the cycle history is loaded from a database and the calculation continues through the remaining specified time history. For either of these options, it is possible to alter the specific settings applied to the run by simply adjusting the case settings for the run. For instance, a run that originally had only neutronics may incorporate thermal hydraulics during a snapshot run by adding in the relevant TH settings. .. note:: For either of these options, it is advisable to first create a new case settings file with a name different than the one from which you will be restarting off of, so as to not overwrite those results. To run a snapshot, the following settings must be added to your case settings: * Set ``runType`` to ``Snapshots`` * Add a list of cycle/node pairs corresponding to the desired snapshots to ``dumpSnapshot`` formatted as ``'CCCNNN'`` * Set ``reloadDBName`` to the existing database file that you would like to load the reactor state from An example of a snapshot run input: .. 
code-block:: yaml runType: Snapshots reloadDBName: my-old-results.h5 dumpSnapshot: ['000000', '001002'] # 2 snapshots at BOL and cycle 1-node 2 To run a restart, the following settings must be added to your case settings: * Set ``runType`` to ``Standard`` * Set ``loadStyle`` to ``fromDB`` * Set ``startCycle`` and ``startNode`` to the cycle/node that you would like to continue the calculation from (inclusive). ``startNode`` may use negative indexing. * Set ``reloadDBName`` to the existing database file from which you would like to load the reactor history up to the restart point * If you would like to change the specified reactor history (see :ref:`restart-cases`), keep the history up to the restarting cycle/node unchanged, and just alter the history after that point. This means that the cycle history specified in your restart run should include all cycles/nodes up to the end of the simulation. For complicated restarts, it may be necessary to use the detailed ``cycles`` setting, even if the original case only used the simple history option. A few examples of restart cases: - Restarting a calculation at a specific cycle/node and continuing for the remainder of the originally-specified cycle history: .. code-block:: yaml # old settings settings: nCycles: 2 burnSteps: 2 cycleLengths: [100, 100] runType: Standard loadStyle: fromInput loadingFile: my-blueprints.yaml .. code-block:: yaml # restart settings settings: nCycles: 2 burnSteps: 2 cycleLengths: [100, 100] runType: Standard loadStyle: fromDB startCycle: 1 startNode: 0 reloadDBName: my-original-results.h5 - Add an additional cycle to the end of a case: .. code-block:: yaml # old settings settings: nCycles: 1 burnSteps: 2 cycleLengths: [100] runType: Standard loadStyle: fromInput loadingFile: my-blueprints.yaml .. 
code-block:: yaml # restart settings settings: nCycles: 2 burnSteps: 2 cycleLengths: [100, 100] runType: Standard loadStyle: fromDB startCycle: 0 startNode: -1 reloadDBName: my-original-results.h5 - Restart but cut the reactor history short: .. code-block:: yaml # old settings settings: nCycles: 3 burnSteps: 2 cycleLengths: [100, 100, 100] runType: Standard loadStyle: fromInput loadingFile: my-blueprints.yaml .. code-block:: yaml # restart settings settings: nCycles: 2 burnSteps: 2 cycleLengths: [100, 100] runType: Standard loadStyle: fromDB startCycle: 1 startNode: 0 reloadDBName: my-original-results.h5 - Restart with a different number of steps in the third cycle using the detailed ``cycles`` setting: .. code-block:: yaml # old settings settings: nCycles: 3 burnSteps: 2 cycleLengths: [100, 100, 100] runType: Standard loadStyle: fromInput loadingFile: my-blueprints.yaml .. code-block:: yaml # restart settings settings: nCycles: 3 cycles: - cycle length: 100 burn steps: 2 - cycle length: 100 burn steps: 2 - cycle length: 100 burn steps: 4 runType: Standard loadStyle: fromDB startCycle: 2 startNode: 0 reloadDBName: my-original-results.h5 .. note:: The ``skipCycles`` setting is related to skipping the lattice physics calculation specifically, it is not required to do a restart run. .. note:: The ISO binary cross section libraries are required to run cases that skip the lattice physics calculation (e.g. MC^2) .. note:: Restarting a calculation with an different version of ARMI than what was used to produce the restarting database may result in undefined behavior. Shuffling ^^^^^^^^^ .. note:: The ``explicitRepeatShuffles`` setting points to a ``*-SHUFFLES.txt`` file that records moves from a previous run for exact repetition. Users may also define a custom shuffle plan in a YAML file referenced by the ``shuffleSequenceFile`` setting. The YAML format organizes data by cycle in a ``sequence`` mapping. 
Keys are the cycle where the shuffling should occur during the beginning-of-cycle step. The first available cycle where shuffling will occur is cycle 1. Each cycle contains a list of high-level actions. An action is a mapping containing one of the keys ``cascade``, ``swap``, or ``extraRotations``. ``cascade`` chains describe a sequence of assembly displacements beginning with a fresh fuel assembly and ending with the final location's assembly being discharged. Optional ``fuelEnrichment`` lists specify the U235 weight fraction enrichment for each axial block in the fresh assembly, from bottom to top, including zeroes for non-fuel blocks. ``swap`` swaps the assemblies at two locations after all cascades are processed. ``extraRotations`` map final location labels to relative counterclockwise angles in degrees and are applied after all cascades, swaps, and any algorithmic rotation routines defined with the ``assemblyRotationAlgorithm`` setting. The angle is relative to the assembly's current orientation and whatever assembly ends up at the given location is rotated. Valid angles depend on the assembly's geometry. Extra rotations therefore: * apply to whatever assembly resides at the specified location once all cascades and swaps are complete; * rotate the assembly relative to its current orientation; and * execute after any algorithmic rotation routines. A cascade with no final destination defaults to deleting the assembly. Assemblies can be retained in the model by ending the cascade with ``SFP``. When ``SFP`` is specified, the discharged assembly is stored in the spent fuel pool even if the ``trackAssems`` setting is ``False``; ``Delete`` always removes the assembly from the model. Assemblies may also be re-inserted from the spent fuel pool by starting a cascade with ``SFP`` and providing a ``ringPosCycle`` to identify the spent fuel pool assembly returning to the core. 
``ringPosCycle`` is a list containing ring, pos, and cycle
Zones ^^^^^ Zones are a collection of assemblies that share some similar characteristics. A zone might be those assemblies with a similar orrificing pattern or a some subset of fuel assemblies. Some codes may wish to study behavior by lumping the reactor into a few channels with bulk or aggregated properties. Users can collect assemblies in each of these channels through the :attr:`~armi.reactor.cores.Core.zones` attribute on the core. See also the :class:`~armi.reactor.zones.Zones` class. Users can define these zones with the ``zonesFile`` setting. It must point to YAML file that contains the high-level key ``customZonesMap`` containing a map of ``location: zone`` maps. .. code:: yaml customZonesMap: 001-001: primary control 002-001: fuel z0 003-001: fuel z0 004-001: fuel z1 004-002: secondary control The ``location`` keys are the ARMI ring-position assembly identifier. It is not required to have every assembly be inside a zone. But assemblies not listed will not be added to any zone, i.e., there is no default zone. This example would produce four zones: 1. ``primary control`` containing the center assembly at ``001-001``, 2. ``fuel z0`` containing two fuel assemblies: ``002-001`` and ``003-001``, 3. ``fuel z1`` containing one fuel assembly: ``004-001``, and 4. ``secondary control`` containing the assembly at ``004-002``. An alternative method is with the ``zoneDefinitions`` setting in the primary settings file. This contains a list of zone names and the assemblies that make up that zone. The following would create an identical zone structure as above. .. code:: yaml settings: zoneDefinitions: - "primary control: 001-001" - "fuel z0: 002-001, 003-001" - "fuel z1: 004-001" - "secondary control: 004-002" .. note:: These are list of strings, not additional maps. Wrapping in quotations is required to process the zone definitions. These zones will be populated according to the :meth:`~armi.reactor.cores.Core.buildManualZones` core method. .. 
_bp-input-file: The Blueprints Input File ========================= The **blueprints** input defines the dimensions of structures in the reactor, as well as their material makeup. In a typical case, pin dimensions, isotopic composition, control definitions, coolant type, etc. are defined here. The specifics of each assembly type are then overlaid, possibly including enrichment distributions and other material modifications. .. note:: See the :py:mod:`~armi.reactor.blueprints` module for implementation and more detail. This input file is formatted using `YAML <https://en.wikipedia.org/wiki/YAML>`_, which allows text-based change tracking for design control. ARMI does not have a blueprints-editing GUI yet, but may in the future. .. note:: You can point many ARMI runs to the same Blueprints input file using full paths in ``loadingFile`` setting. ARMI adds an ``!include`` YAML tag, which can be used to include the contents of an external YAML file in any part of a blueprints file. The can be useful for sharing core or assembly pin layouts amongst multiple cases. For example:: grids: core: !include path/to/core_grid.yaml Would have the effect of copy-pasting the contents of ``path/to/core_grid.yaml`` into the main blueprints file. The rules that ARMI uses to handle things like indentation of the included text are usually rather intuitive, but sometimes it can be useful to witness the behavior first-hand. The ``expand-bp`` command can be used to do a dry run for testing inputs with !includes. ARMI models are built hierarchically, first by defining components, and then by larger and larger collections of the levels of the reactor. Blueprint sections ------------------ The **blueprints** input file has several sections that corresponds to different levels of the reactor hierarchy. You will generally build inputs "bottoms up", first by defining elementary pieces (like pins) and then collecting them into the core and reactor. 
The ARMI data model is represented schematically below, and the blueprints are defined accordingly: .. figure:: /.static/armi_reactor_objects.png :align: center The primary data containers in ARMI :ref:`blocks <blocks-and-components>`: Defines :py:class:`~armi.reactor.components.component.Component` inputs for a :py:class:`~armi.reactor.blocks.Block`. :ref:`assemblies <assemblies>`: Defines vertical stacks of blocks used to define the axial profile of an :py:class:`~armi.reactor.assemblies.Assembly`. :ref:`systems <systems>`: Reactor-level structures like the core, the spent fuel pool, pumps, the head, etc. :ref:`grids <grids>`: Lattice definitions for the core map or pin maps :ref:`nuclide flags <nuclide-flags>`: Special setting: Specifies nuclide modeling options, whether a nuclide is being modeled for cross sections and/or depletion. For instance, it allows you to ignore nuclides above Curium for depletion speed. This also allows you to expand elements to a subset of nuclides. For example, you can choose to expand Oxygen to just Oxygen-16 and neglect Oxygen-17 and 18. :ref:`custom isotopics <custom-isotopics>`: Special setting: defines user-specified isotopic compositions. The core map input files can be graphically manipulated with the :py:mod:`Grid editor <armi.utils.gridEditor>`. .. _blocks-and-components: Blocks and Components --------------------- Blocks and components are defined together in the **blueprints** input. We will start with a component, and then define the whole ``blocks:`` input. The structure will be something like:: blocks: block name 1: component name 1: ... component name 2: block name 2: component name 1: ... component name 2: ... .. note:: You can also define components at the top level of the blueprints file under the ``components:`` top level section, but bringing anything defined there into the reactor model must currently be done programmatically. We are currently developing additional input capabilities to use these more flexibly. 
Associated with this is a ``component groups:`` section which can collect different free components with different volume fractions. This also is not fully implemented yet. Defining a Component ^^^^^^^^^^^^^^^^^^^^ The **Components** section defines the pin (if modeling a pin-type reactor) and assembly in-plane dimensions (axial dimensions are defined in the :ref:`assemblies` input) and the material makeups of each :py:mod:`Component <armi.reactor.components>`. :py:mod:`Blocks <armi.reactor.blocks>` are defined here as collections of geometric components that have specific temperatures, dimensions, material properties, and isotopic compositions. An component may be defined as:: fuel: shape: Circle material: UZr Tinput: 20.0 Thot: 450.0 mult: 169 id: 0.0 od: 0.757 Here we have provided the following information: Component name The component name (``fuel``) is specified at the top. Some physics kernels interpret names specially, so pay attention to any naming conventions. As a general rule, you can expect that people will be doing regex on your name, so you should not use any of these characters in your component names: ``. ^ $ * + ? { } [ ] \ | ( ) :``. shape The shape will be extruded to the length specified in the ``assemblies`` input section below. ARMI contains a variety of built-in simple shapes, and plugins can define their own design-specific/proprietary shapes. material The material links the component to a certain set of thermo-physical properties (e.g. temperature-dependent thermal expansion coefficients, density, thermal conductivity, etc., which are used in the various physics kernels. Natural isotopic composition is determined from this material specification as well (unless custom isotopics are supplied). The entry here should either be a class name of a valid material (``UZr``) or a ``module:className`` pair for specifying specific material (e.g. ``armi.materials.uZr:UZr``). 
Materials are handled through the :py:mod:`material library <armi.materials>`. |Tinput| The temperature (in C) that corresponds to the input dimensions given here. This facilitates automatic thermal expansion. |Thot| The temperature (in C) that the component dimensions will be thermal expanded to (using material properties based on the ``material`` input). To disable automatic thermal expansion, set |Tinput| and |Thot| both to the same value mult Multiplicity specifies how many duplicates of this component exist in this block. If you want 169 pins per assembly, this would be 169. This does not explicitly describe the location of the pins. Note that many fast-neutron systems only need volume fractions, not precise spatial locations, at least for pre-conceptual/simple studies. id Inner diameter (in cm). Each shape has different required input dimension keys. For annulus, set id to non-zero. od Outer diameter (in cm). .. _componentTypes: Component Types ^^^^^^^^^^^^^^^ Each component has a variety of dimensions to define the shape and composition. All dimensions are in cm. The following is a list of included component shapes and their dimension inputs. Again, additional/custom components with arbitrary dimensions may be provided by the user via plugins. .. exec:: from armi.reactor.components import ComponentType from dochelpers import createListTable rows = [['Component Name', 'Dimensions']] for c in ComponentType.TYPES.values(): rows.append([c.__name__, ', '.join(c.DIMENSION_NAMES)]) return createListTable(rows, widths=[25, 65], klass="longtable") When a ``DerivedShape`` is specified as the final component in a block, its area is inferred from the difference between the area of the block and the sum of the areas comprised by the other components in the block. This is useful for complex shapes like coolant surrounding a lattice of pins. .. 
_componentLinks: Component Links ^^^^^^^^^^^^^^^ Dimensions of a component may depend on the dimensions of a previously-defined component in the same block. For instance, the sodium bond between fuel and cladding. The format is simply ``<componentName>.<dimensionName>``. The dimension names are available in the table above. :: blocks: fuel: # block name fuel: # component name shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 isotopics: LABEL1 mult: 169.0 od: 0.757 bond: shape: Circle material: Sodium Tinput: 450.0 Thot: 450.0 mult: fuel.mult id: fuel.od # bond is connected to the outside of fuel od: clad.id # and the inside of the clad clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.905 mult: fuel.mult od: 1.045 Linked component dimensions (such as ``bond.id`` being linked to ``fuel.od``) remain linked as dimensions change. For example when the above defined fuel is expanded from cold temperature of 25 to the hot temperature of 600 the ``bond.id`` will still be whatever the ``fuel.od`` is. This can result in the displacement of material. For example, in the above case, if the fuel expansion removes more cross sectional area than the clad expansion creates, the amount of thermal bond will be reduced. This is physical since, in reality, the fluid would be displaced as dimensions change. Pin lattices ^^^^^^^^^^^^ Pin lattices may be explicitly defined in the block/component input in conjunction with the ``grids`` input section. A block may be assigned a grid name, and then each component may be assigned one or more grid specifiers. For example, the following input section specifies that fuel pins will occupy all grid positions marked with a ``1`` and cladding components will occupy all grid positions marked with either a ``1`` or a ``2``. This situation may be desirable when some burnable poison pins use the same cladding as the fuel pins. 
:: blocks: fuel: &block_fuel grid name: fuelgrid fuel: flags: fuel test shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 169.0 od: 0.86602 latticeIDs: [1] clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 470.0 id: 1.0 mult: fuel.mult od: 1.09 latticeIDs: [1,2] .. note:: A ``grid`` with the name ``fuelgrid`` must be defined as well in the grid input section. .. _naming-flags: Flags and naming ---------------- All objects in the ARMI Reactor Model possess a set of :py:class:`armi.reactor.flags.Flags`, which can be used to affect the way that the various physics kernels treat each object. Most flags are named after common reactor components, like ``FUEL``, or ``CLAD``, and are used to declare `what something is` in the reactor model. Various physics or other framework operations can then be parameterized to target specific types of things. For instance, the fuel handling code can infer that blocks with the ``GRID_PLATE`` flag should be considered stationary and not move them with the rest of the block stack in an assembly. Historically, flags have also been used to describe directly `what should be done` with an object in the reactor model. For instance, an object with the ``DEPLETABLE`` flag set will participate in isotopic depletion analysis, whereas objects without the ``DEPLETABLE`` flag set will not. This has led to a lot of confusion, as the meaning of various flags is buried deep within the code, and can conflict from place to place. We are trying to align around a `what something is` interpretation, and bind those to specific behaviors with settings. For more details, see :py:mod:`armi.reactor.flags`. The set of specific flags that should be set on an object can be specified in one of two ways for each object defined in the blueprints. The most precise way is to include a ``flags:`` entry for the object blueprint in question. In the example above, the ``fuel`` component sets the ``FUEL`` and ``TEST`` flags. 
When specifying flags in this way, the value specified must be completely and unambiguously convertible into valid Flags. If it cannot, it will lead to an error when constructing the object. If ``flags:`` is empty, or not specified, then the name of the object blueprint will be used to infer as many flags as possible. In the above example, the ``clad`` component will get the ``CLAD`` flag from its name. .. note:: Additional flags may be specified from plugins, but this should be done with care; see the :py:mod:`armi.reactor.flags` module and :py:meth:`armi.plugins.ArmiPlugin.defineFlags` plugin hook for more details. .. _assemblies: Assemblies ---------- Once components and blocks are defined, Assemblies can be created as extruded stacks of blocks from bottom to top. The assemblies use YAML anchors to refer to the blocks defined in the previous section. .. note:: We aren't happy with the use of anchors to refer to blocks, and plan to change it (back) to just using the block names directly. However, the use of anchors for input to be applied to multiple assemblies (e.g. heights) is quite nice. A complete definition of an inner-core assembly may be seen below:: assemblies: heights: &standard_heights [10.05, 20.10, 30.15, 20.10, 20.10, 30.15] axial mesh points: &standard_axial_mesh_points [1, 2, 3, 4, 5, 6] inner core: specifier: IC blocks: &inner_core_blocks [*block_shield, *block_fuel, *block_fuel, *block_fuel, *block_fuel, *block_plenum] height: *standard_heights axial mesh points: *standard_axial_mesh_points hotChannelFactors: TWRPclad material modifications: U235_wt_frac: ['', '', 0.001, 0.002, 0.03, ''] ZR_wt_frac: ['', '', 0.1, 0.1, 0.1, 0.1] nozzleType: Inner xs types: [A, B, C, D, E, F] .. note:: While component dimensions are entered as cold dimensions, axial heights may be entered as either cold or hot dimensions. 
In older versions of ARMI, it was required to enter heights in the hot dimension (this behavior is preserved by setting `inputHeightsConsideredHot: True`). However, with the :py:class:`axial expansion changer <armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger>`, heights may be entered at cold temperatures (`inputHeightsConsideredHot: False`). Each Assembly will then be expanded to its hot dimensions upon construction. For many cases, a shared height and axial mesh point definition is sufficient. These can be included globally as shown above and linked with anchors, or specified explicitly. specifier The Geometry Assembly Specifier, which is a two-letter ID, such as "IC" (for inner core), "SH" (for shield), etc. correspond with labels in the geometry input file that is created by the GUI hex dragger. xs types The **cross-section type** is usually a single capital letter that identifies which cross section (XS) set will be applied to the block. Each cross section set must be defined for at least one block with fissile fuel. When the lattice physics code executes in ARMI, it determines the representative blocks from each cross section type and burnup group and runs it to create the cross section set for all blocks of the same type and in the same burnup group. Generally, it is best to set blocks that have much different compositions to have separate cross section types. The tradeoff is that the more XS types you define, the more CPU time the case will take to run. Representing xsType by a single capital letter (A-Z) or number (0-9) limits users to 36 groups. So ARMI will allow 2-letter xsType designations if and only if the ``buGroups`` setting has length 1 (i.e. no burnup groups are defined). This is useful for high-fidelity XS modeling. ARMI is able to use lower-case letters (a-z) for an additional 26 cross section groups, but this should only be done when working on a case-sensitive file system. 
On a case-insensitive file system (Windows, and some MacOS systems) this could cause unpredictable errors. axial mesh points Blocks will be broken up into this many uniform mesh points in the deterministic neutronics solvers (e.g. DIF3D). This allows you to define large blocks that have multiple flux points within them. You have to keep the neutronic mesh somewhat uniform in order to maintain numerical stability of the solvers. It is important to note that the axial mesh must be uniform throughout the core for many physics kernels, so be sure all block interfaces are consistent among all assemblies in the core. Blocks deplete and get most state variables on the block mesh defined by the height specification. Provisions for multiple meshes for different physics are being planned. hotChannelFactors A label to define which set of hot channel factors (HCFs) get applied to this block in the thermal/hydraulic calculations. There are various valid sets included with ARMI. nozzleType This is a string that identifies what type of inlet nozzle an assembly has. This parameter could be used in an implementation of a thermal-hydraulics solver with flow orificing to apply different pressure loss coefficients and/or flow rates to different types of assemblies. material modifications These are a variety of modifications that are made to the materials in blocks in these locations. It may include the fuel enrichment (mass frac.), poison enrichment (mass frac.), zirconium mass frac, and any additional options required to fully define the material loaded in the component. The material definitions in the material library define valid modifications for them. .. 
exec:: from armi.materials import Material from armi.utils.tabulate import tabulate data = [] for m in Material.__subclasses__(): numArgs = m.applyInputParams.__code__.co_argcount if numArgs > 1: modNames = m.applyInputParams.__code__.co_varnames[1:numArgs] data.append((m.__name__, ", ".join(modNames))) for subM in m.__subclasses__(): num = subM.applyInputParams.__code__.co_argcount if num > 1: mods = subM.applyInputParams.__code__.co_varnames[1:num] if numArgs > 1: mods += modNames data.append((subM.__name__, ", ".join(mods))) d = {} for k, v in data: if k not in d: d[k] = v else: d[k] = d[k].split(",") + v.split(",") d[k] = sorted(set([vv.strip() for vv in d[k]])) d[k] = ", ".join(d[k]) data = [(k, v) for k, v in d.items()] data.sort(key=lambda t: t[0]) return tabulate( headers=("Material Name", "Available Modifications"), data=data, tableFmt="rst", ) The class 1/class 2 modifications in fuel materials are used to identify mixtures of custom isotopics labels for input scenarios where a varying blend of a high-reactivity feed with a low-reactivity feed. This is often useful for closed fuel cycles. For example, you can define any fuel material as being made of LWR-derived TRU plus depleted uranium at various weight fractions. Note that this input style only adjusts the heavy metal. To enable the application of different values for the same material modification type on different components within a block, the user may specify material modifications by component. This is useful, for instance, when two pins within an assembly made of the same base material have different fuel enrichments. 
This is done using the ``by component`` attribute to the material modifications as in:: blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel] height: [1.0] axial mesh points: [1] xs types: [A] material modifications: by component: fuel1: U235_wt_frac: [0.20] fuel2: Zr_wt_frac: [0.02] U235_wt_frac: [0.30] Material modifications specified on the ``material modifications`` level are referred to as "block default" values and apply to all components on the block not associated with a by-component value. This example would apply an enrichment of 20% to the ``fuel1`` component and an enrichment of 30% to all other components in the block that accept the ``U235_wt_frac`` material modification. All by-component material modifications override any block default material modifications of the same type. 
In addition, any by-component entries omitted for a given axial block will revert to the block default (or material class default, if no block default value is provided and a material class default exists) value:: blocks: fuel: &block_fuel fuel1: &component_fuel_fuel1 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 fuel2: &component_fuel_fuel2 shape: Hexagon material: UZr Tinput: 600.0 Thot: 600.0 ip: 0.0 mult: 1 op: 10.0 assemblies: fuel a: &assembly_a specifier: IC blocks: [*block_fuel, *block_fuel] height: [0.5, 0.5] axial mesh points: [1, 1] xs types: [A, A] material modifications: by component: fuel1: U235_wt_frac: [0.20, ''] # <-- the U235_wt_frac for the second block will go to the block default value fuel2: # the U235_wt_frac for fuel2 component in both axial blocks will go to the block default values Zr_wt_frac: [0.02, ''] # <-- the Zr_wt_frac for the second block will go to the material class default because there is no block default value U235_wt_frac: [0.30, 0.30] The first block listed is defined at the bottom of the core. This is typically a grid plate or some other structure. .. _systems: Systems ------- Once assemblies are defined they can be grouped together into the Core, the spent fuel pool (SFP), etc. A complete reactor structure with a core and a SFP may be seen below:: systems: core: grid name: core origin: x: 0.0 y: 10.1 z: 1.1 Spent Fuel Pool: type: sfp grid name: sfp origin: x: 1000.0 y: 12.1 z: 1.1 The ``origin`` defines the point of origin in global space in units of cm. This allows you to define the relative position of the various structures. The ``grid name`` inputs are string mappings to the grid definitions described below. Plugin Behavior ^^^^^^^^^^^^^^^ The :meth:`armi.plugins.ArmiPlugin.defineSystemBuilders` method can be provided by plugins to control how ARMI converts the ``systems`` section into ``Composite``\ s to be modeled. 
By default, the ``type`` field is used to determine what object is created. The default :class:`armi.reactor.ReactorPlugin` provides the following mapping: ================== ====================================================== ``type`` Value Builds ================== ====================================================== ``core`` (default) :class:`~armi.reactor.reactors.Core` ``excore`` :class:`~armi.reactor.excoreStructure.ExcoreStructure` ``sfp`` :class:`~armi.reactor.spentFuelPool.SpentFuelPool` ================== ====================================================== Plugins are able to provide a superset (e.g., ``core``, ``excore``, and ``sfp``) and new mappings of values to builders. .. _grids: Grids ----- Grids are described inside a blueprint file using ``lattice map`` or ``grid contents`` fields to define arrangements in Hex, Cartesian, or R-Z-Theta. The optional ``lattice pitch`` entry allows you to specify spacing between objects that is different from tight packing. This input is required in mixed geometry cases, for example if Hexagonal assemblies are to be loaded into a Cartesian arrangement. 
The contents of a grid may be defined using one of the following: ``lattice map:`` An ASCII map representing the grid contents ``grid contents:`` a direct YAML representation of the contents Example grid definitions are shown below:: grids: control: geom: hex symmetry: full lattice map: | - - - - - - - - - 1 1 1 1 1 1 1 1 1 4 - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1 - - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1 - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 3 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 6 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 sfp: symmetry: full geom: cartesian lattice pitch: x: 50.0 y: 50.0 grid contents: [0,0]: MC [1,0]: MC [0,1]: MC [1,1]: MC .. tip:: We have gone through some effort to allow both pin and core grid definitions to share this input and it may improve in the future. You may set up some kinds of grids (e.g. 1/3 and full core hex or Cartesian core loadings) using our interactive graphical grid editor described more in :py:mod:`armi.utils.gridEditor`. .. figure:: /.static/gridEditor.png :align: center An example of the Grid Editor being used on a FFTF input file .. _custom-isotopics: Custom Isotopics ---------------- In some cases (such as benchmarking a previous reactor), the default mass fractions from the material library are not what you want to model. In these cases, you may override the isotopic composition provided by the material library in this section. There are three ways to specify the isotopics: ``mass fractions`` (sum to 1.0), ``number densities`` (in atoms/barn-cm), or ``number fractions`` (sum to 1.0). 
For example:: custom isotopics: LABEL1: input format: mass fractions density: 7.79213903298633 C: 0.000664847887388523 CR: 0.182466356404319 CU: 0.00323253628006144 FE: 0.705266053783901 MN: 0.0171714161260001 MO: 0.00233843050046998 NI: 0.0831976890804466 SI: 0.00566266993741259 See the :py:mod:`List of Nuclides <armi.nucDirectory.nuclideBases>` for all valid entries. Note that ARMI will expand elemental nuclides to their natural isotopics in most cases (to correspond with the nuclear data library). The (mass) ``density`` input is invalid when specifying ``number densities``; the code will present an error message. Material density may be specified in custom isotopics either explicitly in a ``mass fractions`` input format (shown above) or implicitly with ``number densities``. This is fairly straightforward for the ``Custom`` material, as it has no baseline density. Density may also be specified for components using materials which have entries in the materials library. Users should be aware of the following interactions when specifying a custom density for components using a library material: 1. The library material density will not be changed. Only the component(s) with the custom isotopics entry will have the density modification. 2. Density specified by custom isotopics will override all other density modifications in the component construction phase (e.g. ``TD_frac`` entries). 3. Only the component density is changed, not other material properties are altered to account for the change in composition/density. 4. Density can only be specified using custom isotopics for non- ``Custom`` materials that have some initial density. Don't try to make ``Void`` have mass! Densities specified using ``Custom Isotopics`` are applied in component construction, and should be specified at the input temperature for the component. Note that when overriding the density of a library material, all other properties of that material (e.g. 
expansion coefficients) will continue to be used as if the component consisted of the library material. In other words, ARMI will still think the component is made out of the original material! Advanced topics --------------- Overlapping shapes ^^^^^^^^^^^^^^^^^^ Solids of different compositions in contact with each other present complications during thermal expansion. The ARMI Framework does not perform calculations to see exactly how such scenarios will behave mechanically; it instead focuses on conserving mass. To do this, users should input a zero-dimension component linking the 2 solid components made of the special ``Void`` material. This gap will allow the 2 components to thermally expand independently while keeping track of the overlapping area. It is important to keep track of the areas when a DerivedShape is included in a block design because ARMI calculates the derived area by taking the full area of the block and subtracting the total area of the non-DerivedShapes. If area between thermally-expanding solids was not accounted for, this would non-physically add or subtract coolant into these gaps. To model overlapping components heterogeneously, it is suggested to use a :py:mod:`block converter <armi.reactor.converters.blockConverters>`. Additionally, it should be noted that assigning ``mult: fuel.mult`` will be ever-so-slightly slower than just defining the actual value. This is because ARMI needs to find the sibling component and get the sibling's ``mult``. If you are concerned about performance at that level and don't expect ``mult`` to change much in your case, you can replace the constant link (i.e. it does not change over time) with a YAML anchor and alias. Component area modifications ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In some scenarios, it is desired to have one component's area be subtracted or added to another. For example, the area of the skids in a skid duct design needs to be subtracted from the interstitial coolant. 
The mechanism to handle this involves adding a parameter to the component to be modified after all the required ones in the form of ``<componentName>.add`` or ``<componentName>.sub``. The component to be added or subtracted must be defined before the component that is being modified. This allows fairly complicated configurations to be modeled without explicitly defining new components. :: blocks: rect with 100 holes: holes: shape: Circle material: Sodium Tinput: 600 Thot: 600 mult: 100 od: 0.05 square of steel: shape: Square material: Iron Tinput: 25.0 Thot: 600.0 widthOuter: 3.0 modArea: holes.sub # "holes" is the name of the other component Putting it all together to make a Block ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Here is a complete fuel block definition:: blocks: fuel: &block_fuel bond: shape: Circle material: Sodium Tinput: 450.0 Thot: 450.0 id: fuel.od mult: fuel.mult od: cladding.id clad: shape: Circle material: HT9 Tinput: 25.0 Thot: 450.0 id: 0.905 mult: fuel.mult od: 1.045 coolant: shape: DerivedShape material: Sodium Tinput: 450.0 Thot: 450.0 duct: shape: Hexagon material: HT9 Tinput: 25.0 Thot: 450.0 ip: 15.2 mult: 1.0 op: 16.2 fuel: shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 isotopics: LABEL1 mult: 169.0 od: 0.757 intercoolant: shape: Hexagon material: Sodium Tinput: 450.0 Thot: 450.0 ip: duct.op mult: 1.0 op: 16.79 wire: shape: Helix material: HT9 Tinput: 25.0 Thot: 450.0 axialPitch: 30.0 helixDiameter: 1.145 id: 0.0 mult: fuel.mult od: 0.1 Making blocks with unshaped components ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Sometimes you will want to make a homogeneous block, which is a mixture of multiple materials, and will not want to define an exact shape for each of the components in the block. In this case unshaped components can be used, but ARMI still requires there to be at least one component with shape to define the pitch of the block. 
In the example below, the block is a rectangular pitch so one of the components is defined as a rectangle to indicate this. Its outer dimensions determine the pitch of the block. The inner dimensions can be whatever is necessary to preserve the area fraction. Note that rectangular blocks have pitch defined by two numbers, since they may not be a square. In this case the rectangle component is half the area fraction and the other two components are one quarter:: blocks: fuel: clad: shape: Rectangle material: HT9 Tinput: 25.0 Thot: 25.0 lengthOuter: 3.0 lengthInner: 2.4 widthOuter: 2.0 widthInner: 1.25 mult: 1.0 fuel: shape: UnshapedComponent material: UZr Tinput: 25.0 Thot: 25.0 area: 1.5 coolant: shape: UnshapedComponent material: Sodium Tinput: 25.0 Thot: 25.0 area: 1.5 .. warning:: When using this method avoid thermal expansion by setting TInput=THot, or your pitch component dimensions might change, thus changing your pitch. Alternatively, a void (empty) component with zero area can be added for defining the pitch, and then all three components can be defined as unshaped. The downside is that there are now four components, but only three that have actual area and composition:: blocks: fuel: clad: shape: UnshapedComponent material: HT9 Tinput: 25.0 Thot: 25.0 area: 3.0 fuel: shape: UnshapedComponent material: UZr Tinput: 25.0 Thot: 25.0 area: 1.5 coolant: shape: UnshapedComponent material: Sodium Tinput: 25.0 Thot: 25.0 area: 1.5 PitchDefiningComponent: shape: Rectangle material: Void lengthOuter: 3.0 lengthInner: 3.0 widthOuter: 2.0 widthInner: 2.0 mult: 1.0 This can similarly be done for hex geometry and a hexagon with Outer Pitch (``op``). .. warning:: The rest of the input described below are scheduled to be moved into the settings input file, since their nature is that of a setting. .. 
_nuclide-flags: Nuclide Flags ------------- The ``nuclide flags`` setting allows the user to choose which nuclides they would like to consider in the problem, and whether or not each nuclide should transmute and decay. For example, sometimes you may not want to deplete trace elements in structural materials, but in other analyses you might. If the nuclide should deplete, it must have ``burn: true``. If it is to be included in the problem at all, it must have ``xs: true``. All nuclides that will be produced via transmutation/decay must also have ``burn: true``, so if you add Thorium, make sure to add all other actinides in its chain. You can use the ``expandTo:`` section to list a subset of natural nuclides to expand into. If you leave this section out, a default set of nuclide flags will be applied to your problem. Remember this section when you start changing which nuclides are modeled and which ones deplete.:: # this is a YAML comment nuclide flags: AL: {burn: false, xs: true} AM241: {burn: true, xs: true} C: &carbon_flags {burn: false, xs: true} # an anchor to "carbon_flags" CA: *carbon_flags CL: *carbon_flags CO: *carbon_flags # the alias back to "carbon_flags" CR: *carbon_flags CU: *carbon_flags FE: *carbon_flags H: {burn: false, xs: true} MN: {burn: false, xs: true} MO: {burn: false, xs: true} N: {burn: false, xs: true} NA: {burn: false, xs: true} NI: {burn: false, xs: true} O: {burn: false, xs: true, expandTo: ["O16", "O17"]} P: {burn: false, xs: true} PU238: {burn: true, xs: true} PU239: {burn: true, xs: true} PU240: {burn: true, xs: true} PU241: {burn: true, xs: true} PU242: {burn: true, xs: true} S: {burn: false, xs: true} SI: {burn: false, xs: true} U234: {burn: false, xs: true} U235: {burn: true, xs: true} U236: {burn: true, xs: true} U238: {burn: true, xs: true} The code will crash if materials used in :ref:`blocks-and-components` contain nuclides not defined in ``nuclide flags``. A failure can also occur if the burn chain is missing a nuclide. .. 
tip:: We plan to upgrade the default behavior of this to inherit from all defined materials in a problem to reduce the user-input burden. .. These following are rst substitutions. They're useful for keeping the plaintext readable while getting subscripted text. .. |Tinput| replace:: T\ :sub:`input` .. |Thot| replace:: T\ :sub:`hot` .. _fuel-management-input: Fuel Management Input ===================== Fuel management in ARMI is specified through custom Python scripts or YAML files that often reside in the working directory of a run (but can be anywhere if you use full paths). During a normal run, ARMI checks for several fuel management settings: ``shuffleLogic`` The path to the Python source file or dotted import path to a module that contains the user's custom fuel management logic ``shuffleSequenceFile`` The path to a yaml file containing the user's custom fuel management logic. ``fuelHandlerName`` The name of a FuelHandler class that ARMI will look for in the Fuel Management Input module or file specified by ``shuffleLogic``. Since it's input, it's the user's responsibility to design and place that object in that module or file. .. note:: We consider the limited syntax needed to express fuel management in Python code itself to be sufficiently expressive and simple for non-programmers to actually use. Also, fuel management options are available through YAML input files. The ARMI Operator will call its fuel handler's ``outage`` method before each cycle (and, if requested, during branch search calculations). The :py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.outage` method will perform bookkeeping operations, and eventually call the user-defined ``chooseSwaps`` method (located in Fuel Management Input). 
``chooseSwaps`` will generally contain calls to :py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.findAssembly`, :py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.swapAssemblies` , :py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.swapCascade`, and :py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.dischargeSwap`, which are the primary fuel management operations and can be found in the fuel management module. Also found in the user-defined Fuel Management Input module is a ``getFactors`` method, which is used to control which shuffling routines get called and at which time. .. note:: See the :py:mod:`fuelHandlers module <armi.physics.fuelCycle.fuelHandlers>` for more details. Fuel Management Operations -------------------------- In the ARMI, the assemblies can be moved as units around the reactor with swapAssemblies, dischargeSwap, and swapCascade of a ``FuelHandler`` interface. swapAssemblies ^^^^^^^^^^^^^^ swapAssemblies is the simplest fuel management operation. Given two assembly objects, this method will switch their locations. :: self.swapAssemblies(a1,a2) dischargeSwap ^^^^^^^^^^^^^ A discharge swap is a simple operation that puts a new assembly into the reactor while discharging an outgoing one. :: self.dischargeSwap(newIncoming,oldOutgoing) This operation keeps track of the outgoing assembly in a SpentFuelPool object that the Reactor object has access to so you can see how much of what you discharged. swapCascade ^^^^^^^^^^^ SwapCascade is a more powerful swapping function that can swap a list of assemblies in a "daisy-chain" type of operation. These are useful for doing the main overtone shuffling operations such as convergent shuffling and/or convergent-divergent shuffling. If we load up the list of assemblies, the first one will be put in the last one's position, and all others will shift accordingly. 
As an example, consider assemblies 1 through 5 in core positions A through E.:: self.swapCascade([a1,a2,a3,a4,a5]) This table shows the positions of the assemblies before and after the swap cascade. ======== ============================ =========================== Assembly Position Before Swap Cascade Position After Swap Cascade ======== ============================ =========================== 1 A E 2 B A 3 C B 4 D C 5 E D ======== ============================ =========================== Arbitrarily complex cascades can thusly be assembled by choosing the order of the assemblies passed into swapCascade. Choosing Assemblies to Move --------------------------- The methods described in the previous section require known assemblies to shuffle. Choosing these assemblies is the essence of fuel shuffling design. The single method used for these purposes is the FuelHandler's ``findAssembly`` method. This method is very general purpose, and ranks in the top 3 most important methods of the ARMI altogether. To use it, just say:: a = self.findAssembly(param='maxPercentBu',compareTo=20) This will return the assembly in the reactor that has a maximum burnup closest to 20%. Other inputs to findAssembly are summarized in the API docs of :py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.findAssembly`. Fuel Management Examples ------------------------ Convergent-Divergent ^^^^^^^^^^^^^^^^^^^^ Convergent-divergent shuffling is when fresh assemblies march in from the outside until they approach the jump ring, at which point they jump to the center and diverge until they reach the jump ring again, where they now jump to the outer periphery of the core, or become discharged. If the jump ring is 6, the order of target rings is:: [6, 5, 4, 3, 2, 1, 6, 7, 8, 9, 10, 11, 12, 13] In this case, assemblies converge from ring 13 to 12, to 11, to 10, ..., to 6, and then jump to 1 and diverge until they get back to 6. 
In a discharging equilibrium case, the highest burned assembly in the jumpRing should get discharged and the lowest should jump by calling a dischargeSwap on cascade[0] and a fresh feed after this cascade is run. The convergent rings in this case are 7 through 13 and the divergent ones are 1 through 5 are the divergent ones. Fuel Management Tips -------------------- Some mistakes are common. Follow these tips. * Always make sure your assembly-level types in the settings file are up to date with the grids in your bluepints file. Otherwise you'll be moving feeds when you want to move igniters, or something. * Use the exclusions list! If you move a cascade and then the next cascade tries to run, it will choose your newly-moved assemblies if they fit your criteria in ``findAssemblies``. This leads to very confusing results. Therefore, once you move assemblies, you should default to adding them to the exclusions list. * Print cascades during debugging. After you've built a cascade to swap, print it out and check the locations and types of each assembly in it. Is it what you want? * Watch ``typeNum`` in the database. You can get good intuition about what is getting moved by viewing this parameter. Running a branch search ----------------------- ARMI can perform a branch search where a number of fuel management operations are performed in parallel and the preferred one is chosen and proceeded with. The key to any branch search is writing a fuel handler that can interpret **fuel management factors**, defined as keyed values between 0 and 1. As an example, a fuel handler may be written to interpret two factors, ``numDischarges`` and ``chargeEnrich``. One method in the fuel handler would then take the value of ``factors['numDischarges']`` and multiply it by the maximum number of discharges (often set by another user setting) and then discharge this many assemblies. 
Similarly, another method would take the ``factors['chargeEnrich']`` value (between 0 and 1) and multiply it by the maximum allowable enrichment (again, usually controlled by a user setting) to determine which enrichment should be used to fabricate new assemblies. Given a fuel handler that can thusly interpret factors between 0 and 1, the concept of branch searches is simple. They simply build uniformly distributed lists between 0 and 1 across however many CPUs are available and cases on all of them, passing one of each of the factors to each CPU in parallel. When the cases finish, the branch search determines the optimal result and selects the corresponding value of the factor to proceed. Branch searches are controlled by custom ``getFactorList`` methods specified in the ``shuffleLogic`` input modules or files. This method should return two things: * A ``defaultFactors``; a dictionary with user-defined keys and values between 0 and 1 for each key. These factors will be passed to the ``chooseSwaps`` method, which is typically overridden by the user in custom fuel handling code. The fuel handling code should interpret the values and move the fuel according to what is sent. * A ``factorSearchFlags`` list, which lists the keys to be branch searched. The search will optimize the first key first, and then do a second pass on the second key, holding the optimal first value constant, and so on. Such a method may look like this:: def getFactorList(cycle,cs=None): # init default shuffling factors defaultFactors = {'chargeEnrich':0,'numDischarges':1} factorSearchFlags=[] # init factors to run branch searches on # determine when to activate various factors / searches if cycle not in [0,5,6]: # shuffling happens before neutronics so skip the first cycle. 
defaultFactors['chargeEnrich']=1 else: defaultFactors['numDischarges']=0 factorSearchFlags = ['chargeEnrich'] return defaultFactors,factorSearchFlags Once a proper ``getFactorList`` method exists and a fuel handler object exists that can interpret the factors, activate a branch search during a regular run by selecting the **Branch Search** option on the GUI. The **best** result from the branch search is determined by comparing the *keff* values with the ``targetK`` setting, which is available for setting in the GUI. The branch with *keff* closest to the setting, while still being above 1.0 is chosen. ================================================ FILE: doc/user/manual_data_access.rst ================================================ ********************** Accessing Data in ARMI ********************** A basic user only needs to know the CLI or GUI and can perform basic analysis and design with just that. But a power user will be more interested in programmatically building and manipulating inputs and gathering detailed information out of ARMI results. Let's now go into a bit more detail for the power user. Settings and State Variables ============================ The following links contain large tables describing the various global settings and state parameters in use across ARMI. * :ref:`settings-report` * :ref:`reactor-parameters-report` * :ref:`core-parameters-report` * :ref:`component-parameters-report` * :ref:`assembly-parameters-report` * :ref:`block-parameters-report` Accessing Some Interesting Info =============================== Often times, you may be interested in the geometric dimensions of various blocks. These are stored on the :py:mod:`components <armi.reactor.components>`, and may be accessed as follows:: # This may need to be ``o.r``. 
b = r.core.getFirstBlock(Flags.FUEL) fuel = b.getComponent(Flags.FUEL) # fuel outer diameter in cm od = fuel.getDimension('od',cold=True) odHot = fuel.getDimension('od') # hot dimension # hot inner diameter at a specific temperature id600 = fuel.getDimension('id',Tc=600) clad = b.getComponent(Flags.CLAD) # number of cladding pins (multiplicity) numClad = clad.getDimension('mult') cladMat = clad.getProperties() # get the cladding material # get the thermal conductivity of the clad material at 500C k = cladMat.thermalConductivity(Tc=500) The dimensions available depend on the shape of the component. Hexagons have `op` and `ip` for outer and inner pitch. Other options are seen at the source at :py:mod:`armi.reactor.components`. ================================================ FILE: doc/user/outputs.rst ================================================ ******* Outputs ******* ARMI output files are described in this section. Many outputs may be generated during an ARMI run. They fall into various categories: Framework outputs Files like the **stdout** and the **database** are produced in nearly all runs. Interface outputs Certain plugins/interfaces produce intermediate output files. Physics kernel outputs If ARMI executes an external physics kernel during a run, its associated output files are often available in the working directory. These files are typically read by ARMI during the run, and relevant data is transferred onto the reactor model (and ends up in the ARMI **database**). If the user desires to retain all of the inputs and outputs associated with the physics kernel runs for a given time step, this can be specified with the ``savePhysicsIO`` setting. For any time step specified in the list under ``savePhysicsIO``, a ``cXnY/`` folder will be created, and ARMI will store all inputs and outputs associated with each physics kernel executed at this time step in a folder inside of ``cXnY/``. The format for specifying a state point is 00X00Y for cycle X, step Y. 
Together the output fully define the analyzed ARMI case. The Standard Output =================== The Standard Output (or **stdout**) is a running log of things an ARMI run prints out as it executes a case. It shows what happened during a run, which inputs were used, which warnings were issued, and in some cases, what the summary results are. Here is an excerpt:: =========== Completed BOL Event =========== =========== Triggering BOC - cycle 0 Event =========== =========== 01 - main BOC - cycle 0 =========== [impt] Beginning of Cycle 0 =========== 02 - fissionProducts BOC - cycle 0 =========== =========== 03 - xsGroups BOC - cycle 0 =========== [xtra] Generating representative blocks for XS [xtra] Cross section group manager summary In a standard run, the various interfaces will loop through and print out messages according to the `verbosity` setting. In multi-processing runs, the **stdout** shows messages from the primary node first and then shows information from all other nodes below (with verbosity set by the `branchVerbosity` setting). Sometimes a user will want to set the verbosity of just one module (.py file) in the code higher than the rest of ARMI, to do so they can set up a custom logger by placing this line at the top of the file:: runLog = logging.getLogger(__name__) These single-module (file) loggers can be controlled using a the `moduleVerbosity` setting. All of these logger verbosities can be controlled from the settings file, for example:: branchVerbosity: debug moduleVerbosity: armi.reactor.reactors: info verbosity: extra If there is an error, a useful message may be printed in the **stdout**, and a full traceback will be provided in the associated **stderr** file. Some Linux users tend to use the **tail** command to monitor the progress of an ARMI run:: tail -f myRun.stdout This provides live information on the progress. .. 
_database-file: The Database File ================= The **database** file is a self-contained, binary representation of the state of the ARMI composite model state during a simulation. The database contains full, plain-text of the input files that were used to create the case. And for each time node, the values of all composite parameters as well as layout information to help fully reconstruct the reactor data model. Loading Reactor State --------------------- Among other things, the database file can be used to recover an ARMI reactor model from any of the time nodes that it contains. This can be useful for performing restart runs, or for doing custom post-processing analysis. To load a reactor state, you will need to open the database file into a ``Database`` object. From there, you can call the :py:meth:`armi.bookkeeping.db.Database.load()` method to get a recovered ``Reactor`` object. For instance, given a database file called ``myDatabase.h5``, we could load the reactor state at cycle 5, time node 2 with the following:: from armi.bookkeeping.db import databaseFactory db = databaseFactory("myDatabase.h5", "r") # The underlying file is not left open unless necessary. Use the # handy context manager to temporarily open the file and # interact with the data: with db: r = db.load(5, 2) .. note:: The cycles are 0-indexed, but the time nodes, in practice, are not. Therefore, cycle 5 above is actually the 6th cycle in the simulation. For cycle 5 with two time nodes, there will be three time steps saved to the database: c5n0 (BOC), c5n1 (time node 1), and c5n2 (time node 2). Extracting Reactor History -------------------------- Not only can the database reproduce reactor state for a given time node, it can also extract a history of specific parameters for specific objects through the :py:meth:`armi.bookkeeping.db.Database.getHistory()` and :py:meth:`armi.bookkeeping.db.Database.getHistories()` methods. 
For example, given the reactor object, ``r`` from the example above, we could get the entire history of an assembly's ring, position and areal power density with the following:: from armi.reactor.flags import Flags # grab a fuel assembly from the reactor a = r.core.getAssemblies(Flags.FUEL) # Don't forget to open the database! with db: aHist = db.getHistory(a, ["ring", "pos", "arealPd"]) Extracting Settings and Blueprints ---------------------------------- As well as the reactor states for each time node, the database file also stores the input files (blueprints and settings files) used to run the case that generated it. These can be recovered using the `extract-inputs` ARMI entry point. Use `python -m armi extract-inputs --help` for more information. File format ----------- The database file format is built on top of the HDF5 format. There are many tools available for viewing, editing, and scripting HDF5 files. The ARMI database uses the `h5py` package for interacting with the underlying data and metadata. At a high level there are 3 things to know about HDF5: 1. **Groups** - Groups are named collections of datasets. Think of a group as a filesystem folder. 2. **Datasets** - Datasets are named values. If a group is a folder, a dataset is a file. Values are strongly typed (think `int`, `float`, `double`, but also whether it is big endian, little endian so that the file is portable across different systems). Values can be scalar, vector, or N-dimensional arrays. 3. **Attributes** - Attributes can exist on a dataset or a group to provide supplemental information about the group or dataset. We use attributes to indicate the ARMI database version that was used to create the database, the time the case was executed, and whether or not the case completed successfully. We also sometimes apply attributes to datasets to indicate if any special formatting or layout was used to store Parameter values or the like. 
There are many other features of HDF5, but this is enough information to get started. Database Structure ------------------ The broad strokes of the database structure is outlined below. .. list-table:: Database structure :header-rows: 1 :class: longtable * - Name - Type - Description * - ``/`` - H5Group - root node * - ``/inputs/`` - H5Group - A group that contains all inputs * - ``/inputs/settings`` - string - A representation of the settings file that was used to create the case * - ``/inputs/blueprints`` - string - A representation of the blueprints file that used to create the case * - - - * - ``/c{CC}n{NN}/`` - H5Group - A group that contains the ARMI model for a specific cycle {CC} and time node {NN}. For the following, there may be a bit of pseudo-code to explain the origin of data. ``comp`` is any old component within the ARMI model hierarchy. Also, it is important to note that all components are flattened and then grouped by type. * - ``/c{CC}n{NN}EOL/`` - H5Group - A special time node, like the one above, where {CC} is the last cycle and {NN} is the last node. If this exists, it is meant to represent the EOL, which is perhaps a few days after the end of the last cycle, where fuel is decaying non-operationally. * - ``/c{CC}n{NN}/layout/`` - H5Group - A group that contains a description of the ARMI model within this timenode * - ``/c{CC}n{NN}/layout/name`` - list of strings - ``comp.name`` * - ``/c{CC}n{NN}/layout/type`` - list of strings - ``type(comp).__name__`` -- The name of the component type. We can use this to construct a new object when reading. You could also use it to filter down to data that you care about using hdf5 directly. * - ``/c{CC}n{NN}/layout/serialNum`` - list of int - ``comp.p.serialNum`` -- Serial number of the component. This number is unique within a component type. * - ``/c{CC}n{NN}/layout/location`` - list of 3-tuple floats - ``tuple(comp.spatialLocator) or (0, 0, 0)`` -- Gives the location indices for a given component. 
Note these are relative, so there are duplicates. * - ``/c{CC}n{NN}/layout/locationType`` - list of strings - ``type(comp.spatialLocator).__name__ or "None"`` -- The type name of the location. * - ``/c{CC}n{NN}/layout/indexInData`` - list of int - The components are grouped by ``type(comp).__name__``. The integers are a mapping between the component and its index in the ``/c{CC}n{NN}/{COMP_TYPE}/`` group. * - ``/c{CC}n{NN}/layout/numChildren`` - list of int - ``len(comp)`` -- The number of direct child composites this composite has. Notably, this is not a summation of all the children. * - ``/c{CC}n{NN}/layout/temperatures`` - list of 2-tuple floats - ``(comp.InputTemperatureInC, comp.TemperatureInC) or (-900, -900)`` -- Temperatures in for Component objects. * - ``/c{CC}n{NN}/layout/material`` - list of string - ``type(comp.material).__name__ or ""`` -- Name of the associated material for an Component. * - - - * - ``/c{CC}n{NN}/{COMP_TYPE}/`` - H5Group - ``{COMP_TYPE}`` corresponds to the ``type(comp).__name__``. * - ``/c{CC}n{NN}/{COMP_TYPE}/{PARAMETER}`` - list of inferred data - Values for all parameters for a specific component type, in the order defined by the ``/c{CC}n{NN}/layout/``. See the next table to see a description of the attributes. Python supports a rich and dynamic type system, which is sometimes difficult to represent with the HDF5 format. Namely, HDF5 only supports dense, homogeneous N-dimensional collections of data in any given dataset. Some parameter values do not fit into this mold. Examples of tricky cases are: * Representing ``None`` values interspersed among a bunch of ``floats`` * Jagged arrays, where each "row" of a matrix has a different number of entries (or higher-dimensional analogs) * Dictionaries None of these have a direct representation in HDF5. Therefore, the parameter values on the composite model sometimes need to be manipulated to fit into the HDF5 format, while still being able to faithfully reconstruct the original data. 
To accomplish this, we use HDF5 dataset attributes to indicate when some manipulation is necessary. Writing such special data to the HDF5 file and reading it back again is accomplished with the :py:func:`armi.bookkeeping.db.database.packSpecialData` and :py:func:`armi.bookkeeping.db.database.unpackSpecialData`. Refer to their implementations and documentation for more details. Loading Reactor State as Read-Only ---------------------------------- Another option you have, though it will probably come up less often, is to lead a ``Reactor`` object from a database file in read-only mode. Mostly what this does is set all the parameters loaded into the reactor data model to a read-only mode. This can be useful to ensure that downstream analysts do not modify the data they are reading. It looks much like the usual database load:: from armi.bookkeeping.db import databaseFactory db = databaseFactory("myDatabase.h5", "r") with db: r = db.loadReadOnly(5, 2) Another common use for ``Database.loadReadOnly()`` is when you want to build a tool for analysts that can open an ARMI database file without the ``App`` that created it. Solving such a problem generically is hard-or-impossible, but assuming you probably know a lot about the ``App`` that created an ARMI output file, this is usually doable in practice. To do so, you will want to look at the :py:class:`PassiveDBLoadPlugin <armi.bookkeeping.db.passiveDBLoadPlugin.PassiveDBLoadPlugin>`. This tool allows you to passively load an output database even if there are parameters or blueprint sections that are unknown. ================================================ FILE: doc/user/params_report.rst ================================================ .. _params-report: ================= Parameters Report ================= .. 
exec:: from armi.reactor import assemblies from armi.reactor import assemblyParameters from armi.reactor import blockParameters from armi.reactor import blocks from armi.reactor import reactorParameters from armi.reactor import reactors from armi.reactor.components import Component from armi.reactor.components.componentParameters import getComponentParameterDefinitions from dochelpers import generateParamTable s = generateParamTable(reactors.Reactor, reactorParameters.defineReactorParameters()) numR = s.count(" * - ") - 1 s = generateParamTable(reactors.Core, reactorParameters.defineCoreParameters()) numC = s.count(" * - ") - 1 s = generateParamTable(assemblies.Assembly, assemblyParameters.getAssemblyParameterDefinitions()) numA = s.count(" * - ") - 1 s = generateParamTable(blocks.Block, blockParameters.getBlockParameterDefinitions()) numB = s.count(" * - ") - 1 s = generateParamTable(Component, getComponentParameterDefinitions()) numComp = s.count(" * - ") - 1 numParams = numR + numC + numA + numB + numComp txt = f"This document lists all {numParams} Parameters in ARMI:\n\n" txt += f"* {numR} Reactor Parameters.\n" txt += f"* {numC} Core Parameters.\n" txt += f"* {numA} Assembly Parameters.\n" txt += f"* {numB} Block Parameters.\n" txt += f"* {numComp} Component Parameters.\n\n" return txt Users of the ARMI Framework are not required to use all of these parameters. And the system is easy to extend to add new Parameters for your use-cases. These are simply the default Parameters that come with ARMI. See :py:mod:`armi.reactor.parameters` for use. .. _reactor-parameters-report: ****************** Reactor Parameters ****************** This is a list of all of the Reactor Parameters that are provided by the ARMI Framework. .. exec:: from armi.reactor import reactors from armi.reactor import reactorParameters from dochelpers import generateParamTable return generateParamTable(reactors.Reactor, reactorParameters.defineReactorParameters()) .. 
_core-parameters-report: *************** Core Parameters *************** This is a list of all of the Core Parameters that are provided by the ARMI Framework. .. exec:: from armi.reactor import reactors from armi.reactor import reactorParameters from dochelpers import generateParamTable return generateParamTable(reactors.Core, reactorParameters.defineCoreParameters()) .. _assembly-parameters-report: ******************* Assembly Parameters ******************* This is a list of all of the Assembly Parameters that are provided by the ARMI Framework. .. exec:: from armi.reactor import assemblies from armi.reactor import assemblyParameters from dochelpers import generateParamTable return generateParamTable(assemblies.Assembly, assemblyParameters.getAssemblyParameterDefinitions()) .. _block-parameters-report: **************** Block Parameters **************** This is a list of all of the Block Parameters that are provided by the ARMI Framework. .. exec:: from armi.reactor import blocks from armi.reactor import blockParameters from dochelpers import generateParamTable return generateParamTable(blocks.Block, blockParameters.getBlockParameterDefinitions()) .. _component-parameters-report: ******************** Component Parameters ******************** This is a list of all of the Component Parameters that are provided by the ARMI Framework. .. exec:: from armi.reactor.components import Component from armi.reactor.components.componentParameters import getComponentParameterDefinitions from dochelpers import generateParamTable return generateParamTable(Component, getComponentParameterDefinitions()) ================================================ FILE: doc/user/physics_coupling.rst ================================================ **************** Physics Coupling **************** Loose Coupling ============== ARMI supports loose and tight coupling. Loose coupling is interpreted as one-way coupling between physics for a single time node. 
For example, a power distribution in cycle 0 node 0 is used to calculate a temperature distribution in cycle 0 node 0. This temperature is then used in cycle 0 node 1 to compute new cross sections and a new power distribution. This process repeats itself for the lifetime of the simulation. .. graphviz:: /.static/looseCouplingIllustration.dot Loose coupling is enabled by default in ARMI simulations. Tight Coupling ============== Tight coupling is interpreted as two-way communication between physics within a given time node. Revisiting our previous example, enabling tight coupling results in the temperature distribution being used to generate updated cross sections (new temperatures induce changes such as Doppler broadening feedback) and ultimately an updated power distribution. This process is repeated iteratively until a numerical convergence criteria is met. .. graphviz:: /.static/tightCouplingIllustration.dot The following settings are involved with enabling tight coupling in ARMI: 1. ``tightCoupling``: When ``True``, tight coupling is enabled. 2. ``tightCouplingSettings``: Used to specify which parameters and convergence criteria will be used to measure the convergence of a given interface. .. code-block:: yaml tightCoupling: true tightCouplingSettings: globalFlux: parameter: power convergence: 1.0e-4 thermalHydraulics: parameter: THmassFlowRate convergence: 1.0e-2 The ``tightCouplingSettings`` settings interact with the interfaces available in ARMI (or an ARMI app). The interface headers (i.e., "globalFlux" and "thermalHydraulics") must match the value prescribed for :py:attr:`Interface.purpose <armi.interfaces.interface.purpose>`. The option, ``parameter``, can be a registered parameter. The ``convergence`` option is expected to be any float value. In the current implementation, different interfaces may have different developer intended restrictions. For example, the global flux interface currently only allows the eigenvalue (i.e. 
:math:`k_{\text{eff}}`) or block-wise power to be valid ``parameter`` values. .. warning:: The inherent limitations of the above interface-based tight coupling settings have been documented and a new and improved user-interface is currently being developed. In the global flux interface, the following norms are used to compute the convergence of :math:`k_{\text{eff}}` and block-wise power. Eigenvalue ---------- The convergence of the eigenvalue is measured through an L2-norm. .. math:: \epsilon = \| k_\text{eff} \|_2 = \left( \left( k_\text{eff,old} - k_\text{eff,new} \right)^2 \right) ^ \frac{1}{2} Block-wise Power ---------------- The block-wise power can be used as a convergence mechanism to avoid the integral effects of :math:`k_{\text{eff}}` (i.e., over and under predictions cancelling each other out) and in turn, can have a different convergence rate. To measure the convergence of the power distribution with the prescribed tolerances (e.g., 1e-4), the power is scaled in the following manner (otherwise the calculation struggles to converge). For an assembly, :math:`a`, we compute the total power of the assembly, .. math:: a_{\text{power},i} = \sum_{j}b_{\text{power},(i,j)}, where :math:`i` is the :math:`i^{\text{th}}` assembly and :math:`j` is the :math:`j^{\text{th}}` block within assembly, :math:`i`. With the assembly power, we scale the block power and obtain an array of scaled block powers for a given assembly, :math:`\mathbf{b}_{i}`, .. math:: \mathbf{b}_{i} = \left\lbrace \frac{b_{\text{power},(i,j)}}{a_{\text{power},i}} \right\rbrace, \quad \forall j \in a_i. We can now calculate a convergence parameter for each assembly, .. math:: \epsilon_i &= \| \textbf{b}_{i,\text{old}} - \textbf{b}_{i,\text{new}} \|_2 \\ &=\sqrt{\sum_{i}\left( \textbf{b}_{i,\text{old}} - \textbf{b}_{i,\text{new}} \right)^2}. These assembly-wise convergence parameters are then stored in an array of convergence values, .. 
math:: \xi = \left\lbrace \epsilon_i \right\rbrace,\quad \forall i \in \text{Core}. The total convergence of the power distribution is finally measured through the infinity norm (i.e, the max) of :math:`\xi`, .. math:: \epsilon = \| \xi \|_{\inf} = \max \xi. The Global Flux Interface ------------------------- The :py:class:`Global Flux Interface <armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxInterface>` class will attempt to set its own ``TightCoupler`` based on ``keff``. To see the specifics, see: :py:meth:`_setTightCouplingDefaults <armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxInterface._setTightCouplingDefaults>`. If you want to change the tight coupling performance of the ``GlobalFluxInterface``, it would be easiest to just subclass the interface and over-write the `_setTightCouplingDefaults` method. ================================================ FILE: doc/user/radial_and_axial_expansion.rst ================================================ ****************************************** Radial and Axial Expansion and Contraction ****************************************** ARMI natively supports linear expansion in both the radial and axial dimensions for pin-type reactors. These expansion types function independently of one another and each have their own set of underlying assumptions and use-cases. Radial expansion happens by default but there are several settings that control axial expansion: * ``inputHeightsConsideredHot`` - Indicates whether blueprints heights have already been thermally expanded. If ``False``, ARMI will expand components at BOL consistent with provided temperatures. * ``assemFlagsToSkipAxialExpansion`` - Assemblies with a flag in this list will not be axially expanded. * ``detailedAxialExpansion`` - Allow each assembly to expand independently. This will result in a non-uniform mesh. If they happen, ARMI runs radial and axial expansion when objects are created from blueprints. 
That is, when the reactor is created from blueprints at BOL, these calculations are performed. But also at BOC if new assemblies are added to the core, then expansion will happen again when the assembly object is created from blueprints. Thermal Expansion ================= ARMI treats thermal expansion as a linear phenomena using a standard linear expansion relationship, .. math:: \frac{\Delta L}{L_0} = \alpha(T) \Delta T, :label: linearExp where, :math:`\Delta L` and :math:`\Delta T` are the change in length and temperature from the reference state, respectively, and :math:`\alpha` is the thermal expansion coefficient relative to :math:`T_0`. Expanding and rearranging Equation :eq:`linearExp`, we can obtain an expression for the new length, :math:`L_1`, .. math:: L_1 = L_0\left[1 + \alpha(T_1)\left(T_1 - T_0\right) \right]. :label: newLength Given Equation :eq:`linearExp`, we can create expressions for the change in length between our "hot" temperature (Equation :eq:`hotExp`) .. math:: \begin{aligned} \frac{L_h - L_0}{L_0} &= \alpha(T_h)\left(T_h - T_0\right),\\ \frac{L_h}{L_0} &= 1 + \alpha(T_h)\left(T_h - T_0\right). \end{aligned} :label: hotExp and "non-reference" temperature, :math:`T_c` (Equation :eq:`nonRefExp`), .. math:: \begin{aligned} \frac{L_c - L_0}{L_0} &= \alpha(T_c)\left(T_c - T_0\right),\\ \frac{L_c}{L_0} &= 1 + \alpha(T_c)\left(T_c - T_0\right). \end{aligned} :label: nonRefExp These are used within ARMI to enable thermal expansion and contraction with a temperature not equal to the reference temperature, :math:`T_0`. By taking the difference between Equation :eq:`hotExp` and :eq:`nonRefExp`, we can obtain an expression relating the change in length, :math:`L_h - L_c`, to the reference length, :math:`L_0`, .. math:: \begin{aligned} \frac{L_h - L_0}{L_0} - \frac{L_c - L_0}{L_0} &= \frac{L_h}{L_0} - 1 - \frac{L_c}{L_0} + 1, \\ &= \frac{L_h - L_c}{L_0}. 
\end{aligned} :label: diffHotNonRef Using Equations :eq:`diffHotNonRef` and :eq:`nonRefExp`, we can obtain an expression for the change in length, :math:`L_h - L_c`, relative to the non-reference temperature, .. math:: \frac{L_h - L_c}{L_c} &= \frac{L_h - L_c}{L_0} \frac{L_0}{L_c}\\ &= \left( \frac{L_h}{L_0} - \frac{L_c}{L_0} \right) \left( 1 + \alpha(T_c)\left(T_c - T_0\right) \right)^{-1}. :label: expNewRelative Using Equations :eq:`hotExp` and :eq:`nonRefExp`, we can simplify Equation :eq:`expNewRelative` to find, .. math:: \frac{L_h - L_c}{L_c} = \frac{\alpha(T_h) \left(T_h - T_0\right) - \alpha(T_c)\left(T_c - T_0\right)}{1 + \alpha(T_c)\left(T_c - T_0\right)}. :label: linearExpansionFactor Equation :eq:`linearExpansionFactor` is the expression used by ARMI in :py:meth:`linearExpansionFactor <armi.materials.material.Material.linearExpansionFactor>`. .. note:: :py:meth:`linearExpansionPercent <armi.materials.material.Material.linearExpansionPercent>` returns :math:`\frac{L - L_0}{L_0}` in %. Given that thermal expansion (or contraction) of solid components must conserve mass throughout the system, the density of the component is adjusted as a function of temperature based on Equation :eq:`hot_density_general`, assuming isotropic thermal expansion. .. math:: \rho(T_h) = \frac{\rho(T_0)}{\left(1 + \frac{\Delta L}{L_0}\right)^3} = \frac{\rho(T_0)}{\left(1 + \alpha_m (T_h) (T_h - T_0)\right)^3} :label: hot_density_general where, :math:`\rho(T_h)` is the component density in :math:`\frac{kg}{m^3}` at the given temperature :math:`T_h`, :math:`\rho(T_0)` is the component density in :math:`\frac{kg}{m^3}` at the reference temperature :math:`T_0`, and :math:`\alpha(T_h)` is the mean coefficient of thermal expansion at the specified temperature :math:`T_h` relative to the material's reference temperature. An update to mass densities is applied for all solid components given the assumption of isotropic thermal expansion. 
Here we assume the masses of non-solid components (e.g., fluids or gases) are allowed to change within the reactor core model based on changes in the volumes of the solid components.
list-table:: Example Calculation of Radial and Axial Thermal Expansion for a Cylindrical Component :widths: 33 33 33 :header-rows: 1 * - Component Temperature - 20°C - 1000°C * - Radius - 0.25 cm - 0.251 cm * - Height - 5.0 cm - 5.01 cm * - Volume - 0.982 cc - 0.988 cc * - Density - 1.0 g/cc - 0.994 g/cc * - Mass - 0.982 g - 0.982 g .. math:: :name: hot_radius r(T_h) = 0.25 \left(1 + \left(2\times 10^{-6}(1000 − 20)\right)\right) = 0.251 cm .. math:: :name: hot_height h(T_h) = 5.0 \left(1 + \left(2\times 10^{-6}(1000 − 20)\right)\right) = 5.01 cm .. math:: :name: hot_volume V(T_h) = \pi (0.251)^2 5.01 = 0.988 cm^3 .. math:: :name: hot_density \rho(T_h) = \frac{1.0}{\left(1 + 2\times 10^{-6}(1000 − 20)\right)^3} = 0.994 \frac{g}{cc} .. math:: :name: hot_mass m(T_h) = 0.994 \times 0.988 = 0.982 g Radial thermal expansion occurs for each Component in a given Block. Mechanical contact between components is not accounted for, meaning that the radial expansion of one Component is independent from the radial expansion of the others. Solid components may be radially linked to gas/fluid components (i.e., sodium bond, helium) and the gas/fluid area is allowed to radially expand and contract with changes in Component temperature. It is worth noting that void components are allowed to have negative areas in cases where the expansion of two solid components overlap each other. Axial thermal expansion occurs for each solid Component with a given Block. Axial mechanical contact between components is accounted for as the expansion or contraction of a Component affects the positions of components in mechanical contact in axially neighboring blocks. The logic for determining Component-to-Component mechanical contact is described in Section :ref:`axialLink`. When two or more solid components exist within the Block, the change in Block height is driven by an axial expansion "target Component" (e.g., fuel). 
The logic for determining the axial expansion "target Component" is provided in Section :ref:`axialExpTargetComp`. Figures :ref:`components_for_exp_illustration` and :ref:`axial_exp_illustration` provide illustrations of the axial thermal expansion process for an example core assembly. In this example there are four main block types defined: Shield, Fuel, Plenum, and Dummy. .. note:: The "dummy" Block is necessary to maintain a consistent core-wide assembly height as this is a common necessity for physics solvers utilizing discrete-ordinates discretization methods. .. figure:: /.static/axial_expansion_components.png :name: components_for_exp_illustration Illustration of Components for Axial Thermal Expansion Process .. figure:: /.static/axial_expansion_process.png :name: axial_exp_illustration Simplified Illustration of Axial Thermal Expansion Process for a Core Assembly The target components for each Block type are provided in the following table: .. list-table:: Example Assignment of Target Components within Blocks :widths: 50 50 :header-rows: 1 * - Block - Target Component * - Shield - Shield * - Fuel - Fuel * - Plenum - Clad * - Dummy - N/A The axial thermal expansion algorithm is applied in four steps: #. Expand the axial dimensions of each solid Component within each block independently. #. Align blocks axially such that axially-linked components have consistent alignments (e.g., overlapping radial dimensions). #. Assign the Block lower and upper elevations to account for the thermal expansion of blocks below each Block. * Create new mesh lines (i.e., Block bounds) that track the target component. #. Adjust the "dummy" Block located at the top of the assembly to maintain a consistent core-wide assembly height before and after axial thermal expansion is applied. .. 
_axialLink: Component-to-Component Axial Linking ------------------------------------ For components to be in mechanical contact, and therefore axially linked, they need to meet the following criteria: #. The same Component class. E.g., both are :py:class:`basicShapes.Circle`. #. Both solid materials. If those are met, then geometric overlap may be checked if the following are met: #. The components are not :py:class:`components.UnshapedComponent` #. The components have the same multiplicity #. Or, they share the same grid indices, as specified by a Block :py:class:`<grid> grids.locations.MultiIndexLocation`. Finally, geometric overlap is established if the biggest inner bounding diameter of the components is less than the smallest outer bounding diameter of the components. Limitations ^^^^^^^^^^^ A current limitation of the axial linking logic is that multiple Components may not be linked to a single Component. E.g., consider the following: #. A solid cylinder with an outer diameter of 1.0 cm. #. Above, a solid cylinder wrapped with an annular cylinder (separate ARMI components) each with the following dimensions: * Solid cylinder with an outer diameter of 0.5 cm. * Annulus with inner diameter of 0.5 cm and outer diameter of 0.75 cm. For the above example, in reality, the annulus wrapped pin (two separate ARMI components) would be affected by any changes in height from the solid cylinder. However, this set up is not allowed by the current implementation and will raise a ``RuntimeError``. A second limitation of the component linking implementation involves the Block grid based approach. When Block grids are used to specify a pin lattice, the Block-grid should be used throughout the Assembly definition; i.e., a mixture of the Block-grid and multiplicity assignment should not be used (and will likely produce unexpected results and may even fail). 
For example, in the following partial blueprint definition, in reality, each shield pin should be in mechanical contact with the fuel pins. However, since there is a mixture of mulitiplicity and Block-grid approaches, they are assumed to be not-linked. In order to ensure properly linking, ``block_fuel_axial_shield`` needs to be redefined with the Block-grid based approach. .. code-block:: yaml axial shield: &block_fuel_axial_shield shield: shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 mult: 169.0 od: 0.86602 fuel multiPin: &block_fuel_multiPin grid name: twoPin fuel 1: &component_fuelmultiPin shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.86602 latticeIDs: [1] fuel 2: <<: *component_fuelmultiPin latticeIDs: [2] The following incorporates the fix for ``block_fuel_axial_shield`` and illustrates another potentially undesirable situation where unexpected results or runtime failure may occur. Here a plenum block is added above the fuel and while it does utilize a Block-grid, ``clad`` will not be axially linked to either the ``fuel 1`` or ``fuel 2`` components below it. This is because the ``clad`` and ``fuel*`` components have different grids via their ``grid.spatialLocator`` values. As in the previous example, similar unexpected behavior would also occur if a multiplicity-based definition were used for ``clad``. .. 
code-block:: yaml axial shield multiPin: &block_fuel_multiPin_axial_shield grid name: twoPin shield 1: &component_shield_shield1 shape: Circle material: HT9 Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.8 latticeIDs: [1] shield 2: <<: *component_shield_shield1 latticeIDs: [2] fuel multiPin: &block_fuel_multiPin grid name: twoPin fuel 1: &component_fuelmultiPin shape: Circle material: UZr Tinput: 25.0 Thot: 600.0 id: 0.0 od: 0.8 latticeIDs: [1] fuel 2: <<: *component_fuelmultiPin latticeIDs: [2] plenum 2pin: &block_plenum_multiPin grid name: twoPin clad: shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.9 od: 1.0 latticeIDs: [1,2] To resolve this potential issue, ``block_plenum_multiPin`` should be replaced with the following definition. See the ``multi pin fuel`` assembly definition within ``armi/tests/detailedAxialExpansion/refSmallReactorBase.yaml`` for a complete example. .. code-block:: yaml plenum 2pin: &block_plenum_multiPin grid name: twoPin clad 1: &component_plenummultiPin_clad1 shape: Circle material: Void Tinput: 25.0 Thot: 600.0 id: 0.9 od: 1.0 latticeIDs: [1] clad 2: <<: *component_plenummultiPin_clad1 latticeIDs: [2] .. _axialExpTargetComp: Target Component Logic ---------------------- When two or more solid components exist within a Block, the overall height change of the Block is driven by an "axial expansion target component" (e.g., fuel). This Component may either be inferred from the flags prescribed in the blueprints or manually set using the ``axial expansion target component`` block blueprint attribute. The following logic is used to infer the target component: #. Search Component flags for neutronically important components. These are defined in :py:data:`expansionData.TARGET_FLAGS_IN_PREFERRED_ORDER`. #. Compare the Block and Component flags. If a Block and Component contain the same flags, that Component is selected as the axial expansion target Component. #. 
If a Block has :py:data:`flags.flags.PLENUM` or :py:data:`flags.flags.ACLP`, the :py:data:`flags.flags.CLAD` Component is hard-coded to be the axial expansion target component. If one does not exist, an error is raised. #. "Dummy Blocks" are intended to only contain fluid (generally coolant fluid), and do not contain solid components, and therefore do not have an axial expansion target component. .. _mass_conservation: Mass Conservation ----------------- Due to the fact that all components within a Block are the same height, the conservation of mass post-axial expansion is not trivial. The ``axial expansion target component`` plays a critical role in the conservation of mass. For pinned-blocks, this is typically chosen to be the most neutronically important Component; e.g., in a fuel Block this is typically the fuel Component. Generally speaking, components which are not the axial expansion target will exhibit non-conservation on the Block-level as mass is redistributed across the axially- neighboring blocks; this is discussed in more detail in :numref:`mass_redistribution`. However, the mass of all solid components are designed to be conserved at the assembly-level if the following are met for a given assembly design. #. Axial continuity of like-objects. E.g., pins, clad, etc. #. Components that may expand at different rates axially terminate in unique blocks * E.g., the clad extends above the termination of the fuel and the radial duct encasing an assembly extends past the termination of the clad. #. The top-most Block must be a "dummy Block" containing fluid (typically coolant). See `armi.tests.detailedAxialExpansion <https://github.com/terrapower/armi/tree/main/armi/tests/detailedAxialExpansion>`_ for an example blueprint which satisfy the above requirements. .. 
_mass_redistribution: Block-Level Mass Redistribution ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Figure :ref:`mass_redistribution_illustration` illustrates the mass redistribution process for axial expansion in ARMI given a uniform axial expansion of 10% for fuel components. .. figure:: /.static/mass_redistribution_illustration.png :name: mass_redistribution_illustration Illustration of mass redistribution for axial expansion in ARMI. The redistribution process can be written mathematically. In Figure :ref:`mass_redistribution_illustration`, consider the exchange of mass between the clad in Block 0 and Block 1, .. math:: :name: cMass0 \hat{c}_{0,m} = c_{0,m} + 0.1c_{1,m} .. math:: :name: cMass1 \hat{c}_{1,m} = 0.9c_{1,m}, where :math:`c_{0/1,m}` represents the clad mass in Block 0/1 prior to redistribution and :math:`\hat{c}_{0/1,m}` represents the clad mass in Block 0/1 after redistribution, respectively. To compute the post-redistribution mass on-the-fly, the post-mass redistribution number densities, :math:`\hat{N}_{i,0/1}`, where the subscript :math:`i,0/1` represents isotope :math:`i` for Block 0/1, need to be computed. Computing :math:`\hat{N}_{i,1}` satisfying :math:`\hat{c}_{1,m}` can be found by scaling the pre-redistribution number densities by the expansion factor. In practice however, the number densities are not changed and the mass is decreased through the reduction in the height of the parent Block. .. note:: Recall, component mass in ARMI is calculated as the product of the mass density of the component, the area of the component, and the height of the block. The mass of components can be tuned through either of these three parameters. Computing :math:`\hat{N}_{i,0}` is non-trivial as, in general, :math:`c_0` and :math:`c_1` are at different temperatures. Consider, .. 
math:: :name: newCMass \hat{c}_{0,m} &= c_{0,m} + 0.1c_{1,m},\\ &= \sum_{x=0}^M N_{x,0} A_0(T_0) h_0 + 0.1 \sum_{j=0}^K N_{j,1} A_1(T_1) h_1,\\ \sum_{i=0}^P \hat{N}_{i,0} A_0(\hat{T}_0) \hat{h}_0 &= \sum_{x=0}^N N_{x,0} A_0(T_0) h_0 + \sum_{j=0}^K 0.1 N_{j,1} A_1(T_1) h_1 \big), where, * :math:`A_{0/1}(T_{0/1})` is the area of Component 0/1 at temperature 0/1, * :math:`h_{0/1}` is the height of Component 0/1, * :math:`N`, :math:`K` are the total number of isotopes in Component 0/1, respectively, * :math:`P` is the union of the isotopes in Component 0/1, * and :math:`\hat{\square}` represents post-redistribution values. The post-redistribution height, :math:`\hat{h}_0` is found to be the sum of the pre-expansion height, :math:`h_0`, and the different in z-elevation between it and the ``axial expansion target component`` for the Block, :math:`b`, .. math:: \hat{h}_0 &= h_0 + \delta,\\ &= h_0 + \left(b_{\text{ztop}} - c_{\text{ztop}}\right). .. note:: #. Recall, axial block bounds are determined by the ``axial expansion target component`` so the top z-elevation ``ztop`` for the block is the same as the top of the ``axial expansion target component``. #. In the axial expansion module, components are given z-elevation attributes. This information is not serialized to the database. With :math:`\hat{h}_0` known, the two remaining unknowns in Equation :eq:`newCMass` are the post-redistribution temperature, :math:`\hat{T}_0`, and number densities, :math:`\hat{N}_{i,0}`. The latter are solved by using the expected post-redistribution per-isotope mass and component volume. The mass of isotope, :math:`i`, for Block 0/1 is calculated as follows, .. math:: m_{i,0/1} = N_{i,0/1} V_{0/1} \alpha_i \chi, where :math:`\alpha_i` is the atomic weight for isotope, :math:`i`, and :math:`\chi` is a constant scaling from moles per cc to atoms per barn per cm. Given :math:`m_i`, the post redistribution number density is calculated as follows, .. 
math:: \hat{N}_{i,0} = \frac{\left( m_{i,0} + m_{i,1} \right) \chi}{ \big(A_1(T_1) h_1 + A_2(T_2)\delta\big) \alpha_i}. The post redistribution temperature, :math:`\hat{T}_0`, is computed by minimizing the residual of the difference between the actual post-redistribution area of the Component and its expected area, .. math:: :name: newTemp A_0(\hat{T}_0) \left( h_1 + \delta \right) &= A_1(T_1) h_1 + A_2(T_2)\delta,\\ A_0(\hat{T}_0) &= \frac{A_1(T_1) h_1 + A_2(T_2)\delta}{h_1 + \delta}. The minimization of Equation :eq:`newTemp` is solved using Brent's method within ``scipy`` where the bounds of the solve are the temperatures of the two components exchanging mass, :math:`T_0` and :math:`T_1`. In some instances, the minimization may fail. In this case, a mass weighted temperature is used instead, .. math:: :name: consolationPrize \hat{T}_0 = \frac{m_{i,0}T_0 + m_{i,1}T_1}{m_{i,0} + m_{i,1}}. Warnings and Runtime Error Messages ----------------------------------- Mass Redistribution Between Like Materials ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Mass redistribution is currently only possible between components that are the same material. This restriction is to ensure that material properties post-redistribution are known (e.g., mixing different alloys of metal may result in a material with unknown properties). If components of different materials are attempted to have their mass redistributed, the following warning is populated to the stdout: .. code-block:: Cannot redistribute mass between components that are different materials! Trying to redistribute mass between the following components in <Assembly>: from --> {<Block 0>} : {<Component 0>} : {<Material 0>} to --> {<Block 1>} : {<Component 1>} : {<Material 1>} Instead, mass will be removed from (<Component 0> | <Material 0>) and (<Component 1> | <Material 1> will be artificially expanded. The consequence is that mass conservation is no longer guaranteed for the <Component 1> component type on this assembly! 
Post-Redistribution Temperature Search Failure ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ As described in :numref:`mass_redistribution`, the minimization of Equation :eq:`newTemp` may fail. The two mechanisms in which Brent's method may fail are if Equation :eq:`newTemp` does not have opposite signs at each prescribed temperature bound of if Equation :eq:`newTemp` is discontinuous. If the minimization routine fails, the following warning is printed to the stdout: .. code-block:: Temperature search algorithm in axial expansion has failed in <Assembly> Trying to search for new temp between from --> <Block 0> : <Component 0> : <Material 0> at <Temperature 0> C to --> <Block 1> : <Component 1> : <Material 1> at <Temperature 1> C f(<Temperature 0>) = {Area 0(Tc=<Temperature 0>) - targetArea} f(<Temperature 1>) = {Area 0(Tc=<Temperature 1>) - targetArea} Instead, a mass weighted average temperature of {Component 0} will be used. The consequence is that mass conservation is no longer guaranteed for this component type on this assembly! .. note:: The above warning has been limited to only components which have the ``FUEL`` or ``CONTROL`` flag. These are determined to be the most neutronically important components where the impact of this warning are the most relevant. An example of where this warning may raise is in the following: #. If two axially linked components have the same ``Thot`` values and different ``Tinput`` values, they will be the same temperature and have different areas. The range for the temperature search is null and will be impossible to find a temperature satisfying Equation :eq:`newTemp`. #. If the coefficient of thermal expansion for a material is sufficiently small relative the difference in temperature between two component, the bounds of Equation :eq:`newTemp` may not generate opposite signs and Brent's method will fail. 
Negative Block or Component Heights ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If a Block or Component height becomes negative, an ``ArithmeticError`` is raised indicating which Block and/or Component has a negative height. Both signal a non-physical condition that is un-resolveable in the current implementation. This is often caused by thermal expansion of a solid component being drastically different that the other components nearby. Inconsistent Component and Block Heights ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The current implementation is designed such that the heights of each Component and their parent block remain consistent. However, these can go out of sync and have been found to be due incompatible blueprints definitions. As stated in :numref:`mass_conservation`, in order for mass to be conserved, each component must axially terminate in unique blocks. If a given blueprint does not meet this condition, the following warning may be raised for non-isothermal conditions: .. code-block:: The height of <Component> has gone out of sync with its parent block! Assembly: <Assembly> Block: <Block> Component: <Component> Block Height = <Block Height> Component Height = <Component Height> The difference in height is <height difference> cm. This difference will result in an artificial <"increase" or "decrease"> in the mass of <Component>. This is indicative that there are multiple axial component terminations in <Block>. Per the ARMI User Manual, to preserve mass there can only be one axial component termination per block. If the different in height is positive, then the Component in question extends above the bounds of its parent Block and its mass will be artificially chopped proportional to the difference in height. If the difference in height is negative, the the Component in question stops below the bounds of the parent Block and its mass with artificially increase proportional to the different in height. .. 
note:: The above warning has been limited to only components which have the ``FUEL`` or ``CONTROL`` flag. These are determined to be the most neutronically important components where the impact of this warning are the most relevant. ================================================ FILE: doc/user/settings_report.rst ================================================ .. _settings-report: =============== Settings Report =============== .. exec:: from armi import settings cs = settings.Settings() numSettings = len(cs.values()) return f"This document lists all {numSettings} `settings <#the-settings-input-file>`_ in ARMI.\n" They are all accessible to developers through the :py:class:`armi.settings.caseSettings.Settings` object, which is typically stored in a variable named ``cs``. Interfaces have access to a simulation's settings through ``self.cs``. .. exec:: import textwrap from dochelpers import escapeSpecialCharacters from armi import settings def looks_like_path(s): """Super quick, not robust, check if a string looks like a file path.""" if s.startswith("\\\\") or s.startswith("//") or s[1:].startswith(":\\"): return True return False subclassTables = {} cs = settings.Settings() # User textwrap to split up long words that mess up the table. ws = " " ws2 = ws + " " ws3 = ws2 + " " wrapper = textwrap.TextWrapper(width=25, subsequent_indent='') wrapper2 = textwrap.TextWrapper(width=10, subsequent_indent='') content = '\n.. container:: break_before ssp-landscape\n\n' content += ws + '.. 
list-table:: ARMI Settings\n' content += ws2 + ':widths: 30 40 15 15\n' content += ws2 + ':class: ssp-tiny\n' content += ws2 + ':header-rows: 1\n\n' content += ws2 + '* - Name\n' + ws3 + '- Description\n' + ws3 + '- Default\n' + ws3 + '- Options\n' for setting in sorted(cs.values(), key=lambda s: s.name): content += ws2 + '* - {}\n'.format(' '.join(wrapper.wrap(setting.name))) description = escapeSpecialCharacters(str(setting.description) or "") content += ws3 + "- {}\n".format(" ".join(wrapper.wrap(description))) default = str(getattr(setting, 'default', None)).split("/")[-1] options = str(getattr(setting,'options','') or '') if looks_like_path(default): # We don't want to display default file paths in this table. default = "" options = "" content += ws3 + '- {}\n'.format(' '.join(['``{}``'.format(wrapped) for wrapped in wrapper2.wrap(default)])) content += ws3 + '- {}\n'.format(' '.join(['``{}``'.format(wrapped) for wrapped in wrapper2.wrap(options)])) content += '\n' return content ================================================ FILE: doc/user/spatial_block_data.rst ================================================ ****************** Spatial block data ****************** Many parameters assigned on a ``Block`` are scalar quantities that are useful for visualization and simple queries (e.g., block with the maximum burnup in an assembly). Spatial parameters in a block, such as power produced by each pin, is also of interest. Especially when communicating data to physics codes that support sub-block geometric modeling. This page will talk about how spatial information is assigned to components on a block, how spatial data can be assigned and accessed, and how those data may or may not be updated by the framework. Sub-block spatial grid ====================== There are two ways to create the block grid: explicitly via blueprints or via an automated builder. The former is recommended, but the later can work in some specific circumstances. 
Blueprints ---------- In your blueprints file, you likely have a core grid that defines where assemblies reside in the reactor. Assemblies are assigned to locations on that grid according to their ``specifier`` blueprint attribute. Below is an example of a "flats up" hexagonal core grid of fuel assemblies with 1/3 symmetry. .. code:: yaml grids: core: geom: hex symmetry: third periodic lattice map: | F F F F F F F We can similarly define a grid for the block with a similar entry in the ``grids`` portion of the blueprints. .. code:: yaml pins: geom: hex_corners_up symmetry: full lattice map: | - - - - - - - - - 1 1 1 1 1 1 1 1 1 1 - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1 - - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 This creates a ten-ring hexagonal lattice in a "corners up" orientation. While the resulting geometry may look like a flats up lattice, the individual hexagons that make up a lattice site are corners up. .. note:: The sub-block grid does not need to be of a different orientation of the parent block. A flats up hex block can have a flats up pin lattice. In most cases, an assembly full of pins will have a pin lattice that is off a different type to maximally load pins into the block. Say we wanted to have a guide tube at the center lattice site with cladding surrounding void and every other lattice site to contain a fuel pin. We need to add the following items to our block definition to link the grid, and to assign components to sites on the grid. 1. 
The block needs a ``grid name`` entry that points to the grid we want to use for this block. 2. Each component that wants to be placed on a lattice site needs a ``latticeIDs`` entry that contains the IDs, like assembly specifiers in the core grid, for that component. In the example above, we have two lattice IDs: ``0`` for the center site and ``1`` for the other pins. These are chosen for brevity but we could have also done ``fuel`` and ``guide`` or ``F`` and ``G``. Do what makes sense for you. .. note:: Like with assembly specifiers, keeping the lattice IDs to have the same number of characters will help the grid render nicer in text editors. This is not a requirement, but it may make life easier for you and your team. Our complete block definition would start like:: blocks: &block_fuel grid name: pins fuel: shape: Circle material: UO2 Tinput: 20 Thot: 20 od: 0.819 latticeIDs: [1] clad: shape: Circle material: UO2 Tinput: 20 Thot: 20 id: 0.819 od: 0.9 latticeIDs: [0, 1] void: shape: Circle material: Void Tinput: 20 Thot: 20 od: 0.819 latticeIDs: [0] Note that we can assign the same component to multiple lattice sites with multiple entries in the ``latticeIDs`` list. Also note that we do not need to assign a ``mult`` entry to these components. Their multiplicity will be determined based on the number of lattice sites they occupy! .. seealso:: The :ref:`LWR tutorial <walkthrough-lwr>` contains additional examples for working with sub-block grids. Auto grid --------- In some cases, you may have an assembly that contains one pin type. The framework provides a mechanism for automatically constructing a spatial grid for the block based only on the multiplicity of pin-like components. When constructing a block from blueprints, a grid may be added to the block depending on: 1. The existence of an explicitly defined block grid, like in the previously discussed section, and 2. If the ``autoGenerateBlockGrids`` setting is active. 
Should either of these conditions be met, the framework will attempt to add a grid by calling :meth:`armi.reactor.blocks.Block.autoCreateSpatialGrids`. However, this behavior is not generalized and only implemented on :class:`armi.reactor.blocks.HexBlock`, which makes the following assumptions: 1. You want a corners up hexagonal lattice grid. 2. The pitch of your hexagonal lattice is determined by :meth:`armi.reactor.blocks.HexBlock.getPinPitch` which may place restrictions on what constitutes a pin. 3. The number of pins is determined by :meth:`armi.reactor.blocks.HexBlock.getNumPins` which may place similar restrictions on what constitutes a pin. If the auto grid creation is successful, components with a multiplicity equal to the number of pins will be assigned locations on the lattice grid. .. warning:: Consider subclassing :class:`~armi.reactor.blocks.HexBlock` with specific pin-like methods and overriding the :meth:`~armi.reactor.blocks.HexBlock.autoCreateSpatialGrids` if you want complete control over this process. Alternatively, use an explicit grid in blueprints. Interacting with spatial data ============================= This section will focus on accessing locations of components in the block, locations of specifically pins, and examples of some pin data that may be assigned to a block's parameter set. Component locations ------------------- Components that live on a spatial grid have a ``spatialLocator`` attribute to help indicate where that component exists in space. If we grab the fuel component from the UO2 block in the :ref:`ANL AFCI 177 example <walkthrough-inputs>` we can see where it exists in the block:: >>> import armi >>> armi.configure() >>> from armi.reactor.flags import Flags >>> r = armi.init(fName="anl-afci-177.yaml").r >>> fuelAssem = r.core[5] >>> fuelBlock = fuelAssem[1] >>> fuelBlock.spatialGrid <HexGrid -- 2046645914880 Bounds: None None None Steps: [ 0.4444 -0.4444 0. ] [0.76972338 0.76972338 0. ] [0. 0. 0.] 
Anchor: <fuel B0009-001 at 008-040-001 XS: C ENV GP: A> Offset: [0. 0. 0.] Num Locations: 400> >>> fuel = fuelBlock.getChildrenWithFlags(Flags.FUEL)[0] >>> fuel.getDimension("mult") 271 >>> fuel.spatialLocator <MultiIndexLocation with 271 locations> This :class:`~armi.reactor.grids.MultiIndexLocation` is a way to indicate this Component exists at multiple sites. Each item in this locator is one location on the underlying grid where we could find this component:: >>> fuel.spatialLocator[0] <IndexLocation @ (0,0,0)> >>> fuel.spatialLocator[0].getLocalCoordinates() array([0., 0., 0.]) >>> coordsFromFuel = fuel.spatialLocator.getLocalCoordinates() >>> coordsFromFuel.shape (271, 3) We get a ``(271, 3)`` array because we have 271 of these fuel components in the block, and each row contains one (x, y, z) location for that component. We can do this for every component, though some may only exist at a single site on the grid and be assigned a :class:`~armi.reactor.grids.CoordinateLocation` spatial locator instead. The API is mostly the same, but attempts to signify such an object does not live on the grid e.g., duct or derived shape objects:: >>> duct = fuelBlock.getChildrenWithFlags(Flags.DUCT)[0] >>> duct.spatialLocator <CoordinateLocation @ (0.0,0.0,0.0)> Pin locations ------------- Everything in the before section works for finding center points of pins in your assembly. But often times you have multiple components that may exist at the same lattice site (e.g., fuel, gap, clad, maybe a wire?). Or you may have multiple cladded-things that count as pins and but exist in multiple components. In some circumstances, :meth:`armi.reactor.blocks.HexBlock.getPinCoordinates` may be useful to find the unique centroids of pins in a block. 
Using our example above, we get a very similar set of coordinates when comparing to the coordinates of the fuel pin:: >>> coordsFromPin = fuel.spatialLocator.getLocalCoordinates() >>> coordsFromBlock = fuelBlock.getPinCoordinates() >>> (coordsFromPin == coordsFromBlock).all() True In this specific case :meth:`~armi.reactor.blocks.HexBlock.getPinCoordinates` looks at components with ``Flags.CLAD`` and obtains their locations, and we have one cladding component and it exists at each of the 271 sites we care about. However, if you have multiple cladding components per lattice site, such as in the :ref:`C5G7 example <walkthrough-lwr>`, you may see an incorrect number of locations returned. .. note:: Consider making application-specific subclasses of ``Block``, ``HexBlock``, and/or ``CartesianBlock`` with more targeted implementations of :meth:`~armi.reactor.blocks.Block.getNumPins`, :meth:`~armi.reactor.blocks.Block.getPinPitch`, :meth:`~armi.reactor.blocks.Block.getPinLocations` and other pin-specific methods. Pin parameter data ------------------ The ARMI framework defines a few parameters that live on the block, but define data for each of the child pin components. Two examples are ``Block.p.linPowByPin`` and ``Block.p.pinMgFluxes``. These parameters are structured and related to the output of ``getPinCoordinates`` such that 1. Pin ``i`` can be found at ``Block.getPinCoordinates()[i]``. 2. Parameter data for pin ``i`` can be found at location ``i`` in the parameter array, e.g., ``Block.p.linPowByPin[i]``. Parameters like ``Block.p.pinMgFluxes`` may be higher dimensional, storing multi-group flux for each pin. In this case, the parameter data array has shape ``(nPins, nGroups)`` such that ``Block.p.pinMgFluxes[i, g]`` has the group ``g`` flux in pin ``i``, found at ``Block.getPinCoordinates()[i]``. Block rotation ============== .. 
warning:: Rotation is currently only supported for hexagonal blocks Using the logic from the previous section on pin parameter data, it may be useful to know how rotating a block changes the data stored on that block. Spatial locators ---------------- First, rotating a block will update the ``spatialLocator`` attribute on every child of the block. For objects defined at the center of the block, they will still be located at the center. Objects with a ``MultiIndexLocation`` will have new locations such that ``spatialLocator[i]`` will be consistent before and after rotation:: >>> import math >>> # zeroth location is the origin so pick a location that >>> # changes through rotation >>> fuel.spatialLocator[1] <IndexLocation @ (1,0,0)> >>> fuel.spatialLocator[1].getLocalCoordinates() array([0.4444 , 0.76972338, 0. ]) >>> fuelBlock.rotate(math.radians(60)) >>> fuel.spatialLocator[1] <IndexLocation @ (0,1,0)> >>> fuel.spatialLocator[1].getLocalCoordinates() array([-0.4444 , 0.76972338, 0. ]) Because this sub-block grid is a corners up hex grid, to tightly fit inside the flats up hex block, one rotation from the north east location, ``(1,0,0)``, reflects this pin across the y-axis. Pin parameters -------------- Parameter data that are defined on children of the block are not updated. Therefore data for pin ``i`` will be found in e.g., ``Block.p.pinMgFluxes[i]`` before and after rotation. Corners and edges ----------------- Parameters defined on the edges and corners of the block, i.e., those with :attr:`armi.reactor.parameters.ParamLocation.CORNERS` and :attr:`~armi.reactor.parameters.ParamLocation.EDGES` will be shuffled in place to reflect the new rotation. For hexagonal blocks, these parameters should have six entries, e.g., one value for each corner, starting at the upper right and moving counter clockwise. 
Let's assign some fake data to our fuel block from above and see what happens:: >>> import numpy as np >>> fuelBlock.p.cornerFastFlux = np.arange(6, dtype=float) >>> fuelBlock.p.cornerFastFlux array([0., 1., 2., 3., 4., 5.]) >>> # Two clockwise rotations of 60 degrees >>> fuelBlock.rotate(math.radians(-120)) >>> fuelBlock.p.cornerFastFlux array([2., 3., 4., 5., 0., 1.]) Visually, the upper right corner, number ``0``, has been rotated to the lower right corner, number ``4``. And the corner ``2``, the leftmost corner, has been moved to corner ``0``, the upper right corner. Other rotated parameters ------------------------ Other parameters may be updated to reflect some geometric state. The second position of ``Block.p.orientation`` reflects the cumulative rotation around the z-axis and is updated through rotation. Displacement parameters like ``Block.p.displacementX`` are updated as the displacement vector rotates through space. ================================================ FILE: doc/user/symmetry_handling.rst ================================================ ***************** Symmetry Handling ***************** This section will describe how partial core symmetry is handled in ARMI. Introduction ============ A partial core may be specified in the blueprints file using the ``symmetry`` attribute, as shown below. .. code:: yaml grids: core: geom: hex symmetry: third periodic lattice map: | F F F F F C F Specifying a core this way is useful for saving computation time, so long as the core state being modeled is truly symmetric. Because of this, assemblies and blocks have a ``symmetryFactor`` attribute that is used to track how much of the object is present in the currently modeled core. For example, the central assembly (labeled "C") in the core lattice definition above would have a symmetry factor of 3, representing that only 1/3rd of the assembly is in the core model. The blocks within that assembly would have the same symmetry factor. 
Reactors do not have symmetry factors, Cores have symmetry factors but no core parameters are adjusted due to symmetry currently, and Components always have a symmetry factor of 1. That is, only parameters for Assembly and Block objects are adjusted for symmetry. Symmetry-Aware Operations ========================= Because some assemblies may be partially in a partial core (e.g. the central assembly in a 1/3rd hex core) certain core and assembly operations must adjust parameters to maintain accurate bookkeeping. The third core hex converter methods :py:meth:`convert <armi.reactor.converters.geometryConverters.ThirdCoreHexToFullCoreChanger.convert>` and :py:meth:`restorePreviousGeometry <armi.reactor.converters.geometryConverters.ThirdCoreHexToFullCoreChanger.restorePreviousGeometry>` are both core-level operations that account for symmetry when calculating the values of parameters on assemblies and blocks in the converted core. On an assembly level, the assembly method :py:meth:`moveTo <armi.reactor.assemblies.Assembly.moveTo>` adjusts parameters as necessary when moving an assembly between locations with differing symmetry factors. Parameters Adjusted With Symmetry ================================= Only some parameters need to be adjusted with symmetry. A parameter must be on either an Assembly or Block object and have the flag ``VOLUME_INTEGRATED`` to be adjusted in the operations listed in the previous section. ================================================ FILE: doc/user/user_install.rst ================================================ ************ Installation ************ This section will guide you through installing the ARMI Framework on your machine. Prerequisites ============= These instructions target users with some software development knowledge. In particular, we assume familiarity with `Python <https://www.python.org/>`__, `virtual environments <https://docs.python.org/3/tutorial/venv.html>`_, and `Git <https://git-scm.com/>`_. 
You must have the following installed before proceeding: * `Python <https://www.python.org/downloads/>`__ version 3.9 or newer. .. admonition:: The right Python command Python 2 and Python 3 often co-exist on the same system. Whether the ``python`` command refers to Python 2 or 3 depends on operating system and configuration. Under some circumstances ``python3`` or ``pip3`` will need to be used in place of ``python`` or ``pip`` to target the correct version. You can verify your version by running ``python -VV``. You can also refer to the Python executable with a full path. You also likely need the following for interacting with the source code repository: * `Git <https://git-scm.com/>`_ Preparing a Virtual Environment =============================== While not *technically* required, we highly recommend installing ARMI into a `virtual environment <https://docs.python.org/3/library/venv.html>`_ to assist in dependency management. In short, virtual environments are a mechanism by which a Python user can maintain separate sets of Python packages for various applications on the same machine. This prevents dependencies from various tools conflicting with one another. ARMI has a lot of requirements and may conflict with other libraries on your system unless you do this step. Start a terminal and navigate to the directory you'd like to install ARMI into. To create a new virtual environment, use a command like:: $ python -m venv armi-venv The result is a folder named ``armi-venv``, which contains a minimal set of Python packages, and a set of scripts for activating and deactivating that environment. To activate the environment, invoke the appropriate script. On Windows:: $ armi-venv\Scripts\activate.bat Or on Linux:: $ source armi-venv/bin/activate .. note:: You'll have to activate the venv every time you open a new command line. Many people set up scripts to activate this automatically. 
If you will be running ARMI in parallel over MPI, you must also install the ``mpi4py`` Python library. On Linux, doing so will require some MPI development libraries (e.g. ``sudo apt install libopenmpi-dev``). Getting the code ================ Choose one of the following two installation methods depending on your needs. Step 0: Update PIP ------------------ If you are using an older version of Python, say 3.9 or older, you should ensure that you are using a version of PIP that is at least 22.1:: (armi-venv) $ pip install "pip>=22.1" or it will be enough to just do:: (armi-venv) $ pip install -U pip Option 1: Install as a library ------------------------------ If you plan on running ARMI without viewing or modifying source code, you may install it with ``pip``, which will automatically discover and install the dependencies. This is useful for quick evaluations or to use it as a dependency in another project:: (armi-venv) $ pip install https://github.com/terrapower/armi/archive/main.zip Option 2: Install as a repository (for developers) -------------------------------------------------- If you'd like to view or change the ARMI source code (common!), you need to clone the ARMI source and then install its dependencies. Clone the ARMI source code from the git repository with:: (armi-venv) $ git clone https://github.com/terrapower/armi .. tip:: If you plan to contribute to ARMI (please do!), you may want to use SSH keys and use ``git clone git@github.com:terrapower/armi.git``. Now install ARMI with all its dependencies:: (armi-venv) $ cd armi (armi-venv) $ pip install -e ".[test]" .. tip:: If you don't want to install ARMI into your venv, you will need to add the ARMI source location to your system's ``PYTHONPATH`` environment variable so that Python will be able to find the code when you import it from other directories. In Windows, click *Start* and type ``Edit Environmental Variable`` to adjust ``PYTHONPATH``. 
In Linux, add ``export PYTHONPATH=/path/to/armi/source`` in a user profile script (like ``.bashrc``). Verifying installation ---------------------- Check the installation status by running:: (armi-venv) $ armi or, equivalently:: (armi-venv) $ python -m armi If it worked, you should see the ARMI splash screen and no errors:: --------------------------------------------------- | _ ____ __ __ ___ | | / \ | _ \ | \/ | |_ _| | | / _ \ | |_) | | |\/| | | | | | / ___ \ | _ < | | | | | | | | /_/ \_\ |_| \_\ |_| |_| |___| | | Advanced Reactor Modeling Interface | --------------------------------------------------- If it works, congrats! So far so good. Optional Setup ============== This subsection provides setup for optional items. GUI input --------- To use the :py:mod:`graphical core-map editor <armi.utils.gridEditor>` you will need to also install `wxPython <https://wxpython.org/pages/downloads/index.html>`_. This is not installed by default during armi installation because it can cause installation complexities on some platforms. In any case, all GUI dependencies can be installed by:: (armi-venv) $ pip install armi[grids] GUI output ---------- ARMI can write VTK and XDMF output files which can be viewed in tools such as `ParaView <https://www.paraview.org/>`_ and `VisIT <https://wci.llnl.gov/simulation/computer-codes/visit>`_. Download and install those tools from their websites. ================================================ FILE: pyproject.toml ================================================ # Copyright 2023 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. ####################################################################### # GENERAL PYTHON CONFIG # ####################################################################### [build-system] requires = ["setuptools>=61.2"] build-backend = "setuptools.build_meta" [project] name = "armi" version = "0.7.0" description = "An open-source nuclear reactor analysis automation framework that helps design teams increase efficiency and quality." license-files = ["LICENSE.md", "AUTHORS"] requires-python = ">3.8" readme = "README.rst" authors = [ { name="TerraPower, LLC", email="armi-devs@terrapower.com" }, ] dependencies = [ "coverage>=7.2.0", # Code coverage tool. Sadly baked into every Case. "h5py>=3.0,<=3.9 ; python_version < '3.11.0'", "h5py>=3.9 ; python_version >= '3.11.0'", # Needed because our database files are H5 format "matplotlib>=3.5.3,<3.8.0 ; python_version < '3.11.0'", "matplotlib>=3.5.3 ; python_version >= '3.11.0'", # Important plotting library "numpy>=1.21", # Important math library "ordered-set>=3.1.1", # A useful data structure "pluggy>=1.2.0", # Central tool behind the ARMI Plugin system "pyevtk>=1.2.0", # Handles binary VTK visualization files "ruamel.yaml>=0.19.1 ; python_version >= '3.11.0'", # Our foundational YAML library "ruamel.yaml<=0.17.21 ; python_version < '3.11.0'", # Our foundational YAML library "scipy>=1.7.0", # Used for curve-fitting and matrix math "sympy>=1.14", # Used to represent mathematical curves for material properties "toml>0.9.5", # Needed to parse the pyproject.toml file "voluptuous>=0.12.1", # Used to validate YAML data files "yamlize==0.7.1", # Custom YAML-to-object library ] classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", 
"Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Topic :: Scientific/Engineering :: Information Analysis", ] [project.urls] Homepage = "https://terrapower.github.io/armi/" Documentation = "https://terrapower.github.io/armi" Changelog = "https://github.com/terrapower/armi/releases" Repository = "https://github.com/terrapower/armi" "Bug Tracker" = "https://github.com/terrapower/armi/issues" [project.optional-dependencies] grids = ["wxpython==4.2.1"] memprof = ["psutil"] mpi = ["mpi4py"] test = [ "ipykernel>=6.0.0", # IPython Kernel (We run test notebooks from the doc tutorials.) "jupyter_client>=7.0.0", # Reference implementation of the Jupyter protocol "nbconvert>=7.0.0", # Converting Jupyter Notebooks to other formats "nbformat>=5.5.0", # Jupyter Notebook reader "pytest-cov>=4.0.0", # coverage plugin "pytest-xdist>=3.0.0", # To spread our tests over multiple CPUs "pytest>=7.0.0", # Our primary test tooling "ruff==0.9.7", # Linting and code formatting (version-pinned) ] docs = [ ###################################################################################### # These are more specified than usual, because Sphinx docs seem to be quite fragile. # # # # Officially, we build our docs with Python 3.13. 
# ###################################################################################### "docutils==0.21.2", # Needed by sphinx-rtd-theme "ipykernel>=6.0.0", # iPython kernel to run Jupyter notebooks "Jinja2==3.1.5", # Used in numpydoc and nbconvert "nbsphinx-link==1.3.1", # Adds Jupyter NBs to Sphinx source root "nbsphinx==0.9.6", # Parses Jupyter notebooks "pandoc", # Must be in the path (to convert file formats) "pylint", # Generates UML diagrams "pypdf==5.3.1", # Generating a single PDF file for the Sphinx documentation "setuptools", # needed for conf.py tooling "sphinx-data-viewer==0.1.5", "sphinx-gallery==0.13.0", # Builds an HTML version of a Python script and puts it into a gallery "sphinx-needs==4.2.0", # Requirements traceability matrices for QA "sphinx-rtd-theme==3.0.2", # Read-The-Docs theme for Sphinx "sphinx-test-reports==1.1.0", # sphinx-needs test reports in the STR "Sphinx==7.4.7", # central library used to build our docs "sphinxcontrib-apidoc==0.5.0", # More easily document our API "sphinxcontrib-applehelp==2.0.0", "sphinxcontrib-devhelp==2.0.0", "sphinxcontrib-htmlhelp==2.1.0", "sphinxcontrib-jquery==4.1", # Handle missing jquery errors "sphinxcontrib-jsmath==1.0.1", "sphinxcontrib-plantuml==0.30", # UML support in sphinx-needs "sphinxcontrib-qthelp==2.0.0", "sphinxcontrib-serializinghtml==2.0.0", "sphinxext-opengraph==0.9.1", # Generates OpenGraph metadata to make cards for social media "unittest-xml-reporting==3.2.0", # Allows us to generate junit XML test reports ] [project.scripts] armi = "armi.__main__:main" [tool.setuptools.packages] find = {} ####################################################################### # RUFF CONFIG # ####################################################################### [tool.ruff] # This is the exact version of Ruff we use. required-version = "0.9.7" # Assume Python 3.13 target-version = "py313" # Setting line-length to 120 line-length = 120 # Exclude a variety of commonly ignored directories. 
exclude = [ ".bzr", ".direnv", ".eggs", ".git", ".git-rewrite", ".hg", ".mypy_cache", ".nox", ".pants.d", ".pytype", ".ruff_cache", ".svn", ".tox", ".venv", "__pycache__", "__pypackages__", "_build", "buck-out", "build", "dist", "doc/tutorials/armi-example-app", "node_modules", "venv", ] [tool.ruff.lint] # Enable pycodestyle (E) and Pyflakes (F) codes by default. # D - NumPy docstring rules # I - Sorting imports # N801 - Class name should use CapWords convention # SIM - code simplification rules # TID - tidy imports select = ["D", "E", "F", "I", "N801", "SIM", "TID"] # Ruff rules we ignore (for now) because they are not 100% automatable # # D100 - Missing docstring in public module # D101 - Missing docstring in public class # D102 - Missing docstring in public method # D103 - Missing docstring in public function # D106 - Missing docstring in public nested class # D401 - First line of docstring should be in imperative mood # D404 - First word of the docstring should not be "This" # SIM102 - Use a single if statement instead of nested if statements # SIM105 - Use contextlib.suppress({exception}) instead of try-except-pass # SIM108 - Use ternary operator {contents} instead of if-else-block # SIM114 - Combine if branches using logical or operator # SIM115 - Use context handler for opening files # SIM117 - Use a single with statement with multiple contexts instead of nested with statements # Ruff rules we ignore because we don't want them # # D105 - we don't need to document well-known magic methods # D205 - 1 blank line required between summary line and description # E731 - we can use lambdas however we want # RUF100 - no unused noqa statements (not consistent enough yet) # SIM118 - this does not work where we overload the .keys() method # ignore = ["D100", "D101", "D102", "D103", "D105", "D106", "D205", "D401", "D404", "E731", "RUF100", "SIM102", "SIM105", "SIM108", "SIM114", "SIM115", "SIM117", "SIM118"] [tool.ruff.lint.per-file-ignores] # D1XX - enforces writing 
docstrings # E741 - ambiguous variable name # N - We have our own naming conventions for unit tests. # SLF001 - private member access "*/tests/*" = ["D1", "E741", "N", "SLF001"] "doc/gallery-src/*" = ["D400"] [tool.ruff.lint.flake8-tidy-imports] ban-relative-imports = "all" [tool.ruff.lint.pydocstyle] convention = "numpy" [tool.ruff.format] docstring-code-format = true docstring-code-line-length = 120 ####################################################################### # PYTEST CONFIG # ####################################################################### [tool.pytest.ini_options] python_files = "test_*.py" python_functions = "nothing matches this pattern" addopts = "--durations=30 --tb=native" filterwarnings = [ "ignore: the matrix subclass is not the recommended way:PendingDeprecationWarning", ] [tool.coverage.run] exclude_also = [ "armi/cli/gridGui.py", "armi/utils/gridEditor.py", "armi/utils/tests/test_gridGui.py", "venv/", ] source = ["armi"] parallel = true # Change default .coverage file to something that doesn't have a dot because some Windows services can't handle dots. 
data_file = "coverage_results.cov" [tool.coverage.report] # Regexes for lines to exclude from consideration omit = [ "*/tests/*", "armi/cli/gridGui.py", "armi/utils/gridEditor.py", ] exclude_also = [ # Don't complain about missing debug-only code: "def __repr__", "if self\\.debug", # Don't complain if non-runnable code isn't run: "if __name__ == .__main__.:", # Don't complain about missing type checking-only code: "if TYPE_CHECKING", # Don't complain if tests don't hit defensive assertion code: "except ImportError", "pass", "raise AssertionError", "raise KeyboardInterrupt", "raise NotImplementedError", ] ignore_errors = true ####################################################################### # DATA FILES TO BE INCLUDED WITH THE PROJECT # ####################################################################### [tool.setuptools.package-data] armi = [ "bookkeeping/tests/armiRunSmallest-A0000-aHist-ref.txt", "matProps/tests/invalidTestFiles/*", "matProps/tests/testDir1/*", "matProps/tests/testDir2/*", "matProps/tests/testDir3/*", "matProps/tests/testDir4/*", "matProps/tests/testMaterialsData/*", "nuclearDataIO/cccc/tests/fixtures/labels.ascii", "nuclearDataIO/cccc/tests/fixtures/labels.binary", "nuclearDataIO/cccc/tests/fixtures/mc2v3.dlayxs", "nuclearDataIO/cccc/tests/fixtures/simple_cartesian.pwdint", "nuclearDataIO/cccc/tests/fixtures/simple_cartesian.rtflux", "nuclearDataIO/cccc/tests/fixtures/simple_cartesian.rzflux", "nuclearDataIO/cccc/tests/fixtures/simple_hexz.dif3d", "nuclearDataIO/cccc/tests/fixtures/simple_hexz.geodst", "nuclearDataIO/cccc/tests/fixtures/simple_hexz.nhflux", "nuclearDataIO/tests/fixtures/AA.gamiso", "nuclearDataIO/tests/fixtures/AA.pmatrx", "nuclearDataIO/tests/fixtures/AB.gamiso", "nuclearDataIO/tests/fixtures/AB.pmatrx", "nuclearDataIO/tests/fixtures/combined-AA-AB.gamiso", "nuclearDataIO/tests/fixtures/combined-AA-AB.isotxs", "nuclearDataIO/tests/fixtures/combined-AA-AB.pmatrx", 
"nuclearDataIO/tests/fixtures/combined-and-lumped-AA-AB.gamiso", "nuclearDataIO/tests/fixtures/combined-and-lumped-AA-AB.isotxs", "nuclearDataIO/tests/fixtures/combined-and-lumped-AA-AB.pmatrx", "nuclearDataIO/tests/fixtures/ISOAA", "nuclearDataIO/tests/fixtures/ISOAB", "nuclearDataIO/tests/fixtures/mc2v3-AA.flux_ufg", "nuclearDataIO/tests/fixtures/mc2v3-AA.gamiso", "nuclearDataIO/tests/fixtures/mc2v3-AA.isotxs", "nuclearDataIO/tests/fixtures/mc2v3-AA.pmatrx", "nuclearDataIO/tests/fixtures/mc2v3-AB.gamiso", "nuclearDataIO/tests/fixtures/mc2v3-AB.isotxs", "nuclearDataIO/tests/fixtures/mc2v3-AB.pmatrx", "nuclearDataIO/tests/library-file-generation", "nuclearDataIO/tests/library-file-generation/combine-AA-AB.inp", "nuclearDataIO/tests/library-file-generation/combine-and-lump-AA-AB.inp", "nuclearDataIO/tests/library-file-generation/mc2v2-dlayxs.inp", "nuclearDataIO/tests/library-file-generation/mc2v3-AA.inp", "nuclearDataIO/tests/library-file-generation/mc2v3-AB.inp", "nuclearDataIO/tests/library-file-generation/mc2v3-dlayxs.inp", "nuclearDataIO/tests/simple_hexz.inp", "physics/neutronics/tests/ISOXA", "physics/neutronics/tests/rzmflxYA", "resources/burn-chain.yaml", "resources/elements.dat", "resources/mcc-nuclides.yaml", "resources/nuclides.dat", "resources/referenceFissionProducts.dat", "testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml", "testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml", "testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py", "testing/reactors/anl-afci-177/anl-afci-177.yaml", "testing/reactors/c5g7/c5g7-blueprints.yaml", "testing/reactors/c5g7/c5g7-settings.yaml", "testing/reactors/godiva/godiva-blueprints.yaml", "testing/reactors/godiva/godiva.armi.unittest.yaml", "testing/reactors/smallHexReactor/smallHexReactor-bp.yaml", "testing/reactors/smallHexReactor/smallHexReactor.yaml", "testing/resources/armiRun-SHUFFLES.txt", "testing/resources/armiRun-SHUFFLES.yaml", "testing/resources/COMPXS.ascii", 
"tests/1DslabXSByCompTest.yaml", "tests/armiRun.yaml", "tests/detailedAxialExpansion/armiRun.yaml", "tests/detailedAxialExpansion/refSmallCoreGrid.yaml", "tests/detailedAxialExpansion/refSmallReactor.yaml", "tests/detailedAxialExpansion/refSmallReactorBase.yaml", "tests/ISOAA", "tests/refSmallCartesian.yaml", "tests/refSmallCoreGrid.yaml", "tests/refSmallReactor.yaml", "tests/refSmallReactorBase.yaml", "tests/refSmallSfpGrid.yaml", "tests/refTestCartesian.yaml", "tests/smallestTestReactor/armiRunSmallest.yaml", "tests/smallestTestReactor/refOneBlockReactor.yaml", "tests/smallestTestReactor/refSmallestReactor.yaml", "tests/tutorials", "tests/tutorials/data_model.ipynb", "tests/zpprTest.yaml", "tests/zpprTestGeom.yaml", ]